// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/ia32/codegen-ia32.h"

#if V8_TARGET_ARCH_IA32

#include "src/codegen.h"
#include "src/heap/heap.h"
#include "src/macro-assembler.h"

namespace v8 {
namespace internal {


// -------------------------------------------------------------------------
// Platform-specific RuntimeCallHelper functions.

void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
  masm->EnterFrame(StackFrame::INTERNAL);
  DCHECK(!masm->has_frame());
  masm->set_has_frame(true);
}


void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
  masm->LeaveFrame(StackFrame::INTERNAL);
  DCHECK(masm->has_frame());
  masm->set_has_frame(false);
}


#define __ masm.


UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
  size_t actual_size;
  // Allocate buffer in executable space.
  byte* buffer =
      static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
  if (buffer == nullptr) return nullptr;
  MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
                      CodeObjectRequired::kNo);
  // esp[1 * kPointerSize]: raw double input
  // esp[0 * kPointerSize]: return address
  // Move double input into registers.
  {
    __ movsd(xmm0, Operand(esp, 1 * kPointerSize));
    __ sqrtsd(xmm0, xmm0);
    __ movsd(Operand(esp, 1 * kPointerSize), xmm0);
    // Load result into floating point register as return value.
    __ fld_d(Operand(esp, 1 * kPointerSize));
    __ Ret();
  }

  CodeDesc desc;
  masm.GetCode(&desc);
  DCHECK(!RelocInfo::RequiresRelocation(desc));

  Assembler::FlushICache(isolate, buffer, actual_size);
  base::OS::ProtectCode(buffer, actual_size);
  return FUNCTION_CAST<UnaryMathFunctionWithIsolate>(buffer);
}
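
// A rough usage sketch (assuming the UnaryMathFunctionWithIsolate typedef
// from codegen.h is roughly double (*)(double x, Isolate* isolate)):
//
//   UnaryMathFunctionWithIsolate fast_sqrt = CreateSqrtFunction(isolate);
//   double root = (fast_sqrt != nullptr) ? fast_sqrt(2.0, isolate)
//                                        : std::sqrt(2.0);  // allocation failed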


// Helper functions for CreateMemMoveFunction.
#undef __
#define __ ACCESS_MASM(masm)

enum Direction { FORWARD, BACKWARD };
enum Alignment { MOVE_ALIGNED, MOVE_UNALIGNED };

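// What MemMoveEmitMainLoop emits, as a rough C-style sketch (ignoring the
// direction and the aligned/unaligned load distinction):
//
//   while (loop_count-- > 0) { copy 64 bytes via xmm0..xmm3; }
//   if (count & 0x20) { copy 32 bytes via xmm0..xmm1; }
//   if (count & 0x10) { copy 16 bytes via xmm0; }
//   // then jump to move_last_15; at most count & 0xF bytes remain.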
// Expects registers:
// esi - source, aligned if alignment == MOVE_ALIGNED
// edi - destination, always aligned
// ecx - count (copy size in bytes)
// edx - loop count (number of 64 byte chunks)
void MemMoveEmitMainLoop(MacroAssembler* masm,
                         Label* move_last_15,
                         Direction direction,
                         Alignment alignment) {
  Register src = esi;
  Register dst = edi;
  Register count = ecx;
  Register loop_count = edx;
  Label loop, move_last_31, move_last_63;
  __ cmp(loop_count, 0);
  __ j(equal, &move_last_63);
  __ bind(&loop);
  // Main loop. Copy in 64 byte chunks.
  if (direction == BACKWARD) __ sub(src, Immediate(0x40));
  __ movdq(alignment == MOVE_ALIGNED, xmm0, Operand(src, 0x00));
  __ movdq(alignment == MOVE_ALIGNED, xmm1, Operand(src, 0x10));
  __ movdq(alignment == MOVE_ALIGNED, xmm2, Operand(src, 0x20));
  __ movdq(alignment == MOVE_ALIGNED, xmm3, Operand(src, 0x30));
  if (direction == FORWARD) __ add(src, Immediate(0x40));
  if (direction == BACKWARD) __ sub(dst, Immediate(0x40));
  __ movdqa(Operand(dst, 0x00), xmm0);
  __ movdqa(Operand(dst, 0x10), xmm1);
  __ movdqa(Operand(dst, 0x20), xmm2);
  __ movdqa(Operand(dst, 0x30), xmm3);
  if (direction == FORWARD) __ add(dst, Immediate(0x40));
  __ dec(loop_count);
  __ j(not_zero, &loop);
  // At most 63 bytes left to copy.
  __ bind(&move_last_63);
  __ test(count, Immediate(0x20));
  __ j(zero, &move_last_31);
  if (direction == BACKWARD) __ sub(src, Immediate(0x20));
  __ movdq(alignment == MOVE_ALIGNED, xmm0, Operand(src, 0x00));
  __ movdq(alignment == MOVE_ALIGNED, xmm1, Operand(src, 0x10));
  if (direction == FORWARD) __ add(src, Immediate(0x20));
  if (direction == BACKWARD) __ sub(dst, Immediate(0x20));
  __ movdqa(Operand(dst, 0x00), xmm0);
  __ movdqa(Operand(dst, 0x10), xmm1);
  if (direction == FORWARD) __ add(dst, Immediate(0x20));
  // At most 31 bytes left to copy.
  __ bind(&move_last_31);
  __ test(count, Immediate(0x10));
  __ j(zero, move_last_15);
  if (direction == BACKWARD) __ sub(src, Immediate(0x10));
  __ movdq(alignment == MOVE_ALIGNED, xmm0, Operand(src, 0));
  if (direction == FORWARD) __ add(src, Immediate(0x10));
  if (direction == BACKWARD) __ sub(dst, Immediate(0x10));
  __ movdqa(Operand(dst, 0), xmm0);
  if (direction == FORWARD) __ add(dst, Immediate(0x10));
}


void MemMoveEmitPopAndReturn(MacroAssembler* masm) {
  __ pop(esi);
  __ pop(edi);
  __ ret(0);
}


#undef __
#define __ masm.


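// LabelConverter turns a Label's position into an absolute 32-bit address
// inside the fixed code buffer. This is only safe because the buffer is
// unmovable and outside the V8 heap; the jump tables below store these
// absolute addresses directly.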
class LabelConverter {
 public:
  explicit LabelConverter(byte* buffer) : buffer_(buffer) {}
  int32_t address(Label* l) const {
    return reinterpret_cast<int32_t>(buffer_) + l->pos();
  }
 private:
  byte* buffer_;
};


MemMoveFunction CreateMemMoveFunction(Isolate* isolate) {
  size_t actual_size;
  // Allocate buffer in executable space.
  byte* buffer =
      static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
  if (buffer == nullptr) return nullptr;
  MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
                      CodeObjectRequired::kNo);
  LabelConverter conv(buffer);

  // Generated code is put into a fixed, unmovable buffer, and not into
  // the V8 heap. We can't, and don't, refer to any relocatable addresses
  // (e.g. the JavaScript nan-object).

  // The 32-bit cdecl calling convention passes all arguments on the stack.

  // Stack layout:
  // esp[12]: Third argument, size.
  // esp[8]: Second argument, source pointer.
  // esp[4]: First argument, destination pointer.
  // esp[0]: return address

  const int kDestinationOffset = 1 * kPointerSize;
  const int kSourceOffset = 2 * kPointerSize;
  const int kSizeOffset = 3 * kPointerSize;

  // When copying up to this many bytes, use special "small" handlers.
  const size_t kSmallCopySize = 8;
  // When copying up to this many bytes, use special "medium" handlers.
  const size_t kMediumCopySize = 63;
  // When the non-overlapping region of src and dst is smaller than this,
  // use a more careful implementation (slightly slower).
  const size_t kMinMoveDistance = 16;
  // Note that these values are dictated by the implementation below; do not
  // just change them and hope things will work!
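
  // Dispatch overview (derived from the code below):
  //   count <= 8        -> small handlers, indexed by the exact byte count;
  //   9 <= count <= 63  -> medium handlers, indexed by (count - 1) >> 4;
  //   count >= 64       -> SSE2 main loop, forward or backward depending on
  //                        which pointer is higher, with separate paths for
  //                        aligned and unaligned sources.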

  int stack_offset = 0;  // Update if we change the stack height.

  Label backward, backward_much_overlap;
  Label forward_much_overlap, small_size, medium_size, pop_and_return;
  __ push(edi);
  __ push(esi);
  stack_offset += 2 * kPointerSize;
  Register dst = edi;
  Register src = esi;
  Register count = ecx;
  Register loop_count = edx;
  __ mov(dst, Operand(esp, stack_offset + kDestinationOffset));
  __ mov(src, Operand(esp, stack_offset + kSourceOffset));
  __ mov(count, Operand(esp, stack_offset + kSizeOffset));

  __ cmp(dst, src);
  __ j(equal, &pop_and_return);

  __ prefetch(Operand(src, 0), 1);
  __ cmp(count, kSmallCopySize);
  __ j(below_equal, &small_size);
  __ cmp(count, kMediumCopySize);
  __ j(below_equal, &medium_size);
  __ cmp(dst, src);
  __ j(above, &backward);

  {
    // |dst| is a lower address than |src|. Copy front-to-back.
    Label unaligned_source, move_last_15, skip_last_move;
    __ mov(eax, src);
    __ sub(eax, dst);
    __ cmp(eax, kMinMoveDistance);
    __ j(below, &forward_much_overlap);
    // Copy first 16 bytes.
    __ movdqu(xmm0, Operand(src, 0));
    __ movdqu(Operand(dst, 0), xmm0);
    // Determine distance to alignment: 16 - (dst & 0xF).
    __ mov(edx, dst);
    __ and_(edx, 0xF);
    __ neg(edx);
    __ add(edx, Immediate(16));
    __ add(dst, edx);
    __ add(src, edx);
    __ sub(count, edx);
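    // Advancing by edx (1..16 bytes) cannot skip data: the first 16 bytes
    // were already copied unaligned above, so any bytes stepped over here
    // have been written to dst.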
    // dst is now aligned. Main copy loop.
    __ mov(loop_count, count);
    __ shr(loop_count, 6);
    // Check if src is also aligned.
    __ test(src, Immediate(0xF));
    __ j(not_zero, &unaligned_source);
    // Copy loop for aligned source and destination.
    MemMoveEmitMainLoop(&masm, &move_last_15, FORWARD, MOVE_ALIGNED);
    // At most 15 bytes to copy. Copy 16 bytes at end of string.
    __ bind(&move_last_15);
    __ and_(count, 0xF);
    __ j(zero, &skip_last_move, Label::kNear);
    __ movdqu(xmm0, Operand(src, count, times_1, -0x10));
    __ movdqu(Operand(dst, count, times_1, -0x10), xmm0);
    __ bind(&skip_last_move);
    MemMoveEmitPopAndReturn(&masm);

    // Copy loop for unaligned source and aligned destination.
    __ bind(&unaligned_source);
    MemMoveEmitMainLoop(&masm, &move_last_15, FORWARD, MOVE_UNALIGNED);
    __ jmp(&move_last_15);

    // Less than kMinMoveDistance offset between dst and src.
    Label loop_until_aligned, last_15_much_overlap;
    __ bind(&loop_until_aligned);
    __ mov_b(eax, Operand(src, 0));
    __ inc(src);
    __ mov_b(Operand(dst, 0), eax);
    __ inc(dst);
    __ dec(count);
    __ bind(&forward_much_overlap);  // Entry point into this block.
    __ test(dst, Immediate(0xF));
    __ j(not_zero, &loop_until_aligned);
    // dst is now aligned, src can't be. Main copy loop.
    __ mov(loop_count, count);
    __ shr(loop_count, 6);
    MemMoveEmitMainLoop(&masm, &last_15_much_overlap,
                        FORWARD, MOVE_UNALIGNED);
    __ bind(&last_15_much_overlap);
    __ and_(count, 0xF);
    __ j(zero, &pop_and_return);
    __ cmp(count, kSmallCopySize);
    __ j(below_equal, &small_size);
    __ jmp(&medium_size);
  }

  {
    // |dst| is a higher address than |src|. Copy backwards.
    Label unaligned_source, move_first_15, skip_last_move;
    __ bind(&backward);
    // |dst| and |src| always point to the end of what's left to copy.
    __ add(dst, count);
    __ add(src, count);
    __ mov(eax, dst);
    __ sub(eax, src);
    __ cmp(eax, kMinMoveDistance);
    __ j(below, &backward_much_overlap);
    // Copy last 16 bytes.
    __ movdqu(xmm0, Operand(src, -0x10));
    __ movdqu(Operand(dst, -0x10), xmm0);
    // Find distance to alignment: dst & 0xF.
    __ mov(edx, dst);
    __ and_(edx, 0xF);
    __ sub(dst, edx);
    __ sub(src, edx);
    __ sub(count, edx);
    // dst is now aligned. Main copy loop.
    __ mov(loop_count, count);
    __ shr(loop_count, 6);
    // Check if src is also aligned.
    __ test(src, Immediate(0xF));
    __ j(not_zero, &unaligned_source);
    // Copy loop for aligned source and destination.
    MemMoveEmitMainLoop(&masm, &move_first_15, BACKWARD, MOVE_ALIGNED);
    // At most 15 bytes to copy. Copy 16 bytes at beginning of string.
    __ bind(&move_first_15);
    __ and_(count, 0xF);
    __ j(zero, &skip_last_move, Label::kNear);
    __ sub(src, count);
    __ sub(dst, count);
    __ movdqu(xmm0, Operand(src, 0));
    __ movdqu(Operand(dst, 0), xmm0);
    __ bind(&skip_last_move);
    MemMoveEmitPopAndReturn(&masm);

    // Copy loop for unaligned source and aligned destination.
    __ bind(&unaligned_source);
    MemMoveEmitMainLoop(&masm, &move_first_15, BACKWARD, MOVE_UNALIGNED);
    __ jmp(&move_first_15);

    // Less than kMinMoveDistance offset between dst and src.
    Label loop_until_aligned, first_15_much_overlap;
    __ bind(&loop_until_aligned);
    __ dec(src);
    __ dec(dst);
    __ mov_b(eax, Operand(src, 0));
    __ mov_b(Operand(dst, 0), eax);
    __ dec(count);
    __ bind(&backward_much_overlap);  // Entry point into this block.
    __ test(dst, Immediate(0xF));
    __ j(not_zero, &loop_until_aligned);
    // dst is now aligned, src can't be. Main copy loop.
    __ mov(loop_count, count);
    __ shr(loop_count, 6);
    MemMoveEmitMainLoop(&masm, &first_15_much_overlap,
                        BACKWARD, MOVE_UNALIGNED);
    __ bind(&first_15_much_overlap);
    __ and_(count, 0xF);
    __ j(zero, &pop_and_return);
    // Small/medium handlers expect dst/src to point to the beginning.
    __ sub(dst, count);
    __ sub(src, count);
    __ cmp(count, kSmallCopySize);
    __ j(below_equal, &small_size);
    __ jmp(&medium_size);
  }
  {
    // Special handlers for 9 <= copy_size < 64. No assumptions about
    // alignment or move distance, so all reads must be unaligned and
    // must happen before any writes.
    Label medium_handlers, f9_16, f17_32, f33_48, f49_63;

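    // Each handler loads everything first, then stores, using a pair of
    // overlapping accesses to cover the tail. E.g. for 9..16 bytes, the two
    // 8-byte loads below may overlap in the middle, but together they cover
    // the whole range regardless of the exact count.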
    __ bind(&f9_16);
    __ movsd(xmm0, Operand(src, 0));
    __ movsd(xmm1, Operand(src, count, times_1, -8));
    __ movsd(Operand(dst, 0), xmm0);
    __ movsd(Operand(dst, count, times_1, -8), xmm1);
    MemMoveEmitPopAndReturn(&masm);

    __ bind(&f17_32);
    __ movdqu(xmm0, Operand(src, 0));
    __ movdqu(xmm1, Operand(src, count, times_1, -0x10));
    __ movdqu(Operand(dst, 0x00), xmm0);
    __ movdqu(Operand(dst, count, times_1, -0x10), xmm1);
    MemMoveEmitPopAndReturn(&masm);

    __ bind(&f33_48);
    __ movdqu(xmm0, Operand(src, 0x00));
    __ movdqu(xmm1, Operand(src, 0x10));
    __ movdqu(xmm2, Operand(src, count, times_1, -0x10));
    __ movdqu(Operand(dst, 0x00), xmm0);
    __ movdqu(Operand(dst, 0x10), xmm1);
    __ movdqu(Operand(dst, count, times_1, -0x10), xmm2);
    MemMoveEmitPopAndReturn(&masm);

    __ bind(&f49_63);
    __ movdqu(xmm0, Operand(src, 0x00));
    __ movdqu(xmm1, Operand(src, 0x10));
    __ movdqu(xmm2, Operand(src, 0x20));
    __ movdqu(xmm3, Operand(src, count, times_1, -0x10));
    __ movdqu(Operand(dst, 0x00), xmm0);
    __ movdqu(Operand(dst, 0x10), xmm1);
    __ movdqu(Operand(dst, 0x20), xmm2);
    __ movdqu(Operand(dst, count, times_1, -0x10), xmm3);
    MemMoveEmitPopAndReturn(&masm);

    __ bind(&medium_handlers);
    __ dd(conv.address(&f9_16));
    __ dd(conv.address(&f17_32));
    __ dd(conv.address(&f33_48));
    __ dd(conv.address(&f49_63));

    __ bind(&medium_size);  // Entry point into this block.
    __ mov(eax, count);
    __ dec(eax);
    __ shr(eax, 4);
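    // (count - 1) >> 4 maps 9..16 -> 0, 17..32 -> 1, 33..48 -> 2, and
    // 49..63 -> 3, selecting one of the four medium handlers above.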
    if (FLAG_debug_code) {
      Label ok;
      __ cmp(eax, 3);
      __ j(below_equal, &ok);
      __ int3();
      __ bind(&ok);
    }
    __ mov(eax, Operand(eax, times_4, conv.address(&medium_handlers)));
    __ jmp(eax);
  }
  {
    // Specialized copiers for copy_size <= 8 bytes.
    Label small_handlers, f0, f1, f2, f3, f4, f5_8;
    __ bind(&f0);
    MemMoveEmitPopAndReturn(&masm);

    __ bind(&f1);
    __ mov_b(eax, Operand(src, 0));
    __ mov_b(Operand(dst, 0), eax);
    MemMoveEmitPopAndReturn(&masm);

    __ bind(&f2);
    __ mov_w(eax, Operand(src, 0));
    __ mov_w(Operand(dst, 0), eax);
    MemMoveEmitPopAndReturn(&masm);

    __ bind(&f3);
    __ mov_w(eax, Operand(src, 0));
    __ mov_b(edx, Operand(src, 2));
    __ mov_w(Operand(dst, 0), eax);
    __ mov_b(Operand(dst, 2), edx);
    MemMoveEmitPopAndReturn(&masm);

    __ bind(&f4);
    __ mov(eax, Operand(src, 0));
    __ mov(Operand(dst, 0), eax);
    MemMoveEmitPopAndReturn(&masm);

    __ bind(&f5_8);
    __ mov(eax, Operand(src, 0));
    __ mov(edx, Operand(src, count, times_1, -4));
    __ mov(Operand(dst, 0), eax);
    __ mov(Operand(dst, count, times_1, -4), edx);
    MemMoveEmitPopAndReturn(&masm);

    __ bind(&small_handlers);
    __ dd(conv.address(&f0));
    __ dd(conv.address(&f1));
    __ dd(conv.address(&f2));
    __ dd(conv.address(&f3));
    __ dd(conv.address(&f4));
    __ dd(conv.address(&f5_8));
    __ dd(conv.address(&f5_8));
    __ dd(conv.address(&f5_8));
    __ dd(conv.address(&f5_8));
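    // The table has 9 entries, indexed directly by count (0..8); counts
    // 5 through 8 all share the f5_8 handler, which uses two overlapping
    // 4-byte moves.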

    __ bind(&small_size);  // Entry point into this block.
    if (FLAG_debug_code) {
      Label ok;
      __ cmp(count, 8);
      __ j(below_equal, &ok);
      __ int3();
      __ bind(&ok);
    }
    __ mov(eax, Operand(count, times_4, conv.address(&small_handlers)));
    __ jmp(eax);
  }

  __ bind(&pop_and_return);
  MemMoveEmitPopAndReturn(&masm);

  CodeDesc desc;
  masm.GetCode(&desc);
  DCHECK(!RelocInfo::RequiresRelocation(desc));
  Assembler::FlushICache(isolate, buffer, actual_size);
  base::OS::ProtectCode(buffer, actual_size);
  // TODO(jkummerow): It would be nice to register this code creation event
  // with the PROFILE / GDBJIT system.
  return FUNCTION_CAST<MemMoveFunction>(buffer);
}
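
// A rough usage sketch (assuming the MemMoveFunction typedef from
// codegen.h is roughly void (*)(void* dest, const void* src, size_t size)):
//
//   MemMoveFunction fast_move = CreateMemMoveFunction(isolate);
//   if (fast_move != nullptr) {
//     fast_move(dst, src, n);  // handles overlapping regions, like memmove
//   } else {
//     memmove(dst, src, n);
//   }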


#undef __

// -------------------------------------------------------------------------
// Code generators

#define __ ACCESS_MASM(masm)

void StringCharLoadGenerator::Generate(MacroAssembler* masm,
                                       Factory* factory,
                                       Register string,
                                       Register index,
                                       Register result,
                                       Label* call_runtime) {
  Label indirect_string_loaded;
  __ bind(&indirect_string_loaded);
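  // Indirect strings (slices, cons strings, thin strings) are unwrapped one
  // layer at a time; control jumps back to this label after each unwrapping
  // step until |string| refers to a direct (sequential or external) string.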

  // Fetch the instance type of the receiver into result register.
  __ mov(result, FieldOperand(string, HeapObject::kMapOffset));
  __ movzx_b(result, FieldOperand(result, Map::kInstanceTypeOffset));

  // We need special handling for indirect strings.
  Label check_sequential;
  __ test(result, Immediate(kIsIndirectStringMask));
  __ j(zero, &check_sequential, Label::kNear);

  // Dispatch on the indirect string shape: slice, cons, or thin.
  Label cons_string, thin_string;
  __ and_(result, Immediate(kStringRepresentationMask));
  __ cmp(result, Immediate(kConsStringTag));
  __ j(equal, &cons_string, Label::kNear);
  __ cmp(result, Immediate(kThinStringTag));
  __ j(equal, &thin_string, Label::kNear);

  // Handle slices.
  __ mov(result, FieldOperand(string, SlicedString::kOffsetOffset));
  __ SmiUntag(result);
  __ add(index, result);
  __ mov(string, FieldOperand(string, SlicedString::kParentOffset));
  __ jmp(&indirect_string_loaded);

  // Handle thin strings.
  __ bind(&thin_string);
  __ mov(string, FieldOperand(string, ThinString::kActualOffset));
  __ jmp(&indirect_string_loaded);

  // Handle cons strings.
  // Check whether the right hand side is the empty string (i.e. if
  // this is really a flat string in a cons string). If that is not
  // the case we would rather go to the runtime system now to flatten
  // the string.
  __ bind(&cons_string);
  __ cmp(FieldOperand(string, ConsString::kSecondOffset),
         Immediate(factory->empty_string()));
  __ j(not_equal, call_runtime);
  __ mov(string, FieldOperand(string, ConsString::kFirstOffset));
  __ jmp(&indirect_string_loaded);

  // Distinguish sequential and external strings. Only these two string
  // representations can reach here (slices and flat cons strings have been
  // reduced to the underlying sequential or external string).
  Label seq_string;
  __ bind(&check_sequential);
  STATIC_ASSERT(kSeqStringTag == 0);
  __ test(result, Immediate(kStringRepresentationMask));
  __ j(zero, &seq_string, Label::kNear);

  // Handle external strings.
  Label one_byte_external, done;
  if (FLAG_debug_code) {
    // Assert that we do not have a cons or slice (indirect strings) here.
    // Sequential strings have already been ruled out.
    __ test(result, Immediate(kIsIndirectStringMask));
    __ Assert(zero, kExternalStringExpectedButNotFound);
  }
  // Rule out short external strings.
  STATIC_ASSERT(kShortExternalStringTag != 0);
  __ test_b(result, Immediate(kShortExternalStringMask));
  __ j(not_zero, call_runtime);
  // Check encoding.
  STATIC_ASSERT(kTwoByteStringTag == 0);
  __ test_b(result, Immediate(kStringEncodingMask));
  __ mov(result, FieldOperand(string, ExternalString::kResourceDataOffset));
  __ j(not_equal, &one_byte_external, Label::kNear);
  // Two-byte string.
  __ movzx_w(result, Operand(result, index, times_2, 0));
  __ jmp(&done, Label::kNear);
  __ bind(&one_byte_external);
  // One-byte string.
  __ movzx_b(result, Operand(result, index, times_1, 0));
  __ jmp(&done, Label::kNear);

  // Dispatch on the encoding: one-byte or two-byte.
  Label one_byte;
  __ bind(&seq_string);
  STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
  STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
  __ test(result, Immediate(kStringEncodingMask));
  __ j(not_zero, &one_byte, Label::kNear);

  // Two-byte string.
  // Load the two-byte character code into the result register.
  __ movzx_w(result, FieldOperand(string,
                                  index,
                                  times_2,
                                  SeqTwoByteString::kHeaderSize));
  __ jmp(&done, Label::kNear);

  // One-byte string.
  // Load the byte into the result register.
  __ bind(&one_byte);
  __ movzx_b(result, FieldOperand(string,
                                  index,
                                  times_1,
                                  SeqOneByteString::kHeaderSize));
  __ bind(&done);
}

#undef __


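// Code aging: a "young" code object starts with the standard function
// prologue emitted below. Aging patches those bytes into a call to a
// code-age stub (see PatchPlatformCodeAge), which is why IsOld simply
// checks for the call opcode at the start of the sequence.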
CodeAgingHelper::CodeAgingHelper(Isolate* isolate) {
  USE(isolate);
  DCHECK(young_sequence_.length() == kNoCodeAgeSequenceLength);
  CodePatcher patcher(isolate, young_sequence_.start(),
                      young_sequence_.length());
  patcher.masm()->push(ebp);
  patcher.masm()->mov(ebp, esp);
  patcher.masm()->push(esi);
  patcher.masm()->push(edi);
}


#ifdef DEBUG
bool CodeAgingHelper::IsOld(byte* candidate) const {
  return *candidate == kCallOpcode;
}
#endif


bool Code::IsYoungSequence(Isolate* isolate, byte* sequence) {
  bool result = isolate->code_aging_helper()->IsYoung(sequence);
  DCHECK(result || isolate->code_aging_helper()->IsOld(sequence));
  return result;
}

Code::Age Code::GetCodeAge(Isolate* isolate, byte* sequence) {
  if (IsYoungSequence(isolate, sequence)) return kNoAgeCodeAge;

  sequence++;  // Skip the kCallOpcode byte.
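  // The call's rel32 operand is relative to the end of the instruction.
  // |sequence| now points at the operand itself, so adding the operand plus
  // kCallTargetAddressOffset yields the absolute target address.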
  Address target_address = sequence + *reinterpret_cast<int*>(sequence) +
                           Assembler::kCallTargetAddressOffset;
  Code* stub = GetCodeFromTargetAddress(target_address);
  return GetAgeOfCodeAgeStub(stub);
}

void Code::PatchPlatformCodeAge(Isolate* isolate, byte* sequence,
                                Code::Age age) {
  uint32_t young_length = isolate->code_aging_helper()->young_sequence_length();
  if (age == kNoAgeCodeAge) {
    isolate->code_aging_helper()->CopyYoungSequenceTo(sequence);
    Assembler::FlushICache(isolate, sequence, young_length);
  } else {
    Code* stub = GetCodeAgeStub(isolate, age);
    CodePatcher patcher(isolate, sequence, young_length);
    patcher.masm()->call(stub->instruction_start(), RelocInfo::NONE32);
  }
}


}  // namespace internal
}  // namespace v8

#endif  // V8_TARGET_ARCH_IA32