ic-arm.cc revision 053d10c438f14580aaf4ab1b2aad93a5a4fe8b82
// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#if defined(V8_TARGET_ARCH_ARM)

#include "assembler-arm.h"
#include "code-stubs.h"
#include "codegen.h"
#include "disasm.h"
#include "ic-inl.h"
#include "runtime.h"
#include "stub-cache.h"

namespace v8 {
namespace internal {


// ----------------------------------------------------------------------------
// Static IC stub generators.
//

#define __ ACCESS_MASM(masm)


static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm,
                                            Register type,
                                            Label* global_object) {
  // Register usage:
  //   type: holds the receiver instance type on entry.
  __ cmp(type, Operand(JS_GLOBAL_OBJECT_TYPE));
  __ b(eq, global_object);
  __ cmp(type, Operand(JS_BUILTINS_OBJECT_TYPE));
  __ b(eq, global_object);
  __ cmp(type, Operand(JS_GLOBAL_PROXY_TYPE));
  __ b(eq, global_object);
}
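

// Editor's sketch (not part of the original file): the three compares and
// branches above encode the following predicate over instance types. The
// helper name is hypothetical; only the type constants come from the code.
static inline bool IsGlobalInstanceTypeSketch(InstanceType type) {
  return type == JS_GLOBAL_OBJECT_TYPE ||
         type == JS_BUILTINS_OBJECT_TYPE ||
         type == JS_GLOBAL_PROXY_TYPE;
}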


// Generated code falls through if the receiver is a regular non-global
// JS object with slow properties and no interceptors.
static void GenerateStringDictionaryReceiverCheck(MacroAssembler* masm,
                                                  Register receiver,
                                                  Register elements,
                                                  Register t0,
                                                  Register t1,
                                                  Label* miss) {
  // Register usage:
  //   receiver: holds the receiver on entry and is unchanged.
  //   elements: holds the property dictionary on fall through.
  // Scratch registers:
  //   t0: used to hold the receiver map.
  //   t1: used to hold the receiver instance type, receiver bit mask and
  //       elements map.

  // Check that the receiver isn't a smi.
  __ tst(receiver, Operand(kSmiTagMask));
  __ b(eq, miss);

  // Check that the receiver is a valid JS object.
  __ CompareObjectType(receiver, t0, t1, FIRST_JS_OBJECT_TYPE);
  __ b(lt, miss);

  // If this assert fails, we have to check upper bound too.
  ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);

  GenerateGlobalInstanceTypeCheck(masm, t1, miss);

  // Check that the receiver does not require access checks and does not
  // have named interceptors.
  __ ldrb(t1, FieldMemOperand(t0, Map::kBitFieldOffset));
  __ tst(t1, Operand((1 << Map::kIsAccessCheckNeeded) |
                     (1 << Map::kHasNamedInterceptor)));
  __ b(ne, miss);

  __ ldr(elements, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
  __ ldr(t1, FieldMemOperand(elements, HeapObject::kMapOffset));
  __ LoadRoot(ip, Heap::kHashTableMapRootIndex);
  __ cmp(t1, ip);
  __ b(ne, miss);
}


// Probe the string dictionary in the |elements| register. Jump to the
// |done| label if a property with the given name is found. Jump to
// the |miss| label otherwise.
static void GenerateStringDictionaryProbes(MacroAssembler* masm,
                                           Label* miss,
                                           Label* done,
                                           Register elements,
                                           Register name,
                                           Register scratch1,
                                           Register scratch2) {
  // Assert that name contains a string.
  if (FLAG_debug_code) __ AbortIfNotString(name);

  // Compute the capacity mask.
  const int kCapacityOffset = StringDictionary::kHeaderSize +
      StringDictionary::kCapacityIndex * kPointerSize;
  __ ldr(scratch1, FieldMemOperand(elements, kCapacityOffset));
  __ mov(scratch1, Operand(scratch1, ASR, kSmiTagSize));  // convert smi to int
  __ sub(scratch1, scratch1, Operand(1));

  const int kElementsStartOffset = StringDictionary::kHeaderSize +
      StringDictionary::kElementsStartIndex * kPointerSize;

  // Generate an unrolled loop that performs a few probes before
  // giving up. Measurements done on Gmail indicate that 2 probes
  // cover ~93% of loads from dictionaries.
  static const int kProbes = 4;
  for (int i = 0; i < kProbes; i++) {
    // Compute the masked index: (hash + i + i * i) & mask.
    __ ldr(scratch2, FieldMemOperand(name, String::kHashFieldOffset));
    if (i > 0) {
      // Add the probe offset (i + i * i) left shifted to avoid right shifting
      // the hash in a separate instruction. The value hash + i + i * i is
      // right shifted in the 'and' instruction that follows.
      ASSERT(StringDictionary::GetProbeOffset(i) <
             1 << (32 - String::kHashFieldOffset));
      __ add(scratch2, scratch2, Operand(
          StringDictionary::GetProbeOffset(i) << String::kHashShift));
    }
    __ and_(scratch2, scratch1, Operand(scratch2, LSR, String::kHashShift));

    // Scale the index by multiplying by the element size.
    ASSERT(StringDictionary::kEntrySize == 3);
    // scratch2 = scratch2 * 3.
    __ add(scratch2, scratch2, Operand(scratch2, LSL, 1));

    // Check if the key is identical to the name.
    __ add(scratch2, elements, Operand(scratch2, LSL, 2));
    __ ldr(ip, FieldMemOperand(scratch2, kElementsStartOffset));
    __ cmp(name, Operand(ip));
    if (i != kProbes - 1) {
      __ b(eq, done);
    } else {
      __ b(ne, miss);
    }
  }
}
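

// Editor's sketch (not part of the original file): each unrolled probe
// above computes the index below, where mask == capacity - 1. The helper
// name is hypothetical; the shifts and constants mirror the generated
// code, using StringDictionary's own probe-offset scheme.
static inline uint32_t StringDictionaryProbeIndexSketch(uint32_t hash_field,
                                                        uint32_t mask,
                                                        int probe) {
  uint32_t hash = hash_field >> String::kHashShift;
  uint32_t entry = (hash + StringDictionary::GetProbeOffset(probe)) & mask;
  return entry * StringDictionary::kEntrySize;  // Index in words; 3 per entry.
}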


// Helper function used from LoadIC/CallIC GenerateNormal.
//
// elements: Property dictionary. It is not clobbered if a jump to the miss
//           label is done.
// name:     Property name. It is not clobbered if a jump to the miss label
//           is done.
// result:   Register for the result. It is only updated if a jump to the
//           miss label is not done. It can be the same as elements or name,
//           in which case that register is clobbered when the miss label is
//           not taken.
// The two scratch registers need to be different from elements, name and
// result.
// The generated code assumes that the receiver has slow properties,
// is not a global object and does not have interceptors.
static void GenerateDictionaryLoad(MacroAssembler* masm,
                                   Label* miss,
                                   Register elements,
                                   Register name,
                                   Register result,
                                   Register scratch1,
                                   Register scratch2) {
  // Main use of the scratch registers.
  // scratch1: Used as temporary and to hold the capacity of the property
  //           dictionary.
  // scratch2: Used as temporary.
  Label done;

  // Probe the dictionary.
  GenerateStringDictionaryProbes(masm,
                                 miss,
                                 &done,
                                 elements,
                                 name,
                                 scratch1,
                                 scratch2);

  // If probing finds an entry check that the value is a normal
  // property.
  __ bind(&done);  // scratch2 == elements + 4 * index
  const int kElementsStartOffset = StringDictionary::kHeaderSize +
      StringDictionary::kElementsStartIndex * kPointerSize;
  const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
  __ ldr(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
  __ tst(scratch1, Operand(PropertyDetails::TypeField::mask() << kSmiTagSize));
  __ b(ne, miss);

  // Get the value at the masked, scaled index and return.
  __ ldr(result,
         FieldMemOperand(scratch2, kElementsStartOffset + 1 * kPointerSize));
}
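

// Editor's sketch (not part of the original file): the details test above
// accepts only NORMAL properties. Each dictionary entry is three words
// (key, value, details), and 'details' is a smi whose type field must be
// zero. A hypothetical helper over the tagged details word:
static inline bool IsNormalPropertySketch(intptr_t details_smi) {
  // The smi tag occupies the low bits; the type field sits just above it.
  return (details_smi &
          (PropertyDetails::TypeField::mask() << kSmiTagSize)) == 0;
}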


// Helper function used from StoreIC::GenerateNormal.
//
// elements: Property dictionary. It is not clobbered if a jump to the miss
//           label is done.
// name:     Property name. It is not clobbered if a jump to the miss label
//           is done.
// value:    The value to store.
// The two scratch registers need to be different from elements, name and
// value.
// The generated code assumes that the receiver has slow properties,
// is not a global object and does not have interceptors.
static void GenerateDictionaryStore(MacroAssembler* masm,
                                    Label* miss,
                                    Register elements,
                                    Register name,
                                    Register value,
                                    Register scratch1,
                                    Register scratch2) {
  // Main use of the scratch registers.
  // scratch1: Used as temporary and to hold the capacity of the property
  //           dictionary.
  // scratch2: Used as temporary.
  Label done;

  // Probe the dictionary.
  GenerateStringDictionaryProbes(masm,
                                 miss,
                                 &done,
                                 elements,
                                 name,
                                 scratch1,
                                 scratch2);

  // If probing finds an entry in the dictionary check that the value
  // is a normal property that is not read only.
  __ bind(&done);  // scratch2 == elements + 4 * index
  const int kElementsStartOffset = StringDictionary::kHeaderSize +
      StringDictionary::kElementsStartIndex * kPointerSize;
  const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
  const int kTypeAndReadOnlyMask
      = (PropertyDetails::TypeField::mask() |
         PropertyDetails::AttributesField::encode(READ_ONLY)) << kSmiTagSize;
  __ ldr(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
  __ tst(scratch1, Operand(kTypeAndReadOnlyMask));
  __ b(ne, miss);

  // Store the value at the masked, scaled index and return.
  const int kValueOffset = kElementsStartOffset + kPointerSize;
  __ add(scratch2, scratch2, Operand(kValueOffset - kHeapObjectTag));
  __ str(value, MemOperand(scratch2));

  // Update the write barrier. Make sure not to clobber the value.
  __ mov(scratch1, value);
  __ RecordWrite(elements, scratch2, scratch1);
}


static void GenerateNumberDictionaryLoad(MacroAssembler* masm,
                                         Label* miss,
                                         Register elements,
                                         Register key,
                                         Register result,
                                         Register t0,
                                         Register t1,
                                         Register t2) {
  // Register use:
  //
  // elements - holds the slow-case elements of the receiver on entry.
  //            Unchanged unless 'result' is the same register.
  //
  // key      - holds the smi key on entry.
  //            Unchanged unless 'result' is the same register.
  //
  // result   - holds the result on exit if the load succeeded.
  //            Allowed to be the same as 'elements' or 'key'.
  //            Unchanged on bailout so 'elements' and 'key' can be used
  //            in further computation.
  //
  // Scratch registers:
  //
  // t0 - holds the untagged key on entry and holds the hash once computed.
  //
  // t1 - used to hold the capacity mask of the dictionary.
  //
  // t2 - used for the index into the dictionary.
  Label done;

  // Compute the hash code from the untagged key.  This must be kept in sync
  // with ComputeIntegerHash in utils.h.
  //
  // hash = ~hash + (hash << 15);
  __ mvn(t1, Operand(t0));
  __ add(t0, t1, Operand(t0, LSL, 15));
  // hash = hash ^ (hash >> 12);
  __ eor(t0, t0, Operand(t0, LSR, 12));
  // hash = hash + (hash << 2);
  __ add(t0, t0, Operand(t0, LSL, 2));
  // hash = hash ^ (hash >> 4);
  __ eor(t0, t0, Operand(t0, LSR, 4));
  // hash = hash * 2057;
  __ mov(t1, Operand(2057));
  __ mul(t0, t0, t1);
  // hash = hash ^ (hash >> 16);
  __ eor(t0, t0, Operand(t0, LSR, 16));

  // Compute the capacity mask.
  __ ldr(t1, FieldMemOperand(elements, NumberDictionary::kCapacityOffset));
  __ mov(t1, Operand(t1, ASR, kSmiTagSize));  // convert smi to int
  __ sub(t1, t1, Operand(1));

  // Generate an unrolled loop that performs a few probes before giving up.
  static const int kProbes = 4;
  for (int i = 0; i < kProbes; i++) {
    // Use t2 for index calculations and keep the hash intact in t0.
    __ mov(t2, t0);
    // Compute the masked index: (hash + i + i * i) & mask.
    if (i > 0) {
      __ add(t2, t2, Operand(NumberDictionary::GetProbeOffset(i)));
    }
    __ and_(t2, t2, Operand(t1));

    // Scale the index by multiplying by the element size.
    ASSERT(NumberDictionary::kEntrySize == 3);
    __ add(t2, t2, Operand(t2, LSL, 1));  // t2 = t2 * 3

    // Check if the key is identical to the entry's key.
    __ add(t2, elements, Operand(t2, LSL, kPointerSizeLog2));
    __ ldr(ip, FieldMemOperand(t2, NumberDictionary::kElementsStartOffset));
    __ cmp(key, Operand(ip));
    if (i != kProbes - 1) {
      __ b(eq, &done);
    } else {
      __ b(ne, miss);
    }
  }

  __ bind(&done);
  // Check that the value is a normal property.
  // t2: elements + (index * kPointerSize)
  const int kDetailsOffset =
      NumberDictionary::kElementsStartOffset + 2 * kPointerSize;
  __ ldr(t1, FieldMemOperand(t2, kDetailsOffset));
  __ tst(t1, Operand(Smi::FromInt(PropertyDetails::TypeField::mask())));
  __ b(ne, miss);

  // Get the value at the masked, scaled index and return.
  const int kValueOffset =
      NumberDictionary::kElementsStartOffset + kPointerSize;
  __ ldr(result, FieldMemOperand(t2, kValueOffset));
}
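

// Editor's sketch (not part of the original file): the mvn/add/eor/mul
// sequence above encodes the following hash, which per the comment must
// stay in sync with ComputeIntegerHash in utils.h. The helper name is
// hypothetical.
static inline uint32_t ComputeIntegerHashSketch(uint32_t hash) {
  hash = ~hash + (hash << 15);  // mvn, then add with LSL 15
  hash = hash ^ (hash >> 12);   // eor with LSR 12
  hash = hash + (hash << 2);    // add with LSL 2
  hash = hash ^ (hash >> 4);    // eor with LSR 4
  hash = hash * 2057;           // mov 2057, then mul
  hash = hash ^ (hash >> 16);   // eor with LSR 16
  return hash;
}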


void LoadIC::GenerateArrayLength(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- r2    : name
  //  -- lr    : return address
  //  -- r0    : receiver
  //  -- sp[0] : receiver
  // -----------------------------------
  Label miss;

  StubCompiler::GenerateLoadArrayLength(masm, r0, r3, &miss);
  __ bind(&miss);
  StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC);
}


void LoadIC::GenerateStringLength(MacroAssembler* masm, bool support_wrappers) {
  // ----------- S t a t e -------------
  //  -- r2    : name
  //  -- lr    : return address
  //  -- r0    : receiver
  //  -- sp[0] : receiver
  // -----------------------------------
  Label miss;

  StubCompiler::GenerateLoadStringLength(masm, r0, r1, r3, &miss,
                                         support_wrappers);
  // Cache miss: Jump to runtime.
  __ bind(&miss);
  StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC);
}


void LoadIC::GenerateFunctionPrototype(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- r2    : name
  //  -- lr    : return address
  //  -- r0    : receiver
  //  -- sp[0] : receiver
  // -----------------------------------
  Label miss;

  StubCompiler::GenerateLoadFunctionPrototype(masm, r0, r1, r3, &miss);
  __ bind(&miss);
  StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC);
}


// Checks the receiver for special cases (value type, slow case bits).
// Falls through for regular JS objects.
static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
                                           Register receiver,
                                           Register map,
                                           Register scratch,
                                           int interceptor_bit,
                                           Label* slow) {
  // Check that the object isn't a smi.
  __ JumpIfSmi(receiver, slow);
  // Get the map of the receiver.
  __ ldr(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
  // Check bit field.
  __ ldrb(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
  __ tst(scratch,
         Operand((1 << Map::kIsAccessCheckNeeded) | (1 << interceptor_bit)));
  __ b(ne, slow);
  // Check that the object is some kind of JS object EXCEPT JS Value type.
  // In the case that the object is a value-wrapper object,
  // we enter the runtime system to make sure that indexing into string
  // objects works as intended.
  ASSERT(JS_OBJECT_TYPE > JS_VALUE_TYPE);
  __ ldrb(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
  __ cmp(scratch, Operand(JS_OBJECT_TYPE));
  __ b(lt, slow);
}


// Loads an indexed element from a fast case array.
// If not_fast_array is NULL, doesn't perform the elements map check.
static void GenerateFastArrayLoad(MacroAssembler* masm,
                                  Register receiver,
                                  Register key,
                                  Register elements,
                                  Register scratch1,
                                  Register scratch2,
                                  Register result,
                                  Label* not_fast_array,
                                  Label* out_of_range) {
  // Register use:
  //
  // receiver - holds the receiver on entry.
  //            Unchanged unless 'result' is the same register.
  //
  // key      - holds the smi key on entry.
  //            Unchanged unless 'result' is the same register.
  //
  // elements - holds the elements of the receiver on exit.
  //
  // result   - holds the result on exit if the load succeeded.
  //            Allowed to be the same as 'receiver' or 'key'.
  //            Unchanged on bailout so 'receiver' and 'key' can be safely
  //            used by further computation.
  //
  // Scratch registers:
  //
  // scratch1 - used to hold elements map and elements length.
  //            Holds the elements map if not_fast_array branch is taken.
  //
  // scratch2 - used to hold the loaded value.

  __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  if (not_fast_array != NULL) {
    // Check that the object is in fast mode and writable.
    __ ldr(scratch1, FieldMemOperand(elements, HeapObject::kMapOffset));
    __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
    __ cmp(scratch1, ip);
    __ b(ne, not_fast_array);
  } else {
    __ AssertFastElements(elements);
  }
  // Check that the key (index) is within bounds.
  __ ldr(scratch1, FieldMemOperand(elements, FixedArray::kLengthOffset));
  __ cmp(key, Operand(scratch1));
  __ b(hs, out_of_range);
  // Fast case: Do the load.
  __ add(scratch1, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  // The key is a smi.
  ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
  __ ldr(scratch2,
         MemOperand(scratch1, key, LSL, kPointerSizeLog2 - kSmiTagSize));
  __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
  __ cmp(scratch2, ip);
  // In case the loaded value is the_hole we have to consult GetProperty
  // to ensure the prototype chain is searched.
  __ b(eq, out_of_range);
  __ mov(result, scratch2);
}
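

// Editor's sketch (not part of the original file): the fast path above is
// equivalent to the following pseudo-C++, where 'bailout' stands for the
// not_fast_array / out_of_range jumps:
//
//   if (elements->map() != fixed_array_map) bailout;   // fast and writable?
//   if (key >= elements->length()) bailout;            // bounds check
//   Object* value = elements->get(key >> kSmiTagSize); // untagged smi index
//   if (value == the_hole) bailout;  // let GetProperty walk the prototypes
//   result = value;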


// Checks whether a key is an array index string or a symbol string.
// Falls through if the key is a symbol.
static void GenerateKeyStringCheck(MacroAssembler* masm,
                                   Register key,
                                   Register map,
                                   Register hash,
                                   Label* index_string,
                                   Label* not_symbol) {
  // The key is not a smi.
  // Is it a string?
  __ CompareObjectType(key, map, hash, FIRST_NONSTRING_TYPE);
  __ b(ge, not_symbol);

  // Is the string an array index, with cached numeric value?
  __ ldr(hash, FieldMemOperand(key, String::kHashFieldOffset));
  __ tst(hash, Operand(String::kContainsCachedArrayIndexMask));
  __ b(eq, index_string);

  // Is the string a symbol?
  // map: key map
  __ ldrb(hash, FieldMemOperand(map, Map::kInstanceTypeOffset));
  ASSERT(kSymbolTag != 0);
  __ tst(hash, Operand(kIsSymbolMask));
  __ b(eq, not_symbol);
}


// Defined in ic.cc.
Object* CallIC_Miss(Arguments args);

// The generated code does not accept smi keys.
// The generated code falls through if both probes miss.
static void GenerateMonomorphicCacheProbe(MacroAssembler* masm,
                                          int argc,
                                          Code::Kind kind) {
  // ----------- S t a t e -------------
  //  -- r1    : receiver
  //  -- r2    : name
  // -----------------------------------
  Label number, non_number, non_string, boolean, probe, miss;

  // Probe the stub cache.
  Code::Flags flags = Code::ComputeFlags(kind,
                                         NOT_IN_LOOP,
                                         MONOMORPHIC,
                                         Code::kNoExtraICState,
                                         NORMAL,
                                         argc);
  Isolate::Current()->stub_cache()->GenerateProbe(
      masm, flags, r1, r2, r3, r4, r5);

  // If the stub cache probing failed, the receiver might be a value.
  // For value objects, we use the map of the corresponding JSValue's
  // prototype object for the cache, and that is what we need to probe.
  //
  // Check for number.
  __ tst(r1, Operand(kSmiTagMask));
  __ b(eq, &number);
  __ CompareObjectType(r1, r3, r3, HEAP_NUMBER_TYPE);
  __ b(ne, &non_number);
  __ bind(&number);
  StubCompiler::GenerateLoadGlobalFunctionPrototype(
      masm, Context::NUMBER_FUNCTION_INDEX, r1);
  __ b(&probe);

  // Check for string.
  __ bind(&non_number);
  __ cmp(r3, Operand(FIRST_NONSTRING_TYPE));
  __ b(hs, &non_string);
  StubCompiler::GenerateLoadGlobalFunctionPrototype(
      masm, Context::STRING_FUNCTION_INDEX, r1);
  __ b(&probe);

  // Check for boolean.
  __ bind(&non_string);
  __ LoadRoot(ip, Heap::kTrueValueRootIndex);
  __ cmp(r1, ip);
  __ b(eq, &boolean);
  __ LoadRoot(ip, Heap::kFalseValueRootIndex);
  __ cmp(r1, ip);
  __ b(ne, &miss);
  __ bind(&boolean);
  StubCompiler::GenerateLoadGlobalFunctionPrototype(
      masm, Context::BOOLEAN_FUNCTION_INDEX, r1);

  // Probe the stub cache for the value object.
  __ bind(&probe);
  Isolate::Current()->stub_cache()->GenerateProbe(
      masm, flags, r1, r2, r3, r4, r5);

  __ bind(&miss);
}


static void GenerateFunctionTailCall(MacroAssembler* masm,
                                     int argc,
                                     Label* miss,
                                     Register scratch) {
  // r1: function

  // Check that the value isn't a smi.
  __ tst(r1, Operand(kSmiTagMask));
  __ b(eq, miss);

  // Check that the value is a JSFunction.
  __ CompareObjectType(r1, scratch, scratch, JS_FUNCTION_TYPE);
  __ b(ne, miss);

  // Invoke the function.
  ParameterCount actual(argc);
  __ InvokeFunction(r1, actual, JUMP_FUNCTION);
}


static void GenerateCallNormal(MacroAssembler* masm, int argc) {
  // ----------- S t a t e -------------
  //  -- r2    : name
  //  -- lr    : return address
  // -----------------------------------
  Label miss;

  // Get the receiver of the function from the stack into r1.
  __ ldr(r1, MemOperand(sp, argc * kPointerSize));

  GenerateStringDictionaryReceiverCheck(masm, r1, r0, r3, r4, &miss);

  // r0: elements
  // Search the dictionary - put result in register r1.
  GenerateDictionaryLoad(masm, &miss, r0, r2, r1, r3, r4);

  GenerateFunctionTailCall(masm, argc, &miss, r4);

  __ bind(&miss);
}


static void GenerateCallMiss(MacroAssembler* masm, int argc, IC::UtilityId id) {
  // ----------- S t a t e -------------
  //  -- r2    : name
  //  -- lr    : return address
  // -----------------------------------
  Isolate* isolate = masm->isolate();

  if (id == IC::kCallIC_Miss) {
    __ IncrementCounter(isolate->counters()->call_miss(), 1, r3, r4);
  } else {
    __ IncrementCounter(isolate->counters()->keyed_call_miss(), 1, r3, r4);
  }

  // Get the receiver of the function from the stack.
  __ ldr(r3, MemOperand(sp, argc * kPointerSize));

  __ EnterInternalFrame();

  // Push the receiver and the name of the function.
  __ Push(r3, r2);

  // Call the entry.
  __ mov(r0, Operand(2));
  __ mov(r1, Operand(ExternalReference(IC_Utility(id), isolate)));

  CEntryStub stub(1);
  __ CallStub(&stub);

  // Move result to r1 and leave the internal frame.
  __ mov(r1, Operand(r0));
  __ LeaveInternalFrame();

  // Check if the receiver is a global object of some sort.
  // This can happen only for regular CallIC but not KeyedCallIC.
  if (id == IC::kCallIC_Miss) {
    Label invoke, global;
    __ ldr(r2, MemOperand(sp, argc * kPointerSize));  // receiver
    __ tst(r2, Operand(kSmiTagMask));
    __ b(eq, &invoke);
    __ CompareObjectType(r2, r3, r3, JS_GLOBAL_OBJECT_TYPE);
    __ b(eq, &global);
    __ cmp(r3, Operand(JS_BUILTINS_OBJECT_TYPE));
    __ b(ne, &invoke);

    // Patch the receiver on the stack.
    __ bind(&global);
    __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalReceiverOffset));
    __ str(r2, MemOperand(sp, argc * kPointerSize));
    __ bind(&invoke);
  }

  // Invoke the function.
  ParameterCount actual(argc);
  __ InvokeFunction(r1, actual, JUMP_FUNCTION);
}


void CallIC::GenerateMiss(MacroAssembler* masm, int argc) {
  // ----------- S t a t e -------------
  //  -- r2    : name
  //  -- lr    : return address
  // -----------------------------------

  GenerateCallMiss(masm, argc, IC::kCallIC_Miss);
}


void CallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
  // ----------- S t a t e -------------
  //  -- r2    : name
  //  -- lr    : return address
  // -----------------------------------

  // Get the receiver of the function from the stack into r1.
  __ ldr(r1, MemOperand(sp, argc * kPointerSize));
  GenerateMonomorphicCacheProbe(masm, argc, Code::CALL_IC);
  GenerateMiss(masm, argc);
}


void CallIC::GenerateNormal(MacroAssembler* masm, int argc) {
  // ----------- S t a t e -------------
  //  -- r2    : name
  //  -- lr    : return address
  // -----------------------------------

  GenerateCallNormal(masm, argc);
  GenerateMiss(masm, argc);
}


void KeyedCallIC::GenerateMiss(MacroAssembler* masm, int argc) {
  // ----------- S t a t e -------------
  //  -- r2    : name
  //  -- lr    : return address
  // -----------------------------------

  GenerateCallMiss(masm, argc, IC::kKeyedCallIC_Miss);
}


void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
  // ----------- S t a t e -------------
  //  -- r2    : name
  //  -- lr    : return address
  // -----------------------------------

  // Get the receiver of the function from the stack into r1.
  __ ldr(r1, MemOperand(sp, argc * kPointerSize));

  Label do_call, slow_call, slow_load, slow_reload_receiver;
  Label check_number_dictionary, check_string, lookup_monomorphic_cache;
  Label index_smi, index_string;

  // Check that the key is a smi.
  __ JumpIfNotSmi(r2, &check_string);
  __ bind(&index_smi);
  // Now the key is known to be a smi. This place is also jumped to from below
  // where a numeric string is converted to a smi.

  GenerateKeyedLoadReceiverCheck(
      masm, r1, r0, r3, Map::kHasIndexedInterceptor, &slow_call);

  GenerateFastArrayLoad(
      masm, r1, r2, r4, r3, r0, r1, &check_number_dictionary, &slow_load);
  Counters* counters = masm->isolate()->counters();
  __ IncrementCounter(counters->keyed_call_generic_smi_fast(), 1, r0, r3);

  __ bind(&do_call);
  // The receiver in r1 is not used after this point.
  // r2: key
  // r1: function
  GenerateFunctionTailCall(masm, argc, &slow_call, r0);

  __ bind(&check_number_dictionary);
  // r2: key
  // r3: elements map
  // r4: elements
  // Check whether the elements object is a number dictionary.
  __ LoadRoot(ip, Heap::kHashTableMapRootIndex);
  __ cmp(r3, ip);
  __ b(ne, &slow_load);
  __ mov(r0, Operand(r2, ASR, kSmiTagSize));
  // r0: untagged index
  GenerateNumberDictionaryLoad(masm, &slow_load, r4, r2, r1, r0, r3, r5);
  __ IncrementCounter(counters->keyed_call_generic_smi_dict(), 1, r0, r3);
  __ jmp(&do_call);

  __ bind(&slow_load);
  // This branch is taken when calling KeyedCallIC_Miss is neither required
  // nor beneficial.
  __ IncrementCounter(counters->keyed_call_generic_slow_load(), 1, r0, r3);
  __ EnterInternalFrame();
  __ push(r2);  // save the key
  __ Push(r1, r2);  // pass the receiver and the key
  __ CallRuntime(Runtime::kKeyedGetProperty, 2);
  __ pop(r2);  // restore the key
  __ LeaveInternalFrame();
  __ mov(r1, r0);
  __ jmp(&do_call);

  __ bind(&check_string);
  GenerateKeyStringCheck(masm, r2, r0, r3, &index_string, &slow_call);

  // The key is known to be a symbol.
  // If the receiver is a regular JS object with slow properties then do
  // a quick inline probe of the receiver's dictionary.
  // Otherwise do the monomorphic cache probe.
  GenerateKeyedLoadReceiverCheck(
      masm, r1, r0, r3, Map::kHasNamedInterceptor, &lookup_monomorphic_cache);

  __ ldr(r0, FieldMemOperand(r1, JSObject::kPropertiesOffset));
  __ ldr(r3, FieldMemOperand(r0, HeapObject::kMapOffset));
  __ LoadRoot(ip, Heap::kHashTableMapRootIndex);
  __ cmp(r3, ip);
  __ b(ne, &lookup_monomorphic_cache);

  GenerateDictionaryLoad(masm, &slow_load, r0, r2, r1, r3, r4);
  __ IncrementCounter(counters->keyed_call_generic_lookup_dict(), 1, r0, r3);
  __ jmp(&do_call);

  __ bind(&lookup_monomorphic_cache);
  __ IncrementCounter(counters->keyed_call_generic_lookup_cache(), 1, r0, r3);
  GenerateMonomorphicCacheProbe(masm, argc, Code::KEYED_CALL_IC);
  // Fall through on miss.

  __ bind(&slow_call);
  // This branch is taken if:
  // - the receiver requires boxing or access check,
  // - the key is neither smi nor symbol,
  // - the value loaded is not a function,
  // - there is hope that the runtime will create a monomorphic call stub
  //   that will get fetched next time.
  __ IncrementCounter(counters->keyed_call_generic_slow(), 1, r0, r3);
  GenerateMiss(masm, argc);

  __ bind(&index_string);
  __ IndexFromHash(r3, r2);
  // Now jump to the place where smi keys are handled.
  __ jmp(&index_smi);
}


void KeyedCallIC::GenerateNormal(MacroAssembler* masm, int argc) {
  // ----------- S t a t e -------------
  //  -- r2    : name
  //  -- lr    : return address
  // -----------------------------------

  // Check if the name is a string.
  Label miss;
  __ tst(r2, Operand(kSmiTagMask));
  __ b(eq, &miss);
  __ IsObjectJSStringType(r2, r0, &miss);

  GenerateCallNormal(masm, argc);
  __ bind(&miss);
  GenerateMiss(masm, argc);
}


// Defined in ic.cc.
Object* LoadIC_Miss(Arguments args);

void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- r2    : name
  //  -- lr    : return address
  //  -- r0    : receiver
  //  -- sp[0] : receiver
  // -----------------------------------

  // Probe the stub cache.
  Code::Flags flags = Code::ComputeFlags(Code::LOAD_IC,
                                         NOT_IN_LOOP,
                                         MONOMORPHIC);
  Isolate::Current()->stub_cache()->GenerateProbe(
      masm, flags, r0, r2, r3, r4, r5);

  // Cache miss: Jump to runtime.
  GenerateMiss(masm);
}


void LoadIC::GenerateNormal(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- r2    : name
  //  -- lr    : return address
  //  -- r0    : receiver
  //  -- sp[0] : receiver
  // -----------------------------------
  Label miss;

  GenerateStringDictionaryReceiverCheck(masm, r0, r1, r3, r4, &miss);

  // r1: elements
  GenerateDictionaryLoad(masm, &miss, r1, r2, r0, r3, r4);
  __ Ret();

  // Cache miss: Jump to runtime.
  __ bind(&miss);
  GenerateMiss(masm);
}


void LoadIC::GenerateMiss(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- r2    : name
  //  -- lr    : return address
  //  -- r0    : receiver
  //  -- sp[0] : receiver
  // -----------------------------------
  Isolate* isolate = masm->isolate();

  __ IncrementCounter(isolate->counters()->load_miss(), 1, r3, r4);

  __ mov(r3, r0);
  __ Push(r3, r2);

  // Perform tail call to the entry.
  ExternalReference ref =
      ExternalReference(IC_Utility(kLoadIC_Miss), isolate);
  __ TailCallExternalReference(ref, 2, 1);
}

// Returns the code marker, or 0 if the code is not marked.
static inline int InlinedICSiteMarker(Address address,
                                      Address* inline_end_address) {
  if (V8::UseCrankshaft()) return 0;

  // If the instruction after the call site is not the pseudo instruction nop1
  // then this is not related to an inlined in-object property load. The nop1
  // instruction is located just after the call to the IC in the deferred code
  // handling the miss in the inlined code. After the nop1 instruction there is
  // a branch instruction for jumping back from the deferred code.
  Address address_after_call = address + Assembler::kCallTargetAddressOffset;
  Instr instr_after_call = Assembler::instr_at(address_after_call);
  int code_marker = MacroAssembler::GetCodeMarker(instr_after_call);

  // A negative result means the code is not marked.
  if (code_marker <= 0) return 0;

  Address address_after_nop = address_after_call + Assembler::kInstrSize;
  Instr instr_after_nop = Assembler::instr_at(address_after_nop);
  // There may be some reg-reg move and frame merging code to skip over before
  // the branch back from the DeferredReferenceGetKeyedValue code to the inlined
  // code.
  while (!Assembler::IsBranch(instr_after_nop)) {
    address_after_nop += Assembler::kInstrSize;
    instr_after_nop = Assembler::instr_at(address_after_nop);
  }

  // Find the end of the inlined code for handling the load.
  int b_offset =
      Assembler::GetBranchOffset(instr_after_nop) + Assembler::kPcLoadDelta;
  ASSERT(b_offset < 0);  // Jumping back from deferred code.
  *inline_end_address = address_after_nop + b_offset;

  return code_marker;
}


bool LoadIC::PatchInlinedLoad(Address address, Object* map, int offset) {
  if (V8::UseCrankshaft()) return false;

  // Find the end of the inlined code for handling the load if this is an
  // inlined IC call site.
  Address inline_end_address = 0;
  if (InlinedICSiteMarker(address, &inline_end_address)
      != Assembler::PROPERTY_ACCESS_INLINED) {
    return false;
  }

  // Patch the offset of the property load instruction (ldr r0, [r1, #+XXX]).
  // The immediate must be representable in 12 bits.
  ASSERT((JSObject::kMaxInstanceSize - JSObject::kHeaderSize) < (1 << 12));
  Address ldr_property_instr_address =
      inline_end_address - Assembler::kInstrSize;
  ASSERT(Assembler::IsLdrRegisterImmediate(
      Assembler::instr_at(ldr_property_instr_address)));
  Instr ldr_property_instr = Assembler::instr_at(ldr_property_instr_address);
  ldr_property_instr = Assembler::SetLdrRegisterImmediateOffset(
      ldr_property_instr, offset - kHeapObjectTag);
  Assembler::instr_at_put(ldr_property_instr_address, ldr_property_instr);

  // Indicate that code has changed.
  CPU::FlushICache(ldr_property_instr_address, 1 * Assembler::kInstrSize);

  // Patch the map check.
  // For PROPERTY_ACCESS_INLINED, the load map instruction is generated
  // 4 instructions before the end of the inlined code.
  // See codegen-arm.cc CodeGenerator::EmitNamedLoad.
  int ldr_map_offset = -4;
  Address ldr_map_instr_address =
      inline_end_address + ldr_map_offset * Assembler::kInstrSize;
  Assembler::set_target_address_at(ldr_map_instr_address,
                                   reinterpret_cast<Address>(map));
  return true;
}


bool LoadIC::PatchInlinedContextualLoad(Address address,
                                        Object* map,
                                        Object* cell,
                                        bool is_dont_delete) {
  // Find the end of the inlined code for handling the contextual load if
  // this is an inlined IC call site.
  Address inline_end_address = 0;
  int marker = InlinedICSiteMarker(address, &inline_end_address);
  if (!((marker == Assembler::PROPERTY_ACCESS_INLINED_CONTEXT) ||
        (marker == Assembler::PROPERTY_ACCESS_INLINED_CONTEXT_DONT_DELETE))) {
    return false;
  }
  // On ARM we don't rely on the is_dont_delete argument as the hint is already
  // embedded in the code marker.
  bool marker_is_dont_delete =
      marker == Assembler::PROPERTY_ACCESS_INLINED_CONTEXT_DONT_DELETE;

  // These are the offsets from the end of the inlined code.
  // See codegen-arm.cc CodeGenerator::EmitNamedLoad.
  int ldr_map_offset = marker_is_dont_delete ? -5 : -8;
  int ldr_cell_offset = marker_is_dont_delete ? -2 : -5;
  if (FLAG_debug_code && marker_is_dont_delete) {
    // Three extra instructions were generated to check for the_hole_value.
    ldr_map_offset -= 3;
    ldr_cell_offset -= 3;
  }
  Address ldr_map_instr_address =
      inline_end_address + ldr_map_offset * Assembler::kInstrSize;
  Address ldr_cell_instr_address =
      inline_end_address + ldr_cell_offset * Assembler::kInstrSize;

  // Patch the map check.
  Assembler::set_target_address_at(ldr_map_instr_address,
                                   reinterpret_cast<Address>(map));
  // Patch the cell address.
  Assembler::set_target_address_at(ldr_cell_instr_address,
                                   reinterpret_cast<Address>(cell));

  return true;
}


bool StoreIC::PatchInlinedStore(Address address, Object* map, int offset) {
  if (V8::UseCrankshaft()) return false;

  // Find the end of the inlined code for the store if there is an
  // inlined version of the store.
  Address inline_end_address = 0;
  if (InlinedICSiteMarker(address, &inline_end_address)
      != Assembler::PROPERTY_ACCESS_INLINED) {
    return false;
  }

  // Compute the address of the map load instruction.
  Address ldr_map_instr_address =
      inline_end_address -
      (CodeGenerator::GetInlinedNamedStoreInstructionsAfterPatch() *
       Assembler::kInstrSize);

  // Update the offsets if initializing the inlined store. No reason
  // to update the offsets when clearing the inlined version because
  // it will bail out in the map check.
  if (map != HEAP->null_value()) {
    // Patch the offset in the actual store instruction.
    Address str_property_instr_address =
        ldr_map_instr_address + 3 * Assembler::kInstrSize;
    Instr str_property_instr = Assembler::instr_at(str_property_instr_address);
    ASSERT(Assembler::IsStrRegisterImmediate(str_property_instr));
    str_property_instr = Assembler::SetStrRegisterImmediateOffset(
        str_property_instr, offset - kHeapObjectTag);
    Assembler::instr_at_put(str_property_instr_address, str_property_instr);

    // Patch the offset in the add instruction that is part of the
    // write barrier.
    Address add_offset_instr_address =
        str_property_instr_address + Assembler::kInstrSize;
    Instr add_offset_instr = Assembler::instr_at(add_offset_instr_address);
    ASSERT(Assembler::IsAddRegisterImmediate(add_offset_instr));
    add_offset_instr = Assembler::SetAddRegisterImmediateOffset(
        add_offset_instr, offset - kHeapObjectTag);
    Assembler::instr_at_put(add_offset_instr_address, add_offset_instr);

    // Indicate that code has changed.
    CPU::FlushICache(str_property_instr_address, 2 * Assembler::kInstrSize);
  }

  // Patch the map check.
  Assembler::set_target_address_at(ldr_map_instr_address,
                                   reinterpret_cast<Address>(map));

  return true;
}


bool KeyedLoadIC::PatchInlinedLoad(Address address, Object* map) {
  if (V8::UseCrankshaft()) return false;

  Address inline_end_address = 0;
  if (InlinedICSiteMarker(address, &inline_end_address)
      != Assembler::PROPERTY_ACCESS_INLINED) {
    return false;
  }

  // Patch the map check.
  Address ldr_map_instr_address =
      inline_end_address -
      (CodeGenerator::GetInlinedKeyedLoadInstructionsAfterPatch() *
      Assembler::kInstrSize);
  Assembler::set_target_address_at(ldr_map_instr_address,
                                   reinterpret_cast<Address>(map));
  return true;
}


bool KeyedStoreIC::PatchInlinedStore(Address address, Object* map) {
  if (V8::UseCrankshaft()) return false;

  // Find the end of the inlined code for handling the store if this is an
  // inlined IC call site.
  Address inline_end_address = 0;
  if (InlinedICSiteMarker(address, &inline_end_address)
      != Assembler::PROPERTY_ACCESS_INLINED) {
    return false;
  }

  // Patch the map check.
  Address ldr_map_instr_address =
      inline_end_address -
      (CodeGenerator::kInlinedKeyedStoreInstructionsAfterPatch *
      Assembler::kInstrSize);
  Assembler::set_target_address_at(ldr_map_instr_address,
                                   reinterpret_cast<Address>(map));
  return true;
}


Object* KeyedLoadIC_Miss(Arguments args);


void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
  // ---------- S t a t e --------------
  //  -- lr     : return address
  //  -- r0     : key
  //  -- r1     : receiver
  // -----------------------------------
  Isolate* isolate = masm->isolate();

  __ IncrementCounter(isolate->counters()->keyed_load_miss(), 1, r3, r4);

  __ Push(r1, r0);

  ExternalReference ref =
      ExternalReference(IC_Utility(kKeyedLoadIC_Miss), isolate);
  __ TailCallExternalReference(ref, 2, 1);
}


void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
  // ---------- S t a t e --------------
  //  -- lr     : return address
  //  -- r0     : key
  //  -- r1     : receiver
  // -----------------------------------

  __ Push(r1, r0);

  __ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);
}


void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
  // ---------- S t a t e --------------
  //  -- lr     : return address
  //  -- r0     : key
  //  -- r1     : receiver
  // -----------------------------------
  Label slow, check_string, index_smi, index_string, property_array_property;
  Label probe_dictionary, check_number_dictionary;

  Register key = r0;
  Register receiver = r1;

  Isolate* isolate = masm->isolate();

  // Check that the key is a smi.
  __ JumpIfNotSmi(key, &check_string);
  __ bind(&index_smi);
  // Now the key is known to be a smi. This place is also jumped to from below
  // where a numeric string is converted to a smi.

  GenerateKeyedLoadReceiverCheck(
      masm, receiver, r2, r3, Map::kHasIndexedInterceptor, &slow);

  // Check the "has fast elements" bit in the receiver's map which is
  // now in r2.
  __ ldrb(r3, FieldMemOperand(r2, Map::kBitField2Offset));
  __ tst(r3, Operand(1 << Map::kHasFastElements));
  __ b(eq, &check_number_dictionary);

  GenerateFastArrayLoad(
      masm, receiver, key, r4, r3, r2, r0, NULL, &slow);
  __ IncrementCounter(isolate->counters()->keyed_load_generic_smi(), 1, r2, r3);
  __ Ret();

  __ bind(&check_number_dictionary);
  __ ldr(r4, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ ldr(r3, FieldMemOperand(r4, JSObject::kMapOffset));

  // Check whether the elements object is a number dictionary.
  // r0: key
  // r3: elements map
  // r4: elements
  __ LoadRoot(ip, Heap::kHashTableMapRootIndex);
  __ cmp(r3, ip);
  __ b(ne, &slow);
  __ mov(r2, Operand(r0, ASR, kSmiTagSize));
  GenerateNumberDictionaryLoad(masm, &slow, r4, r0, r0, r2, r3, r5);
  __ Ret();

  // Slow case, key and receiver still in r0 and r1.
  __ bind(&slow);
  __ IncrementCounter(isolate->counters()->keyed_load_generic_slow(),
                      1, r2, r3);
  GenerateRuntimeGetProperty(masm);

  __ bind(&check_string);
  GenerateKeyStringCheck(masm, key, r2, r3, &index_string, &slow);

  GenerateKeyedLoadReceiverCheck(
      masm, receiver, r2, r3, Map::kHasNamedInterceptor, &slow);

  // If the receiver is a fast-case object, check the keyed lookup
  // cache. Otherwise probe the dictionary.
  __ ldr(r3, FieldMemOperand(r1, JSObject::kPropertiesOffset));
  __ ldr(r4, FieldMemOperand(r3, HeapObject::kMapOffset));
  __ LoadRoot(ip, Heap::kHashTableMapRootIndex);
  __ cmp(r4, ip);
  __ b(eq, &probe_dictionary);

  // Load the map of the receiver, compute the keyed lookup cache hash
  // based on 32 bits of the map pointer and the string hash.
  __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
  __ mov(r3, Operand(r2, ASR, KeyedLookupCache::kMapHashShift));
  __ ldr(r4, FieldMemOperand(r0, String::kHashFieldOffset));
  __ eor(r3, r3, Operand(r4, ASR, String::kHashShift));
  __ And(r3, r3, Operand(KeyedLookupCache::kCapacityMask));

  // Load the key (consisting of map and symbol) from the cache and
  // check for match.
  ExternalReference cache_keys =
      ExternalReference::keyed_lookup_cache_keys(isolate);
  __ mov(r4, Operand(cache_keys));
  __ add(r4, r4, Operand(r3, LSL, kPointerSizeLog2 + 1));
  __ ldr(r5, MemOperand(r4, kPointerSize, PostIndex));  // Move r4 to symbol.
  __ cmp(r2, r5);
  __ b(ne, &slow);
  __ ldr(r5, MemOperand(r4));
  __ cmp(r0, r5);
  __ b(ne, &slow);

  // Get field offset.
  // r0     : key
  // r1     : receiver
  // r2     : receiver's map
  // r3     : lookup cache index
  ExternalReference cache_field_offsets =
      ExternalReference::keyed_lookup_cache_field_offsets(isolate);
  __ mov(r4, Operand(cache_field_offsets));
  __ ldr(r5, MemOperand(r4, r3, LSL, kPointerSizeLog2));
  __ ldrb(r6, FieldMemOperand(r2, Map::kInObjectPropertiesOffset));
  __ sub(r5, r5, r6, SetCC);
  __ b(ge, &property_array_property);

  // Load in-object property.
  __ ldrb(r6, FieldMemOperand(r2, Map::kInstanceSizeOffset));
  __ add(r6, r6, r5);  // Index from start of object.
  __ sub(r1, r1, Operand(kHeapObjectTag));  // Remove the heap tag.
  __ ldr(r0, MemOperand(r1, r6, LSL, kPointerSizeLog2));
  __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(),
                      1, r2, r3);
  __ Ret();

  // Load property array property.
  __ bind(&property_array_property);
  __ ldr(r1, FieldMemOperand(r1, JSObject::kPropertiesOffset));
  __ add(r1, r1, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ ldr(r0, MemOperand(r1, r5, LSL, kPointerSizeLog2));
  __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(),
                      1, r2, r3);
  __ Ret();

  // Do a quick inline probe of the receiver's dictionary, if it
  // exists.
  __ bind(&probe_dictionary);
  // r1: receiver
  // r0: key
  // r3: elements
  __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
  __ ldrb(r2, FieldMemOperand(r2, Map::kInstanceTypeOffset));
  GenerateGlobalInstanceTypeCheck(masm, r2, &slow);
  // Load the property to r0.
  GenerateDictionaryLoad(masm, &slow, r3, r0, r0, r2, r4);
  __ IncrementCounter(isolate->counters()->keyed_load_generic_symbol(),
                      1, r2, r3);
  __ Ret();

  __ bind(&index_string);
  __ IndexFromHash(r3, key);
  // Now jump to the place where smi keys are handled.
  __ jmp(&index_smi);
}
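

// Editor's sketch (not part of the original file): the keyed lookup cache
// index computed in GenerateGeneric above combines the receiver map
// address with the symbol's hash field. A hypothetical helper equivalent
// to the mov/eor/And sequence:
static inline uint32_t KeyedLookupCacheIndexSketch(uintptr_t map_address,
                                                   uint32_t hash_field) {
  uint32_t index = (static_cast<uint32_t>(map_address) >>
                    KeyedLookupCache::kMapHashShift) ^
                   (hash_field >> String::kHashShift);
  return index & KeyedLookupCache::kCapacityMask;
}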


void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
  // ---------- S t a t e --------------
  //  -- lr     : return address
  //  -- r0     : key (index)
  //  -- r1     : receiver
  // -----------------------------------
  Label miss;

  Register receiver = r1;
  Register index = r0;
  Register scratch1 = r2;
  Register scratch2 = r3;
  Register result = r0;

  StringCharAtGenerator char_at_generator(receiver,
                                          index,
                                          scratch1,
                                          scratch2,
                                          result,
                                          &miss,  // When not a string.
                                          &miss,  // When not a number.
                                          &miss,  // When index out of range.
                                          STRING_INDEX_IS_ARRAY_INDEX);
  char_at_generator.GenerateFast(masm);
  __ Ret();

  StubRuntimeCallHelper call_helper;
  char_at_generator.GenerateSlow(masm, call_helper);

  __ bind(&miss);
  GenerateMiss(masm);
}


void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
  // ---------- S t a t e --------------
  //  -- lr     : return address
  //  -- r0     : key
  //  -- r1     : receiver
  // -----------------------------------
  Label slow;

  // Check that the receiver isn't a smi.
  __ JumpIfSmi(r1, &slow);

  // Check that the key is an array index, that is, a non-negative smi.
  __ tst(r0, Operand(kSmiTagMask | kSmiSignMask));
  __ b(ne, &slow);

  // Get the map of the receiver.
  __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));

  // Check that it has indexed interceptor and access checks
  // are not enabled for this object.
  __ ldrb(r3, FieldMemOperand(r2, Map::kBitFieldOffset));
  __ and_(r3, r3, Operand(kSlowCaseBitFieldMask));
  __ cmp(r3, Operand(1 << Map::kHasIndexedInterceptor));
  __ b(ne, &slow);

  // Everything is fine, call runtime.
  __ Push(r1, r0);  // Receiver, key.

  // Perform tail call to the entry.
  __ TailCallExternalReference(
      ExternalReference(IC_Utility(kKeyedLoadPropertyWithInterceptor),
                        masm->isolate()),
      2,
      1);

  __ bind(&slow);
  GenerateMiss(masm);
}


void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
  // ---------- S t a t e --------------
  //  -- r0     : value
  //  -- r1     : key
  //  -- r2     : receiver
  //  -- lr     : return address
  // -----------------------------------

  // Push receiver, key and value for runtime call.
  __ Push(r2, r1, r0);

  ExternalReference ref =
      ExternalReference(IC_Utility(kKeyedStoreIC_Miss), masm->isolate());
  __ TailCallExternalReference(ref, 3, 1);
}
1407
1408
1409void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
1410                                              StrictModeFlag strict_mode) {
1411  // ---------- S t a t e --------------
1412  //  -- r0     : value
1413  //  -- r1     : key
1414  //  -- r2     : receiver
1415  //  -- lr     : return address
1416  // -----------------------------------
1417
1418  // Push receiver, key and value for runtime call.
1419  __ Push(r2, r1, r0);
1420
1421  __ mov(r1, Operand(Smi::FromInt(NONE)));          // PropertyAttributes
1422  __ mov(r0, Operand(Smi::FromInt(strict_mode)));   // Strict mode.
1423  __ Push(r1, r0);
1424
  __ TailCallRuntime(Runtime::kSetProperty, 5, 1);
}


void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
                                   StrictModeFlag strict_mode) {
  // ---------- S t a t e --------------
  //  -- r0     : value
  //  -- r1     : key
  //  -- r2     : receiver
  //  -- lr     : return address
  // -----------------------------------
  Label slow, fast, array, extra;

  // Register usage.
  Register value = r0;
  Register key = r1;
  Register receiver = r2;
  Register elements = r3;  // Elements array of the receiver.
  // r4 and r5 are used as general scratch registers.

  // Check that the key is a smi.
  __ tst(key, Operand(kSmiTagMask));
  __ b(ne, &slow);
  // Check that the object isn't a smi.
  __ tst(receiver, Operand(kSmiTagMask));
  __ b(eq, &slow);
  // Get the map of the object.
  __ ldr(r4, FieldMemOperand(receiver, HeapObject::kMapOffset));
  // Check that the receiver does not require access checks.  We need
  // to do this because this generic stub does not perform map checks.
  __ ldrb(ip, FieldMemOperand(r4, Map::kBitFieldOffset));
  __ tst(ip, Operand(1 << Map::kIsAccessCheckNeeded));
  __ b(ne, &slow);
  // Check if the object is a JS array or not.
  __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
  __ cmp(r4, Operand(JS_ARRAY_TYPE));
  __ b(eq, &array);
  // Check that the object is some kind of JS object.
  __ cmp(r4, Operand(FIRST_JS_OBJECT_TYPE));
  __ b(lt, &slow);

  // Object case: Check key against length in the elements array.
  __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  // Check that the object is in fast mode and writable.
  __ ldr(r4, FieldMemOperand(elements, HeapObject::kMapOffset));
  __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
  __ cmp(r4, ip);
  __ b(ne, &slow);
  // Check array bounds. Both the key and the length of FixedArray are smis.
  __ ldr(ip, FieldMemOperand(elements, FixedArray::kLengthOffset));
  __ cmp(key, Operand(ip));
  __ b(lo, &fast);
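  // Note: both operands are tagged smis, so this unsigned comparison
  // orders the untagged values as well; a negative key looks like a very
  // large unsigned value and correctly fails the bounds check.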

  // Slow case: jump to the runtime.
  __ bind(&slow);
  // Entry registers are intact.
  // r0: value.
  // r1: key.
  // r2: receiver.
  GenerateRuntimeSetProperty(masm, strict_mode);

  // Extra capacity case: Check if there is extra capacity to
  // perform the store and update the length. Used for adding one
  // element to the array by writing to array[array.length].
  __ bind(&extra);
  // Condition code from comparing key and array length is still available.
  __ b(ne, &slow);  // Only support writing to array[array.length].
  // Check for room in the elements backing store.
  // Both the key and the length of FixedArray are smis.
  __ ldr(ip, FieldMemOperand(elements, FixedArray::kLengthOffset));
  __ cmp(key, Operand(ip));
  __ b(hs, &slow);
  // Calculate key + 1 as smi.
  ASSERT_EQ(0, kSmiTag);
  __ add(r4, key, Operand(Smi::FromInt(1)));
  __ str(r4, FieldMemOperand(receiver, JSArray::kLengthOffset));
  __ b(&fast);
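  // Note: because kSmiTag == 0 (asserted above), smi addition is plain
  // integer addition on the tagged values; Smi::FromInt(1) has the raw
  // value 2, and adding it to the tagged key yields the tagged key + 1
  // without untagging.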

  // Array case: Get the length and the elements array from the JS
  // array. Check that the array is in fast mode (and writable); if it
  // is, the length is always a smi.
  __ bind(&array);
  __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ ldr(r4, FieldMemOperand(elements, HeapObject::kMapOffset));
  __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
  __ cmp(r4, ip);
  __ b(ne, &slow);

  // Check the key against the length in the array.
  __ ldr(ip, FieldMemOperand(receiver, JSArray::kLengthOffset));
  __ cmp(key, Operand(ip));
  __ b(hs, &extra);
  // Fall through to fast case.

  __ bind(&fast);
  // Fast case, store the value to the elements backing store.
  __ add(r5, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ add(r5, r5, Operand(key, LSL, kPointerSizeLog2 - kSmiTagSize));
  __ str(value, MemOperand(r5));
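  // Worked example (assuming the 32-bit layout where kPointerSizeLog2 == 2
  // and kSmiTagSize == 1): the tagged key for index i is i << 1, so
  // shifting it left by one more bit scales it to the byte offset i * 4,
  // leaving r5 pointing at element i of the backing store.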
  // Skip write barrier if the written value is a smi.
  __ tst(value, Operand(kSmiTagMask));
  __ Ret(eq);
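  // Note: a smi is an immediate rather than a heap pointer, so storing one
  // never creates a reference the GC has to track; the write barrier below
  // is only needed for heap-object values.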
  // Update write barrier for the elements array address.
  __ sub(r4, r5, Operand(elements));
  __ RecordWrite(elements, Operand(r4), r5, r6);

  __ Ret();
}


void StoreIC::GenerateMegamorphic(MacroAssembler* masm,
                                  StrictModeFlag strict_mode) {
  // ----------- S t a t e -------------
  //  -- r0    : value
  //  -- r1    : receiver
  //  -- r2    : name
  //  -- lr    : return address
  // -----------------------------------

  // Probe the stub cache with the receiver in r1 and the name in r2.
  Code::Flags flags = Code::ComputeFlags(Code::STORE_IC,
                                         NOT_IN_LOOP,
                                         MONOMORPHIC,
                                         strict_mode);

  Isolate::Current()->stub_cache()->GenerateProbe(
      masm, flags, r1, r2, r3, r4, r5);
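  // Note: on a hit the probe jumps to the cached stub and does not return
  // here; on a miss it falls through to the miss handler below.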

  // Cache miss: Jump to runtime.
  GenerateMiss(masm);
}


void StoreIC::GenerateMiss(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- r0    : value
  //  -- r1    : receiver
  //  -- r2    : name
  //  -- lr    : return address
  // -----------------------------------

  __ Push(r1, r2, r0);

  // Perform tail call to the entry.
  ExternalReference ref =
      ExternalReference(IC_Utility(kStoreIC_Miss), masm->isolate());
  __ TailCallExternalReference(ref, 3, 1);
}


void StoreIC::GenerateArrayLength(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- r0    : value
  //  -- r1    : receiver
  //  -- r2    : name
  //  -- lr    : return address
  // -----------------------------------
  //
  // This accepts as a receiver anything JSObject::SetElementsLength accepts
  // (currently anything except external and pixel arrays, which means
  // anything with elements of FixedArray type), but is currently restricted
  // to JSArray.
  // The value must be a number; only smis are accepted, as they are the
  // most common case.

  Label miss;

  Register receiver = r1;
  Register value = r0;
  Register scratch = r3;

  // Check that the receiver isn't a smi.
  __ JumpIfSmi(receiver, &miss);

  // Check that the object is a JS array.
  __ CompareObjectType(receiver, scratch, scratch, JS_ARRAY_TYPE);
  __ b(ne, &miss);

  // Check that elements are FixedArray.
  // We rely on StoreIC_ArrayLength below to deal with all types of
  // fast elements (including COW).
  __ ldr(scratch, FieldMemOperand(receiver, JSArray::kElementsOffset));
  __ CompareObjectType(scratch, scratch, scratch, FIXED_ARRAY_TYPE);
  __ b(ne, &miss);

  // Check that value is a smi.
  __ JumpIfNotSmi(value, &miss);

  // Prepare tail call to StoreIC_ArrayLength.
  __ Push(receiver, value);

  ExternalReference ref =
      ExternalReference(IC_Utility(kStoreIC_ArrayLength), masm->isolate());
  __ TailCallExternalReference(ref, 2, 1);

  __ bind(&miss);

  GenerateMiss(masm);
}


void StoreIC::GenerateNormal(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- r0    : value
  //  -- r1    : receiver
  //  -- r2    : name
  //  -- lr    : return address
  // -----------------------------------
  Label miss;

  GenerateStringDictionaryReceiverCheck(masm, r1, r3, r4, r5, &miss);
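  // On fall-through r3 holds the receiver's property dictionary, which the
  // dictionary store below consumes.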

  GenerateDictionaryStore(masm, &miss, r3, r2, r0, r4, r5);
  Counters* counters = masm->isolate()->counters();
  __ IncrementCounter(counters->store_normal_hit(), 1, r4, r5);
  __ Ret();

  __ bind(&miss);
  __ IncrementCounter(counters->store_normal_miss(), 1, r4, r5);
  GenerateMiss(masm);
}


void StoreIC::GenerateGlobalProxy(MacroAssembler* masm,
                                  StrictModeFlag strict_mode) {
  // ----------- S t a t e -------------
  //  -- r0    : value
  //  -- r1    : receiver
  //  -- r2    : name
  //  -- lr    : return address
  // -----------------------------------

  __ Push(r1, r2, r0);

  __ mov(r1, Operand(Smi::FromInt(NONE)));  // PropertyAttributes
  __ mov(r0, Operand(Smi::FromInt(strict_mode)));
  __ Push(r1, r0);

  // Do tail-call to runtime routine.
  __ TailCallRuntime(Runtime::kSetProperty, 5, 1);
}


#undef __


Condition CompareIC::ComputeCondition(Token::Value op) {
  switch (op) {
    case Token::EQ_STRICT:
    case Token::EQ:
      return eq;
    case Token::LT:
      return lt;
    case Token::GT:
      // Reverse left and right operands to obtain ECMA-262 conversion order.
      return lt;
    case Token::LTE:
      // Reverse left and right operands to obtain ECMA-262 conversion order.
      return ge;
    case Token::GTE:
      return ge;
    default:
      UNREACHABLE();
      return kNoCondition;
  }
}
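// Note: for GT and LTE the operands reach the comparison code swapped, so
// "a > b" is evaluated as "b < a" (condition lt) and "a <= b" as "b >= a"
// (condition ge); per the comments above, swapping the operands rather
// than the condition is what preserves the ECMA-262 conversion order.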


void CompareIC::UpdateCaches(Handle<Object> x, Handle<Object> y) {
  HandleScope scope;
  Handle<Code> rewritten;
  State previous_state = GetState();
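  // TargetState picks the next IC state (e.g. SMIS, HEAP_NUMBERS or
  // OBJECTS, falling back to GENERIC) from the previous state and the
  // operand types actually seen.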
  State state = TargetState(previous_state, false, x, y);
  if (state == GENERIC) {
    CompareStub stub(GetCondition(), strict(), NO_COMPARE_FLAGS, r1, r0);
    rewritten = stub.GetCode();
  } else {
    ICCompareStub stub(op_, state);
    rewritten = stub.GetCode();
  }
  set_target(*rewritten);

#ifdef DEBUG
  if (FLAG_trace_ic) {
    PrintF("[CompareIC (%s->%s)#%s]\n",
           GetStateName(previous_state),
           GetStateName(state),
           Token::Name(op_));
  }
#endif

  // Activate inlined smi code.
  if (previous_state == UNINITIALIZED) {
    PatchInlinedSmiCode(address());
  }
}


void PatchInlinedSmiCode(Address address) {
  Address cmp_instruction_address =
      address + Assembler::kCallTargetAddressOffset;

  // If the instruction following the call is not a cmp rx, #yyy, nothing
  // was inlined.
  Instr instr = Assembler::instr_at(cmp_instruction_address);
  if (!Assembler::IsCmpImmediate(instr)) {
    return;
  }

  // Compute the delta to the start of the map check instruction.  The
  // condition code of the branch found there determines how the site is
  // patched below.
  int delta = Assembler::GetCmpImmediateRawImmediate(instr);
  delta += Assembler::GetCmpImmediateRegister(instr).code() * kOff12Mask;
  // If the delta is 0 the instruction is cmp r0, #0 which also signals that
  // nothing was inlined.
  if (delta == 0) {
    return;
  }
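  // Note: a 12-bit immediate alone cannot encode large deltas, so the
  // patch site spreads the value across the immediate and the register
  // fields of the cmp, and the two parts are recombined above.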

#ifdef DEBUG
  if (FLAG_trace_ic) {
    PrintF("[  patching ic at %p, cmp=%p, delta=%d\n",
           address, cmp_instruction_address, delta);
  }
#endif

  Address patch_address =
      cmp_instruction_address - delta * Instruction::kInstrSize;
  Instr instr_at_patch = Assembler::instr_at(patch_address);
  Instr branch_instr =
      Assembler::instr_at(patch_address + Instruction::kInstrSize);
  ASSERT(Assembler::IsCmpRegister(instr_at_patch));
  ASSERT_EQ(Assembler::GetRn(instr_at_patch).code(),
            Assembler::GetRm(instr_at_patch).code());
  ASSERT(Assembler::IsBranch(branch_instr));
  if (Assembler::GetCondition(branch_instr) == eq) {
    // This is patching a "jump if not smi" site to be active.
    // Changing
    //   cmp rx, rx
    //   b eq, <target>
    // to
    //   tst rx, #kSmiTagMask
    //   b ne, <target>
    CodePatcher patcher(patch_address, 2);
    Register reg = Assembler::GetRn(instr_at_patch);
    patcher.masm()->tst(reg, Operand(kSmiTagMask));
    patcher.EmitCondition(ne);
  } else {
    ASSERT(Assembler::GetCondition(branch_instr) == ne);
    // This is patching a "jump if smi" site to be active.
    // Changing
    //   cmp rx, rx
    //   b ne, <target>
    // to
    //   tst rx, #kSmiTagMask
    //   b eq, <target>
    CodePatcher patcher(patch_address, 2);
    Register reg = Assembler::GetRn(instr_at_patch);
    patcher.masm()->tst(reg, Operand(kSmiTagMask));
    patcher.EmitCondition(eq);
  }
}
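// Note: before patching, "cmp rx, rx" always sets the eq flag, so the
// eq-conditioned branch is effectively unconditional and the
// ne-conditioned one is never taken; the patching above is what first
// activates the inlined smi path once the IC has seen smi operands.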


} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_ARM