ic-arm.cc revision 3e5fa29ddb82551500b118e9bf37af3966277b70
1// Copyright 2006-2008 the V8 project authors. All rights reserved.
2// Redistribution and use in source and binary forms, with or without
3// modification, are permitted provided that the following conditions are
4// met:
5//
6//     * Redistributions of source code must retain the above copyright
7//       notice, this list of conditions and the following disclaimer.
8//     * Redistributions in binary form must reproduce the above
9//       copyright notice, this list of conditions and the following
10//       disclaimer in the documentation and/or other materials provided
11//       with the distribution.
12//     * Neither the name of Google Inc. nor the names of its
13//       contributors may be used to endorse or promote products derived
14//       from this software without specific prior written permission.
15//
16// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
28#include "v8.h"
29
30#if defined(V8_TARGET_ARCH_ARM)
31
32#include "assembler-arm.h"
33#include "code-stubs.h"
34#include "codegen-inl.h"
35#include "disasm.h"
36#include "ic-inl.h"
37#include "runtime.h"
38#include "stub-cache.h"
39
40namespace v8 {
41namespace internal {
42
43
44// ----------------------------------------------------------------------------
45// Static IC stub generators.
46//
47
48#define __ ACCESS_MASM(masm)
49
50
51static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm,
52                                            Register type,
53                                            Label* global_object) {
54  // Register usage:
55  //   type: holds the receiver instance type on entry.
56  __ cmp(type, Operand(JS_GLOBAL_OBJECT_TYPE));
57  __ b(eq, global_object);
58  __ cmp(type, Operand(JS_BUILTINS_OBJECT_TYPE));
59  __ b(eq, global_object);
60  __ cmp(type, Operand(JS_GLOBAL_PROXY_TYPE));
61  __ b(eq, global_object);
62}
63
64
65// Generated code falls through if the receiver is a regular non-global
66// JS object with slow properties and no interceptors.
67static void GenerateStringDictionaryReceiverCheck(MacroAssembler* masm,
68                                                  Register receiver,
69                                                  Register elements,
70                                                  Register t0,
71                                                  Register t1,
72                                                  Label* miss) {
73  // Register usage:
74  //   receiver: holds the receiver on entry and is unchanged.
75  //   elements: holds the property dictionary on fall through.
76  // Scratch registers:
77  //   t0: used to hold the receiver map.
78  //   t1: used to hold the receiver instance type, receiver bit mask and
79  //       elements map.
80
81  // Check that the receiver isn't a smi.
82  __ tst(receiver, Operand(kSmiTagMask));
83  __ b(eq, miss);
84
85  // Check that the receiver is a valid JS object.
86  __ CompareObjectType(receiver, t0, t1, FIRST_JS_OBJECT_TYPE);
87  __ b(lt, miss);
88
89  // If this assert fails, we have to check upper bound too.
90  ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
91
92  GenerateGlobalInstanceTypeCheck(masm, t1, miss);
93
94  // Check that the receiver needs no access checks and has no named interceptors.
95  __ ldrb(t1, FieldMemOperand(t0, Map::kBitFieldOffset));
96  __ tst(t1, Operand((1 << Map::kIsAccessCheckNeeded) |
97                     (1 << Map::kHasNamedInterceptor)));
98  __ b(nz, miss);
99
100  __ ldr(elements, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
101  __ ldr(t1, FieldMemOperand(elements, HeapObject::kMapOffset));
102  __ LoadRoot(ip, Heap::kHashTableMapRootIndex);
103  __ cmp(t1, ip);
104  __ b(nz, miss);
105}
106
107
108// Probe the string dictionary in the |elements| register. Jump to the
109// |done| label if a property with the given name is found. Jump to
110// the |miss| label otherwise.
111static void GenerateStringDictionaryProbes(MacroAssembler* masm,
112                                           Label* miss,
113                                           Label* done,
114                                           Register elements,
115                                           Register name,
116                                           Register scratch1,
117                                           Register scratch2) {
118  // Compute the capacity mask.
119  const int kCapacityOffset = StringDictionary::kHeaderSize +
120      StringDictionary::kCapacityIndex * kPointerSize;
121  __ ldr(scratch1, FieldMemOperand(elements, kCapacityOffset));
122  __ mov(scratch1, Operand(scratch1, ASR, kSmiTagSize));  // convert smi to int
123  __ sub(scratch1, scratch1, Operand(1));
124
125  const int kElementsStartOffset = StringDictionary::kHeaderSize +
126      StringDictionary::kElementsStartIndex * kPointerSize;
127
128  // Generate an unrolled loop that performs a few probes before
129  // giving up. Measurements done on Gmail indicate that 2 probes
130  // cover ~93% of loads from dictionaries.
131  static const int kProbes = 4;
132  for (int i = 0; i < kProbes; i++) {
133    // Compute the masked index: (hash + i + i * i) & mask.
134    __ ldr(scratch2, FieldMemOperand(name, String::kHashFieldOffset));
135    if (i > 0) {
136      // Add the probe offset (i + i * i) left shifted to avoid right shifting
137      // the hash in a separate instruction. The value hash + i + i * i is right
138      // shifted in the following 'and' instruction.
139      ASSERT(StringDictionary::GetProbeOffset(i) <
140             1 << (32 - String::kHashFieldOffset));
141      __ add(scratch2, scratch2, Operand(
142          StringDictionary::GetProbeOffset(i) << String::kHashShift));
143    }
144    __ and_(scratch2, scratch1, Operand(scratch2, LSR, String::kHashShift));
145
146    // Scale the index by multiplying by the element size.
147    ASSERT(StringDictionary::kEntrySize == 3);
148    // scratch2 = scratch2 * 3.
149    __ add(scratch2, scratch2, Operand(scratch2, LSL, 1));
150
151    // Check if the key is identical to the name.
152    __ add(scratch2, elements, Operand(scratch2, LSL, 2));
153    __ ldr(ip, FieldMemOperand(scratch2, kElementsStartOffset));
154    __ cmp(name, Operand(ip));
155    if (i != kProbes - 1) {
156      __ b(eq, done);
157    } else {
158      __ b(ne, miss);
159    }
160  }
161}
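// For reference, the probe sequence generated above corresponds roughly to
// the following sketch (illustrative only; key_at() is a hypothetical helper
// standing in for the key load, not a real accessor):
//
//   for (int i = 0; i < kProbes; i++) {
//     int index = (hash + i + i * i) & mask;           // quadratic probing
//     if (key_at(elements, index) == name) goto done;  // hit
//   }
//   goto miss;                                         // all probes failed
//
// Each entry occupies StringDictionary::kEntrySize (== 3) pointers starting
// at kElementsStartOffset, which is why the index is scaled by 3 above.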
162
163
164// Helper function used from LoadIC/CallIC GenerateNormal.
165//
166// elements: Property dictionary. It is not clobbered if a jump to the miss
167//           label is done.
168// name:     Property name. It is not clobbered if a jump to the miss label is
169//           done
170// result:   Register for the result. It is only updated if a jump to the miss
171//           label is not done. Can be the same as elements or name clobbering
172//           one of these in the case of not jumping to the miss label.
173// The two scratch registers need to be different from elements, name and
174// result.
175// The generated code assumes that the receiver has slow properties,
176// is not a global object and does not have interceptors.
177static void GenerateDictionaryLoad(MacroAssembler* masm,
178                                   Label* miss,
179                                   Register elements,
180                                   Register name,
181                                   Register result,
182                                   Register scratch1,
183                                   Register scratch2) {
184  // Main use of the scratch registers.
185  // scratch1: Used as temporary and to hold the capacity of the property
186  //           dictionary.
187  // scratch2: Used as temporary.
188  Label done;
189
190  // Probe the dictionary.
191  GenerateStringDictionaryProbes(masm,
192                                 miss,
193                                 &done,
194                                 elements,
195                                 name,
196                                 scratch1,
197                                 scratch2);
198
199  // If probing finds an entry, check that the value is a normal
200  // property.
201  __ bind(&done);  // scratch2 == elements + 4 * index
202  const int kElementsStartOffset = StringDictionary::kHeaderSize +
203      StringDictionary::kElementsStartIndex * kPointerSize;
204  const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
205  __ ldr(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
206  __ tst(scratch1, Operand(PropertyDetails::TypeField::mask() << kSmiTagSize));
207  __ b(ne, miss);
208
209  // Get the value at the masked, scaled index and return.
210  __ ldr(result,
211         FieldMemOperand(scratch2, kElementsStartOffset + 1 * kPointerSize));
212}
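// The offsets used above rely on the dictionary entry layout
// [key, value, details]: the value sits one pointer after the key and the
// smi-tagged details word two pointers after it. Conceptually (illustrative
// sketch only):
//
//   if ((details & (TypeField::mask() << kSmiTagSize)) != 0) goto miss;
//   result = entry[1];   // the value slot
//
// where 'entry' is the element address left in scratch2 by the probe loop;
// a zero type field is what identifies a normal property.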
213
214
215// Helper function used from StoreIC::GenerateNormal.
216//
217// elements: Property dictionary. It is not clobbered if a jump to the miss
218//           label is done.
219// name:     Property name. It is not clobbered if a jump to the miss label is
220//           done.
221// value:    The value to store.
222// The two scratch registers need to be different from elements, name and
223// value.
224// The generated code assumes that the receiver has slow properties,
225// is not a global object and does not have interceptors.
226static void GenerateDictionaryStore(MacroAssembler* masm,
227                                    Label* miss,
228                                    Register elements,
229                                    Register name,
230                                    Register value,
231                                    Register scratch1,
232                                    Register scratch2) {
233  // Main use of the scratch registers.
234  // scratch1: Used as temporary and to hold the capacity of the property
235  //           dictionary.
236  // scratch2: Used as temporary.
237  Label done;
238
239  // Probe the dictionary.
240  GenerateStringDictionaryProbes(masm,
241                                 miss,
242                                 &done,
243                                 elements,
244                                 name,
245                                 scratch1,
246                                 scratch2);
247
248  // If probing finds an entry in the dictionary, check that the value
249  // is a normal property that is not read-only.
250  __ bind(&done);  // scratch2 == elements + 4 * index
251  const int kElementsStartOffset = StringDictionary::kHeaderSize +
252      StringDictionary::kElementsStartIndex * kPointerSize;
253  const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
254  const int kTypeAndReadOnlyMask
255      = (PropertyDetails::TypeField::mask() |
256         PropertyDetails::AttributesField::encode(READ_ONLY)) << kSmiTagSize;
257  __ ldr(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
258  __ tst(scratch1, Operand(kTypeAndReadOnlyMask));
259  __ b(ne, miss);
260
261  // Store the value at the masked, scaled index and return.
262  const int kValueOffset = kElementsStartOffset + kPointerSize;
263  __ add(scratch2, scratch2, Operand(kValueOffset - kHeapObjectTag));
264  __ str(value, MemOperand(scratch2));
265
266  // Update the write barrier. Make sure not to clobber the value.
267  __ mov(scratch1, value);
268  __ RecordWrite(elements, scratch2, scratch1);
269}
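// The store differs from the load above only in the details check and the
// write barrier; roughly (illustrative sketch only):
//
//   if ((details & kTypeAndReadOnlyMask) != 0) goto miss;  // normal, writable
//   entry[1] = value;                                      // the value slot
//   RecordWrite(elements, slot_address);                   // GC write barrier
//
// The value is copied into scratch1 first because RecordWrite may clobber the
// registers it is given and the caller may still need 'value'.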
270
271
272static void GenerateNumberDictionaryLoad(MacroAssembler* masm,
273                                         Label* miss,
274                                         Register elements,
275                                         Register key,
276                                         Register result,
277                                         Register t0,
278                                         Register t1,
279                                         Register t2) {
280  // Register use:
281  //
282  // elements - holds the slow-case elements of the receiver on entry.
283  //            Unchanged unless 'result' is the same register.
284  //
285  // key      - holds the smi key on entry.
286  //            Unchanged unless 'result' is the same register.
287  //
288  // result   - holds the result on exit if the load succeeded.
289  //            Allowed to be the same as 'elements' or 'key'.
290  //            Unchanged on bailout so 'elements' and 'key' can be used
291  //            in further computation.
292  //
293  // Scratch registers:
294  //
295  // t0 - holds the untagged key on entry and holds the hash once computed.
296  //
297  // t1 - used to hold the capacity mask of the dictionary
298  //
299  // t2 - used for the index into the dictionary.
300  Label done;
301
302  // Compute the hash code from the untagged key.  This must be kept in sync
303  // with ComputeIntegerHash in utils.h.
304  //
305  // hash = ~hash + (hash << 15);
306  __ mvn(t1, Operand(t0));
307  __ add(t0, t1, Operand(t0, LSL, 15));
308  // hash = hash ^ (hash >> 12);
309  __ eor(t0, t0, Operand(t0, LSR, 12));
310  // hash = hash + (hash << 2);
311  __ add(t0, t0, Operand(t0, LSL, 2));
312  // hash = hash ^ (hash >> 4);
313  __ eor(t0, t0, Operand(t0, LSR, 4));
314  // hash = hash * 2057;
315  __ mov(t1, Operand(2057));
316  __ mul(t0, t0, t1);
317  // hash = hash ^ (hash >> 16);
318  __ eor(t0, t0, Operand(t0, LSR, 16));
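  // For reference, the steps above mirror the following C sketch of
  // ComputeIntegerHash (see utils.h; shown here only to make the assembly
  // easier to check against the original):
  //
  //   uint32_t hash = key;
  //   hash = ~hash + (hash << 15);
  //   hash = hash ^ (hash >> 12);
  //   hash = hash + (hash << 2);
  //   hash = hash ^ (hash >> 4);
  //   hash = hash * 2057;
  //   hash = hash ^ (hash >> 16);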
319
320  // Compute the capacity mask.
321  __ ldr(t1, FieldMemOperand(elements, NumberDictionary::kCapacityOffset));
322  __ mov(t1, Operand(t1, ASR, kSmiTagSize));  // convert smi to int
323  __ sub(t1, t1, Operand(1));
324
325  // Generate an unrolled loop that performs a few probes before giving up.
326  static const int kProbes = 4;
327  for (int i = 0; i < kProbes; i++) {
328    // Use t2 for index calculations and keep the hash intact in t0.
329    __ mov(t2, t0);
330    // Compute the masked index: (hash + i + i * i) & mask.
331    if (i > 0) {
332      __ add(t2, t2, Operand(NumberDictionary::GetProbeOffset(i)));
333    }
334    __ and_(t2, t2, Operand(t1));
335
336    // Scale the index by multiplying by the element size.
337    ASSERT(NumberDictionary::kEntrySize == 3);
338    __ add(t2, t2, Operand(t2, LSL, 1));  // t2 = t2 * 3
339
340    // Check if the key is identical to the name.
341    __ add(t2, elements, Operand(t2, LSL, kPointerSizeLog2));
342    __ ldr(ip, FieldMemOperand(t2, NumberDictionary::kElementsStartOffset));
343    __ cmp(key, Operand(ip));
344    if (i != kProbes - 1) {
345      __ b(eq, &done);
346    } else {
347      __ b(ne, miss);
348    }
349  }
350
351  __ bind(&done);
352  // Check that the value is a normal property.
353  // t2: elements + (index * kPointerSize)
354  const int kDetailsOffset =
355      NumberDictionary::kElementsStartOffset + 2 * kPointerSize;
356  __ ldr(t1, FieldMemOperand(t2, kDetailsOffset));
357  __ tst(t1, Operand(Smi::FromInt(PropertyDetails::TypeField::mask())));
358  __ b(ne, miss);
359
360  // Get the value at the masked, scaled index and return.
361  const int kValueOffset =
362      NumberDictionary::kElementsStartOffset + kPointerSize;
363  __ ldr(result, FieldMemOperand(t2, kValueOffset));
364}
365
366
367void LoadIC::GenerateArrayLength(MacroAssembler* masm) {
368  // ----------- S t a t e -------------
369  //  -- r2    : name
370  //  -- lr    : return address
371  //  -- r0    : receiver
372  //  -- sp[0] : receiver
373  // -----------------------------------
374  Label miss;
375
376  StubCompiler::GenerateLoadArrayLength(masm, r0, r3, &miss);
377  __ bind(&miss);
378  StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC);
379}
380
381
382void LoadIC::GenerateStringLength(MacroAssembler* masm) {
383  // ----------- S t a t e -------------
384  //  -- r2    : name
385  //  -- lr    : return address
386  //  -- r0    : receiver
387  //  -- sp[0] : receiver
388  // -----------------------------------
389  Label miss;
390
391  StubCompiler::GenerateLoadStringLength(masm, r0, r1, r3, &miss);
392  // Cache miss: Jump to runtime.
393  __ bind(&miss);
394  StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC);
395}
396
397
398void LoadIC::GenerateFunctionPrototype(MacroAssembler* masm) {
399  // ----------- S t a t e -------------
400  //  -- r2    : name
401  //  -- lr    : return address
402  //  -- r0    : receiver
403  //  -- sp[0] : receiver
404  // -----------------------------------
405  Label miss;
406
407  StubCompiler::GenerateLoadFunctionPrototype(masm, r0, r1, r3, &miss);
408  __ bind(&miss);
409  StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC);
410}
411
412
413// Checks the receiver for special cases (value type, slow case bits).
414// Falls through for regular JS objects.
415static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
416                                           Register receiver,
417                                           Register map,
418                                           Register scratch,
419                                           int interceptor_bit,
420                                           Label* slow) {
421  // Check that the object isn't a smi.
422  __ BranchOnSmi(receiver, slow);
423  // Get the map of the receiver.
424  __ ldr(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
425  // Check bit field.
426  __ ldrb(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
427  __ tst(scratch,
428         Operand((1 << Map::kIsAccessCheckNeeded) | (1 << interceptor_bit)));
429  __ b(nz, slow);
430  // Check that the object is some kind of JS object EXCEPT JS Value type.
431  // In the case that the object is a value-wrapper object,
432  // we enter the runtime system to make sure that indexing into string
433  // objects works as intended.
434  ASSERT(JS_OBJECT_TYPE > JS_VALUE_TYPE);
435  __ ldrb(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
436  __ cmp(scratch, Operand(JS_OBJECT_TYPE));
437  __ b(lt, slow);
438}
439
440
441// Loads an indexed element from a fast case array.
442// If not_fast_array is NULL, doesn't perform the elements map check.
443static void GenerateFastArrayLoad(MacroAssembler* masm,
444                                  Register receiver,
445                                  Register key,
446                                  Register elements,
447                                  Register scratch1,
448                                  Register scratch2,
449                                  Register result,
450                                  Label* not_fast_array,
451                                  Label* out_of_range) {
452  // Register use:
453  //
454  // receiver - holds the receiver on entry.
455  //            Unchanged unless 'result' is the same register.
456  //
457  // key      - holds the smi key on entry.
458  //            Unchanged unless 'result' is the same register.
459  //
460  // elements - holds the elements of the receiver on exit.
461  //
462  // result   - holds the result on exit if the load succeeded.
463  //            Allowed to be the same as 'receiver' or 'key'.
464  //            Unchanged on bailout so 'receiver' and 'key' can be safely
465  //            used by further computation.
466  //
467  // Scratch registers:
468  //
469  // scratch1 - used to hold elements map and elements length.
470  //            Holds the elements map if not_fast_array branch is taken.
471  //
472  // scratch2 - used to hold the loaded value.
473
474  __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
475  if (not_fast_array != NULL) {
476    // Check that the object is in fast mode and writable.
477    __ ldr(scratch1, FieldMemOperand(elements, HeapObject::kMapOffset));
478    __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
479    __ cmp(scratch1, ip);
480    __ b(ne, not_fast_array);
481  } else {
482    __ AssertFastElements(elements);
483  }
484  // Check that the key (index) is within bounds.
485  __ ldr(scratch1, FieldMemOperand(elements, FixedArray::kLengthOffset));
486  __ cmp(key, Operand(scratch1));
487  __ b(hs, out_of_range);
488  // Fast case: Do the load.
489  __ add(scratch1, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
490  // The key is a smi.
491  ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
492  __ ldr(scratch2,
493         MemOperand(scratch1, key, LSL, kPointerSizeLog2 - kSmiTagSize));
494  __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
495  __ cmp(scratch2, ip);
496  // In case the loaded value is the_hole we have to consult GetProperty
497  // to ensure the prototype chain is searched.
498  __ b(eq, out_of_range);
499  __ mov(result, scratch2);
500}
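// Roughly, the code above performs the following (illustrative sketch with
// hypothetical helpers, not the real object API):
//
//   elements = receiver->elements;
//   if (not_fast_array != NULL && map(elements) != fixed_array_map)
//     goto not_fast_array;
//   if ((unsigned) key >= (unsigned) length(elements)) goto out_of_range;
//   value = elements[untag(key)];
//   if (value == the_hole) goto out_of_range;   // consult GetProperty instead
//   result = value;
//
// The key and the length are both smis, so they can be compared directly
// without untagging.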
501
502
503// Checks whether a key is an array index string or a symbol string.
504// Falls through if the key is a symbol.
505static void GenerateKeyStringCheck(MacroAssembler* masm,
506                                   Register key,
507                                   Register map,
508                                   Register hash,
509                                   Label* index_string,
510                                   Label* not_symbol) {
511  // The key is not a smi.
512  // Is it a string?
513  __ CompareObjectType(key, map, hash, FIRST_NONSTRING_TYPE);
514  __ b(ge, not_symbol);
515
516  // Is the string an array index, with cached numeric value?
517  __ ldr(hash, FieldMemOperand(key, String::kHashFieldOffset));
518  __ tst(hash, Operand(String::kContainsCachedArrayIndexMask));
519  __ b(eq, index_string);
520
521  // Is the string a symbol?
522  // map: key map
523  __ ldrb(hash, FieldMemOperand(map, Map::kInstanceTypeOffset));
524  ASSERT(kSymbolTag != 0);
525  __ tst(hash, Operand(kIsSymbolMask));
526  __ b(eq, not_symbol);
527}
528
529
530// Defined in ic.cc.
531Object* CallIC_Miss(Arguments args);
532
533// The generated code does not accept smi keys.
534// The generated code falls through if both probes miss.
535static void GenerateMonomorphicCacheProbe(MacroAssembler* masm,
536                                          int argc,
537                                          Code::Kind kind) {
538  // ----------- S t a t e -------------
539  //  -- r1    : receiver
540  //  -- r2    : name
541  // -----------------------------------
542  Label number, non_number, non_string, boolean, probe, miss;
543
544  // Probe the stub cache.
545  Code::Flags flags =
546      Code::ComputeFlags(kind, NOT_IN_LOOP, MONOMORPHIC, NORMAL, argc);
547  StubCache::GenerateProbe(masm, flags, r1, r2, r3, r4, r5);
548
549  // If the stub cache probing failed, the receiver might be a primitive value.
550  // For value objects, the cache is keyed on the map of the prototype object of
551  // the corresponding JSValue, so load that prototype into r1 and probe the
552  // cache again.
553  //
554  // Check for number.
555  __ tst(r1, Operand(kSmiTagMask));
556  __ b(eq, &number);
557  __ CompareObjectType(r1, r3, r3, HEAP_NUMBER_TYPE);
558  __ b(ne, &non_number);
559  __ bind(&number);
560  StubCompiler::GenerateLoadGlobalFunctionPrototype(
561      masm, Context::NUMBER_FUNCTION_INDEX, r1);
562  __ b(&probe);
563
564  // Check for string.
565  __ bind(&non_number);
566  __ cmp(r3, Operand(FIRST_NONSTRING_TYPE));
567  __ b(hs, &non_string);
568  StubCompiler::GenerateLoadGlobalFunctionPrototype(
569      masm, Context::STRING_FUNCTION_INDEX, r1);
570  __ b(&probe);
571
572  // Check for boolean.
573  __ bind(&non_string);
574  __ LoadRoot(ip, Heap::kTrueValueRootIndex);
575  __ cmp(r1, ip);
576  __ b(eq, &boolean);
577  __ LoadRoot(ip, Heap::kFalseValueRootIndex);
578  __ cmp(r1, ip);
579  __ b(ne, &miss);
580  __ bind(&boolean);
581  StubCompiler::GenerateLoadGlobalFunctionPrototype(
582      masm, Context::BOOLEAN_FUNCTION_INDEX, r1);
583
584  // Probe the stub cache for the value object.
585  __ bind(&probe);
586  StubCache::GenerateProbe(masm, flags, r1, r2, r3, r4, r5);
587
588  __ bind(&miss);
589}
590
591
592static void GenerateFunctionTailCall(MacroAssembler* masm,
593                                     int argc,
594                                     Label* miss,
595                                     Register scratch) {
596  // r1: function
597
598  // Check that the value isn't a smi.
599  __ tst(r1, Operand(kSmiTagMask));
600  __ b(eq, miss);
601
602  // Check that the value is a JSFunction.
603  __ CompareObjectType(r1, scratch, scratch, JS_FUNCTION_TYPE);
604  __ b(ne, miss);
605
606  // Invoke the function.
607  ParameterCount actual(argc);
608  __ InvokeFunction(r1, actual, JUMP_FUNCTION);
609}
610
611
612static void GenerateCallNormal(MacroAssembler* masm, int argc) {
613  // ----------- S t a t e -------------
614  //  -- r2    : name
615  //  -- lr    : return address
616  // -----------------------------------
617  Label miss;
618
619  // Get the receiver of the function from the stack into r1.
620  __ ldr(r1, MemOperand(sp, argc * kPointerSize));
621
622  GenerateStringDictionaryReceiverCheck(masm, r1, r0, r3, r4, &miss);
623
624  // r0: elements
625  // Search the dictionary - put result in register r1.
626  GenerateDictionaryLoad(masm, &miss, r0, r2, r1, r3, r4);
627
628  GenerateFunctionTailCall(masm, argc, &miss, r4);
629
630  __ bind(&miss);
631}
632
633
634static void GenerateCallMiss(MacroAssembler* masm, int argc, IC::UtilityId id) {
635  // ----------- S t a t e -------------
636  //  -- r2    : name
637  //  -- lr    : return address
638  // -----------------------------------
639
640  if (id == IC::kCallIC_Miss) {
641    __ IncrementCounter(&Counters::call_miss, 1, r3, r4);
642  } else {
643    __ IncrementCounter(&Counters::keyed_call_miss, 1, r3, r4);
644  }
645
646  // Get the receiver of the function from the stack.
647  __ ldr(r3, MemOperand(sp, argc * kPointerSize));
648
649  __ EnterInternalFrame();
650
651  // Push the receiver and the name of the function.
652  __ Push(r3, r2);
653
654  // Call the entry.
655  __ mov(r0, Operand(2));
656  __ mov(r1, Operand(ExternalReference(IC_Utility(id))));
657
658  CEntryStub stub(1);
659  __ CallStub(&stub);
660
661  // Move result to r1 and leave the internal frame.
662  __ mov(r1, Operand(r0));
663  __ LeaveInternalFrame();
664
665  // Check if the receiver is a global object of some sort.
666  // This can happen only for regular CallIC but not KeyedCallIC.
667  if (id == IC::kCallIC_Miss) {
668    Label invoke, global;
669    __ ldr(r2, MemOperand(sp, argc * kPointerSize));  // receiver
670    __ tst(r2, Operand(kSmiTagMask));
671    __ b(eq, &invoke);
672    __ CompareObjectType(r2, r3, r3, JS_GLOBAL_OBJECT_TYPE);
673    __ b(eq, &global);
674    __ cmp(r3, Operand(JS_BUILTINS_OBJECT_TYPE));
675    __ b(ne, &invoke);
676
677    // Patch the receiver on the stack.
678    __ bind(&global);
679    __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalReceiverOffset));
680    __ str(r2, MemOperand(sp, argc * kPointerSize));
681    __ bind(&invoke);
682  }
683
684  // Invoke the function.
685  ParameterCount actual(argc);
686  __ InvokeFunction(r1, actual, JUMP_FUNCTION);
687}
688
689
690void CallIC::GenerateMiss(MacroAssembler* masm, int argc) {
691  // ----------- S t a t e -------------
692  //  -- r2    : name
693  //  -- lr    : return address
694  // -----------------------------------
695
696  GenerateCallMiss(masm, argc, IC::kCallIC_Miss);
697}
698
699
700void CallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
701  // ----------- S t a t e -------------
702  //  -- r2    : name
703  //  -- lr    : return address
704  // -----------------------------------
705
706  // Get the receiver of the function from the stack into r1.
707  __ ldr(r1, MemOperand(sp, argc * kPointerSize));
708  GenerateMonomorphicCacheProbe(masm, argc, Code::CALL_IC);
709  GenerateMiss(masm, argc);
710}
711
712
713void CallIC::GenerateNormal(MacroAssembler* masm, int argc) {
714  // ----------- S t a t e -------------
715  //  -- r2    : name
716  //  -- lr    : return address
717  // -----------------------------------
718
719  GenerateCallNormal(masm, argc);
720  GenerateMiss(masm, argc);
721}
722
723
724void KeyedCallIC::GenerateMiss(MacroAssembler* masm, int argc) {
725  // ----------- S t a t e -------------
726  //  -- r2    : name
727  //  -- lr    : return address
728  // -----------------------------------
729
730  GenerateCallMiss(masm, argc, IC::kKeyedCallIC_Miss);
731}
732
733
734void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
735  // ----------- S t a t e -------------
736  //  -- r2    : name
737  //  -- lr    : return address
738  // -----------------------------------
739
740  // Get the receiver of the function from the stack into r1.
741  __ ldr(r1, MemOperand(sp, argc * kPointerSize));
742
743  Label do_call, slow_call, slow_load, slow_reload_receiver;
744  Label check_number_dictionary, check_string, lookup_monomorphic_cache;
745  Label index_smi, index_string;
746
747  // Check that the key is a smi.
748  __ BranchOnNotSmi(r2, &check_string);
749  __ bind(&index_smi);
750  // Now the key is known to be a smi. This point is also reached from below,
751  // where a numeric string key is converted to a smi.
752
753  GenerateKeyedLoadReceiverCheck(
754      masm, r1, r0, r3, Map::kHasIndexedInterceptor, &slow_call);
755
756  GenerateFastArrayLoad(
757      masm, r1, r2, r4, r3, r0, r1, &check_number_dictionary, &slow_load);
758  __ IncrementCounter(&Counters::keyed_call_generic_smi_fast, 1, r0, r3);
759
760  __ bind(&do_call);
761  // receiver in r1 is not used after this point.
762  // r2: key
763  // r1: function
764  GenerateFunctionTailCall(masm, argc, &slow_call, r0);
765
766  __ bind(&check_number_dictionary);
767  // r2: key
768  // r3: elements map
769  // r4: elements
770  // Check whether the elements array is a number dictionary.
771  __ LoadRoot(ip, Heap::kHashTableMapRootIndex);
772  __ cmp(r3, ip);
773  __ b(ne, &slow_load);
774  __ mov(r0, Operand(r2, ASR, kSmiTagSize));
775  // r0: untagged index
776  GenerateNumberDictionaryLoad(masm, &slow_load, r4, r2, r1, r0, r3, r5);
777  __ IncrementCounter(&Counters::keyed_call_generic_smi_dict, 1, r0, r3);
778  __ jmp(&do_call);
779
780  __ bind(&slow_load);
781  // This branch is taken when calling KeyedCallIC_Miss is neither required
782  // nor beneficial.
783  __ IncrementCounter(&Counters::keyed_call_generic_slow_load, 1, r0, r3);
784  __ EnterInternalFrame();
785  __ push(r2);  // save the key
786  __ Push(r1, r2);  // pass the receiver and the key
787  __ CallRuntime(Runtime::kKeyedGetProperty, 2);
788  __ pop(r2);  // restore the key
789  __ LeaveInternalFrame();
790  __ mov(r1, r0);
791  __ jmp(&do_call);
792
793  __ bind(&check_string);
794  GenerateKeyStringCheck(masm, r2, r0, r3, &index_string, &slow_call);
795
796  // The key is known to be a symbol.
797  // If the receiver is a regular JS object with slow properties then do
798  // a quick inline probe of the receiver's dictionary.
799  // Otherwise do the monomorphic cache probe.
800  GenerateKeyedLoadReceiverCheck(
801      masm, r1, r0, r3, Map::kHasNamedInterceptor, &lookup_monomorphic_cache);
802
803  __ ldr(r0, FieldMemOperand(r1, JSObject::kPropertiesOffset));
804  __ ldr(r3, FieldMemOperand(r0, HeapObject::kMapOffset));
805  __ LoadRoot(ip, Heap::kHashTableMapRootIndex);
806  __ cmp(r3, ip);
807  __ b(ne, &lookup_monomorphic_cache);
808
809  GenerateDictionaryLoad(masm, &slow_load, r0, r2, r1, r3, r4);
810  __ IncrementCounter(&Counters::keyed_call_generic_lookup_dict, 1, r0, r3);
811  __ jmp(&do_call);
812
813  __ bind(&lookup_monomorphic_cache);
814  __ IncrementCounter(&Counters::keyed_call_generic_lookup_cache, 1, r0, r3);
815  GenerateMonomorphicCacheProbe(masm, argc, Code::KEYED_CALL_IC);
816  // Fall through on miss.
817
818  __ bind(&slow_call);
819  // This branch is taken if:
820  // - the receiver requires boxing or access check,
821  // - the key is neither smi nor symbol,
822  // - the value loaded is not a function,
823  // - there is hope that the runtime will create a monomorphic call stub
824  //   that will get fetched next time.
825  __ IncrementCounter(&Counters::keyed_call_generic_slow, 1, r0, r3);
826  GenerateMiss(masm, argc);
827
828  __ bind(&index_string);
829  __ IndexFromHash(r3, r2);
830  // Now jump to the place where smi keys are handled.
831  __ jmp(&index_smi);
832}
833
834
835void KeyedCallIC::GenerateNormal(MacroAssembler* masm, int argc) {
836  // ----------- S t a t e -------------
837  //  -- r2    : name
838  //  -- lr    : return address
839  // -----------------------------------
840
841  GenerateCallNormal(masm, argc);
842  GenerateMiss(masm, argc);
843}
844
845
846// Defined in ic.cc.
847Object* LoadIC_Miss(Arguments args);
848
849void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
850  // ----------- S t a t e -------------
851  //  -- r2    : name
852  //  -- lr    : return address
853  //  -- r0    : receiver
854  //  -- sp[0] : receiver
855  // -----------------------------------
856
857  // Probe the stub cache.
858  Code::Flags flags = Code::ComputeFlags(Code::LOAD_IC,
859                                         NOT_IN_LOOP,
860                                         MONOMORPHIC);
861  StubCache::GenerateProbe(masm, flags, r0, r2, r3, r4, r5);
862
863  // Cache miss: Jump to runtime.
864  GenerateMiss(masm);
865}
866
867
868void LoadIC::GenerateNormal(MacroAssembler* masm) {
869  // ----------- S t a t e -------------
870  //  -- r2    : name
871  //  -- lr    : return address
872  //  -- r0    : receiver
873  //  -- sp[0] : receiver
874  // -----------------------------------
875  Label miss;
876
877  GenerateStringDictionaryReceiverCheck(masm, r0, r1, r3, r4, &miss);
878
879  // r1: elements
880  GenerateDictionaryLoad(masm, &miss, r1, r2, r0, r3, r4);
881  __ Ret();
882
883  // Cache miss: Jump to runtime.
884  __ bind(&miss);
885  GenerateMiss(masm);
886}
887
888
889void LoadIC::GenerateMiss(MacroAssembler* masm) {
890  // ----------- S t a t e -------------
891  //  -- r2    : name
892  //  -- lr    : return address
893  //  -- r0    : receiver
894  //  -- sp[0] : receiver
895  // -----------------------------------
896
897  __ IncrementCounter(&Counters::load_miss, 1, r3, r4);
898
899  __ mov(r3, r0);
900  __ Push(r3, r2);
901
902  // Perform tail call to the entry.
903  ExternalReference ref = ExternalReference(IC_Utility(kLoadIC_Miss));
904  __ TailCallExternalReference(ref, 2, 1);
905}
906
907
908static inline bool IsInlinedICSite(Address address,
909                                   Address* inline_end_address) {
910  // If the instruction after the call site is not the pseudo instruction nop1
911  // then this is not related to an inlined in-object property load. The nop1
912  // instruction is located just after the call to the IC in the deferred code
913  // handling the miss in the inlined code. After the nop1 instruction there is
914  // a branch instruction for jumping back from the deferred code.
915  Address address_after_call = address + Assembler::kCallTargetAddressOffset;
916  Instr instr_after_call = Assembler::instr_at(address_after_call);
917  if (!Assembler::IsNop(instr_after_call, PROPERTY_ACCESS_INLINED)) {
918    return false;
919  }
920  Address address_after_nop = address_after_call + Assembler::kInstrSize;
921  Instr instr_after_nop = Assembler::instr_at(address_after_nop);
922  // There may be some reg-reg move and frame merging code to skip over before
923  // the branch back from the DeferredReferenceGetKeyedValue code to the inlined
924  // code.
925  while (!Assembler::IsBranch(instr_after_nop)) {
926    address_after_nop += Assembler::kInstrSize;
927    instr_after_nop = Assembler::instr_at(address_after_nop);
928  }
929
930  // Find the end of the inlined code for handling the load.
931  int b_offset =
932      Assembler::GetBranchOffset(instr_after_nop) + Assembler::kPcLoadDelta;
933  ASSERT(b_offset < 0);  // Jumping back from deferred code.
934  *inline_end_address = address_after_nop + b_offset;
935
936  return true;
937}
938
939
940bool LoadIC::PatchInlinedLoad(Address address, Object* map, int offset) {
941  // Find the end of the inlined code for handling the load if this is an
942  // inlined IC call site.
943  Address inline_end_address;
944  if (!IsInlinedICSite(address, &inline_end_address)) return false;
945
946  // Patch the offset of the property load instruction (ldr r0, [r1, #+XXX]).
947  // The immediate must be representable in 12 bits.
948  ASSERT((JSObject::kMaxInstanceSize - JSObject::kHeaderSize) < (1 << 12));
949  Address ldr_property_instr_address =
950      inline_end_address - Assembler::kInstrSize;
951  ASSERT(Assembler::IsLdrRegisterImmediate(
952      Assembler::instr_at(ldr_property_instr_address)));
953  Instr ldr_property_instr = Assembler::instr_at(ldr_property_instr_address);
954  ldr_property_instr = Assembler::SetLdrRegisterImmediateOffset(
955      ldr_property_instr, offset - kHeapObjectTag);
956  Assembler::instr_at_put(ldr_property_instr_address, ldr_property_instr);
957
958  // Indicate that code has changed.
959  CPU::FlushICache(ldr_property_instr_address, 1 * Assembler::kInstrSize);
960
961  // Patch the map check.
962  Address ldr_map_instr_address =
963      inline_end_address - 4 * Assembler::kInstrSize;
964  Assembler::set_target_address_at(ldr_map_instr_address,
965                                   reinterpret_cast<Address>(map));
966  return true;
967}
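// For orientation, the code patched above has roughly the following shape
// (illustrative; the instruction distances are the constants used above):
//
//   inlined code:
//     <load of the map constant>    ; inline_end - 4 instructions, patched
//                                   ;   via Assembler::set_target_address_at
//     ...
//     ldr r0, [r1, #+offset]        ; inline_end - 1 instruction, offset
//                                   ;   patched above
//   inline_end_address:
//     ...
//   deferred miss handler:
//     <call to the IC>              ; 'address' points at this call site
//     nop                           ; PROPERTY_ACCESS_INLINED marker
//     [reg moves / frame merging]
//     b <inline_end_address>        ; the branch located by IsInlinedICSite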
968
969
970bool LoadIC::PatchInlinedContextualLoad(Address address,
971                                        Object* map,
972                                        Object* cell,
973                                        bool is_dont_delete) {
974  // TODO(<bug#>): implement this.
975  return false;
976}
977
978
979bool StoreIC::PatchInlinedStore(Address address, Object* map, int offset) {
980  // Find the end of the inlined code for the store if there is an
981  // inlined version of the store.
982  Address inline_end_address;
983  if (!IsInlinedICSite(address, &inline_end_address)) return false;
984
985  // Compute the address of the map load instruction.
986  Address ldr_map_instr_address =
987      inline_end_address -
988      (CodeGenerator::GetInlinedNamedStoreInstructionsAfterPatch() *
989       Assembler::kInstrSize);
990
991  // Update the offsets if initializing the inlined store. No reason
992  // to update the offsets when clearing the inlined version because
993  // it will bail out in the map check.
994  if (map != Heap::null_value()) {
995    // Patch the offset in the actual store instruction.
996    Address str_property_instr_address =
997        ldr_map_instr_address + 3 * Assembler::kInstrSize;
998    Instr str_property_instr = Assembler::instr_at(str_property_instr_address);
999    ASSERT(Assembler::IsStrRegisterImmediate(str_property_instr));
1000    str_property_instr = Assembler::SetStrRegisterImmediateOffset(
1001        str_property_instr, offset - kHeapObjectTag);
1002    Assembler::instr_at_put(str_property_instr_address, str_property_instr);
1003
1004    // Patch the offset in the add instruction that is part of the
1005    // write barrier.
1006    Address add_offset_instr_address =
1007        str_property_instr_address + Assembler::kInstrSize;
1008    Instr add_offset_instr = Assembler::instr_at(add_offset_instr_address);
1009    ASSERT(Assembler::IsAddRegisterImmediate(add_offset_instr));
1010    add_offset_instr = Assembler::SetAddRegisterImmediateOffset(
1011        add_offset_instr, offset - kHeapObjectTag);
1012    Assembler::instr_at_put(add_offset_instr_address, add_offset_instr);
1013
1014    // Indicate that code has changed.
1015    CPU::FlushICache(str_property_instr_address, 2 * Assembler::kInstrSize);
1016  }
1017
1018  // Patch the map check.
1019  Assembler::set_target_address_at(ldr_map_instr_address,
1020                                   reinterpret_cast<Address>(map));
1021
1022  return true;
1023}
1024
1025
1026bool KeyedLoadIC::PatchInlinedLoad(Address address, Object* map) {
1027  Address inline_end_address;
1028  if (!IsInlinedICSite(address, &inline_end_address)) return false;
1029
1030  // Patch the map check.
1031  Address ldr_map_instr_address =
1032      inline_end_address -
1033      (CodeGenerator::GetInlinedKeyedLoadInstructionsAfterPatch() *
1034      Assembler::kInstrSize);
1035  Assembler::set_target_address_at(ldr_map_instr_address,
1036                                   reinterpret_cast<Address>(map));
1037  return true;
1038}
1039
1040
1041bool KeyedStoreIC::PatchInlinedStore(Address address, Object* map) {
1042  // Find the end of the inlined code for handling the store if this is an
1043  // inlined IC call site.
1044  Address inline_end_address;
1045  if (!IsInlinedICSite(address, &inline_end_address)) return false;
1046
1047  // Patch the map check.
1048  Address ldr_map_instr_address =
1049      inline_end_address -
1050      (CodeGenerator::kInlinedKeyedStoreInstructionsAfterPatch *
1051      Assembler::kInstrSize);
1052  Assembler::set_target_address_at(ldr_map_instr_address,
1053                                   reinterpret_cast<Address>(map));
1054  return true;
1055}
1056
1057
1058Object* KeyedLoadIC_Miss(Arguments args);
1059
1060
1061void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
1062  // ---------- S t a t e --------------
1063  //  -- lr     : return address
1064  //  -- r0     : key
1065  //  -- r1     : receiver
1066  // -----------------------------------
1067
1068  __ IncrementCounter(&Counters::keyed_load_miss, 1, r3, r4);
1069
1070  __ Push(r1, r0);
1071
1072  ExternalReference ref = ExternalReference(IC_Utility(kKeyedLoadIC_Miss));
1073  __ TailCallExternalReference(ref, 2, 1);
1074}
1075
1076
1077void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
1078  // ---------- S t a t e --------------
1079  //  -- lr     : return address
1080  //  -- r0     : key
1081  //  -- r1     : receiver
1082  // -----------------------------------
1083
1084  __ Push(r1, r0);
1085
1086  __ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);
1087}
1088
1089
1090void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
1091  // ---------- S t a t e --------------
1092  //  -- lr     : return address
1093  //  -- r0     : key
1094  //  -- r1     : receiver
1095  // -----------------------------------
1096  Label slow, check_string, index_smi, index_string, property_array_property;
1097  Label check_pixel_array, probe_dictionary, check_number_dictionary;
1098
1099  Register key = r0;
1100  Register receiver = r1;
1101
1102  // Check that the key is a smi.
1103  __ BranchOnNotSmi(key, &check_string);
1104  __ bind(&index_smi);
1105  // Now the key is known to be a smi. This point is also reached from below,
1106  // where a numeric string key is converted to a smi.
1107
1108  GenerateKeyedLoadReceiverCheck(
1109      masm, receiver, r2, r3, Map::kHasIndexedInterceptor, &slow);
1110
1111  // Check the "has fast elements" bit in the receiver's map, which is
1112  // now in r2.
1113  __ ldrb(r3, FieldMemOperand(r2, Map::kBitField2Offset));
1114  __ tst(r3, Operand(1 << Map::kHasFastElements));
1115  __ b(eq, &check_pixel_array);
1116
1117  GenerateFastArrayLoad(
1118      masm, receiver, key, r4, r3, r2, r0, NULL, &slow);
1119  __ IncrementCounter(&Counters::keyed_load_generic_smi, 1, r2, r3);
1120  __ Ret();
1121
1122  // Check whether the elements array is a pixel array.
1123  // r0: key
1124  // r1: receiver
1125  __ bind(&check_pixel_array);
1126  __ ldr(r4, FieldMemOperand(r1, JSObject::kElementsOffset));
1127  __ ldr(r3, FieldMemOperand(r4, HeapObject::kMapOffset));
1128  __ LoadRoot(ip, Heap::kPixelArrayMapRootIndex);
1129  __ cmp(r3, ip);
1130  __ b(ne, &check_number_dictionary);
1131  __ ldr(ip, FieldMemOperand(r4, PixelArray::kLengthOffset));
1132  __ mov(r2, Operand(key, ASR, kSmiTagSize));
1133  __ cmp(r2, ip);
1134  __ b(hs, &slow);
1135  __ ldr(ip, FieldMemOperand(r4, PixelArray::kExternalPointerOffset));
1136  __ ldrb(r2, MemOperand(ip, r2));
1137  __ mov(r0, Operand(r2, LSL, kSmiTagSize));  // Tag result as smi.
1138  __ Ret();
1139
1140  __ bind(&check_number_dictionary);
1141  // Check whether the elements array is a number dictionary.
1142  // r0: key
1143  // r3: elements map
1144  // r4: elements
1145  __ LoadRoot(ip, Heap::kHashTableMapRootIndex);
1146  __ cmp(r3, ip);
1147  __ b(ne, &slow);
1148  __ mov(r2, Operand(r0, ASR, kSmiTagSize));
1149  GenerateNumberDictionaryLoad(masm, &slow, r4, r0, r0, r2, r3, r5);
1150  __ Ret();
1151
1152  // Slow case, key and receiver still in r0 and r1.
1153  __ bind(&slow);
1154  __ IncrementCounter(&Counters::keyed_load_generic_slow, 1, r2, r3);
1155  GenerateRuntimeGetProperty(masm);
1156
1157  __ bind(&check_string);
1158  GenerateKeyStringCheck(masm, key, r2, r3, &index_string, &slow);
1159
1160  GenerateKeyedLoadReceiverCheck(
1161      masm, receiver, r2, r3, Map::kHasNamedInterceptor, &slow);
1162
1163  // If the receiver is a fast-case object, check the keyed lookup
1164  // cache. Otherwise probe the dictionary.
1165  __ ldr(r3, FieldMemOperand(r1, JSObject::kPropertiesOffset));
1166  __ ldr(r4, FieldMemOperand(r3, HeapObject::kMapOffset));
1167  __ LoadRoot(ip, Heap::kHashTableMapRootIndex);
1168  __ cmp(r4, ip);
1169  __ b(eq, &probe_dictionary);
1170
1171  // Load the map of the receiver, compute the keyed lookup cache hash
1172  // based on 32 bits of the map pointer and the string hash.
1173  __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
1174  __ mov(r3, Operand(r2, ASR, KeyedLookupCache::kMapHashShift));
1175  __ ldr(r4, FieldMemOperand(r0, String::kHashFieldOffset));
1176  __ eor(r3, r3, Operand(r4, ASR, String::kHashShift));
1177  __ And(r3, r3, Operand(KeyedLookupCache::kCapacityMask));
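  // The KeyedLookupCache maps (receiver map, symbol) pairs to field offsets.
  // The index just computed is, conceptually (illustrative sketch):
  //
  //   index = (((uint32_t) map >> kMapHashShift) ^
  //            (hash_field >> String::kHashShift)) & kCapacityMask;
  //
  // cache_keys[2 * index] holds the map, cache_keys[2 * index + 1] the
  // symbol, and cache_field_offsets[index] the corresponding field offset.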
1178
1179  // Load the key (consisting of map and symbol) from the cache and
1180  // check for match.
1181  ExternalReference cache_keys = ExternalReference::keyed_lookup_cache_keys();
1182  __ mov(r4, Operand(cache_keys));
1183  __ add(r4, r4, Operand(r3, LSL, kPointerSizeLog2 + 1));
1184  __ ldr(r5, MemOperand(r4, kPointerSize, PostIndex));  // Move r4 to symbol.
1185  __ cmp(r2, r5);
1186  __ b(ne, &slow);
1187  __ ldr(r5, MemOperand(r4));
1188  __ cmp(r0, r5);
1189  __ b(ne, &slow);
1190
1191  // Get field offset.
1192  // r0     : key
1193  // r1     : receiver
1194  // r2     : receiver's map
1195  // r3     : lookup cache index
1196  ExternalReference cache_field_offsets
1197      = ExternalReference::keyed_lookup_cache_field_offsets();
1198  __ mov(r4, Operand(cache_field_offsets));
1199  __ ldr(r5, MemOperand(r4, r3, LSL, kPointerSizeLog2));
1200  __ ldrb(r6, FieldMemOperand(r2, Map::kInObjectPropertiesOffset));
1201  __ sub(r5, r5, r6, SetCC);
1202  __ b(ge, &property_array_property);
1203
1204  // Load in-object property.
1205  __ ldrb(r6, FieldMemOperand(r2, Map::kInstanceSizeOffset));
1206  __ add(r6, r6, r5);  // Index from start of object.
1207  __ sub(r1, r1, Operand(kHeapObjectTag));  // Remove the heap tag.
1208  __ ldr(r0, MemOperand(r1, r6, LSL, kPointerSizeLog2));
1209  __ IncrementCounter(&Counters::keyed_load_generic_lookup_cache, 1, r2, r3);
1210  __ Ret();
1211
1212  // Load property array property.
1213  __ bind(&property_array_property);
1214  __ ldr(r1, FieldMemOperand(r1, JSObject::kPropertiesOffset));
1215  __ add(r1, r1, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
1216  __ ldr(r0, MemOperand(r1, r5, LSL, kPointerSizeLog2));
1217  __ IncrementCounter(&Counters::keyed_load_generic_lookup_cache, 1, r2, r3);
1218  __ Ret();
1219
1220  // Do a quick inline probe of the receiver's dictionary, if it
1221  // exists.
1222  __ bind(&probe_dictionary);
1223  // r1: receiver
1224  // r0: key
1225  // r3: elements
1226  __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
1227  __ ldrb(r2, FieldMemOperand(r2, Map::kInstanceTypeOffset));
1228  GenerateGlobalInstanceTypeCheck(masm, r2, &slow);
1229  // Load the property to r0.
1230  GenerateDictionaryLoad(masm, &slow, r3, r0, r0, r2, r4);
1231  __ IncrementCounter(&Counters::keyed_load_generic_symbol, 1, r2, r3);
1232  __ Ret();
1233
1234  __ bind(&index_string);
1235  __ IndexFromHash(r3, key);
1236  // Now jump to the place where smi keys are handled.
1237  __ jmp(&index_smi);
1238}
1239
1240
1241void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
1242  // ---------- S t a t e --------------
1243  //  -- lr     : return address
1244  //  -- r0     : key (index)
1245  //  -- r1     : receiver
1246  // -----------------------------------
1247  Label miss;
1248
1249  Register receiver = r1;
1250  Register index = r0;
1251  Register scratch1 = r2;
1252  Register scratch2 = r3;
1253  Register result = r0;
1254
1255  StringCharAtGenerator char_at_generator(receiver,
1256                                          index,
1257                                          scratch1,
1258                                          scratch2,
1259                                          result,
1260                                          &miss,  // When not a string.
1261                                          &miss,  // When not a number.
1262                                          &miss,  // When index out of range.
1263                                          STRING_INDEX_IS_ARRAY_INDEX);
1264  char_at_generator.GenerateFast(masm);
1265  __ Ret();
1266
1267  ICRuntimeCallHelper call_helper;
1268  char_at_generator.GenerateSlow(masm, call_helper);
1269
1270  __ bind(&miss);
1271  GenerateMiss(masm);
1272}
1273
1274
1275// Convert an unsigned integer with the specified number of leading zeroes in
1276// its binary representation to an IEEE 754 double.
1277// The integer to convert is passed in register hiword.
1278// The resulting double is returned in registers hiword:loword.
1279// This function does not work correctly for 0.
1280static void GenerateUInt2Double(MacroAssembler* masm,
1281                                Register hiword,
1282                                Register loword,
1283                                Register scratch,
1284                                int leading_zeroes) {
1285  const int meaningful_bits = kBitsPerInt - leading_zeroes - 1;
1286  const int biased_exponent = HeapNumber::kExponentBias + meaningful_bits;
1287
1288  const int mantissa_shift_for_hi_word =
1289      meaningful_bits - HeapNumber::kMantissaBitsInTopWord;
1290
1291  const int mantissa_shift_for_lo_word =
1292      kBitsPerInt - mantissa_shift_for_hi_word;
1293
1294  __ mov(scratch, Operand(biased_exponent << HeapNumber::kExponentShift));
1295  if (mantissa_shift_for_hi_word > 0) {
1296    __ mov(loword, Operand(hiword, LSL, mantissa_shift_for_lo_word));
1297    __ orr(hiword, scratch, Operand(hiword, LSR, mantissa_shift_for_hi_word));
1298  } else {
1299    __ mov(loword, Operand(0, RelocInfo::NONE));
1300    __ orr(hiword, scratch, Operand(hiword, LSL, mantissa_shift_for_hi_word));
1301  }
1302
1303  // If the least significant bit of the biased exponent was not 1, it has been
1304  // corrupted by the most significant bit of the mantissa, so fix that here.
1305  if (!(biased_exponent & 1)) {
1306    __ bic(hiword, hiword, Operand(1 << HeapNumber::kExponentShift));
1307  }
1308}
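// A sketch of the conversion above: an integer with 'leading_zeroes' leading
// zero bits lies in [2^m, 2^(m+1)) with m == meaningful_bits, so
//
//   biased_exponent = HeapNumber::kExponentBias + meaningful_bits
//   hiword          = (biased_exponent << kExponentShift) | <top mantissa bits>
//   loword          = <remaining mantissa bits, or 0>
//
// The integer's leading 1 bit (the double's implicit bit) lands on the least
// significant exponent bit during the 'orr'; when the biased exponent is even
// that bit must be cleared again, which is what the final 'bic' does.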
1309
1310
1311void KeyedLoadIC::GenerateExternalArray(MacroAssembler* masm,
1312                                        ExternalArrayType array_type) {
1313  // ---------- S t a t e --------------
1314  //  -- lr     : return address
1315  //  -- r0     : key
1316  //  -- r1     : receiver
1317  // -----------------------------------
1318  Label slow, failed_allocation;
1319
1320  Register key = r0;
1321  Register receiver = r1;
1322
1323  // Check that the object isn't a smi
1324  __ BranchOnSmi(receiver, &slow);
1325
1326  // Check that the key is a smi.
1327  __ BranchOnNotSmi(key, &slow);
1328
1329  // Check that the object is a JS object. Load map into r2.
1330  __ CompareObjectType(receiver, r2, r3, FIRST_JS_OBJECT_TYPE);
1331  __ b(lt, &slow);
1332
1333  // Check that the receiver does not require access checks.  We need
1334  // to check this explicitly since this generic stub does not perform
1335  // map checks.
1336  __ ldrb(r3, FieldMemOperand(r2, Map::kBitFieldOffset));
1337  __ tst(r3, Operand(1 << Map::kIsAccessCheckNeeded));
1338  __ b(ne, &slow);
1339
1340  // Check that the elements array is the appropriate type of
1341  // ExternalArray.
1342  __ ldr(r3, FieldMemOperand(receiver, JSObject::kElementsOffset));
1343  __ ldr(r2, FieldMemOperand(r3, HeapObject::kMapOffset));
1344  __ LoadRoot(ip, Heap::RootIndexForExternalArrayType(array_type));
1345  __ cmp(r2, ip);
1346  __ b(ne, &slow);
1347
1348  // Check that the index is in range.
1349  __ ldr(ip, FieldMemOperand(r3, ExternalArray::kLengthOffset));
1350  __ cmp(ip, Operand(key, ASR, kSmiTagSize));
1351  // Unsigned comparison catches both negative and too-large values.
1352  __ b(lo, &slow);
1353
1354  // r3: elements array
1355  __ ldr(r3, FieldMemOperand(r3, ExternalArray::kExternalPointerOffset));
1356  // r3: base pointer of external storage
1357
1358  // We do not untag the smi key; instead we work with it
1359  // as if it were premultiplied by 2.
1360  ASSERT((kSmiTag == 0) && (kSmiTagSize == 1));
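  // Because the key is still a tagged smi (== index * 2, per the ASSERT
  // above), the addressing modes below fold the untagging into the scaling:
  // LSR 1 gives the index for 1-byte elements, LSL 0 keeps index * 2 for
  // 2-byte elements, and LSL 1 gives index * 4 for 4-byte elements.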
1361
1362  Register value = r2;
1363  switch (array_type) {
1364    case kExternalByteArray:
1365      __ ldrsb(value, MemOperand(r3, key, LSR, 1));
1366      break;
1367    case kExternalUnsignedByteArray:
1368      __ ldrb(value, MemOperand(r3, key, LSR, 1));
1369      break;
1370    case kExternalShortArray:
1371      __ ldrsh(value, MemOperand(r3, key, LSL, 0));
1372      break;
1373    case kExternalUnsignedShortArray:
1374      __ ldrh(value, MemOperand(r3, key, LSL, 0));
1375      break;
1376    case kExternalIntArray:
1377    case kExternalUnsignedIntArray:
1378      __ ldr(value, MemOperand(r3, key, LSL, 1));
1379      break;
1380    case kExternalFloatArray:
1381      if (CpuFeatures::IsSupported(VFP3)) {
1382        CpuFeatures::Scope scope(VFP3);
1383        __ add(r2, r3, Operand(key, LSL, 1));
1384        __ vldr(s0, r2, 0);
1385      } else {
1386        __ ldr(value, MemOperand(r3, key, LSL, 1));
1387      }
1388      break;
1389    default:
1390      UNREACHABLE();
1391      break;
1392  }
1393
1394  // For integer array types:
1395  // r2: value
1396  // For floating-point array type
1397  // s0: value (if VFP3 is supported)
1398  // r2: value (if VFP3 is not supported)
1399
1400  if (array_type == kExternalIntArray) {
1401    // For the Int array type, we need to see whether the value can be
1402    // represented in a Smi. If not, we need to convert it to a HeapNumber
1403    // (unsigned ints are handled in the next branch).
1404    Label box_int;
1405    __ cmp(value, Operand(0xC0000000));
1406    __ b(mi, &box_int);
1407    // Tag integer as smi and return it.
1408    __ mov(r0, Operand(value, LSL, kSmiTagSize));
1409    __ Ret();
1410
1411    __ bind(&box_int);
1412    // Allocate a HeapNumber for the result and perform int-to-double
1413    // conversion.  Don't touch r0 or r1 as they are needed if allocation
1414    // fails.
1415    __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
1416    __ AllocateHeapNumber(r5, r3, r4, r6, &slow);
1417    // Now we can use r0 for the result as key is not needed any more.
1418    __ mov(r0, r5);
1419
1420    if (CpuFeatures::IsSupported(VFP3)) {
1421      CpuFeatures::Scope scope(VFP3);
1422      __ vmov(s0, value);
1423      __ vcvt_f64_s32(d0, s0);
1424      __ sub(r3, r0, Operand(kHeapObjectTag));
1425      __ vstr(d0, r3, HeapNumber::kValueOffset);
1426      __ Ret();
1427    } else {
1428      WriteInt32ToHeapNumberStub stub(value, r0, r3);
1429      __ TailCallStub(&stub);
1430    }
1431  } else if (array_type == kExternalUnsignedIntArray) {
1432    // The test is different for unsigned int values. Since we need
1433    // the value to be in the range of a positive smi, we can't
1434    // handle either of the top two bits being set in the value.
1435    if (CpuFeatures::IsSupported(VFP3)) {
1436      CpuFeatures::Scope scope(VFP3);
1437      Label box_int, done;
1438      __ tst(value, Operand(0xC0000000));
1439      __ b(ne, &box_int);
1440      // Tag integer as smi and return it.
1441      __ mov(r0, Operand(value, LSL, kSmiTagSize));
1442      __ Ret();
1443
1444      __ bind(&box_int);
1445      __ vmov(s0, value);
1446      // Allocate a HeapNumber for the result and perform int-to-double
1447      // conversion. Don't use r0 and r1 as AllocateHeapNumber clobbers all
1448      // registers - also when jumping due to exhausted young space.
1449      __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
1450      __ AllocateHeapNumber(r2, r3, r4, r6, &slow);
1451
1452      __ vcvt_f64_u32(d0, s0);
1453      __ sub(r1, r2, Operand(kHeapObjectTag));
1454      __ vstr(d0, r1, HeapNumber::kValueOffset);
1455
1456      __ mov(r0, r2);
1457      __ Ret();
1458    } else {
1459      // Check whether unsigned integer fits into smi.
1460      Label box_int_0, box_int_1, done;
1461      __ tst(value, Operand(0x80000000));
1462      __ b(ne, &box_int_0);
1463      __ tst(value, Operand(0x40000000));
1464      __ b(ne, &box_int_1);
1465      // Tag integer as smi and return it.
1466      __ mov(r0, Operand(value, LSL, kSmiTagSize));
1467      __ Ret();
1468
1469      Register hiword = value;  // r2.
1470      Register loword = r3;
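      // GenerateUInt2Double (defined earlier in this file) builds the raw
      // IEEE 754 double bits for the unsigned value in hiword:loword; its
      // last argument is the number of leading zero bits of the input.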
1471
1472      __ bind(&box_int_0);
1473      // Integer does not have leading zeros.
1474      GenerateUInt2Double(masm, hiword, loword, r4, 0);
1475      __ b(&done);
1476
1477      __ bind(&box_int_1);
1478      // Integer has one leading zero.
1479      GenerateUInt2Double(masm, hiword, loword, r4, 1);
1480
1481
1482      __ bind(&done);
1483      // Integer was converted to double in registers hiword:loword.
1484      // Wrap it into a HeapNumber. Don't use r0 and r1 as AllocateHeapNumber
1485      // clobbers all registers - also when jumping due to exhausted young
1486      // space.
1487      __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
1488      __ AllocateHeapNumber(r4, r5, r7, r6, &slow);
1489
1490      __ str(hiword, FieldMemOperand(r4, HeapNumber::kExponentOffset));
1491      __ str(loword, FieldMemOperand(r4, HeapNumber::kMantissaOffset));
1492
1493      __ mov(r0, r4);
1494      __ Ret();
1495    }
1496  } else if (array_type == kExternalFloatArray) {
1497    // For the floating-point array type, we need to always allocate a
1498    // HeapNumber.
1499    if (CpuFeatures::IsSupported(VFP3)) {
1500      CpuFeatures::Scope scope(VFP3);
1501      // Allocate a HeapNumber for the result. Don't use r0 and r1 as
1502      // AllocateHeapNumber clobbers all registers - also when jumping due to
1503      // exhausted young space.
1504      __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
1505      __ AllocateHeapNumber(r2, r3, r4, r6, &slow);
1506      __ vcvt_f64_f32(d0, s0);
1507      __ sub(r1, r2, Operand(kHeapObjectTag));
1508      __ vstr(d0, r1, HeapNumber::kValueOffset);
1509
1510      __ mov(r0, r2);
1511      __ Ret();
1512    } else {
1513      // Allocate a HeapNumber for the result. Don't use r0 and r1 as
1514      // AllocateHeapNumber clobbers all registers - also when jumping due to
1515      // exhausted young space.
1516      __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
1517      __ AllocateHeapNumber(r3, r4, r5, r6, &slow);
1518      // VFP3 is not available; do a manual single-to-double conversion.
1519
1520      // r2: floating point value (binary32)
1521      // r3: heap number for result
1522
1523      // Extract mantissa to r0. OK to clobber r0 now as there are no jumps to
1524      // the slow case from here.
1525      __ and_(r0, value, Operand(kBinary32MantissaMask));
1526
1527      // Extract exponent to r1. OK to clobber r1 now as there are no jumps to
1528      // the slow case from here.
1529      __ mov(r1, Operand(value, LSR, kBinary32MantissaBits));
1530      __ and_(r1, r1, Operand(kBinary32ExponentMask >> kBinary32MantissaBits));
1531
1532      Label exponent_rebiased;
1533      __ teq(r1, Operand(0x00));
1534      __ b(eq, &exponent_rebiased);
1535
1536      __ teq(r1, Operand(0xff));
1537      __ mov(r1, Operand(0x7ff), LeaveCC, eq);
1538      __ b(eq, &exponent_rebiased);
1539
1540      // Rebias exponent.
1541      __ add(r1,
1542             r1,
1543             Operand(-kBinary32ExponentBias + HeapNumber::kExponentBias));
1544
1545      __ bind(&exponent_rebiased);
1546      __ and_(r2, value, Operand(kBinary32SignMask));
1547      value = no_reg;
1548      __ orr(r2, r2, Operand(r1, LSL, HeapNumber::kMantissaBitsInTopWord));
1549
1550      // Shift mantissa.
1551      static const int kMantissaShiftForHiWord =
1552          kBinary32MantissaBits - HeapNumber::kMantissaBitsInTopWord;
1553
1554      static const int kMantissaShiftForLoWord =
1555          kBitsPerInt - kMantissaShiftForHiWord;
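      // The binary32 mantissa has 23 bits but the high word of a double only
      // holds 20 mantissa bits, so the top 20 bits go into the high word and
      // the remaining 3 land at the top of the low word.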
1556
1557      __ orr(r2, r2, Operand(r0, LSR, kMantissaShiftForHiWord));
1558      __ mov(r0, Operand(r0, LSL, kMantissaShiftForLoWord));
1559
1560      __ str(r2, FieldMemOperand(r3, HeapNumber::kExponentOffset));
1561      __ str(r0, FieldMemOperand(r3, HeapNumber::kMantissaOffset));
1562
1563      __ mov(r0, r3);
1564      __ Ret();
1565    }
1566
1567  } else {
1568    // Tag integer as smi and return it.
1569    __ mov(r0, Operand(value, LSL, kSmiTagSize));
1570    __ Ret();
1571  }
1572
1573  // Slow case, key and receiver still in r0 and r1.
1574  __ bind(&slow);
1575  __ IncrementCounter(&Counters::keyed_load_external_array_slow, 1, r2, r3);
1576  GenerateRuntimeGetProperty(masm);
1577}
1578
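// The non-VFP3 path above widens a binary32 bit pattern into a binary64 bit
// pattern purely with integer operations. A minimal standalone C++ sketch of
// the same steps (illustrative only and not part of V8; the function name is
// made up and <stdint.h> is assumed):
//
//   uint64_t Binary32ToBinary64Bits(uint32_t f) {
//     uint64_t sign = static_cast<uint64_t>(f & 0x80000000u) << 32;
//     uint32_t exponent = (f >> 23) & 0xFF;
//     uint64_t mantissa = static_cast<uint64_t>(f & 0x007FFFFFu) << (52 - 23);
//     uint64_t rebiased;
//     if (exponent == 0) rebiased = 0;                // zeros and denormals
//     else if (exponent == 0xFF) rebiased = 0x7FF;    // NaN or Infinity
//     else rebiased = exponent - 127 + 1023;          // rebias the exponent
//     return sign | (rebiased << 52) | mantissa;
//   }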
1579
1580void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
1581  // ---------- S t a t e --------------
1582  //  -- lr     : return address
1583  //  -- r0     : key
1584  //  -- r1     : receiver
1585  // -----------------------------------
1586  Label slow;
1587
1588  // Check that the receiver isn't a smi.
1589  __ BranchOnSmi(r1, &slow);
1590
1591  // Check that the key is an array index, that is, a Uint32.
1592  __ tst(r0, Operand(kSmiTagMask | kSmiSignMask));
1593  __ b(ne, &slow);
1594
1595  // Get the map of the receiver.
1596  __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
1597
1598  // Check that it has an indexed interceptor and that access checks
1599  // are not enabled for this object.
1600  __ ldrb(r3, FieldMemOperand(r2, Map::kBitFieldOffset));
1601  __ and_(r3, r3, Operand(kSlowCaseBitFieldMask));
1602  __ cmp(r3, Operand(1 << Map::kHasIndexedInterceptor));
1603  __ b(ne, &slow);
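  // kSlowCaseBitFieldMask covers both the interceptor bit and the access
  // check bit, so masking with it and comparing against the interceptor bit
  // alone verifies in a single compare that the interceptor bit is set and
  // the access check bit is clear.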
1604
1605  // Everything is fine, call runtime.
1606  __ Push(r1, r0);  // Receiver, key.
1607
1608  // Perform tail call to the entry.
1609  __ TailCallExternalReference(ExternalReference(
1610        IC_Utility(kKeyedLoadPropertyWithInterceptor)), 2, 1);
1611
1612  __ bind(&slow);
1613  GenerateMiss(masm);
1614}
1615
1616
1617void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
1618  // ---------- S t a t e --------------
1619  //  -- r0     : value
1620  //  -- r1     : key
1621  //  -- r2     : receiver
1622  //  -- lr     : return address
1623  // -----------------------------------
1624
1625  // Push receiver, key and value for runtime call.
1626  __ Push(r2, r1, r0);
1627
1628  ExternalReference ref = ExternalReference(IC_Utility(kKeyedStoreIC_Miss));
1629  __ TailCallExternalReference(ref, 3, 1);
1630}
1631
1632
1633void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm) {
1634  // ---------- S t a t e --------------
1635  //  -- r0     : value
1636  //  -- r1     : key
1637  //  -- r2     : receiver
1638  //  -- lr     : return address
1639  // -----------------------------------
1640
1641  // Push receiver, key and value for runtime call.
1642  __ Push(r2, r1, r0);
1643
1644  __ TailCallRuntime(Runtime::kSetProperty, 3, 1);
1645}
1646
1647
1648void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
1649  // ---------- S t a t e --------------
1650  //  -- r0     : value
1651  //  -- r1     : key
1652  //  -- r2     : receiver
1653  //  -- lr     : return address
1654  // -----------------------------------
1655  Label slow, fast, array, extra, check_pixel_array;
1656
1657  // Register usage.
1658  Register value = r0;
1659  Register key = r1;
1660  Register receiver = r2;
1661  Register elements = r3;  // Elements array of the receiver.
1662  // r4 and r5 are used as general scratch registers.
1663
1664  // Check that the key is a smi.
1665  __ tst(key, Operand(kSmiTagMask));
1666  __ b(ne, &slow);
1667  // Check that the object isn't a smi.
1668  __ tst(receiver, Operand(kSmiTagMask));
1669  __ b(eq, &slow);
1670  // Get the map of the object.
1671  __ ldr(r4, FieldMemOperand(receiver, HeapObject::kMapOffset));
1672  // Check that the receiver does not require access checks.  We need
1673  // to do this because this generic stub does not perform map checks.
1674  __ ldrb(ip, FieldMemOperand(r4, Map::kBitFieldOffset));
1675  __ tst(ip, Operand(1 << Map::kIsAccessCheckNeeded));
1676  __ b(ne, &slow);
1677  // Check if the object is a JS array or not.
1678  __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
1679  __ cmp(r4, Operand(JS_ARRAY_TYPE));
1680  __ b(eq, &array);
1681  // Check that the object is some kind of JS object.
1682  __ cmp(r4, Operand(FIRST_JS_OBJECT_TYPE));
1683  __ b(lt, &slow);
1684
1685  // Object case: Check key against length in the elements array.
1686  __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
1687  // Check that the object is in fast mode and writable.
1688  __ ldr(r4, FieldMemOperand(elements, HeapObject::kMapOffset));
1689  __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
1690  __ cmp(r4, ip);
1691  __ b(ne, &check_pixel_array);
1692  // Check array bounds. Both the key and the length of FixedArray are smis.
1693  __ ldr(ip, FieldMemOperand(elements, FixedArray::kLengthOffset));
1694  __ cmp(key, Operand(ip));
1695  __ b(lo, &fast);
1696
1697  // Slow case: jump to the runtime.
1698  __ bind(&slow);
1699  // Entry registers are intact.
1700  // r0: value.
1701  // r1: key.
1702  // r2: receiver.
1703  GenerateRuntimeSetProperty(masm);
1704
1705  // Check whether the elements array is a pixel array.
1706  // r4: elements map.
1707  __ bind(&check_pixel_array);
1708  __ LoadRoot(ip, Heap::kPixelArrayMapRootIndex);
1709  __ cmp(r4, ip);
1710  __ b(ne, &slow);
1711  // Check that the value is a smi. If a conversion is needed, call into
1712  // the runtime to convert and clamp.
1713  __ BranchOnNotSmi(value, &slow);
1714  __ mov(r4, Operand(key, ASR, kSmiTagSize));  // Untag the key.
1715  __ ldr(ip, FieldMemOperand(elements, PixelArray::kLengthOffset));
1716  __ cmp(r4, Operand(ip));
1717  __ b(hs, &slow);
1718  __ mov(r5, Operand(value, ASR, kSmiTagSize));  // Untag the value.
1719  __ Usat(r5, 8, Operand(r5));  // Clamp the value to [0..255].
1720
1721  // Get the pointer to the external array. This clobbers elements.
1722  __ ldr(elements,
1723         FieldMemOperand(elements, PixelArray::kExternalPointerOffset));
1724  __ strb(r5, MemOperand(elements, r4));  // elements holds external pointer.
1725  __ Ret();
1726
1727  // Extra capacity case: Check if there is extra capacity to
1728  // perform the store and update the length. Used for adding one
1729  // element to the array by writing to array[array.length].
1730  __ bind(&extra);
1731  // Condition code from comparing key and array length is still available.
1732  __ b(ne, &slow);  // Only support writing to array[array.length].
1733  // Check for room in the elements backing store.
1734  // Both the key and the length of FixedArray are smis.
1735  __ ldr(ip, FieldMemOperand(elements, FixedArray::kLengthOffset));
1736  __ cmp(key, Operand(ip));
1737  __ b(hs, &slow);
1738  // Calculate key + 1 as smi.
1739  ASSERT_EQ(0, kSmiTag);
1740  __ add(r4, key, Operand(Smi::FromInt(1)));
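  // A smi is the integer shifted left by one, so adding the smi constant
  // Smi::FromInt(1) to the smi key yields the smi for key + 1 without
  // untagging.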
1741  __ str(r4, FieldMemOperand(receiver, JSArray::kLengthOffset));
1742  __ b(&fast);
1743
1744  // Array case: Get the length and the elements array from the JS
1745  // array. Check that the array is in fast mode (and writable); if it
1746  // is, the length is always a smi.
1747  __ bind(&array);
1748  __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
1749  __ ldr(r4, FieldMemOperand(elements, HeapObject::kMapOffset));
1750  __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
1751  __ cmp(r4, ip);
1752  __ b(ne, &slow);
1753
1754  // Check the key against the length in the array.
1755  __ ldr(ip, FieldMemOperand(receiver, JSArray::kLengthOffset));
1756  __ cmp(key, Operand(ip));
1757  __ b(hs, &extra);
1758  // Fall through to fast case.
1759
1760  __ bind(&fast);
1761  // Fast case, store the value to the elements backing store.
1762  __ add(r5, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
1763  __ add(r5, r5, Operand(key, LSL, kPointerSizeLog2 - kSmiTagSize));
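  // Shifting the smi key left by kPointerSizeLog2 - kSmiTagSize (one bit on
  // ARM) turns index * 2 directly into the byte offset index * kPointerSize.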
1764  __ str(value, MemOperand(r5));
1765  // Skip write barrier if the written value is a smi.
1766  __ tst(value, Operand(kSmiTagMask));
1767  __ Ret(eq);
1768  // Update write barrier for the elements array address.
1769  __ sub(r4, r5, Operand(elements));
1770  __ RecordWrite(elements, Operand(r4), r5, r6);
1771
1772  __ Ret();
1773}
1774
1775
1776// Convert the int passed in register ival to an IEEE 754 single precision
1777// floating point value and store it at memory location (dst + 4 * wordoffset).
1778// If VFP3 is available, use it for the conversion.
1779static void StoreIntAsFloat(MacroAssembler* masm,
1780                            Register dst,
1781                            Register wordoffset,
1782                            Register ival,
1783                            Register fval,
1784                            Register scratch1,
1785                            Register scratch2) {
1786  if (CpuFeatures::IsSupported(VFP3)) {
1787    CpuFeatures::Scope scope(VFP3);
1788    __ vmov(s0, ival);
1789    __ add(scratch1, dst, Operand(wordoffset, LSL, 2));
1790    __ vcvt_f32_s32(s0, s0);
1791    __ vstr(s0, scratch1, 0);
1792  } else {
1793    Label not_special, done;
1794    // Move the sign bit from the source to the destination.  This works
1795    // because the sign bit of a binary32 value has the same position and
1796    // polarity as the 2's complement sign bit of a 32-bit integer.
1797    ASSERT(kBinary32SignMask == 0x80000000u);
1798
1799    __ and_(fval, ival, Operand(kBinary32SignMask), SetCC);
1800    // Negate value if it is negative.
1801    __ rsb(ival, ival, Operand(0, RelocInfo::NONE), LeaveCC, ne);
1802
1803    // We have -1, 0 or 1, which we treat specially. Register ival contains
1804    // absolute value: it is either equal to 1 (special case of -1 and 1),
1805    // greater than 1 (not a special case) or less than 1 (special case of 0).
1806    __ cmp(ival, Operand(1));
1807    __ b(gt, &not_special);
1808
1809    // For 1 or -1 we need to or in the 0 exponent (biased).
1810    static const uint32_t exponent_word_for_1 =
1811        kBinary32ExponentBias << kBinary32ExponentShift;
1812
1813    __ orr(fval, fval, Operand(exponent_word_for_1), LeaveCC, eq);
1814    __ b(&done);
1815
1816    __ bind(&not_special);
1817    // Count leading zeros.
1818    // Gets the wrong answer for 0, but we already checked for that case above.
1819    Register zeros = scratch2;
1820    __ CountLeadingZeros(zeros, ival, scratch1);
1821
1822    // Compute exponent and or it into the exponent register.
1823    __ rsb(scratch1,
1824           zeros,
1825           Operand((kBitsPerInt - 1) + kBinary32ExponentBias));
1826
1827    __ orr(fval,
1828           fval,
1829           Operand(scratch1, LSL, kBinary32ExponentShift));
1830
1831    // Shift up the source chopping the top bit off.
1832    __ add(zeros, zeros, Operand(1));
1833    // This wouldn't work for 1 and -1 as the shift would be 32 which means 0.
1834    __ mov(ival, Operand(ival, LSL, zeros));
1835    // Or in the mantissa (the top kBinary32MantissaBits bits of ival).
1836    __ orr(fval,
1837           fval,
1838           Operand(ival, LSR, kBitsPerInt - kBinary32MantissaBits));
1839
1840    __ bind(&done);
1841    __ str(fval, MemOperand(dst, wordoffset, LSL, 2));
1842  }
1843}
1844
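// Without VFP3, StoreIntAsFloat assembles the binary32 bit pattern by hand.
// A minimal standalone C++ sketch of that path (illustrative only and not
// part of V8; the function name is made up, <stdint.h> is assumed and
// __builtin_clz is the GCC/Clang builtin). Like the generated code it
// truncates the mantissa rather than rounding:
//
//   uint32_t Int32ToBinary32Bits(int32_t x) {
//     uint32_t ux = static_cast<uint32_t>(x);
//     uint32_t sign = ux & 0x80000000u;
//     uint32_t mag = sign ? (0u - ux) : ux;           // absolute value
//     if (mag == 0) return sign;                      // +/-0.0f
//     if (mag == 1) return sign | (127u << 23);       // +/-1.0f
//     int zeros = __builtin_clz(mag);                 // mag >= 2 here
//     uint32_t exponent = (31 - zeros) + 127;         // biased exponent
//     uint32_t mantissa = (mag << (zeros + 1)) >> (32 - 23);  // truncates
//     return sign | (exponent << 23) | mantissa;
//   }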
1845
1846static bool IsElementTypeSigned(ExternalArrayType array_type) {
1847  switch (array_type) {
1848    case kExternalByteArray:
1849    case kExternalShortArray:
1850    case kExternalIntArray:
1851      return true;
1852
1853    case kExternalUnsignedByteArray:
1854    case kExternalUnsignedShortArray:
1855    case kExternalUnsignedIntArray:
1856      return false;
1857
1858    default:
1859      UNREACHABLE();
1860      return false;
1861  }
1862}
1863
1864
1865void KeyedStoreIC::GenerateExternalArray(MacroAssembler* masm,
1866                                         ExternalArrayType array_type) {
1867  // ---------- S t a t e --------------
1868  //  -- r0     : value
1869  //  -- r1     : key
1870  //  -- r2     : receiver
1871  //  -- lr     : return address
1872  // -----------------------------------
1873  Label slow, check_heap_number;
1874
1875  // Register usage.
1876  Register value = r0;
1877  Register key = r1;
1878  Register receiver = r2;
1879  // r3 mostly holds the elements array or the destination external array.
1880
1881  // Check that the object isn't a smi.
1882  __ BranchOnSmi(receiver, &slow);
1883
1884  // Check that the object is a JS object. Load map into r3.
1885  __ CompareObjectType(receiver, r3, r4, FIRST_JS_OBJECT_TYPE);
1886  __ b(le, &slow);
1887
1888  // Check that the receiver does not require access checks.  We need
1889  // to do this because this generic stub does not perform map checks.
1890  __ ldrb(ip, FieldMemOperand(r3, Map::kBitFieldOffset));
1891  __ tst(ip, Operand(1 << Map::kIsAccessCheckNeeded));
1892  __ b(ne, &slow);
1893
1894  // Check that the key is a smi.
1895  __ BranchOnNotSmi(key, &slow);
1896
1897  // Check that the elements array is the appropriate type of ExternalArray.
1898  __ ldr(r3, FieldMemOperand(receiver, JSObject::kElementsOffset));
1899  __ ldr(r4, FieldMemOperand(r3, HeapObject::kMapOffset));
1900  __ LoadRoot(ip, Heap::RootIndexForExternalArrayType(array_type));
1901  __ cmp(r4, ip);
1902  __ b(ne, &slow);
1903
1904  // Check that the index is in range.
1905  __ mov(r4, Operand(key, ASR, kSmiTagSize));  // Untag the index.
1906  __ ldr(ip, FieldMemOperand(r3, ExternalArray::kLengthOffset));
1907  __ cmp(r4, ip);
1908  // Unsigned comparison catches both negative and too-large values.
1909  __ b(hs, &slow);
1910
1911  // Handle both smis and HeapNumbers in the fast path. Go to the
1912  // runtime for all other kinds of values.
1913  // r3: external array.
1914  // r4: key (integer).
1915  __ BranchOnNotSmi(value, &check_heap_number);
1916  __ mov(r5, Operand(value, ASR, kSmiTagSize));  // Untag the value.
1917  __ ldr(r3, FieldMemOperand(r3, ExternalArray::kExternalPointerOffset));
1918
1919  // r3: base pointer of external storage.
1920  // r4: key (integer).
1921  // r5: value (integer).
1922  switch (array_type) {
1923    case kExternalByteArray:
1924    case kExternalUnsignedByteArray:
1925      __ strb(r5, MemOperand(r3, r4, LSL, 0));
1926      break;
1927    case kExternalShortArray:
1928    case kExternalUnsignedShortArray:
1929      __ strh(r5, MemOperand(r3, r4, LSL, 1));
1930      break;
1931    case kExternalIntArray:
1932    case kExternalUnsignedIntArray:
1933      __ str(r5, MemOperand(r3, r4, LSL, 2));
1934      break;
1935    case kExternalFloatArray:
1936      // Perform int-to-float conversion and store to memory.
1937      StoreIntAsFloat(masm, r3, r4, r5, r6, r7, r9);
1938      break;
1939    default:
1940      UNREACHABLE();
1941      break;
1942  }
1943
1944  // Entry registers are intact, r0 holds the value which is the return value.
1945  __ Ret();
1946
1947
1948  // r3: external array.
1949  // r4: index (integer).
1950  __ bind(&check_heap_number);
1951  __ CompareObjectType(value, r5, r6, HEAP_NUMBER_TYPE);
1952  __ b(ne, &slow);
1953
1954  __ ldr(r3, FieldMemOperand(r3, ExternalArray::kExternalPointerOffset));
1955
1956  // r3: base pointer of external storage.
1957  // r4: key (integer).
1958
1959  // The WebGL specification leaves the behavior of storing NaN and
1960  // +/-Infinity into integer arrays basically undefined. For more
1961  // reproducible behavior, convert these to zero.
1962  if (CpuFeatures::IsSupported(VFP3)) {
1963    CpuFeatures::Scope scope(VFP3);
1964
1965
1966    if (array_type == kExternalFloatArray) {
1967      // vldr requires the offset to be a multiple of 4, so we cannot
1968      // fold -kHeapObjectTag into it.
1969      __ sub(r5, r0, Operand(kHeapObjectTag));
1970      __ vldr(d0, r5, HeapNumber::kValueOffset);
1971      __ add(r5, r3, Operand(r4, LSL, 2));
1972      __ vcvt_f32_f64(s0, d0);
1973      __ vstr(s0, r5, 0);
1974    } else {
1975      // Need to perform float-to-int conversion.
1976      // Test for NaN or infinity (both give zero).
1977      __ ldr(r6, FieldMemOperand(r5, HeapNumber::kExponentOffset));
1978
1979      // Hoisted load.  vldr requires the offset to be a multiple of 4, so
1980      // we cannot fold -kHeapObjectTag into it.
1981      __ sub(r5, r0, Operand(kHeapObjectTag));
1982      __ vldr(d0, r5, HeapNumber::kValueOffset);
1983
1984      __ Sbfx(r6, r6, HeapNumber::kExponentShift, HeapNumber::kExponentBits);
1985      // NaNs and Infinities have all-one exponents so they sign extend to -1.
1986      __ cmp(r6, Operand(-1));
1987      __ mov(r5, Operand(Smi::FromInt(0)), LeaveCC, eq);
1988
1989      // Not infinity or NaN, so simply convert to int.
1990      if (IsElementTypeSigned(array_type)) {
1991        __ vcvt_s32_f64(s0, d0, ne);
1992      } else {
1993        __ vcvt_u32_f64(s0, d0, ne);
1994      }
1995      __ vmov(r5, s0, ne);
1996
1997      switch (array_type) {
1998        case kExternalByteArray:
1999        case kExternalUnsignedByteArray:
2000          __ strb(r5, MemOperand(r3, r4, LSL, 0));
2001          break;
2002        case kExternalShortArray:
2003        case kExternalUnsignedShortArray:
2004          __ strh(r5, MemOperand(r3, r4, LSL, 1));
2005          break;
2006        case kExternalIntArray:
2007        case kExternalUnsignedIntArray:
2008          __ str(r5, MemOperand(r3, r4, LSL, 2));
2009          break;
2010        default:
2011          UNREACHABLE();
2012          break;
2013      }
2014    }
2015
2016    // Entry registers are intact, r0 holds the value which is the return value.
2017    __ Ret();
2018  } else {
2019    // VFP3 is not available, do manual conversions.
2020    __ ldr(r5, FieldMemOperand(value, HeapNumber::kExponentOffset));
2021    __ ldr(r6, FieldMemOperand(value, HeapNumber::kMantissaOffset));
2022
2023    if (array_type == kExternalFloatArray) {
2024      Label done, nan_or_infinity_or_zero;
2025      static const int kMantissaInHiWordShift =
2026          kBinary32MantissaBits - HeapNumber::kMantissaBitsInTopWord;
2027
2028      static const int kMantissaInLoWordShift =
2029          kBitsPerInt - kMantissaInHiWordShift;
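      // This mirrors the widening done in the load stub: the double keeps its
      // top 20 mantissa bits in the exponent word, so 3 more bits have to be
      // taken from the top of the mantissa word to form the 23-bit binary32
      // mantissa (the rest is truncated).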
2030
2031      // Test for all special exponent values: zeros, subnormal numbers, NaNs
2032      // and infinities. All these should be converted to 0.
2033      __ mov(r7, Operand(HeapNumber::kExponentMask));
2034      __ and_(r9, r5, Operand(r7), SetCC);
2035      __ b(eq, &nan_or_infinity_or_zero);
2036
2037      __ teq(r9, Operand(r7));
2038      __ mov(r9, Operand(kBinary32ExponentMask), LeaveCC, eq);
2039      __ b(eq, &nan_or_infinity_or_zero);
2040
2041      // Rebias exponent.
2042      __ mov(r9, Operand(r9, LSR, HeapNumber::kExponentShift));
2043      __ add(r9,
2044             r9,
2045             Operand(kBinary32ExponentBias - HeapNumber::kExponentBias));
2046
2047      __ cmp(r9, Operand(kBinary32MaxExponent));
2048      __ and_(r5, r5, Operand(HeapNumber::kSignMask), LeaveCC, gt);
2049      __ orr(r5, r5, Operand(kBinary32ExponentMask), LeaveCC, gt);
2050      __ b(gt, &done);
2051
2052      __ cmp(r9, Operand(kBinary32MinExponent));
2053      __ and_(r5, r5, Operand(HeapNumber::kSignMask), LeaveCC, lt);
2054      __ b(lt, &done);
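      // Rebiased exponents above kBinary32MaxExponent overflow to
      // +/-Infinity (sign plus an all-ones exponent); exponents below
      // kBinary32MinExponent flush to +/-0.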
2055
2056      __ and_(r7, r5, Operand(HeapNumber::kSignMask));
2057      __ and_(r5, r5, Operand(HeapNumber::kMantissaMask));
2058      __ orr(r7, r7, Operand(r5, LSL, kMantissaInHiWordShift));
2059      __ orr(r7, r7, Operand(r6, LSR, kMantissaInLoWordShift));
2060      __ orr(r5, r7, Operand(r9, LSL, kBinary32ExponentShift));
2061
2062      __ bind(&done);
2063      __ str(r5, MemOperand(r3, r4, LSL, 2));
2064      // Entry registers are intact, r0 holds the value which is the return
2065      // value.
2066      __ Ret();
2067
2068      __ bind(&nan_or_infinity_or_zero);
2069      __ and_(r7, r5, Operand(HeapNumber::kSignMask));
2070      __ and_(r5, r5, Operand(HeapNumber::kMantissaMask));
2071      __ orr(r9, r9, r7);
2072      __ orr(r9, r9, Operand(r5, LSL, kMantissaInHiWordShift));
2073      __ orr(r5, r9, Operand(r6, LSR, kMantissaInLoWordShift));
2074      __ b(&done);
2075    } else {
2076      bool is_signed_type = IsElementTypeSigned(array_type);
2077      int meaningful_bits = is_signed_type ? (kBitsPerInt - 1) : kBitsPerInt;
2078      int32_t min_value = is_signed_type ? 0x80000000 : 0x00000000;
2079
2080      Label done, sign;
2081
2082      // Test for all special exponent values: zeros, subnormal numbers, NaNs
2083      // and infinities. All these should be converted to 0.
2084      __ mov(r7, Operand(HeapNumber::kExponentMask));
2085      __ and_(r9, r5, Operand(r7), SetCC);
2086      __ mov(r5, Operand(0, RelocInfo::NONE), LeaveCC, eq);
2087      __ b(eq, &done);
2088
2089      __ teq(r9, Operand(r7));
2090      __ mov(r5, Operand(0, RelocInfo::NONE), LeaveCC, eq);
2091      __ b(eq, &done);
2092
2093      // Unbias exponent.
2094      __ mov(r9, Operand(r9, LSR, HeapNumber::kExponentShift));
2095      __ sub(r9, r9, Operand(HeapNumber::kExponentBias), SetCC);
2096      // If the exponent is negative, the result is 0.
2097      __ mov(r5, Operand(0, RelocInfo::NONE), LeaveCC, mi);
2098      __ b(mi, &done);
2099
2100      // If the exponent is too big, the result is the minimal value.
2101      __ cmp(r9, Operand(meaningful_bits - 1));
2102      __ mov(r5, Operand(min_value), LeaveCC, ge);
2103      __ b(ge, &done);
2104
2105      __ and_(r7, r5, Operand(HeapNumber::kSignMask), SetCC);
2106      __ and_(r5, r5, Operand(HeapNumber::kMantissaMask));
2107      __ orr(r5, r5, Operand(1u << HeapNumber::kMantissaBitsInTopWord));
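      // Reinsert the implicit leading 1: r5 now holds the top 21 bits of the
      // significand, which the shifts below align according to the unbiased
      // exponent in r9.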
2108
2109      __ rsb(r9, r9, Operand(HeapNumber::kMantissaBitsInTopWord), SetCC);
2110      __ mov(r5, Operand(r5, LSR, r9), LeaveCC, pl);
2111      __ b(pl, &sign);
2112
2113      __ rsb(r9, r9, Operand(0, RelocInfo::NONE));
2114      __ mov(r5, Operand(r5, LSL, r9));
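      // The low word still has to contribute (exponent -
      // kMantissaBitsInTopWord) bits, so it is shifted right by kBitsPerInt
      // minus that amount.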
2115      __ rsb(r9, r9, Operand(kBitsPerInt));
2116      __ orr(r5, r5, Operand(r6, LSR, r9));
2117
2118      __ bind(&sign);
2119      __ teq(r7, Operand(0, RelocInfo::NONE));
2120      __ rsb(r5, r5, Operand(0, RelocInfo::NONE), LeaveCC, ne);
2121
2122      __ bind(&done);
2123      switch (array_type) {
2124        case kExternalByteArray:
2125        case kExternalUnsignedByteArray:
2126          __ strb(r5, MemOperand(r3, r4, LSL, 0));
2127          break;
2128        case kExternalShortArray:
2129        case kExternalUnsignedShortArray:
2130          __ strh(r5, MemOperand(r3, r4, LSL, 1));
2131          break;
2132        case kExternalIntArray:
2133        case kExternalUnsignedIntArray:
2134          __ str(r5, MemOperand(r3, r4, LSL, 2));
2135          break;
2136        default:
2137          UNREACHABLE();
2138          break;
2139      }
2140    }
2141  }
2142
2143  // Slow case: call runtime.
2144  __ bind(&slow);
2145
2146  // Entry registers are intact.
2147  // r0: value
2148  // r1: key
2149  // r2: receiver
2150  GenerateRuntimeSetProperty(masm);
2151}
2152
2153
2154void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
2155  // ----------- S t a t e -------------
2156  //  -- r0    : value
2157  //  -- r1    : receiver
2158  //  -- r2    : name
2159  //  -- lr    : return address
2160  // -----------------------------------
2161
2162  // Probe the stub cache.
2163  Code::Flags flags = Code::ComputeFlags(Code::STORE_IC,
2164                                         NOT_IN_LOOP,
2165                                         MONOMORPHIC);
2166  StubCache::GenerateProbe(masm, flags, r1, r2, r3, r4, r5);
2167
2168  // Cache miss: Jump to runtime.
2169  GenerateMiss(masm);
2170}
2171
2172
2173void StoreIC::GenerateMiss(MacroAssembler* masm) {
2174  // ----------- S t a t e -------------
2175  //  -- r0    : value
2176  //  -- r1    : receiver
2177  //  -- r2    : name
2178  //  -- lr    : return address
2179  // -----------------------------------
2180
2181  __ Push(r1, r2, r0);
2182
2183  // Perform tail call to the entry.
2184  ExternalReference ref = ExternalReference(IC_Utility(kStoreIC_Miss));
2185  __ TailCallExternalReference(ref, 3, 1);
2186}
2187
2188
2189void StoreIC::GenerateArrayLength(MacroAssembler* masm) {
2190  // ----------- S t a t e -------------
2191  //  -- r0    : value
2192  //  -- r1    : receiver
2193  //  -- r2    : name
2194  //  -- lr    : return address
2195  // -----------------------------------
2196  //
2197  // This accepts as a receiver anything JSObject::SetElementsLength accepts
2198  // (currently anything except external and pixel arrays, which means
2199  // anything with elements of FixedArray type), but is currently restricted
2200  // to JSArray.
2201  // The value must be a number, but only smis are handled as the common case.
2202
2203  Label miss;
2204
2205  Register receiver = r1;
2206  Register value = r0;
2207  Register scratch = r3;
2208
2209  // Check that the receiver isn't a smi.
2210  __ BranchOnSmi(receiver, &miss);
2211
2212  // Check that the object is a JS array.
2213  __ CompareObjectType(receiver, scratch, scratch, JS_ARRAY_TYPE);
2214  __ b(ne, &miss);
2215
2216  // Check that elements are FixedArray.
2217  // We rely on StoreIC_ArrayLength below to deal with all types of
2218  // fast elements (including COW).
2219  __ ldr(scratch, FieldMemOperand(receiver, JSArray::kElementsOffset));
2220  __ CompareObjectType(scratch, scratch, scratch, FIXED_ARRAY_TYPE);
2221  __ b(ne, &miss);
2222
2223  // Check that value is a smi.
2224  __ BranchOnNotSmi(value, &miss);
2225
2226  // Prepare tail call to StoreIC_ArrayLength.
2227  __ Push(receiver, value);
2228
2229  ExternalReference ref = ExternalReference(IC_Utility(kStoreIC_ArrayLength));
2230  __ TailCallExternalReference(ref, 2, 1);
2231
2232  __ bind(&miss);
2233
2234  GenerateMiss(masm);
2235}
2236
2237
2238void StoreIC::GenerateNormal(MacroAssembler* masm) {
2239  // ----------- S t a t e -------------
2240  //  -- r0    : value
2241  //  -- r1    : receiver
2242  //  -- r2    : name
2243  //  -- lr    : return address
2244  // -----------------------------------
2245  Label miss;
2246
2247  GenerateStringDictionaryReceiverCheck(masm, r1, r3, r4, r5, &miss);
2248
2249  GenerateDictionaryStore(masm, &miss, r3, r2, r0, r4, r5);
2250  __ IncrementCounter(&Counters::store_normal_hit, 1, r4, r5);
2251  __ Ret();
2252
2253  __ bind(&miss);
2254  __ IncrementCounter(&Counters::store_normal_miss, 1, r4, r5);
2255  GenerateMiss(masm);
2256}
2257
2258
2259#undef __
2260
2261
2262} }  // namespace v8::internal
2263
2264#endif  // V8_TARGET_ARCH_ARM
2265