ic-arm.cc revision 7f4d5bd8c03935e2c0cd412e561b8fc5a6a880ae
// Copyright 2006-2008 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#if defined(V8_TARGET_ARCH_ARM)

#include "assembler-arm.h"
#include "codegen.h"
#include "codegen-inl.h"
#include "disasm.h"
#include "ic-inl.h"
#include "runtime.h"
#include "stub-cache.h"

namespace v8 {
namespace internal {


// ----------------------------------------------------------------------------
// Static IC stub generators.
//

#define __ ACCESS_MASM(masm)

// Helper function used from LoadIC/CallIC GenerateNormal.
// receiver: Receiver. Not clobbered if a jump to the miss label is taken.
// name:     Property name. Not clobbered if a jump to the miss label is taken.
// result:   Register for the result. Only written if no jump to the miss label
//           is taken. May be the same register as receiver or name, in which
//           case that register is clobbered on success.
// The three scratch registers must be distinct from receiver, name and result.
static void GenerateDictionaryLoad(MacroAssembler* masm,
                                   Label* miss,
                                   Register receiver,
                                   Register name,
                                   Register result,
                                   Register scratch1,
                                   Register scratch2,
                                   Register scratch3,
                                   DictionaryCheck check_dictionary) {
  // Main use of the scratch registers.
  // scratch1: Used to hold the property dictionary.
  // scratch2: Used as temporary and to hold the capacity of the property
  //           dictionary.
  // scratch3: Used as temporary.

  Label done;

  // Check for the absence of an interceptor.
  // Load the map into scratch1.
  __ ldr(scratch1, FieldMemOperand(receiver, JSObject::kMapOffset));

  // Bail out if the receiver has a named interceptor.
  __ ldrb(scratch2, FieldMemOperand(scratch1, Map::kBitFieldOffset));
  __ tst(scratch2, Operand(1 << Map::kHasNamedInterceptor));
  __ b(nz, miss);

  // Bail out if we have a JS global proxy object.
  __ ldrb(scratch2, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
  __ cmp(scratch2, Operand(JS_GLOBAL_PROXY_TYPE));
  __ b(eq, miss);

  // Possible work-around for http://crbug.com/16276.
  // See also: http://codereview.chromium.org/155418.
  __ cmp(scratch2, Operand(JS_GLOBAL_OBJECT_TYPE));
  __ b(eq, miss);
  __ cmp(scratch2, Operand(JS_BUILTINS_OBJECT_TYPE));
  __ b(eq, miss);

  // Load the properties array.
  __ ldr(scratch1, FieldMemOperand(receiver, JSObject::kPropertiesOffset));

  // Check that the properties array is a dictionary.
  if (check_dictionary == CHECK_DICTIONARY) {
    __ ldr(scratch2, FieldMemOperand(scratch1, HeapObject::kMapOffset));
    __ LoadRoot(ip, Heap::kHashTableMapRootIndex);
    __ cmp(scratch2, ip);
    __ b(ne, miss);
  }

  // Compute the capacity mask.
  const int kCapacityOffset = StringDictionary::kHeaderSize +
      StringDictionary::kCapacityIndex * kPointerSize;
  __ ldr(scratch2, FieldMemOperand(scratch1, kCapacityOffset));
  __ mov(scratch2, Operand(scratch2, ASR, kSmiTagSize));  // Convert smi to int.
  __ sub(scratch2, scratch2, Operand(1));

  const int kElementsStartOffset = StringDictionary::kHeaderSize +
      StringDictionary::kElementsStartIndex * kPointerSize;

  // Generate an unrolled loop that performs a few probes before
  // giving up. Measurements done on Gmail indicate that 2 probes
  // cover ~93% of loads from dictionaries.
  static const int kProbes = 4;
  for (int i = 0; i < kProbes; i++) {
    // Compute the masked index: (hash + i + i * i) & mask.
    __ ldr(scratch3, FieldMemOperand(name, String::kHashFieldOffset));
    if (i > 0) {
      // Add the probe offset (i + i * i) left shifted to avoid right shifting
      // the hash in a separate instruction. The value hash + i + i * i is
      // right shifted in the and_ instruction below.
      ASSERT(StringDictionary::GetProbeOffset(i) <
             1 << (32 - String::kHashFieldOffset));
      __ add(scratch3, scratch3, Operand(
          StringDictionary::GetProbeOffset(i) << String::kHashShift));
    }
    __ and_(scratch3, scratch2, Operand(scratch3, LSR, String::kHashShift));

    // Scale the index by multiplying by the element size.
    ASSERT(StringDictionary::kEntrySize == 3);
    // scratch3 = scratch3 * 3.
    __ add(scratch3, scratch3, Operand(scratch3, LSL, 1));

    // Check if the key is identical to the name.
    __ add(scratch3, scratch1, Operand(scratch3, LSL, 2));
    __ ldr(ip, FieldMemOperand(scratch3, kElementsStartOffset));
    __ cmp(name, Operand(ip));
    if (i != kProbes - 1) {
      __ b(eq, &done);
    } else {
      __ b(ne, miss);
    }
  }

  // Check that the value is a normal property.
  __ bind(&done);  // scratch3 == scratch1 + 4 * index
  __ ldr(scratch2,
         FieldMemOperand(scratch3, kElementsStartOffset + 2 * kPointerSize));
  __ tst(scratch2, Operand(PropertyDetails::TypeField::mask() << kSmiTagSize));
  __ b(ne, miss);

  // Get the value at the masked, scaled index and return.
  __ ldr(result,
         FieldMemOperand(scratch3, kElementsStartOffset + 1 * kPointerSize));
}
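
// For reference, each unrolled probe above corresponds to this C-level sketch
// of quadratic probing (a sketch only; it assumes
// StringDictionary::GetProbeOffset(i) == i + i * i, matching the
// "(hash + i + i * i) & mask" comment in the loop; entries are
// (key, value, details) triples):
//
//   uint32_t index = (hash + i + i * i) & capacity_mask;
//   uint32_t entry = index * 3;  // StringDictionary::kEntrySize == 3
//   if (elements[entry] == name) goto done;  // probe hit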


static void GenerateNumberDictionaryLoad(MacroAssembler* masm,
                                         Label* miss,
                                         Register elements,
                                         Register key,
                                         Register t0,
                                         Register t1,
                                         Register t2) {
  // Register use:
  //
  // elements - holds the slow-case elements of the receiver and is unchanged.
  //
  // key      - holds the smi key on entry and is unchanged if a branch is
  //            performed to the miss label.
  //            Holds the result on exit if the load succeeded.
  //
  // Scratch registers:
  //
  // t0 - holds the untagged key on entry and holds the hash once computed.
  //
  // t1 - used to hold the capacity mask of the dictionary.
  //
  // t2 - used for the index into the dictionary.
  Label done;

  // Compute the hash code from the untagged key.  This must be kept in sync
  // with ComputeIntegerHash in utils.h.
  //
  // hash = ~hash + (hash << 15);
  __ mvn(t1, Operand(t0));
  __ add(t0, t1, Operand(t0, LSL, 15));
  // hash = hash ^ (hash >> 12);
  __ eor(t0, t0, Operand(t0, LSR, 12));
  // hash = hash + (hash << 2);
  __ add(t0, t0, Operand(t0, LSL, 2));
  // hash = hash ^ (hash >> 4);
  __ eor(t0, t0, Operand(t0, LSR, 4));
  // hash = hash * 2057;
  __ mov(t1, Operand(2057));
  __ mul(t0, t0, t1);
  // hash = hash ^ (hash >> 16);
  __ eor(t0, t0, Operand(t0, LSR, 16));

  // Compute the capacity mask.
  __ ldr(t1, FieldMemOperand(elements, NumberDictionary::kCapacityOffset));
  __ mov(t1, Operand(t1, ASR, kSmiTagSize));  // Convert smi to int.
  __ sub(t1, t1, Operand(1));

  // Generate an unrolled loop that performs a few probes before giving up.
  static const int kProbes = 4;
  for (int i = 0; i < kProbes; i++) {
    // Use t2 for index calculations and keep the hash intact in t0.
    __ mov(t2, t0);
    // Compute the masked index: (hash + i + i * i) & mask.
    if (i > 0) {
      __ add(t2, t2, Operand(NumberDictionary::GetProbeOffset(i)));
    }
    __ and_(t2, t2, Operand(t1));

    // Scale the index by multiplying by the element size.
    ASSERT(NumberDictionary::kEntrySize == 3);
    __ add(t2, t2, Operand(t2, LSL, 1));  // t2 = t2 * 3

    // Check if the key matches the key of the dictionary entry.
    __ add(t2, elements, Operand(t2, LSL, kPointerSizeLog2));
    __ ldr(ip, FieldMemOperand(t2, NumberDictionary::kElementsStartOffset));
    __ cmp(key, Operand(ip));
    if (i != kProbes - 1) {
      __ b(eq, &done);
    } else {
      __ b(ne, miss);
    }
  }

  __ bind(&done);
  // Check that the value is a normal property.
  // t2: elements + (index * kPointerSize)
  const int kDetailsOffset =
      NumberDictionary::kElementsStartOffset + 2 * kPointerSize;
  __ ldr(t1, FieldMemOperand(t2, kDetailsOffset));
  __ tst(t1, Operand(Smi::FromInt(PropertyDetails::TypeField::mask())));
  __ b(ne, miss);

  // Get the value at the masked, scaled index and return.
  const int kValueOffset =
      NumberDictionary::kElementsStartOffset + kPointerSize;
  __ ldr(key, FieldMemOperand(t2, kValueOffset));
}
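
// For reference, the hash sequence emitted above computes the following
// function (a sketch of ComputeIntegerHash reconstructed from the step
// comments; the real declaration lives in utils.h):
//
//   static uint32_t ComputeIntegerHash(uint32_t key) {
//     uint32_t hash = key;
//     hash = ~hash + (hash << 15);
//     hash = hash ^ (hash >> 12);
//     hash = hash + (hash << 2);
//     hash = hash ^ (hash >> 4);
//     hash = hash * 2057;
//     hash = hash ^ (hash >> 16);
//     return hash;
//   }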


void LoadIC::GenerateArrayLength(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- r2    : name
  //  -- lr    : return address
  //  -- r0    : receiver
  //  -- sp[0] : receiver
  // -----------------------------------
  Label miss;

  StubCompiler::GenerateLoadArrayLength(masm, r0, r3, &miss);
  __ bind(&miss);
  StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC);
}


void LoadIC::GenerateStringLength(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- r2    : name
  //  -- lr    : return address
  //  -- r0    : receiver
  //  -- sp[0] : receiver
  // -----------------------------------
  Label miss;

  StubCompiler::GenerateLoadStringLength(masm, r0, r1, r3, &miss);
  // Cache miss: Jump to runtime.
  __ bind(&miss);
  StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC);
}


void LoadIC::GenerateFunctionPrototype(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- r2    : name
  //  -- lr    : return address
  //  -- r0    : receiver
  //  -- sp[0] : receiver
  // -----------------------------------
  Label miss;

  StubCompiler::GenerateLoadFunctionPrototype(masm, r0, r1, r3, &miss);
  __ bind(&miss);
  StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC);
}


// Defined in ic.cc.
Object* CallIC_Miss(Arguments args);

void CallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
  // ----------- S t a t e -------------
  //  -- r2    : name
  //  -- lr    : return address
  // -----------------------------------
  Label number, non_number, non_string, boolean, probe, miss;

  // Get the receiver of the function from the stack into r1.
  __ ldr(r1, MemOperand(sp, argc * kPointerSize));

  // Probe the stub cache.
  Code::Flags flags =
      Code::ComputeFlags(Code::CALL_IC, NOT_IN_LOOP, MONOMORPHIC, NORMAL, argc);
  StubCache::GenerateProbe(masm, flags, r1, r2, r3, no_reg);

  // If the stub cache probing failed, the receiver might be a value.
  // For value objects the cache is keyed on the map of the prototype of
  // the corresponding JSValue, so load that prototype into r1 and probe
  // again.
  //
  // Check for number.
  __ tst(r1, Operand(kSmiTagMask));
  __ b(eq, &number);
  __ CompareObjectType(r1, r3, r3, HEAP_NUMBER_TYPE);
  __ b(ne, &non_number);
  __ bind(&number);
  StubCompiler::GenerateLoadGlobalFunctionPrototype(
      masm, Context::NUMBER_FUNCTION_INDEX, r1);
  __ b(&probe);

  // Check for string.
  __ bind(&non_number);
  __ cmp(r3, Operand(FIRST_NONSTRING_TYPE));
  __ b(hs, &non_string);
  StubCompiler::GenerateLoadGlobalFunctionPrototype(
      masm, Context::STRING_FUNCTION_INDEX, r1);
  __ b(&probe);

  // Check for boolean.
  __ bind(&non_string);
  __ LoadRoot(ip, Heap::kTrueValueRootIndex);
  __ cmp(r1, ip);
  __ b(eq, &boolean);
  __ LoadRoot(ip, Heap::kFalseValueRootIndex);
  __ cmp(r1, ip);
  __ b(ne, &miss);
  __ bind(&boolean);
  StubCompiler::GenerateLoadGlobalFunctionPrototype(
      masm, Context::BOOLEAN_FUNCTION_INDEX, r1);

  // Probe the stub cache for the value object.
  __ bind(&probe);
  StubCache::GenerateProbe(masm, flags, r1, r2, r3, no_reg);

  // Cache miss: Jump to runtime.
  __ bind(&miss);
  GenerateMiss(masm, argc);
}


static void GenerateNormalHelper(MacroAssembler* masm,
                                 int argc,
                                 bool is_global_object,
                                 Label* miss,
                                 Register scratch) {
  // Search the dictionary - put the result in register r1.
  GenerateDictionaryLoad(masm, miss, r1, r2, r1, r0, r3, r4, CHECK_DICTIONARY);

  // Check that the value isn't a smi.
  __ tst(r1, Operand(kSmiTagMask));
  __ b(eq, miss);

  // Check that the value is a JSFunction.
  __ CompareObjectType(r1, scratch, scratch, JS_FUNCTION_TYPE);
  __ b(ne, miss);

  // Patch the receiver with the global proxy if necessary.
  if (is_global_object) {
    __ ldr(r0, MemOperand(sp, argc * kPointerSize));
    __ ldr(r0, FieldMemOperand(r0, GlobalObject::kGlobalReceiverOffset));
    __ str(r0, MemOperand(sp, argc * kPointerSize));
  }

  // Invoke the function.
  ParameterCount actual(argc);
  __ InvokeFunction(r1, actual, JUMP_FUNCTION);
}


void CallIC::GenerateNormal(MacroAssembler* masm, int argc) {
  // ----------- S t a t e -------------
  //  -- r2    : name
  //  -- lr    : return address
  // -----------------------------------
  Label miss, global_object, non_global_object;

  // Get the receiver of the function from the stack into r1.
  __ ldr(r1, MemOperand(sp, argc * kPointerSize));

  // Check that the receiver isn't a smi.
  __ tst(r1, Operand(kSmiTagMask));
  __ b(eq, &miss);

  // Check that the receiver is a valid JS object.  Put the map in r3.
  __ CompareObjectType(r1, r3, r0, FIRST_JS_OBJECT_TYPE);
  __ b(lt, &miss);

  // If this assert fails, we have to check the upper bound too.
  ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);

  // Check for access to the global object.
  __ cmp(r0, Operand(JS_GLOBAL_OBJECT_TYPE));
  __ b(eq, &global_object);
  __ cmp(r0, Operand(JS_BUILTINS_OBJECT_TYPE));
  __ b(ne, &non_global_object);

  // Accessing global object: Load and invoke.
  __ bind(&global_object);
  // Check that the global object does not require access checks.
  __ ldrb(r3, FieldMemOperand(r3, Map::kBitFieldOffset));
  __ tst(r3, Operand(1 << Map::kIsAccessCheckNeeded));
  __ b(ne, &miss);
  GenerateNormalHelper(masm, argc, true, &miss, r4);

  // Accessing non-global object: Check for access to global proxy.
  Label global_proxy, invoke;
  __ bind(&non_global_object);
  __ cmp(r0, Operand(JS_GLOBAL_PROXY_TYPE));
  __ b(eq, &global_proxy);
  // Check that the non-global, non-global-proxy object does not
  // require access checks.
  __ ldrb(r3, FieldMemOperand(r3, Map::kBitFieldOffset));
  __ tst(r3, Operand(1 << Map::kIsAccessCheckNeeded));
  __ b(ne, &miss);
  __ bind(&invoke);
  GenerateNormalHelper(masm, argc, false, &miss, r4);

  // Global proxy access: Check access rights.
  __ bind(&global_proxy);
  __ CheckAccessGlobalProxy(r1, r0, &miss);
  __ b(&invoke);

  // Cache miss: Jump to runtime.
  __ bind(&miss);
  GenerateMiss(masm, argc);
}


void CallIC::GenerateMiss(MacroAssembler* masm, int argc) {
  // ----------- S t a t e -------------
  //  -- r2    : name
  //  -- lr    : return address
  // -----------------------------------

  // Get the receiver of the function from the stack.
  __ ldr(r3, MemOperand(sp, argc * kPointerSize));

  __ EnterInternalFrame();

  // Push the receiver and the name of the function.
  __ Push(r3, r2);

  // Call the entry.
  __ mov(r0, Operand(2));
  __ mov(r1, Operand(ExternalReference(IC_Utility(kCallIC_Miss))));

  CEntryStub stub(1);
  __ CallStub(&stub);

  // Move the result to r1 and leave the internal frame.
  __ mov(r1, Operand(r0));
  __ LeaveInternalFrame();

  // Check if the receiver is a global object of some sort.
  Label invoke, global;
  __ ldr(r2, MemOperand(sp, argc * kPointerSize));  // receiver
  __ tst(r2, Operand(kSmiTagMask));
  __ b(eq, &invoke);
  __ CompareObjectType(r2, r3, r3, JS_GLOBAL_OBJECT_TYPE);
  __ b(eq, &global);
  __ cmp(r3, Operand(JS_BUILTINS_OBJECT_TYPE));
  __ b(ne, &invoke);

  // Patch the receiver on the stack.
  __ bind(&global);
  __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalReceiverOffset));
  __ str(r2, MemOperand(sp, argc * kPointerSize));

  // Invoke the function.
  ParameterCount actual(argc);
  __ bind(&invoke);
  __ InvokeFunction(r1, actual, JUMP_FUNCTION);
}


void KeyedCallIC::GenerateMiss(MacroAssembler* masm, int argc) {
  UNREACHABLE();
}


void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
  UNREACHABLE();
}


void KeyedCallIC::GenerateNormal(MacroAssembler* masm, int argc) {
  UNREACHABLE();
}


// Defined in ic.cc.
Object* LoadIC_Miss(Arguments args);

void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- r2    : name
  //  -- lr    : return address
  //  -- r0    : receiver
  //  -- sp[0] : receiver
  // -----------------------------------

  // Probe the stub cache.
  Code::Flags flags = Code::ComputeFlags(Code::LOAD_IC,
                                         NOT_IN_LOOP,
                                         MONOMORPHIC);
  StubCache::GenerateProbe(masm, flags, r0, r2, r3, no_reg);

  // Cache miss: Jump to runtime.
  GenerateMiss(masm);
}


void LoadIC::GenerateNormal(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- r2    : name
  //  -- lr    : return address
  //  -- r0    : receiver
  //  -- sp[0] : receiver
  // -----------------------------------
  Label miss, probe, global;

  // Check that the receiver isn't a smi.
  __ tst(r0, Operand(kSmiTagMask));
  __ b(eq, &miss);

  // Check that the receiver is a valid JS object.  Put the map in r3.
  __ CompareObjectType(r0, r3, r1, FIRST_JS_OBJECT_TYPE);
  __ b(lt, &miss);
  // If this assert fails, we have to check the upper bound too.
  ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);

  // Check for access through a global proxy (unlikely).
  __ cmp(r1, Operand(JS_GLOBAL_PROXY_TYPE));
  __ b(eq, &global);

  // Check for a non-global object that requires an access check.
  __ ldrb(r3, FieldMemOperand(r3, Map::kBitFieldOffset));
  __ tst(r3, Operand(1 << Map::kIsAccessCheckNeeded));
  __ b(ne, &miss);

  __ bind(&probe);
  GenerateDictionaryLoad(masm, &miss, r0, r2, r0, r1, r3, r4, CHECK_DICTIONARY);
  __ Ret();

  // Global proxy access: Check access rights.
  __ bind(&global);
  __ CheckAccessGlobalProxy(r0, r1, &miss);
  __ b(&probe);

  // Cache miss: Jump to runtime.
  __ bind(&miss);
  GenerateMiss(masm);
}


void LoadIC::GenerateMiss(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- r2    : name
  //  -- lr    : return address
  //  -- r0    : receiver
  //  -- sp[0] : receiver
  // -----------------------------------

  __ mov(r3, r0);
  __ Push(r3, r2);

  // Perform tail call to the entry.
  ExternalReference ref = ExternalReference(IC_Utility(kLoadIC_Miss));
  __ TailCallExternalReference(ref, 2, 1);
}


static inline bool IsInlinedICSite(Address address,
                                   Address* inline_end_address) {
  // If the instruction after the call site is not the pseudo instruction nop1
  // then this is not related to an inlined in-object property load. The nop1
  // instruction is located just after the call to the IC in the deferred code
  // handling the miss in the inlined code. After the nop1 instruction there is
  // a branch instruction for jumping back from the deferred code.
  Address address_after_call = address + Assembler::kCallTargetAddressOffset;
  Instr instr_after_call = Assembler::instr_at(address_after_call);
  if (!Assembler::IsNop(instr_after_call, PROPERTY_ACCESS_INLINED)) {
    return false;
  }
  Address address_after_nop = address_after_call + Assembler::kInstrSize;
  Instr instr_after_nop = Assembler::instr_at(address_after_nop);
  // There may be some reg-reg move and frame merging code to skip over before
  // the branch back from the DeferredReferenceGetKeyedValue code to the inlined
  // code.
  while (!Assembler::IsBranch(instr_after_nop)) {
    address_after_nop += Assembler::kInstrSize;
    instr_after_nop = Assembler::instr_at(address_after_nop);
  }

  // Find the end of the inlined code for handling the load.
  int b_offset =
      Assembler::GetBranchOffset(instr_after_nop) + Assembler::kPcLoadDelta;
  ASSERT(b_offset < 0);  // Jumping back from deferred code.
  *inline_end_address = address_after_nop + b_offset;

  return true;
}
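
// For reference, IsInlinedICSite expects the deferred code at an inlined
// site to look roughly like this (a sketch assembled from the checks above,
// not an exhaustive description of the deferred code):
//
//   <call to the IC stub>               ; 'address' points at this call
//   nop (PROPERTY_ACCESS_INLINED)       ; marker pseudo-instruction (nop1)
//   <optional reg-reg moves / frame merging code>
//   b <back into the inlined code>      ; branch with a negative offset
//
// inline_end_address is computed as the target of that final branch.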


void LoadIC::ClearInlinedVersion(Address address) {
  // Reset the map check of the inlined in-object property load (if present) to
  // guarantee failure by holding an invalid map (the null value). The offset
  // can be patched to anything.
  PatchInlinedLoad(address, Heap::null_value(), 0);
}


bool LoadIC::PatchInlinedLoad(Address address, Object* map, int offset) {
  // Find the end of the inlined code for handling the load if this is an
  // inlined IC call site.
  Address inline_end_address;
  if (!IsInlinedICSite(address, &inline_end_address)) return false;

  // Patch the offset of the property load instruction (ldr r0, [r1, #+XXX]).
  // The immediate must be representable in 12 bits.
  ASSERT((JSObject::kMaxInstanceSize - JSObject::kHeaderSize) < (1 << 12));
  Address ldr_property_instr_address =
      inline_end_address - Assembler::kInstrSize;
  ASSERT(Assembler::IsLdrRegisterImmediate(
      Assembler::instr_at(ldr_property_instr_address)));
  Instr ldr_property_instr = Assembler::instr_at(ldr_property_instr_address);
  ldr_property_instr = Assembler::SetLdrRegisterImmediateOffset(
      ldr_property_instr, offset - kHeapObjectTag);
  Assembler::instr_at_put(ldr_property_instr_address, ldr_property_instr);

  // Indicate that code has changed.
  CPU::FlushICache(ldr_property_instr_address, 1 * Assembler::kInstrSize);

  // Patch the map check.
  Address ldr_map_instr_address =
      inline_end_address - 4 * Assembler::kInstrSize;
  Assembler::set_target_address_at(ldr_map_instr_address,
                                   reinterpret_cast<Address>(map));
  return true;
}
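
// For reference, the tail of the inlined in-object load patched above looks
// roughly like this (a sketch inferred from the patch offsets used; the
// actual sequence is emitted by the codegen):
//
//   ldr rN, [pc, #...]       ; 4 instructions from the end: map to check
//   ...                      ; map comparison and branch to deferred code
//   ldr r0, [r1, #+XXX]      ; last instruction: the property load itself
//
// PatchInlinedLoad rewrites the map constant and the 12-bit ldr offset.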


void KeyedLoadIC::ClearInlinedVersion(Address address) {
  // Reset the map check of the inlined keyed load (if present) to
  // guarantee failure by holding an invalid map (the null value).
  PatchInlinedLoad(address, Heap::null_value());
}


bool KeyedLoadIC::PatchInlinedLoad(Address address, Object* map) {
  Address inline_end_address;
  if (!IsInlinedICSite(address, &inline_end_address)) return false;

  // Patch the map check.
  Address ldr_map_instr_address =
      inline_end_address -
      (CodeGenerator::kInlinedKeyedLoadInstructionsAfterPatch *
      Assembler::kInstrSize);
  Assembler::set_target_address_at(ldr_map_instr_address,
                                   reinterpret_cast<Address>(map));
  return true;
}


void KeyedStoreIC::ClearInlinedVersion(Address address) {
  // Insert null as the elements map to check for.  This will make
  // sure that the elements fast-case map check fails so that control
  // flows to the IC instead of the inlined version.
  PatchInlinedStore(address, Heap::null_value());
}


void KeyedStoreIC::RestoreInlinedVersion(Address address) {
  // Restore the fast-case elements map check so that the inlined
  // version can be used again.
  PatchInlinedStore(address, Heap::fixed_array_map());
}


bool KeyedStoreIC::PatchInlinedStore(Address address, Object* map) {
  // Find the end of the inlined code for handling the store if this is an
  // inlined IC call site.
  Address inline_end_address;
  if (!IsInlinedICSite(address, &inline_end_address)) return false;

  // Patch the map check.
  Address ldr_map_instr_address =
      inline_end_address -
      (CodeGenerator::kInlinedKeyedStoreInstructionsAfterPatch *
      Assembler::kInstrSize);
  Assembler::set_target_address_at(ldr_map_instr_address,
                                   reinterpret_cast<Address>(map));
  return true;
}


Object* KeyedLoadIC_Miss(Arguments args);


void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
  // ---------- S t a t e --------------
  //  -- lr     : return address
  //  -- r0     : key
  //  -- r1     : receiver
  // -----------------------------------

  __ Push(r1, r0);

  ExternalReference ref = ExternalReference(IC_Utility(kKeyedLoadIC_Miss));
  __ TailCallExternalReference(ref, 2, 1);
}


void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
  // ---------- S t a t e --------------
  //  -- lr     : return address
  //  -- r0     : key
  //  -- r1     : receiver
  // -----------------------------------

  __ Push(r1, r0);

  __ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);
}


void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
  // ---------- S t a t e --------------
  //  -- lr     : return address
  //  -- r0     : key
  //  -- r1     : receiver
  // -----------------------------------
  Label slow, check_string, index_smi, index_string;
  Label check_pixel_array, probe_dictionary, check_number_dictionary;

  Register key = r0;
  Register receiver = r1;

  // Check that the object isn't a smi.
  __ BranchOnSmi(receiver, &slow);
  // Get the map of the receiver.
  __ ldr(r2, FieldMemOperand(receiver, HeapObject::kMapOffset));
  // Check the bit field.
  __ ldrb(r3, FieldMemOperand(r2, Map::kBitFieldOffset));
  __ tst(r3, Operand(kSlowCaseBitFieldMask));
  __ b(ne, &slow);
  // Check that the object is some kind of JS object EXCEPT JS Value type.
  // In the case that the object is a value-wrapper object,
  // we enter the runtime system to make sure that indexing into string
  // objects works as intended.
  ASSERT(JS_OBJECT_TYPE > JS_VALUE_TYPE);
  __ ldrb(r2, FieldMemOperand(r2, Map::kInstanceTypeOffset));
  __ cmp(r2, Operand(JS_OBJECT_TYPE));
  __ b(lt, &slow);

  // Check that the key is a smi.
  __ BranchOnNotSmi(key, &check_string);
  __ bind(&index_smi);
  // Now the key is known to be a smi. This place is also jumped to from below
  // where a numeric string is converted to a smi.
  __ ldr(r4, FieldMemOperand(receiver, JSObject::kElementsOffset));
  // Check that the elements object is in fast mode (not a dictionary).
  __ ldr(r3, FieldMemOperand(r4, HeapObject::kMapOffset));
  __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
  __ cmp(r3, ip);
  __ b(ne, &check_pixel_array);
  // Check that the key (index) is within bounds.
  __ ldr(r3, FieldMemOperand(r4, FixedArray::kLengthOffset));
  __ cmp(key, Operand(r3));
  __ b(hs, &slow);
  // Fast case: Do the load.
  __ add(r3, r4, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  // The key is a smi.
  ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
  __ ldr(r2, MemOperand(r3, key, LSL, kPointerSizeLog2 - kSmiTagSize));
  __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
  __ cmp(r2, ip);
  // In case the loaded value is the_hole we have to consult GetProperty
  // to ensure the prototype chain is searched.
  __ b(eq, &slow);
  __ mov(r0, r2);
  __ IncrementCounter(&Counters::keyed_load_generic_smi, 1, r2, r3);
  __ Ret();

  // Check whether the elements object is a pixel array.
  // r0: key
  // r3: elements map
  // r4: elements
  __ bind(&check_pixel_array);
  __ LoadRoot(ip, Heap::kPixelArrayMapRootIndex);
  __ cmp(r3, ip);
  __ b(ne, &check_number_dictionary);
  __ ldr(ip, FieldMemOperand(r4, PixelArray::kLengthOffset));
  __ mov(r2, Operand(key, ASR, kSmiTagSize));
  __ cmp(r2, ip);
  __ b(hs, &slow);
  __ ldr(ip, FieldMemOperand(r4, PixelArray::kExternalPointerOffset));
  __ ldrb(r2, MemOperand(ip, r2));
  __ mov(r0, Operand(r2, LSL, kSmiTagSize));  // Tag result as smi.
  __ Ret();

  __ bind(&check_number_dictionary);
  // Check whether the elements object is a number dictionary.
  // r0: key
  // r3: elements map
  // r4: elements
  __ LoadRoot(ip, Heap::kHashTableMapRootIndex);
  __ cmp(r3, ip);
  __ b(ne, &slow);
  __ mov(r2, Operand(r0, ASR, kSmiTagSize));
  GenerateNumberDictionaryLoad(masm, &slow, r4, r0, r2, r3, r5);
  __ Ret();

  // Slow case, key and receiver still in r0 and r1.
  __ bind(&slow);
  __ IncrementCounter(&Counters::keyed_load_generic_slow, 1, r2, r3);
  GenerateRuntimeGetProperty(masm);

  __ bind(&check_string);
  // The key is not a smi.
  // Is it a string?
  // r0: key
  // r1: receiver
  __ CompareObjectType(r0, r2, r3, FIRST_NONSTRING_TYPE);
  __ b(ge, &slow);

  // Is the string an array index, with cached numeric value?
  __ ldr(r3, FieldMemOperand(r0, String::kHashFieldOffset));
  __ tst(r3, Operand(String::kContainsCachedArrayIndexMask));
  __ b(eq, &index_string);

  // Is the string a symbol?
  // r2: key map
  __ ldrb(r3, FieldMemOperand(r2, Map::kInstanceTypeOffset));
  ASSERT(kSymbolTag != 0);
  __ tst(r3, Operand(kIsSymbolMask));
  __ b(eq, &slow);

  // If the receiver is a fast-case object, check the keyed lookup
  // cache. Otherwise probe the dictionary.
  __ ldr(r3, FieldMemOperand(r1, JSObject::kPropertiesOffset));
  __ ldr(r3, FieldMemOperand(r3, HeapObject::kMapOffset));
  __ LoadRoot(ip, Heap::kHashTableMapRootIndex);
  __ cmp(r3, ip);
  __ b(eq, &probe_dictionary);

  // Load the map of the receiver, compute the keyed lookup cache hash
  // based on 32 bits of the map pointer and the string hash.
  __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
  __ mov(r3, Operand(r2, ASR, KeyedLookupCache::kMapHashShift));
  __ ldr(r4, FieldMemOperand(r0, String::kHashFieldOffset));
  __ eor(r3, r3, Operand(r4, ASR, String::kHashShift));
  __ and_(r3, r3, Operand(KeyedLookupCache::kCapacityMask));

  // Load the key (consisting of map and symbol) from the cache and
  // check for a match.
  ExternalReference cache_keys = ExternalReference::keyed_lookup_cache_keys();
  __ mov(r4, Operand(cache_keys));
  __ add(r4, r4, Operand(r3, LSL, kPointerSizeLog2 + 1));
  // Loads the map; the post-index moves r4 on to the symbol.
  __ ldr(r5, MemOperand(r4, kPointerSize, PostIndex));
  __ cmp(r2, r5);
  __ b(ne, &slow);
  __ ldr(r5, MemOperand(r4));
  __ cmp(r0, r5);
  __ b(ne, &slow);

  // Get the field offset and check that it is an in-object property.
  // r0     : key
  // r1     : receiver
  // r2     : receiver's map
  // r3     : lookup cache index
  ExternalReference cache_field_offsets
      = ExternalReference::keyed_lookup_cache_field_offsets();
  __ mov(r4, Operand(cache_field_offsets));
  __ ldr(r5, MemOperand(r4, r3, LSL, kPointerSizeLog2));
  __ ldrb(r6, FieldMemOperand(r2, Map::kInObjectPropertiesOffset));
  __ cmp(r5, r6);
  __ b(ge, &slow);

  // Load the in-object property.
  __ sub(r5, r5, r6);  // Index from end of object.
  __ ldrb(r6, FieldMemOperand(r2, Map::kInstanceSizeOffset));
  __ add(r6, r6, r5);  // Index from start of object.
  __ sub(r1, r1, Operand(kHeapObjectTag));  // Remove the heap tag.
  __ ldr(r0, MemOperand(r1, r6, LSL, kPointerSizeLog2));
  __ IncrementCounter(&Counters::keyed_load_generic_lookup_cache, 1, r2, r3);
  __ Ret();

  // Do a quick inline probe of the receiver's dictionary, if it
  // exists.
  __ bind(&probe_dictionary);
  // Load the property into r0.
  GenerateDictionaryLoad(
      masm, &slow, r1, r0, r0, r2, r3, r4, DICTIONARY_CHECK_DONE);
  __ IncrementCounter(&Counters::keyed_load_generic_symbol, 1, r2, r3);
  __ Ret();

  __ b(&slow);
  // If the hash field contains an array index, pick it out. The assert checks
  // that the constants for the maximum number of digits of an array index
  // cached in the hash field and the number of bits reserved for it do not
  // conflict.
  ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
         (1 << String::kArrayIndexValueBits));
  __ bind(&index_string);
  // r0: key (string)
  // r1: receiver
  // r3: hash field
  // We want the smi-tagged index in r0.  kArrayIndexValueMask has zeros in
  // the low kHashShift bits.
  ASSERT(String::kHashShift >= kSmiTagSize);
  __ and_(r3, r3, Operand(String::kArrayIndexValueMask));
  // Here we actually clobber the key (r0), which will be needed if we later
  // call into the runtime. That is fine, however, because the new key is the
  // numeric value of the original string key.
  __ mov(r0, Operand(r3, ASR, String::kHashShift - kSmiTagSize));
  // Now jump to the place where smi keys are handled.
  __ jmp(&index_smi);
}
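
// For reference, the keyed lookup cache probe above corresponds to this
// C-level sketch (variable names are illustrative; the two arrays are the
// ones reached through the cache_keys and cache_field_offsets external
// references used above):
//
//   int index = ((map >> KeyedLookupCache::kMapHashShift) ^
//                (hash_field >> String::kHashShift)) &
//               KeyedLookupCache::kCapacityMask;
//   // keys[] holds (map, symbol) pairs; both halves must match.
//   if (keys[2 * index] == map && keys[2 * index + 1] == symbol) {
//     int offset = field_offsets[index];  // in-object property index
//   }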


void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
  // ---------- S t a t e --------------
  //  -- lr     : return address
  //  -- r0     : key (index)
  //  -- r1     : receiver
  // -----------------------------------
  Label miss;
  Label index_out_of_range;

  Register receiver = r1;
  Register index = r0;
  Register scratch1 = r2;
  Register scratch2 = r3;
  Register result = r0;

  StringCharAtGenerator char_at_generator(receiver,
                                          index,
                                          scratch1,
                                          scratch2,
                                          result,
                                          &miss,  // When not a string.
                                          &miss,  // When not a number.
                                          &index_out_of_range,
                                          STRING_INDEX_IS_ARRAY_INDEX);
  char_at_generator.GenerateFast(masm);
  __ Ret();

  ICRuntimeCallHelper call_helper;
  char_at_generator.GenerateSlow(masm, call_helper);

  __ bind(&index_out_of_range);
  __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
  __ Ret();

  __ bind(&miss);
  GenerateMiss(masm);
}


// Convert an unsigned integer with a specified number of leading zeroes in
// its binary representation to an IEEE 754 double.
// The integer to convert is passed in register hiword.
// The resulting double is returned in registers hiword:loword.
// This function does not work correctly for 0.
static void GenerateUInt2Double(MacroAssembler* masm,
                                Register hiword,
                                Register loword,
                                Register scratch,
                                int leading_zeroes) {
  const int meaningful_bits = kBitsPerInt - leading_zeroes - 1;
  const int biased_exponent = HeapNumber::kExponentBias + meaningful_bits;

  const int mantissa_shift_for_hi_word =
      meaningful_bits - HeapNumber::kMantissaBitsInTopWord;

  const int mantissa_shift_for_lo_word =
      kBitsPerInt - mantissa_shift_for_hi_word;

  __ mov(scratch, Operand(biased_exponent << HeapNumber::kExponentShift));
  if (mantissa_shift_for_hi_word > 0) {
    __ mov(loword, Operand(hiword, LSL, mantissa_shift_for_lo_word));
    __ orr(hiword, scratch, Operand(hiword, LSR, mantissa_shift_for_hi_word));
  } else {
    __ mov(loword, Operand(0));
    __ orr(hiword, scratch, Operand(hiword, LSL, mantissa_shift_for_hi_word));
  }

  // If the least significant bit of the biased exponent was not 1, it has
  // been corrupted by the most significant bit of the mantissa, so we fix
  // that up here.
  if (!(biased_exponent & 1)) {
    __ bic(hiword, hiword, Operand(1 << HeapNumber::kExponentShift));
  }
}
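
// Worked example for GenerateUInt2Double (values checked by hand): with
// leading_zeroes == 1 and hiword == 0x40000000 (2^30), meaningful_bits is
// 30, biased_exponent is 1023 + 30 == 1053 (0x41D, odd, so no fix-up is
// needed), mantissa_shift_for_hi_word is 10, and the result is
// hiword:loword == 0x41D00000:0x00000000, the IEEE 754 double encoding
// of 2^30.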


void KeyedLoadIC::GenerateExternalArray(MacroAssembler* masm,
                                        ExternalArrayType array_type) {
  // ---------- S t a t e --------------
  //  -- lr     : return address
  //  -- r0     : key
  //  -- r1     : receiver
  // -----------------------------------
  Label slow, failed_allocation;

  Register key = r0;
  Register receiver = r1;

  // Check that the object isn't a smi.
  __ BranchOnSmi(receiver, &slow);

  // Check that the key is a smi.
  __ BranchOnNotSmi(key, &slow);

  // Check that the object is a JS object. Load the map into r2.
  __ CompareObjectType(receiver, r2, r3, FIRST_JS_OBJECT_TYPE);
  __ b(lt, &slow);

  // Check that the receiver does not require access checks.  We need
  // to check this explicitly since this generic stub does not perform
  // map checks.
  __ ldrb(r3, FieldMemOperand(r2, Map::kBitFieldOffset));
  __ tst(r3, Operand(1 << Map::kIsAccessCheckNeeded));
  __ b(ne, &slow);

  // Check that the elements array is the appropriate type of
  // ExternalArray.
  __ ldr(r3, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ ldr(r2, FieldMemOperand(r3, HeapObject::kMapOffset));
  __ LoadRoot(ip, Heap::RootIndexForExternalArrayType(array_type));
  __ cmp(r2, ip);
  __ b(ne, &slow);

  // Check that the index is in range.
  __ ldr(ip, FieldMemOperand(r3, ExternalArray::kLengthOffset));
  __ cmp(ip, Operand(key, ASR, kSmiTagSize));
  // Unsigned comparison catches both negative and too-large values.
  __ b(lo, &slow);

  // r3: elements array
  __ ldr(r3, FieldMemOperand(r3, ExternalArray::kExternalPointerOffset));
  // r3: base pointer of external storage

  // We do not untag the smi key; instead we work with it
  // as if it were premultiplied by 2.
  ASSERT((kSmiTag == 0) && (kSmiTagSize == 1));

  Register value = r2;
  switch (array_type) {
    case kExternalByteArray:
      __ ldrsb(value, MemOperand(r3, key, LSR, 1));
      break;
    case kExternalUnsignedByteArray:
      __ ldrb(value, MemOperand(r3, key, LSR, 1));
      break;
    case kExternalShortArray:
      __ ldrsh(value, MemOperand(r3, key, LSL, 0));
      break;
    case kExternalUnsignedShortArray:
      __ ldrh(value, MemOperand(r3, key, LSL, 0));
      break;
    case kExternalIntArray:
    case kExternalUnsignedIntArray:
      __ ldr(value, MemOperand(r3, key, LSL, 1));
      break;
    case kExternalFloatArray:
      if (CpuFeatures::IsSupported(VFP3)) {
        CpuFeatures::Scope scope(VFP3);
        __ add(r2, r3, Operand(key, LSL, 1));
        __ vldr(s0, r2, 0);
      } else {
        __ ldr(value, MemOperand(r3, key, LSL, 1));
      }
      break;
    default:
      UNREACHABLE();
      break;
  }

  // For integer array types:
  // r2: value
  // For the floating-point array type:
  // s0: value (if VFP3 is supported)
  // r2: value (if VFP3 is not supported)

  if (array_type == kExternalIntArray) {
    // For the Int and UnsignedInt array types, we need to see whether
    // the value can be represented in a Smi. If not, we need to convert
    // it to a HeapNumber.
    Label box_int;
    __ cmp(value, Operand(0xC0000000));
    __ b(mi, &box_int);
    // Tag integer as smi and return it.
    __ mov(r0, Operand(value, LSL, kSmiTagSize));
    __ Ret();

    __ bind(&box_int);
    // Allocate a HeapNumber for the result and perform the int-to-double
    // conversion. Use r0 for the result as the key is not needed any more.
    __ AllocateHeapNumber(r0, r3, r4, &slow);

    if (CpuFeatures::IsSupported(VFP3)) {
      CpuFeatures::Scope scope(VFP3);
      __ vmov(s0, value);
      __ vcvt_f64_s32(d0, s0);
      __ sub(r3, r0, Operand(kHeapObjectTag));
      __ vstr(d0, r3, HeapNumber::kValueOffset);
      __ Ret();
    } else {
      WriteInt32ToHeapNumberStub stub(value, r0, r3);
      __ TailCallStub(&stub);
    }
  } else if (array_type == kExternalUnsignedIntArray) {
    // The test is different for unsigned int values. Since we need
    // the value to be in the range of a positive smi, we can't
    // handle either of the top two bits being set in the value.
    if (CpuFeatures::IsSupported(VFP3)) {
      CpuFeatures::Scope scope(VFP3);
      Label box_int, done;
      __ tst(value, Operand(0xC0000000));
      __ b(ne, &box_int);
      // Tag integer as smi and return it.
      __ mov(r0, Operand(value, LSL, kSmiTagSize));
      __ Ret();

      __ bind(&box_int);
      __ vmov(s0, value);
      // Allocate a HeapNumber for the result and perform the int-to-double
      // conversion. Don't use r0 and r1 as AllocateHeapNumber clobbers all
      // registers - also when jumping due to exhausted young space.
      __ AllocateHeapNumber(r2, r3, r4, &slow);

      __ vcvt_f64_u32(d0, s0);
      __ sub(r1, r2, Operand(kHeapObjectTag));
      __ vstr(d0, r1, HeapNumber::kValueOffset);

      __ mov(r0, r2);
      __ Ret();
    } else {
      // Check whether the unsigned integer fits into a smi.
      Label box_int_0, box_int_1, done;
      __ tst(value, Operand(0x80000000));
      __ b(ne, &box_int_0);
      __ tst(value, Operand(0x40000000));
      __ b(ne, &box_int_1);
      // Tag integer as smi and return it.
      __ mov(r0, Operand(value, LSL, kSmiTagSize));
      __ Ret();

      Register hiword = value;  // r2.
      Register loword = r3;

      __ bind(&box_int_0);
      // Integer does not have leading zeros.
      GenerateUInt2Double(masm, hiword, loword, r4, 0);
      __ b(&done);

      __ bind(&box_int_1);
      // Integer has one leading zero.
      GenerateUInt2Double(masm, hiword, loword, r4, 1);

      __ bind(&done);
      // The integer was converted to a double in registers hiword:loword.
      // Wrap it in a HeapNumber. Don't use r0 and r1 as AllocateHeapNumber
      // clobbers all registers - also when jumping due to exhausted young
      // space.
      __ AllocateHeapNumber(r4, r5, r6, &slow);

      __ str(hiword, FieldMemOperand(r4, HeapNumber::kExponentOffset));
      __ str(loword, FieldMemOperand(r4, HeapNumber::kMantissaOffset));

      __ mov(r0, r4);
      __ Ret();
    }
  } else if (array_type == kExternalFloatArray) {
    // For the floating-point array type, we need to always allocate a
    // HeapNumber.
    if (CpuFeatures::IsSupported(VFP3)) {
      CpuFeatures::Scope scope(VFP3);
      // Allocate a HeapNumber for the result. Don't use r0 and r1 as
      // AllocateHeapNumber clobbers all registers - also when jumping due to
      // exhausted young space.
      __ AllocateHeapNumber(r2, r3, r4, &slow);
      __ vcvt_f64_f32(d0, s0);
      __ sub(r1, r2, Operand(kHeapObjectTag));
      __ vstr(d0, r1, HeapNumber::kValueOffset);

      __ mov(r0, r2);
      __ Ret();
    } else {
      // Allocate a HeapNumber for the result. Don't use r0 and r1 as
      // AllocateHeapNumber clobbers all registers - also when jumping due to
      // exhausted young space.
      __ AllocateHeapNumber(r3, r4, r5, &slow);
      // VFP is not available; do a manual single-to-double conversion.

      // r2: floating point value (binary32)
      // r3: heap number for the result

      // Extract the mantissa to r0. OK to clobber r0 now as there are no
      // jumps to the slow case from here.
      __ and_(r0, value, Operand(kBinary32MantissaMask));

      // Extract the exponent to r1. OK to clobber r1 now as there are no
      // jumps to the slow case from here.
      __ mov(r1, Operand(value, LSR, kBinary32MantissaBits));
      __ and_(r1, r1, Operand(kBinary32ExponentMask >> kBinary32MantissaBits));

      Label exponent_rebiased;
      __ teq(r1, Operand(0x00));
      __ b(eq, &exponent_rebiased);

      __ teq(r1, Operand(0xff));
      __ mov(r1, Operand(0x7ff), LeaveCC, eq);
      __ b(eq, &exponent_rebiased);

      // Rebias the exponent.
      __ add(r1,
             r1,
             Operand(-kBinary32ExponentBias + HeapNumber::kExponentBias));

      __ bind(&exponent_rebiased);
      __ and_(r2, value, Operand(kBinary32SignMask));
      value = no_reg;
      __ orr(r2, r2, Operand(r1, LSL, HeapNumber::kMantissaBitsInTopWord));

      // Shift the mantissa.
      static const int kMantissaShiftForHiWord =
          kBinary32MantissaBits - HeapNumber::kMantissaBitsInTopWord;

      static const int kMantissaShiftForLoWord =
          kBitsPerInt - kMantissaShiftForHiWord;

      __ orr(r2, r2, Operand(r0, LSR, kMantissaShiftForHiWord));
      __ mov(r0, Operand(r0, LSL, kMantissaShiftForLoWord));

      __ str(r2, FieldMemOperand(r3, HeapNumber::kExponentOffset));
      __ str(r0, FieldMemOperand(r3, HeapNumber::kMantissaOffset));

      __ mov(r0, r3);
      __ Ret();
    }

  } else {
    // Tag integer as smi and return it.
    __ mov(r0, Operand(value, LSL, kSmiTagSize));
    __ Ret();
  }

  // Slow case, key and receiver still in r0 and r1.
  __ bind(&slow);
  __ IncrementCounter(&Counters::keyed_load_external_array_slow, 1, r2, r3);
  GenerateRuntimeGetProperty(masm);
}
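
// Note on the 0xC0000000 checks above: a smi payload is 31 bits, so a signed
// 32-bit value fits in a smi exactly when its top two bits are equal. For
// the signed case, "cmp value, #0xC0000000; b mi" sets the N flag (bit 31 of
// value + 0x40000000) precisely when bits 31 and 30 differ. For the unsigned
// case, "tst value, #0xC0000000" boxes the value when either of the top two
// bits is set, since the result must be a non-negative smi.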


void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
  // ---------- S t a t e --------------
  //  -- lr     : return address
  //  -- r0     : key
  //  -- r1     : receiver
  // -----------------------------------
  Label slow;

  // Check that the receiver isn't a smi.
  __ BranchOnSmi(r1, &slow);

  // Check that the key is a smi.
  __ BranchOnNotSmi(r0, &slow);

  // Get the map of the receiver.
  __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));

  // Check that it has an indexed interceptor and that access checks
  // are not enabled for this object.
  __ ldrb(r3, FieldMemOperand(r2, Map::kBitFieldOffset));
  __ and_(r3, r3, Operand(kSlowCaseBitFieldMask));
  __ cmp(r3, Operand(1 << Map::kHasIndexedInterceptor));
  __ b(ne, &slow);

  // Everything is fine, call the runtime.
  __ Push(r1, r0);  // Receiver, key.

  // Perform tail call to the entry.
  __ TailCallExternalReference(ExternalReference(
        IC_Utility(kKeyedLoadPropertyWithInterceptor)), 2, 1);

  __ bind(&slow);
  GenerateMiss(masm);
}


void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
  // ---------- S t a t e --------------
  //  -- r0     : value
  //  -- r1     : key
  //  -- r2     : receiver
  //  -- lr     : return address
  // -----------------------------------

  // Push receiver, key and value for runtime call.
  __ Push(r2, r1, r0);

  ExternalReference ref = ExternalReference(IC_Utility(kKeyedStoreIC_Miss));
  __ TailCallExternalReference(ref, 3, 1);
}


void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm) {
  // ---------- S t a t e --------------
  //  -- r0     : value
  //  -- r1     : key
  //  -- r2     : receiver
  //  -- lr     : return address
  // -----------------------------------

  // Push receiver, key and value for runtime call.
  __ Push(r2, r1, r0);

  __ TailCallRuntime(Runtime::kSetProperty, 3, 1);
}


void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
  // ---------- S t a t e --------------
  //  -- r0     : value
  //  -- r1     : key
  //  -- r2     : receiver
  //  -- lr     : return address
  // -----------------------------------
  Label slow, fast, array, extra, check_pixel_array;

  // Register usage.
  Register value = r0;
  Register key = r1;
  Register receiver = r2;
  Register elements = r3;  // Elements array of the receiver.
  // r4 and r5 are used as general scratch registers.

  // Check that the key is a smi.
  __ tst(key, Operand(kSmiTagMask));
  __ b(ne, &slow);
  // Check that the object isn't a smi.
  __ tst(receiver, Operand(kSmiTagMask));
  __ b(eq, &slow);
  // Get the map of the object.
  __ ldr(r4, FieldMemOperand(receiver, HeapObject::kMapOffset));
  // Check that the receiver does not require access checks.  We need
  // to do this because this generic stub does not perform map checks.
  __ ldrb(ip, FieldMemOperand(r4, Map::kBitFieldOffset));
  __ tst(ip, Operand(1 << Map::kIsAccessCheckNeeded));
  __ b(ne, &slow);
  // Check if the object is a JS array or not.
  __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
  __ cmp(r4, Operand(JS_ARRAY_TYPE));
  __ b(eq, &array);
  // Check that the object is some kind of JS object.
  __ cmp(r4, Operand(FIRST_JS_OBJECT_TYPE));
  __ b(lt, &slow);

  // Object case: Check key against length in the elements array.
  __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  // Check that the elements object is in fast mode (not a dictionary).
  __ ldr(r4, FieldMemOperand(elements, HeapObject::kMapOffset));
  __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
  __ cmp(r4, ip);
  __ b(ne, &check_pixel_array);
  // Check array bounds. Both the key and the length of FixedArray are smis.
  __ ldr(ip, FieldMemOperand(elements, FixedArray::kLengthOffset));
  __ cmp(key, Operand(ip));
  __ b(lo, &fast);

  // Slow case: Jump to the runtime.
  __ bind(&slow);
  // Entry registers are intact.
  // r0: value.
  // r1: key.
  // r2: receiver.
  GenerateRuntimeSetProperty(masm);

  // Check whether the elements object is a pixel array.
  // r4: elements map.
  __ bind(&check_pixel_array);
  __ LoadRoot(ip, Heap::kPixelArrayMapRootIndex);
  __ cmp(r4, ip);
  __ b(ne, &slow);
  // Check that the value is a smi. If a conversion is needed, call into the
  // runtime to convert and clamp.
  __ BranchOnNotSmi(value, &slow);
  __ mov(r4, Operand(key, ASR, kSmiTagSize));  // Untag the key.
  __ ldr(ip, FieldMemOperand(elements, PixelArray::kLengthOffset));
  __ cmp(r4, Operand(ip));
  __ b(hs, &slow);
  __ mov(r5, Operand(value, ASR, kSmiTagSize));  // Untag the value.
  {  // Clamp the value to [0..255].
    Label done;
    __ tst(r5, Operand(0xFFFFFF00));
    __ b(eq, &done);
    __ mov(r5, Operand(0), LeaveCC, mi);  // 0 if negative.
    __ mov(r5, Operand(255), LeaveCC, pl);  // 255 if positive.
    __ bind(&done);
  }
  // Get the pointer to the external array. This clobbers elements.
  __ ldr(elements,
         FieldMemOperand(elements, PixelArray::kExternalPointerOffset));
  __ strb(r5, MemOperand(elements, r4));  // Elements is now the external pointer.
  __ Ret();

  // Extra capacity case: Check if there is extra capacity to
  // perform the store and update the length. Used for adding one
  // element to the array by writing to array[array.length].
  __ bind(&extra);
  // Condition code from comparing key and array length is still available.
  __ b(ne, &slow);  // Only support writing to array[array.length].
  // Check for room in the elements backing store.
  // Both the key and the length of FixedArray are smis.
  __ ldr(ip, FieldMemOperand(elements, FixedArray::kLengthOffset));
  __ cmp(key, Operand(ip));
  __ b(hs, &slow);
  // Calculate key + 1 as smi.
  ASSERT_EQ(0, kSmiTag);
  __ add(r4, key, Operand(Smi::FromInt(1)));
  __ str(r4, FieldMemOperand(receiver, JSArray::kLengthOffset));
  __ b(&fast);

  // Array case: Get the length and the elements array from the JS
  // array. Check that the array is in fast mode; if it is, the
  // length is always a smi.
  __ bind(&array);
  __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ ldr(r4, FieldMemOperand(elements, HeapObject::kMapOffset));
  __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
  __ cmp(r4, ip);
  __ b(ne, &slow);

  // Check the key against the length in the array.
  __ ldr(ip, FieldMemOperand(receiver, JSArray::kLengthOffset));
  __ cmp(key, Operand(ip));
  __ b(hs, &extra);
  // Fall through to fast case.

  __ bind(&fast);
  // Fast case, store the value to the elements backing store.
  __ add(r5, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ add(r5, r5, Operand(key, LSL, kPointerSizeLog2 - kSmiTagSize));
  __ str(value, MemOperand(r5));
  // Skip the write barrier if the written value is a smi.
  __ tst(value, Operand(kSmiTagMask));
  __ Ret(eq);
  // Update the write barrier for the elements array address.
  __ sub(r4, r5, Operand(elements));
  __ RecordWrite(elements, r4, r5);

  __ Ret();
}
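
// For reference, the pixel value clamp above implements this C logic in two
// conditional moves (the flags come from "tst r5, #0xFFFFFF00", which is
// non-zero exactly when the untagged value is outside [0..255], and whose N
// flag reflects the value's sign bit):
//
//   int clamped = (value < 0) ? 0 : ((value > 255) ? 255 : value);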


// Convert the int passed in register ival to an IEEE 754 single precision
// floating point value and store it into register fval.
// If VFP3 is available use it for the conversion.
static void ConvertIntToFloat(MacroAssembler* masm,
                              Register ival,
                              Register fval,
                              Register scratch1,
                              Register scratch2) {
  if (CpuFeatures::IsSupported(VFP3)) {
    CpuFeatures::Scope scope(VFP3);
    __ vmov(s0, ival);
    __ vcvt_f32_s32(s0, s0);
    __ vmov(fval, s0);
  } else {
    Label not_special, done;
    // Move the sign bit from the source to the destination.  This works
    // because the sign bit of an IEEE 754 single precision value has the
    // same position and polarity as the 2's complement sign bit of an int.
    ASSERT(kBinary32SignMask == 0x80000000u);

    __ and_(fval, ival, Operand(kBinary32SignMask), SetCC);
    // Negate the value if it is negative.
    __ rsb(ival, ival, Operand(0), LeaveCC, ne);

    // We have -1, 0 or 1, which we treat specially. Register ival contains
    // the absolute value: it is either equal to 1 (special case of -1 and 1),
    // greater than 1 (not a special case) or less than 1 (special case of 0).
    __ cmp(ival, Operand(1));
    __ b(gt, &not_special);

    // For 1 or -1 we need to or in the biased representation of exponent 0.
    static const uint32_t exponent_word_for_1 =
        kBinary32ExponentBias << kBinary32ExponentShift;

    __ orr(fval, fval, Operand(exponent_word_for_1), LeaveCC, eq);
    __ b(&done);

    __ bind(&not_special);
    // Count leading zeros.
    // Gets the wrong answer for 0, but we already checked for that case above.
    Register zeros = scratch2;
    __ CountLeadingZeros(ival, scratch1, zeros);

    // Compute the exponent and or it into the exponent register.
    __ rsb(scratch1,
           zeros,
           Operand((kBitsPerInt - 1) + kBinary32ExponentBias));

    __ orr(fval,
           fval,
           Operand(scratch1, LSL, kBinary32ExponentShift));

    // Shift up the source, chopping off the implicit top bit of the mantissa.
    __ add(zeros, zeros, Operand(1));
    // This wouldn't work for 1 and -1 as the shift amount would be 32;
    // those cases were handled above.
    __ mov(ival, Operand(ival, LSL, zeros));
    // Or in the top kBinary32MantissaBits (23) bits of the mantissa.
    __ orr(fval,
           fval,
           Operand(ival, LSR, kBitsPerInt - kBinary32MantissaBits));

    __ bind(&done);
  }
}
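
// For reference, the non-VFP3 path above corresponds roughly to the C sketch
// below (a hypothetical helper, assuming GCC's __builtin_clz; not part of
// this file):
//   uint32_t int_to_binary32_bits(int32_t i) {
//     uint32_t sign = (uint32_t)i & 0x80000000u;
//     uint32_t mag = sign ? 0u - (uint32_t)i : (uint32_t)i;
//     if (mag == 0) return sign;                     // +/-0.0f
//     if (mag == 1) return sign | (127u << 23);      // +/-1.0f
//     int zeros = __builtin_clz(mag);
//     uint32_t exp = (uint32_t)(31 - zeros) + 127u;  // biased exponent
//     mag <<= zeros + 1;                             // drop the implicit 1
//     return sign | (exp << 23) | (mag >> 9);        // 9 == 32 - 23
//   }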


static bool IsElementTypeSigned(ExternalArrayType array_type) {
  switch (array_type) {
    case kExternalByteArray:
    case kExternalShortArray:
    case kExternalIntArray:
      return true;

    case kExternalUnsignedByteArray:
    case kExternalUnsignedShortArray:
    case kExternalUnsignedIntArray:
      return false;

    default:
      UNREACHABLE();
      return false;
  }
}


void KeyedStoreIC::GenerateExternalArray(MacroAssembler* masm,
                                         ExternalArrayType array_type) {
  // ---------- S t a t e --------------
  //  -- r0     : value
  //  -- r1     : key
  //  -- r2     : receiver
  //  -- lr     : return address
  // -----------------------------------
  Label slow, check_heap_number;

  // Register usage.
  Register value = r0;
  Register key = r1;
  Register receiver = r2;
  // r3 mostly holds the elements array or the destination external array.

  // Check that the object isn't a smi.
  __ BranchOnSmi(receiver, &slow);

  // Check that the object is a JS object. Load map into r3.
  __ CompareObjectType(receiver, r3, r4, FIRST_JS_OBJECT_TYPE);
  __ b(le, &slow);

  // Check that the receiver does not require access checks.  We need
  // to do this because this generic stub does not perform map checks.
  __ ldrb(ip, FieldMemOperand(r3, Map::kBitFieldOffset));
  __ tst(ip, Operand(1 << Map::kIsAccessCheckNeeded));
  __ b(ne, &slow);

  // Check that the key is a smi.
  __ BranchOnNotSmi(key, &slow);

  // Check that the elements array is the appropriate type of ExternalArray.
  __ ldr(r3, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ ldr(r4, FieldMemOperand(r3, HeapObject::kMapOffset));
  __ LoadRoot(ip, Heap::RootIndexForExternalArrayType(array_type));
  __ cmp(r4, ip);
  __ b(ne, &slow);

  // Check that the index is in range.
  __ mov(r4, Operand(key, ASR, kSmiTagSize));  // Untag the index.
  __ ldr(ip, FieldMemOperand(r3, ExternalArray::kLengthOffset));
  __ cmp(r4, ip);
  // Unsigned comparison catches both negative and too-large values.
  __ b(hs, &slow);

  // Handle both smis and HeapNumbers in the fast path. Go to the
  // runtime for all other kinds of values.
  // r3: external array.
  // r4: key (integer).
  __ BranchOnNotSmi(value, &check_heap_number);
  __ mov(r5, Operand(value, ASR, kSmiTagSize));  // Untag the value.
  __ ldr(r3, FieldMemOperand(r3, ExternalArray::kExternalPointerOffset));

  // r3: base pointer of external storage.
  // r4: key (integer).
  // r5: value (integer).
  switch (array_type) {
    case kExternalByteArray:
    case kExternalUnsignedByteArray:
      __ strb(r5, MemOperand(r3, r4, LSL, 0));
      break;
    case kExternalShortArray:
    case kExternalUnsignedShortArray:
      __ strh(r5, MemOperand(r3, r4, LSL, 1));
      break;
    case kExternalIntArray:
    case kExternalUnsignedIntArray:
      __ str(r5, MemOperand(r3, r4, LSL, 2));
      break;
    case kExternalFloatArray:
      // Need to perform int-to-float conversion.
      ConvertIntToFloat(masm, r5, r6, r7, r9);
      __ str(r6, MemOperand(r3, r4, LSL, 2));
      break;
    default:
      UNREACHABLE();
      break;
  }
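  // (Each of the stores above is, in effect, base[index] = value with the
  // index scaled by the element size: *(T*)(r3 + (r4 << log2(sizeof(T)))).)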

  // Entry registers are intact, r0 holds the value which is the return value.
  __ Ret();


  // r3: external array.
  // r4: index (integer).
  __ bind(&check_heap_number);
  __ CompareObjectType(value, r5, r6, HEAP_NUMBER_TYPE);
  __ b(ne, &slow);

  __ ldr(r3, FieldMemOperand(r3, ExternalArray::kExternalPointerOffset));

  // r3: base pointer of external storage.
  // r4: key (integer).

  // The WebGL specification leaves the behavior of storing NaN and
  // +/-Infinity into integer arrays basically undefined. For more
  // reproducible behavior, convert these to zero.
  if (CpuFeatures::IsSupported(VFP3)) {
    CpuFeatures::Scope scope(VFP3);

    // vldr requires the offset to be a multiple of 4, so we cannot fold
    // -kHeapObjectTag into it.
    __ sub(r5, r0, Operand(kHeapObjectTag));
    __ vldr(d0, r5, HeapNumber::kValueOffset);

    if (array_type == kExternalFloatArray) {
      __ vcvt_f32_f64(s0, d0);
      __ vmov(r5, s0);
      __ str(r5, MemOperand(r3, r4, LSL, 2));
    } else {
      Label done;

      // Need to perform float-to-int conversion.
      // Test for NaN.
      __ vcmp(d0, d0);
      // Move vector status bits to normal status bits.
      __ vmrs(v8::internal::pc);
      __ mov(r5, Operand(0), LeaveCC, vs);  // NaN converts to 0.
      __ b(vs, &done);

      // Test whether the exponent is equal to 0x7FF (infinity or NaN).
      __ vmov(r6, r7, d0);
      __ mov(r5, Operand(0x7FF00000));
      __ and_(r6, r6, Operand(r5));
      __ teq(r6, Operand(r5));
      __ mov(r6, Operand(0), LeaveCC, eq);

      // Not infinity or NaN: simply convert to int.
      if (IsElementTypeSigned(array_type)) {
        __ vcvt_s32_f64(s0, d0, ne);
      } else {
        __ vcvt_u32_f64(s0, d0, ne);
      }

      __ vmov(r5, s0, ne);

      __ bind(&done);
      switch (array_type) {
        case kExternalByteArray:
        case kExternalUnsignedByteArray:
          __ strb(r5, MemOperand(r3, r4, LSL, 0));
          break;
        case kExternalShortArray:
        case kExternalUnsignedShortArray:
          __ strh(r5, MemOperand(r3, r4, LSL, 1));
          break;
        case kExternalIntArray:
        case kExternalUnsignedIntArray:
          __ str(r5, MemOperand(r3, r4, LSL, 2));
          break;
        default:
          UNREACHABLE();
          break;
      }
    }
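
    // The NaN/Infinity handling above is intended to implement this
    // hypothetical C sketch (a rough statement of intent, not a literal
    // transcription of the instruction sequence):
    //   int32_t to_stored_int(double d) {
    //     if (isnan(d) || isinf(d)) return 0;  // undefined in WebGL; use 0
    //     return (int32_t)d;                   // vcvt-style truncation
    //   }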

    // Entry registers are intact, r0 holds the value which is the return value.
    __ Ret();
  } else {
    // VFP3 is not available, do manual conversions.
    __ ldr(r5, FieldMemOperand(value, HeapNumber::kExponentOffset));
    __ ldr(r6, FieldMemOperand(value, HeapNumber::kMantissaOffset));

    if (array_type == kExternalFloatArray) {
      Label done, nan_or_infinity_or_zero;
      static const int kMantissaInHiWordShift =
          kBinary32MantissaBits - HeapNumber::kMantissaBitsInTopWord;

      static const int kMantissaInLoWordShift =
          kBitsPerInt - kMantissaInHiWordShift;

      // Test for all special exponent values: zeros, subnormal numbers, NaNs
      // and infinities. All these should be converted to 0.
      __ mov(r7, Operand(HeapNumber::kExponentMask));
      __ and_(r9, r5, Operand(r7), SetCC);
      __ b(eq, &nan_or_infinity_or_zero);

      __ teq(r9, Operand(r7));
      __ mov(r9, Operand(kBinary32ExponentMask), LeaveCC, eq);
      __ b(eq, &nan_or_infinity_or_zero);

      // Rebias the exponent.
      __ mov(r9, Operand(r9, LSR, HeapNumber::kExponentShift));
      __ add(r9,
             r9,
             Operand(kBinary32ExponentBias - HeapNumber::kExponentBias));

      __ cmp(r9, Operand(kBinary32MaxExponent));
      __ and_(r5, r5, Operand(HeapNumber::kSignMask), LeaveCC, gt);
      __ orr(r5, r5, Operand(kBinary32ExponentMask), LeaveCC, gt);
      __ b(gt, &done);

      __ cmp(r9, Operand(kBinary32MinExponent));
      __ and_(r5, r5, Operand(HeapNumber::kSignMask), LeaveCC, lt);
      __ b(lt, &done);

      __ and_(r7, r5, Operand(HeapNumber::kSignMask));
      __ and_(r5, r5, Operand(HeapNumber::kMantissaMask));
      __ orr(r7, r7, Operand(r5, LSL, kMantissaInHiWordShift));
      __ orr(r7, r7, Operand(r6, LSR, kMantissaInLoWordShift));
      __ orr(r5, r7, Operand(r9, LSL, kBinary32ExponentShift));

      __ bind(&done);
      __ str(r5, MemOperand(r3, r4, LSL, 2));
      // Entry registers are intact, r0 holds the value which is the return
      // value.
      __ Ret();

      __ bind(&nan_or_infinity_or_zero);
      __ and_(r7, r5, Operand(HeapNumber::kSignMask));
      __ and_(r5, r5, Operand(HeapNumber::kMantissaMask));
      __ orr(r9, r9, r7);
      __ orr(r9, r9, Operand(r5, LSL, kMantissaInHiWordShift));
      __ orr(r5, r9, Operand(r6, LSR, kMantissaInLoWordShift));
      __ b(&done);
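
      // For the normal-exponent path this amounts to the following C sketch
      // (a hypothetical helper operating on the raw words of the double; the
      // zero/subnormal/NaN/Infinity cases are handled separately above):
      //   uint32_t double_bits_to_float_bits(uint32_t hi, uint32_t lo) {
      //     uint32_t sign = hi & 0x80000000u;
      //     int exp = (int)((hi >> 20) & 0x7FF) - 1023 + 127;  // rebias
      //     if (exp > 0xFE) return sign | 0x7F800000u;         // -> +/-Inf
      //     if (exp < 0x01) return sign;                       // -> +/-0
      //     uint32_t mant = ((hi & 0x000FFFFFu) << 3) | (lo >> 29);
      //     return sign | ((uint32_t)exp << 23) | mant;
      //   }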
    } else {
      bool is_signed_type = IsElementTypeSigned(array_type);
      int meaningful_bits = is_signed_type ? (kBitsPerInt - 1) : kBitsPerInt;
      int32_t min_value = is_signed_type ? 0x80000000 : 0x00000000;

      Label done, sign;

      // Test for all special exponent values: zeros, subnormal numbers, NaNs
      // and infinities. All these should be converted to 0.
      __ mov(r7, Operand(HeapNumber::kExponentMask));
      __ and_(r9, r5, Operand(r7), SetCC);
      __ mov(r5, Operand(0), LeaveCC, eq);
      __ b(eq, &done);

      __ teq(r9, Operand(r7));
      __ mov(r5, Operand(0), LeaveCC, eq);
      __ b(eq, &done);

      // Unbias the exponent.
      __ mov(r9, Operand(r9, LSR, HeapNumber::kExponentShift));
      __ sub(r9, r9, Operand(HeapNumber::kExponentBias), SetCC);
      // If the exponent is negative, the result is 0.
      __ mov(r5, Operand(0), LeaveCC, mi);
      __ b(mi, &done);

      // If the exponent is too big, the result is the minimal value.
      __ cmp(r9, Operand(meaningful_bits - 1));
      __ mov(r5, Operand(min_value), LeaveCC, ge);
      __ b(ge, &done);

      __ and_(r7, r5, Operand(HeapNumber::kSignMask), SetCC);
      __ and_(r5, r5, Operand(HeapNumber::kMantissaMask));
      __ orr(r5, r5, Operand(1u << HeapNumber::kMantissaBitsInTopWord));

      __ rsb(r9, r9, Operand(HeapNumber::kMantissaBitsInTopWord), SetCC);
      __ mov(r5, Operand(r5, LSR, r9), LeaveCC, pl);
      __ b(pl, &sign);

      __ rsb(r9, r9, Operand(0));
      __ mov(r5, Operand(r5, LSL, r9));
      __ rsb(r9, r9, Operand(meaningful_bits));
      __ orr(r5, r5, Operand(r6, LSR, r9));

      __ bind(&sign);
      __ teq(r7, Operand(0));
      __ rsb(r5, r5, Operand(0), LeaveCC, ne);

      __ bind(&done);
      switch (array_type) {
        case kExternalByteArray:
        case kExternalUnsignedByteArray:
          __ strb(r5, MemOperand(r3, r4, LSL, 0));
          break;
        case kExternalShortArray:
        case kExternalUnsignedShortArray:
          __ strh(r5, MemOperand(r3, r4, LSL, 1));
          break;
        case kExternalIntArray:
        case kExternalUnsignedIntArray:
          __ str(r5, MemOperand(r3, r4, LSL, 2));
          break;
        default:
          UNREACHABLE();
          break;
      }
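
      // The intended conversion corresponds roughly to this C sketch (a
      // hypothetical helper using 64-bit arithmetic for brevity; the shift
      // sequence above does the same work on the two 32-bit words):
      //   uint32_t double_to_int_bits(uint32_t hi, uint32_t lo,
      //                               int meaningful_bits,
      //                               uint32_t min_value) {
      //     int e = (int)((hi >> 20) & 0x7FF);
      //     if (e == 0 || e == 0x7FF) return 0;      // 0/subnormal/NaN/Inf
      //     e -= 1023;                               // unbias
      //     if (e < 0) return 0;                     // |d| < 1
      //     if (e >= meaningful_bits - 1) return min_value;  // saturate
      //     uint64_t m = ((uint64_t)((hi & 0xFFFFFu) | 0x100000u) << 32) | lo;
      //     uint32_t r = (uint32_t)(m >> (52 - e));
      //     return (hi & 0x80000000u) ? 0u - r : r;
      //   }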
    }
  }

  // Slow case: call runtime.
  __ bind(&slow);

  // Entry registers are intact.
  // r0: value
  // r1: key
  // r2: receiver
  GenerateRuntimeSetProperty(masm);
}


void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- r0    : value
  //  -- r1    : receiver
  //  -- r2    : name
  //  -- lr    : return address
  // -----------------------------------

  // Probe the stub cache using the receiver in r1 and the name in r2.
  Code::Flags flags = Code::ComputeFlags(Code::STORE_IC,
                                         NOT_IN_LOOP,
                                         MONOMORPHIC);
  StubCache::GenerateProbe(masm, flags, r1, r2, r3, no_reg);

  // Cache miss: Jump to runtime.
  GenerateMiss(masm);
}


void StoreIC::GenerateMiss(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- r0    : value
  //  -- r1    : receiver
  //  -- r2    : name
  //  -- lr    : return address
  // -----------------------------------

  __ Push(r1, r2, r0);

  // Perform tail call to the entry.
  ExternalReference ref = ExternalReference(IC_Utility(kStoreIC_Miss));
  __ TailCallExternalReference(ref, 3, 1);
}


void StoreIC::GenerateArrayLength(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- r0    : value
  //  -- r1    : receiver
  //  -- r2    : name
  //  -- lr    : return address
  // -----------------------------------
  //
  // This accepts as a receiver anything JSObject::SetElementsLength accepts
  // (currently anything except external and pixel arrays, i.e. anything with
  // elements of FixedArray type), but is currently restricted to JSArray.
  // The value must be a number, but only smis are accepted, as they are the
  // most common case.

  Label miss;

  Register receiver = r1;
  Register value = r0;
  Register scratch = r3;

  // Check that the receiver isn't a smi.
  __ BranchOnSmi(receiver, &miss);

  // Check that the object is a JS array.
  __ CompareObjectType(receiver, scratch, scratch, JS_ARRAY_TYPE);
  __ b(ne, &miss);

  // Check that elements are FixedArray.
  __ ldr(scratch, FieldMemOperand(receiver, JSArray::kElementsOffset));
  __ CompareObjectType(scratch, scratch, scratch, FIXED_ARRAY_TYPE);
  __ b(ne, &miss);

  // Check that value is a smi.
  __ BranchOnNotSmi(value, &miss);

  // Prepare tail call to StoreIC_ArrayLength.
  __ Push(receiver, value);

  ExternalReference ref = ExternalReference(IC_Utility(kStoreIC_ArrayLength));
  __ TailCallExternalReference(ref, 2, 1);

  __ bind(&miss);

  GenerateMiss(masm);
}


#undef __


} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_ARM
