// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#if V8_TARGET_ARCH_MIPS

#include "src/codegen.h"
#include "src/ic/ic.h"
#include "src/ic/ic-compiler.h"
#include "src/ic/stub-cache.h"

namespace v8 {
namespace internal {


// ----------------------------------------------------------------------------
// Static IC stub generators.
//

#define __ ACCESS_MASM(masm)


static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm, Register type,
                                            Label* global_object) {
  // Register usage:
  //   type: holds the receiver instance type on entry.
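  // Global objects and global proxies are dispatched to global_object: their
  // named properties live behind PropertyCells, which the generic dictionary
  // fast paths in this file do not handle.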
  __ Branch(global_object, eq, type, Operand(JS_GLOBAL_OBJECT_TYPE));
  __ Branch(global_object, eq, type, Operand(JS_GLOBAL_PROXY_TYPE));
}


// Helper function used from LoadIC::GenerateNormal.
//
// elements: Property dictionary. It is not clobbered if a jump to the miss
//           label is done.
// name:     Property name. It is not clobbered if a jump to the miss label is
//           done.
// result:   Register for the result. It is only updated if a jump to the miss
//           label is not done. It may be the same register as elements or
//           name, in which case that register is clobbered on the non-miss
//           path.
// The two scratch registers need to be different from elements, name and
// result.
// The generated code assumes that the receiver has slow properties,
// is not a global object and does not have interceptors.
// The entry address left in scratch2 by
// NameDictionaryLookupStub::GeneratePositiveLookup() is used.
static void GenerateDictionaryLoad(MacroAssembler* masm, Label* miss,
                                   Register elements, Register name,
                                   Register result, Register scratch1,
                                   Register scratch2) {
  // Main use of the scratch registers.
  // scratch1: Used as temporary and to hold the capacity of the property
  //           dictionary.
  // scratch2: Used as temporary.
  Label done;

  // Probe the dictionary.
  NameDictionaryLookupStub::GeneratePositiveLookup(masm, miss, &done, elements,
                                                   name, scratch1, scratch2);

  // If probing finds an entry, check that the value is a normal
  // property.
  __ bind(&done);  // scratch2 == elements + 4 * index.
  const int kElementsStartOffset =
      NameDictionary::kHeaderSize +
      NameDictionary::kElementsStartIndex * kPointerSize;
  const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
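  // Each dictionary entry is a (key, value, details) triplet, so the details
  // word lives two pointers past the entry start. Details words are smis,
  // hence the field mask below is shifted left by kSmiTagSize before testing;
  // a zero type field identifies a normal property.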
  __ lw(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
  __ And(at, scratch1,
         Operand(PropertyDetails::TypeField::kMask << kSmiTagSize));
  __ Branch(miss, ne, at, Operand(zero_reg));

  // Get the value at the masked, scaled index and return.
  __ lw(result,
        FieldMemOperand(scratch2, kElementsStartOffset + 1 * kPointerSize));
}


// Helper function used from StoreIC::GenerateNormal.
//
// elements: Property dictionary. It is not clobbered if a jump to the miss
//           label is done.
// name:     Property name. It is not clobbered if a jump to the miss label is
//           done.
// value:    The value to store.
// The two scratch registers need to be different from elements, name and
// value.
// The generated code assumes that the receiver has slow properties,
// is not a global object and does not have interceptors.
// The entry address left in scratch2 by
// NameDictionaryLookupStub::GeneratePositiveLookup() is used.
static void GenerateDictionaryStore(MacroAssembler* masm, Label* miss,
                                    Register elements, Register name,
                                    Register value, Register scratch1,
                                    Register scratch2) {
  // Main use of the scratch registers.
  // scratch1: Used as temporary and to hold the capacity of the property
  //           dictionary.
  // scratch2: Used as temporary.
  Label done;

  // Probe the dictionary.
  NameDictionaryLookupStub::GeneratePositiveLookup(masm, miss, &done, elements,
                                                   name, scratch1, scratch2);

  // If probing finds an entry in the dictionary, check that the value
  // is a normal property that is not read-only.
  __ bind(&done);  // scratch2 == elements + 4 * index.
  const int kElementsStartOffset =
      NameDictionary::kHeaderSize +
      NameDictionary::kElementsStartIndex * kPointerSize;
  const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
  const int kTypeAndReadOnlyMask =
      (PropertyDetails::TypeField::kMask |
       PropertyDetails::AttributesField::encode(READ_ONLY))
      << kSmiTagSize;
  __ lw(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
  __ And(at, scratch1, Operand(kTypeAndReadOnlyMask));
  __ Branch(miss, ne, at, Operand(zero_reg));

  // Store the value at the masked, scaled index and return.
  const int kValueOffset = kElementsStartOffset + kPointerSize;
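  // Advance scratch2 to the raw (untagged) address of the value slot so the
  // same register can feed both the store and the write barrier below.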
  __ Addu(scratch2, scratch2, Operand(kValueOffset - kHeapObjectTag));
  __ sw(value, MemOperand(scratch2));

  // Update the write barrier. Make sure not to clobber the value.
  __ mov(scratch1, value);
  __ RecordWrite(elements, scratch2, scratch1, kRAHasNotBeenSaved,
                 kDontSaveFPRegs);
}


// Checks the receiver for special cases (value type, slow case bits).
// Falls through for regular JS objects.
static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
                                           Register receiver, Register map,
                                           Register scratch,
                                           int interceptor_bit, Label* slow) {
  // Check that the object isn't a smi.
  __ JumpIfSmi(receiver, slow);
  // Get the map of the receiver.
  __ lw(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
  // Check bit field.
  __ lbu(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
  __ And(at, scratch,
         Operand((1 << Map::kIsAccessCheckNeeded) | (1 << interceptor_bit)));
  __ Branch(slow, ne, at, Operand(zero_reg));
  // Check that the object is some kind of JS object EXCEPT JS Value type.
  // In the case that the object is a value-wrapper object,
  // we enter the runtime system to make sure that indexing into string
  // objects works as intended.
  DCHECK(JS_OBJECT_TYPE > JS_VALUE_TYPE);
  __ lbu(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
  __ Branch(slow, lt, scratch, Operand(JS_OBJECT_TYPE));
}


// Loads an indexed element from a fast case array.
static void GenerateFastArrayLoad(MacroAssembler* masm, Register receiver,
                                  Register key, Register elements,
                                  Register scratch1, Register scratch2,
                                  Register result, Label* slow) {
  // Register use:
  //
  // receiver - holds the receiver on entry.
  //            Unchanged unless 'result' is the same register.
  //
  // key      - holds the smi key on entry.
  //            Unchanged unless 'result' is the same register.
  //
  // result   - holds the result on exit if the load succeeded.
  //            Allowed to be the same as 'receiver' or 'key'.
  //            Unchanged on bailout so 'receiver' and 'key' can be safely
  //            used by further computation.
  //
  // Scratch registers:
  //
  // elements - holds the elements of the receiver and its prototypes.
  //
  // scratch1 - used to hold elements length, bit fields, base addresses.
  //
  // scratch2 - used to hold maps, prototypes, and the loaded value.
  Label check_prototypes, check_next_prototype;
  Label done, in_bounds, absent;

  __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ AssertFastElements(elements);

  // Check that the key (index) is within bounds.
  __ lw(scratch1, FieldMemOperand(elements, FixedArray::kLengthOffset));
  __ Branch(&in_bounds, lo, key, Operand(scratch1));
  // Out-of-bounds. Check the prototype chain to see if we can just return
  // 'undefined'.
  // Negative keys can't take the fast OOB path.
  __ Branch(slow, lt, key, Operand(zero_reg));
  __ bind(&check_prototypes);
  __ lw(scratch2, FieldMemOperand(receiver, HeapObject::kMapOffset));
  __ bind(&check_next_prototype);
  __ lw(scratch2, FieldMemOperand(scratch2, Map::kPrototypeOffset));
  // scratch2: current prototype
  __ LoadRoot(at, Heap::kNullValueRootIndex);
  __ Branch(&absent, eq, scratch2, Operand(at));
  __ lw(elements, FieldMemOperand(scratch2, JSObject::kElementsOffset));
  __ lw(scratch2, FieldMemOperand(scratch2, HeapObject::kMapOffset));
  // elements: elements of current prototype
  // scratch2: map of current prototype
  __ lbu(scratch1, FieldMemOperand(scratch2, Map::kInstanceTypeOffset));
  __ Branch(slow, lo, scratch1, Operand(JS_OBJECT_TYPE));
  __ lbu(scratch1, FieldMemOperand(scratch2, Map::kBitFieldOffset));
  __ And(at, scratch1, Operand((1 << Map::kIsAccessCheckNeeded) |
                               (1 << Map::kHasIndexedInterceptor)));
  __ Branch(slow, ne, at, Operand(zero_reg));
  __ LoadRoot(at, Heap::kEmptyFixedArrayRootIndex);
  __ Branch(slow, ne, elements, Operand(at));
  __ Branch(&check_next_prototype);

  __ bind(&absent);
  __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
  __ Branch(&done);

  __ bind(&in_bounds);
  // Fast case: Do the load.
  __ Addu(scratch1, elements,
          Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  // The key is a smi.
  STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
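  // The smi key already carries a factor of 2 (the smi tag), so scaling by
  // kPointerSizeLog2 - kSmiTagSize yields key * kPointerSize.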
  __ Lsa(at, scratch1, key, kPointerSizeLog2 - kSmiTagSize);
  __ lw(scratch2, MemOperand(at));

  __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
  // In case the loaded value is the_hole we have to check the prototype chain.
  __ Branch(&check_prototypes, eq, scratch2, Operand(at));
  __ Move(result, scratch2);
  __ bind(&done);
}


// Checks whether a key is an array index string or a unique name.
// Falls through if the key is a unique name.
static void GenerateKeyNameCheck(MacroAssembler* masm, Register key,
                                 Register map, Register hash,
                                 Label* index_string, Label* not_unique) {
  // The key is not a smi.
  Label unique;
  // Is it a name?
  __ GetObjectType(key, map, hash);
  __ Branch(not_unique, hi, hash, Operand(LAST_UNIQUE_NAME_TYPE));
  STATIC_ASSERT(LAST_UNIQUE_NAME_TYPE == FIRST_NONSTRING_TYPE);
  __ Branch(&unique, eq, hash, Operand(LAST_UNIQUE_NAME_TYPE));

  // Is the string an array index, with cached numeric value?
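  // When the kContainsCachedArrayIndexMask bits are all clear, the hash field
  // caches the string's numeric array index; the caller's index_string
  // handler extracts it via IndexFromHash().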
  __ lw(hash, FieldMemOperand(key, Name::kHashFieldOffset));
  __ And(at, hash, Operand(Name::kContainsCachedArrayIndexMask));
  __ Branch(index_string, eq, at, Operand(zero_reg));

  // Is the string internalized? We know it's a string, so a single
  // bit test is enough.
  // map: key map
  __ lbu(hash, FieldMemOperand(map, Map::kInstanceTypeOffset));
  STATIC_ASSERT(kInternalizedTag == 0);
  __ And(at, hash, Operand(kIsNotInternalizedMask));
  __ Branch(not_unique, ne, at, Operand(zero_reg));

  __ bind(&unique);
}

void LoadIC::GenerateNormal(MacroAssembler* masm) {
  Register dictionary = a0;
  DCHECK(!dictionary.is(LoadDescriptor::ReceiverRegister()));
  DCHECK(!dictionary.is(LoadDescriptor::NameRegister()));

  Label slow;

  __ lw(dictionary, FieldMemOperand(LoadDescriptor::ReceiverRegister(),
                                    JSObject::kPropertiesOffset));
  GenerateDictionaryLoad(masm, &slow, dictionary,
                         LoadDescriptor::NameRegister(), v0, a3, t0);
  __ Ret();

  // Dictionary load failed, go slow (but don't miss).
  __ bind(&slow);
  GenerateRuntimeGetProperty(masm);
}


// A register that isn't one of the parameters to the LoadIC.
static const Register LoadIC_TempRegister() { return a3; }


static void LoadIC_PushArgs(MacroAssembler* masm) {
  Register receiver = LoadDescriptor::ReceiverRegister();
  Register name = LoadDescriptor::NameRegister();
  Register slot = LoadDescriptor::SlotRegister();
  Register vector = LoadWithVectorDescriptor::VectorRegister();

  __ Push(receiver, name, slot, vector);
}


void LoadIC::GenerateMiss(MacroAssembler* masm) {
  // The return address is in ra.
  Isolate* isolate = masm->isolate();

  DCHECK(!AreAliased(t0, t1, LoadWithVectorDescriptor::SlotRegister(),
                     LoadWithVectorDescriptor::VectorRegister()));
  __ IncrementCounter(isolate->counters()->ic_load_miss(), 1, t0, t1);

  LoadIC_PushArgs(masm);

  // Perform tail call to the entry.
  __ TailCallRuntime(Runtime::kLoadIC_Miss);
}

void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
  // The return address is in ra.

  __ mov(LoadIC_TempRegister(), LoadDescriptor::ReceiverRegister());
  __ Push(LoadIC_TempRegister(), LoadDescriptor::NameRegister());

  // Do tail-call to runtime routine.
  __ TailCallRuntime(Runtime::kGetProperty);
}


void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
  // The return address is in ra.
  Isolate* isolate = masm->isolate();

  DCHECK(!AreAliased(t0, t1, LoadWithVectorDescriptor::SlotRegister(),
                     LoadWithVectorDescriptor::VectorRegister()));
  __ IncrementCounter(isolate->counters()->ic_keyed_load_miss(), 1, t0, t1);

  LoadIC_PushArgs(masm);

  // Perform tail call to the entry.
  __ TailCallRuntime(Runtime::kKeyedLoadIC_Miss);
}

void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
  // The return address is in ra.

  __ Push(LoadDescriptor::ReceiverRegister(), LoadDescriptor::NameRegister());

  // Do tail-call to runtime routine.
  __ TailCallRuntime(Runtime::kKeyedGetProperty);
}

void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm) {
  // The return address is in ra.
  Label slow, check_name, index_smi, index_name, property_array_property;
  Label probe_dictionary, check_number_dictionary;

  Register key = LoadDescriptor::NameRegister();
  Register receiver = LoadDescriptor::ReceiverRegister();
  DCHECK(key.is(a2));
  DCHECK(receiver.is(a1));

  Isolate* isolate = masm->isolate();

  // Check that the key is a smi.
  __ JumpIfNotSmi(key, &check_name);
  __ bind(&index_smi);
  // Now the key is known to be a smi. This place is also jumped to from below
  // where a numeric string is converted to a smi.

  GenerateKeyedLoadReceiverCheck(masm, receiver, a0, a3,
                                 Map::kHasIndexedInterceptor, &slow);

  // Check the receiver's map to see if it has fast elements.
  __ CheckFastElements(a0, a3, &check_number_dictionary);

  GenerateFastArrayLoad(masm, receiver, key, a0, a3, t0, v0, &slow);
  __ IncrementCounter(isolate->counters()->ic_keyed_load_generic_smi(), 1, t0,
                      a3);
  __ Ret();

  __ bind(&check_number_dictionary);
  __ lw(t0, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ lw(a3, FieldMemOperand(t0, JSObject::kMapOffset));

  // Check whether the elements object is a number dictionary.
  // a3: elements map
  // t0: elements
  __ LoadRoot(at, Heap::kHashTableMapRootIndex);
  __ Branch(&slow, ne, a3, Operand(at));
  __ sra(a0, key, kSmiTagSize);
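  // a0 now holds the untagged key; LoadFromNumberDictionary requires it in
  // addition to the smi key.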
  __ LoadFromNumberDictionary(&slow, t0, key, v0, a0, a3, t1);
  __ Ret();

  // Slow case, key and receiver still in a2 and a1.
  __ bind(&slow);
  __ IncrementCounter(isolate->counters()->ic_keyed_load_generic_slow(), 1, t0,
                      a3);
  GenerateRuntimeGetProperty(masm);

  __ bind(&check_name);
  GenerateKeyNameCheck(masm, key, a0, a3, &index_name, &slow);

  GenerateKeyedLoadReceiverCheck(masm, receiver, a0, a3,
                                 Map::kHasNamedInterceptor, &slow);

  // If the receiver is a fast-case object, check the stub cache. Otherwise
  // probe the dictionary.
  __ lw(a3, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
  __ lw(t0, FieldMemOperand(a3, HeapObject::kMapOffset));
  __ LoadRoot(at, Heap::kHashTableMapRootIndex);
  __ Branch(&probe_dictionary, eq, t0, Operand(at));

  // The handlers in the stub cache expect a vector and slot. Since we won't
  // change the IC from any downstream misses, a dummy vector can be used.
  Register vector = LoadWithVectorDescriptor::VectorRegister();
  Register slot = LoadWithVectorDescriptor::SlotRegister();
  DCHECK(!AreAliased(vector, slot, t0, t1, t2, t5));
  Handle<TypeFeedbackVector> dummy_vector =
      TypeFeedbackVector::DummyVector(masm->isolate());
  int slot_index = dummy_vector->GetIndex(
      FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedLoadICSlot));
  __ LoadRoot(vector, Heap::kDummyVectorRootIndex);
  __ li(slot, Operand(Smi::FromInt(slot_index)));

  Code::Flags flags =
      Code::RemoveHolderFromFlags(Code::ComputeHandlerFlags(Code::LOAD_IC));
  masm->isolate()->stub_cache()->GenerateProbe(masm, Code::KEYED_LOAD_IC, flags,
                                               receiver, key, t0, t1, t2, t5);
  // Cache miss.
  GenerateMiss(masm);

  // Do a quick inline probe of the receiver's dictionary, if it
  // exists.
  __ bind(&probe_dictionary);
  // a3: elements
  __ lw(a0, FieldMemOperand(receiver, HeapObject::kMapOffset));
  __ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset));
  GenerateGlobalInstanceTypeCheck(masm, a0, &slow);
  // Load the property to v0.
  GenerateDictionaryLoad(masm, &slow, a3, key, v0, t1, t0);
  __ IncrementCounter(isolate->counters()->ic_keyed_load_generic_symbol(), 1,
                      t0, a3);
  __ Ret();

  __ bind(&index_name);
  __ IndexFromHash(a3, key);
  // Now jump to the place where smi keys are handled.
  __ Branch(&index_smi);
}


static void KeyedStoreGenerateMegamorphicHelper(
    MacroAssembler* masm, Label* fast_object, Label* fast_double, Label* slow,
    KeyedStoreCheckMap check_map, KeyedStoreIncrementLength increment_length,
    Register value, Register key, Register receiver, Register receiver_map,
    Register elements_map, Register elements) {
  Label transition_smi_elements;
  Label finish_object_store, non_double_value, transition_double_elements;
  Label fast_double_without_map_check;

  // Fast case: Do the store, could be either Object or double.
  __ bind(fast_object);
  Register scratch = t0;
  Register scratch2 = t4;
  Register scratch3 = t5;
  Register address = t1;
  DCHECK(!AreAliased(value, key, receiver, receiver_map, elements_map, elements,
                     scratch, scratch2, scratch3, address));

  if (check_map == kCheckMap) {
    __ lw(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
    __ Branch(fast_double, ne, elements_map,
              Operand(masm->isolate()->factory()->fixed_array_map()));
  }

  // HOLECHECK: guards "A[i] = V"
  // We have to go to the runtime if the current value is the hole because
  // there may be a callback on the element.
  Label holecheck_passed1;
  __ Addu(address, elements, FixedArray::kHeaderSize - kHeapObjectTag);
  __ Lsa(address, address, key, kPointerSizeLog2 - kSmiTagSize);
  __ lw(scratch, MemOperand(address));
  __ Branch(&holecheck_passed1, ne, scratch,
            Operand(masm->isolate()->factory()->the_hole_value()));
  __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch, slow);
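  // A hole is only dangerous when some prototype has dictionary (slow)
  // elements, where an element accessor could have been installed; otherwise
  // the store can proceed directly.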

  __ bind(&holecheck_passed1);

  // Smi stores don't require further checks.
  Label non_smi_value;
  __ JumpIfNotSmi(value, &non_smi_value);

  if (increment_length == kIncrementLength) {
    // Add 1 to receiver->length.
    __ Addu(scratch, key, Operand(Smi::FromInt(1)));
    __ sw(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
  }
  // It's irrelevant whether array is smi-only or not when writing a smi.
  __ Addu(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ Lsa(address, address, key, kPointerSizeLog2 - kSmiTagSize);
  __ sw(value, MemOperand(address));
  __ Ret();

  __ bind(&non_smi_value);
  // Escape to elements kind transition case.
  __ CheckFastObjectElements(receiver_map, scratch, &transition_smi_elements);

  // Fast elements array, store the value to the elements backing store.
  __ bind(&finish_object_store);
  if (increment_length == kIncrementLength) {
    // Add 1 to receiver->length.
    __ Addu(scratch, key, Operand(Smi::FromInt(1)));
    __ sw(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
  }
  __ Addu(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ Lsa(address, address, key, kPointerSizeLog2 - kSmiTagSize);
  __ sw(value, MemOperand(address));
  // Update write barrier for the elements array address.
  __ mov(scratch, value);  // Preserve the value which is returned.
  __ RecordWrite(elements, address, scratch, kRAHasNotBeenSaved,
                 kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
  __ Ret();

  __ bind(fast_double);
  if (check_map == kCheckMap) {
    // Check for fast double array case. If this fails, call through to the
    // runtime.
    __ LoadRoot(at, Heap::kFixedDoubleArrayMapRootIndex);
    __ Branch(slow, ne, elements_map, Operand(at));
  }

  // HOLECHECK: guards "A[i] double hole?"
  // We have to see if the double version of the hole is present. If so
  // go to the runtime.
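  // Only the upper 32 bits of each element are inspected: the hole NaN is
  // identified by its upper word. The smi key already carries a factor of 2,
  // so shifting it by kPointerSizeLog2 scales to index * kDoubleSize.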
  __ Addu(address, elements, Operand(FixedDoubleArray::kHeaderSize +
                                     kHoleNanUpper32Offset - kHeapObjectTag));
  __ Lsa(address, address, key, kPointerSizeLog2);
  __ lw(scratch, MemOperand(address));
  __ Branch(&fast_double_without_map_check, ne, scratch,
            Operand(kHoleNanUpper32));
  __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch, slow);

  __ bind(&fast_double_without_map_check);
  __ StoreNumberToDoubleElements(value, key, elements, scratch, scratch2,
                                 scratch3, &transition_double_elements);
  if (increment_length == kIncrementLength) {
    // Add 1 to receiver->length.
    __ Addu(scratch, key, Operand(Smi::FromInt(1)));
    __ sw(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
  }
  __ Ret();

  __ bind(&transition_smi_elements);
  // Transition the array appropriately depending on the value type.
  __ lw(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
  __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
  __ Branch(&non_double_value, ne, scratch, Operand(at));

  // Value is a double. Transition FAST_SMI_ELEMENTS ->
  // FAST_DOUBLE_ELEMENTS and complete the store.
  __ LoadTransitionedArrayMapConditional(
      FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS, receiver_map, scratch, slow);
  AllocationSiteMode mode =
      AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS);
  ElementsTransitionGenerator::GenerateSmiToDouble(masm, receiver, key, value,
                                                   receiver_map, mode, slow);
  __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ jmp(&fast_double_without_map_check);

  __ bind(&non_double_value);
  // Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS.
  __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, FAST_ELEMENTS,
                                         receiver_map, scratch, slow);
  mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS);
  ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
      masm, receiver, key, value, receiver_map, mode, slow);
  __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ jmp(&finish_object_store);

  __ bind(&transition_double_elements);
  // Elements are FAST_DOUBLE_ELEMENTS, but value is an Object that's not a
  // HeapNumber. Make sure that the receiver is an array with FAST_ELEMENTS
  // and transition the array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS.
  __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS,
                                         receiver_map, scratch, slow);
  mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS);
  ElementsTransitionGenerator::GenerateDoubleToObject(
      masm, receiver, key, value, receiver_map, mode, slow);
  __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ jmp(&finish_object_store);
}


void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
                                       LanguageMode language_mode) {
  // ---------- S t a t e --------------
  //  -- a0     : value
  //  -- a1     : key
  //  -- a2     : receiver
  //  -- ra     : return address
  // -----------------------------------
  Label slow, fast_object, fast_object_grow;
  Label fast_double, fast_double_grow;
  Label array, extra, check_if_double_array, maybe_name_key, miss;

  // Register usage.
  Register value = StoreDescriptor::ValueRegister();
  Register key = StoreDescriptor::NameRegister();
  Register receiver = StoreDescriptor::ReceiverRegister();
  DCHECK(value.is(a0));
  Register receiver_map = a3;
  Register elements_map = t2;
  Register elements = t3;  // Elements array of the receiver.
  // t0 and t1 are used as general scratch registers.

  // Check that the key is a smi.
  __ JumpIfNotSmi(key, &maybe_name_key);
  // Check that the object isn't a smi.
  __ JumpIfSmi(receiver, &slow);
  // Get the map of the object.
  __ lw(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
  // Check that the receiver does not require access checks.
  // The generic stub does not perform map checks.
  __ lbu(t0, FieldMemOperand(receiver_map, Map::kBitFieldOffset));
  __ And(t0, t0, Operand(1 << Map::kIsAccessCheckNeeded));
  __ Branch(&slow, ne, t0, Operand(zero_reg));
  // Check if the object is a JS array or not.
  __ lbu(t0, FieldMemOperand(receiver_map, Map::kInstanceTypeOffset));
  __ Branch(&array, eq, t0, Operand(JS_ARRAY_TYPE));
  // Check that the object is some kind of JS object EXCEPT JS Value type. In
  // the case that the object is a value-wrapper object, we enter the runtime
  // system to make sure that indexing into string objects works as intended.
  STATIC_ASSERT(JS_VALUE_TYPE < JS_OBJECT_TYPE);
  __ Branch(&slow, lo, t0, Operand(JS_OBJECT_TYPE));

  // Object case: Check key against length in the elements array.
  __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  // Check array bounds. Both the key and the length of FixedArray are smis.
  __ lw(t0, FieldMemOperand(elements, FixedArray::kLengthOffset));
  __ Branch(&fast_object, lo, key, Operand(t0));

  // Slow case, handle jump to runtime.
  __ bind(&slow);
  // Entry registers are intact.
  // a0: value.
  // a1: key.
  // a2: receiver.
  PropertyICCompiler::GenerateRuntimeSetProperty(masm, language_mode);
  // Never returns to here.

  __ bind(&maybe_name_key);
  __ lw(t0, FieldMemOperand(key, HeapObject::kMapOffset));
  __ lb(t0, FieldMemOperand(t0, Map::kInstanceTypeOffset));
  __ JumpIfNotUniqueNameInstanceType(t0, &slow);

  // The handlers in the stub cache expect a vector and slot. Since we won't
  // change the IC from any downstream misses, a dummy vector can be used.
  Register vector = VectorStoreICDescriptor::VectorRegister();
  Register slot = VectorStoreICDescriptor::SlotRegister();
  DCHECK(!AreAliased(vector, slot, t1, t2, t4, t5));
  Handle<TypeFeedbackVector> dummy_vector =
      TypeFeedbackVector::DummyVector(masm->isolate());
  int slot_index = dummy_vector->GetIndex(
      FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedStoreICSlot));
  __ LoadRoot(vector, Heap::kDummyVectorRootIndex);
  __ li(slot, Operand(Smi::FromInt(slot_index)));

  Code::Flags flags =
      Code::RemoveHolderFromFlags(Code::ComputeHandlerFlags(Code::STORE_IC));
  masm->isolate()->stub_cache()->GenerateProbe(
      masm, Code::KEYED_STORE_IC, flags, receiver, key, t1, t2, t4, t5);
  // Cache miss.
  __ Branch(&miss);

  // Extra capacity case: Check if there is extra capacity to
  // perform the store and update the length. Used for adding one
  // element to the array by writing to array[array.length].
  __ bind(&extra);
  // t0 still holds the array length from the bounds check above.
  // Only support writing to array[array.length].
  __ Branch(&slow, ne, key, Operand(t0));
  // Check for room in the elements backing store.
  // Both the key and the length of FixedArray are smis.
  __ lw(t0, FieldMemOperand(elements, FixedArray::kLengthOffset));
  __ Branch(&slow, hs, key, Operand(t0));
  __ lw(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
  __ Branch(&check_if_double_array, ne, elements_map,
            Heap::kFixedArrayMapRootIndex);

  __ jmp(&fast_object_grow);

  __ bind(&check_if_double_array);
  __ Branch(&slow, ne, elements_map, Heap::kFixedDoubleArrayMapRootIndex);
  __ jmp(&fast_double_grow);

  // Array case: Get the length and the elements array from the JS
  // array. Check that the array is in fast mode (and writable); if it
  // is, the length is always a smi.
  __ bind(&array);
  __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));

  // Check the key against the length in the array.
  __ lw(t0, FieldMemOperand(receiver, JSArray::kLengthOffset));
  __ Branch(&extra, hs, key, Operand(t0));

  KeyedStoreGenerateMegamorphicHelper(
      masm, &fast_object, &fast_double, &slow, kCheckMap, kDontIncrementLength,
      value, key, receiver, receiver_map, elements_map, elements);
  KeyedStoreGenerateMegamorphicHelper(masm, &fast_object_grow,
                                      &fast_double_grow, &slow, kDontCheckMap,
                                      kIncrementLength, value, key, receiver,
                                      receiver_map, elements_map, elements);

  __ bind(&miss);
  GenerateMiss(masm);
}


static void StoreIC_PushArgs(MacroAssembler* masm) {
  __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
          StoreDescriptor::ValueRegister(),
          VectorStoreICDescriptor::SlotRegister(),
          VectorStoreICDescriptor::VectorRegister());
}


void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
  StoreIC_PushArgs(masm);

  __ TailCallRuntime(Runtime::kKeyedStoreIC_Miss);
}

void StoreIC::GenerateMiss(MacroAssembler* masm) {
  StoreIC_PushArgs(masm);

  // Perform tail call to the entry.
  __ TailCallRuntime(Runtime::kStoreIC_Miss);
}


void StoreIC::GenerateNormal(MacroAssembler* masm) {
  Label miss;
  Register receiver = StoreDescriptor::ReceiverRegister();
  Register name = StoreDescriptor::NameRegister();
  Register value = StoreDescriptor::ValueRegister();
  Register dictionary = t1;
  DCHECK(receiver.is(a1));
  DCHECK(name.is(a2));
  DCHECK(value.is(a0));
  DCHECK(VectorStoreICDescriptor::VectorRegister().is(a3));
  DCHECK(VectorStoreICDescriptor::SlotRegister().is(t0));

  __ lw(dictionary, FieldMemOperand(receiver, JSObject::kPropertiesOffset));

  GenerateDictionaryStore(masm, &miss, dictionary, name, value, t2, t5);
  Counters* counters = masm->isolate()->counters();
  __ IncrementCounter(counters->ic_store_normal_hit(), 1, t2, t5);
  __ Ret();

  __ bind(&miss);
  __ IncrementCounter(counters->ic_store_normal_miss(), 1, t2, t5);
  GenerateMiss(masm);
}


#undef __


Condition CompareIC::ComputeCondition(Token::Value op) {
  switch (op) {
    case Token::EQ_STRICT:
    case Token::EQ:
      return eq;
    case Token::LT:
      return lt;
    case Token::GT:
      return gt;
    case Token::LTE:
      return le;
    case Token::GTE:
      return ge;
    default:
      UNREACHABLE();
      return kNoCondition;
  }
}


bool CompareIC::HasInlinedSmiCode(Address address) {
  // The address of the instruction following the call.
  Address andi_instruction_address =
      address + Assembler::kCallTargetAddressOffset;

  // If the instruction following the call is not an andi at, rx, #yyy, nothing
  // was inlined.
  Instr instr = Assembler::instr_at(andi_instruction_address);
  return Assembler::IsAndImmediate(instr) &&
         Assembler::GetRt(instr) == static_cast<uint32_t>(zero_reg.code());
}


void PatchInlinedSmiCode(Isolate* isolate, Address address,
                         InlinedSmiCheck check) {
  Address andi_instruction_address =
      address + Assembler::kCallTargetAddressOffset;

  // If the instruction following the call is not an andi at, rx, #yyy, nothing
  // was inlined.
  Instr instr = Assembler::instr_at(andi_instruction_address);
  if (!(Assembler::IsAndImmediate(instr) &&
        Assembler::GetRt(instr) == static_cast<uint32_t>(zero_reg.code()))) {
    return;
  }

  // The delta from the patched andi instruction back to the start of the
  // map check, which is where the jump to patch is located.
  int delta = Assembler::GetImmediate16(instr);
  delta += Assembler::GetRs(instr) * kImm16Mask;
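  // Deltas too large for the 16-bit immediate are encoded by additionally
  // storing multiples of kImm16Mask in the rs field of the andi.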
  // If the delta is 0 the instruction is andi at, zero_reg, #0 which also
  // signals that nothing was inlined.
  if (delta == 0) {
    return;
  }

  if (FLAG_trace_ic) {
    PrintF("[  patching ic at %p, andi=%p, delta=%d\n",
           static_cast<void*>(address),
           static_cast<void*>(andi_instruction_address), delta);
  }

  Address patch_address =
      andi_instruction_address - delta * Instruction::kInstrSize;
  Instr instr_at_patch = Assembler::instr_at(patch_address);
  // This patches a conditional "jump if not smi/jump if smi" site.
  // It is enabled by changing from
  //   andi at, rx, 0
  //   Branch <target>, eq, at, Operand(zero_reg)
  // to:
  //   andi at, rx, #kSmiTagMask
  //   Branch <target>, ne, at, Operand(zero_reg)
  // and vice-versa to disable it again.
  CodePatcher patcher(isolate, patch_address, 2);
  Register reg = Register::from_code(Assembler::GetRs(instr_at_patch));
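  // The rs field of the original andi names the register holding the value
  // being smi-checked; the patched instruction keeps testing that register.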
  if (check == ENABLE_INLINED_SMI_CHECK) {
    DCHECK(Assembler::IsAndImmediate(instr_at_patch));
    DCHECK_EQ(0u, Assembler::GetImmediate16(instr_at_patch));
    patcher.masm()->andi(at, reg, kSmiTagMask);
  } else {
    DCHECK_EQ(check, DISABLE_INLINED_SMI_CHECK);
    DCHECK(Assembler::IsAndImmediate(instr_at_patch));
    patcher.masm()->andi(at, reg, 0);
  }
  Instr branch_instr =
      Assembler::instr_at(patch_address + Instruction::kInstrSize);
  DCHECK(Assembler::IsBranch(branch_instr));

  uint32_t opcode = Assembler::GetOpcodeField(branch_instr);
  // Currently only the 'eq' and 'ne' condition values are supported, via the
  // simple branch instructions and their r6 variants (where the opcode
  // encodes the branch type). There are some special cases (see
  // Assembler::IsBranch()), so extending this would be tricky.
  DCHECK(opcode == BEQ ||    // BEQ
         opcode == BNE ||    // BNE
         opcode == POP10 ||  // BEQC
         opcode == POP30 ||  // BNEC
         opcode == POP66 ||  // BEQZC
         opcode == POP76);   // BNEZC
  switch (opcode) {
    case BEQ:
      opcode = BNE;  // change BEQ to BNE.
      break;
    case POP10:
      opcode = POP30;  // change BEQC to BNEC.
      break;
    case POP66:
      opcode = POP76;  // change BEQZC to BNEZC.
      break;
    case BNE:
      opcode = BEQ;  // change BNE to BEQ.
      break;
    case POP30:
      opcode = POP10;  // change BNEC to BEQC.
      break;
    case POP76:
      opcode = POP66;  // change BNEZC to BEQZC.
      break;
    default:
      UNIMPLEMENTED();
  }
  patcher.ChangeBranchCondition(branch_instr, opcode);
}
}  // namespace internal
}  // namespace v8

#endif  // V8_TARGET_ARCH_MIPS