// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#if V8_TARGET_ARCH_MIPS64

#include "src/codegen.h"
#include "src/ic/ic.h"
#include "src/ic/ic-compiler.h"
#include "src/ic/stub-cache.h"

namespace v8 {
namespace internal {


// ----------------------------------------------------------------------------
// Static IC stub generators.
//

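// The '__' macro forwards each mnemonic below to the MacroAssembler that is
// passed into each generator (ACCESS_MASM essentially expands to 'masm->').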
#define __ ACCESS_MASM(masm)


static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm, Register type,
                                            Label* global_object) {
  // Register usage:
  //   type: holds the receiver instance type on entry.
  __ Branch(global_object, eq, type, Operand(JS_GLOBAL_OBJECT_TYPE));
  __ Branch(global_object, eq, type, Operand(JS_GLOBAL_PROXY_TYPE));
}


// Helper function used from LoadIC GenerateNormal.
//
// elements: Property dictionary. It is not clobbered if a jump to the miss
//           label is done.
// name:     Property name. It is not clobbered if a jump to the miss label is
//           done.
// result:   Register for the result. It is only updated if a jump to the miss
//           label is not done. Can be the same as elements or name, in which
//           case that register is clobbered on success.
// The two scratch registers need to be different from elements, name and
// result.
// The generated code assumes that the receiver has slow properties,
// is not a global object and does not have interceptors.
// The address left in scratch2 by
// NameDictionaryLookupStub::GeneratePositiveLookup() is used.
static void GenerateDictionaryLoad(MacroAssembler* masm, Label* miss,
                                   Register elements, Register name,
                                   Register result, Register scratch1,
                                   Register scratch2) {
  // Main use of the scratch registers.
  // scratch1: Used as temporary and to hold the capacity of the property
  //           dictionary.
  // scratch2: Used as temporary.
  Label done;

  // Probe the dictionary.
  NameDictionaryLookupStub::GeneratePositiveLookup(masm, miss, &done, elements,
                                                   name, scratch1, scratch2);

  // If probing finds an entry check that the value is a normal
  // property.
  __ bind(&done);  // scratch2 == elements + kPointerSize * index.
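  // A NameDictionary entry spans three pointers (key, value, details), so the
  // value sits one pointer and the details word two pointers past the entry
  // start computed from kElementsStartOffset below.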
  const int kElementsStartOffset =
      NameDictionary::kHeaderSize +
      NameDictionary::kElementsStartIndex * kPointerSize;
  const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
  __ ld(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
  __ And(at, scratch1,
         Operand(Smi::FromInt(PropertyDetails::TypeField::kMask)));
  __ Branch(miss, ne, at, Operand(zero_reg));

  // Get the value at the masked, scaled index and return.
  __ ld(result,
        FieldMemOperand(scratch2, kElementsStartOffset + 1 * kPointerSize));
}


// Helper function used from StoreIC::GenerateNormal.
//
// elements: Property dictionary. It is not clobbered if a jump to the miss
//           label is done.
// name:     Property name. It is not clobbered if a jump to the miss label is
//           done.
// value:    The value to store.
// The two scratch registers need to be different from elements, name and
// value.
// The generated code assumes that the receiver has slow properties,
// is not a global object and does not have interceptors.
// The address left in scratch2 by
// NameDictionaryLookupStub::GeneratePositiveLookup() is used.
static void GenerateDictionaryStore(MacroAssembler* masm, Label* miss,
                                    Register elements, Register name,
                                    Register value, Register scratch1,
                                    Register scratch2) {
  // Main use of the scratch registers.
  // scratch1: Used as temporary and to hold the capacity of the property
  //           dictionary.
  // scratch2: Used as temporary.
  Label done;

  // Probe the dictionary.
  NameDictionaryLookupStub::GeneratePositiveLookup(masm, miss, &done, elements,
                                                   name, scratch1, scratch2);

  // If probing finds an entry in the dictionary check that the value
  // is a normal property that is not read only.
  __ bind(&done);  // scratch2 == elements + kPointerSize * index.
  const int kElementsStartOffset =
      NameDictionary::kHeaderSize +
      NameDictionary::kElementsStartIndex * kPointerSize;
  const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
  const int kTypeAndReadOnlyMask =
      (PropertyDetails::TypeField::kMask |
       PropertyDetails::AttributesField::encode(READ_ONLY));
  __ ld(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
  __ And(at, scratch1, Operand(Smi::FromInt(kTypeAndReadOnlyMask)));
  __ Branch(miss, ne, at, Operand(zero_reg));

  // Store the value at the masked, scaled index and return.
  const int kValueOffset = kElementsStartOffset + kPointerSize;
  __ Daddu(scratch2, scratch2, Operand(kValueOffset - kHeapObjectTag));
  __ sd(value, MemOperand(scratch2));

  // Update the write barrier. Make sure not to clobber the value.
  __ mov(scratch1, value);
  __ RecordWrite(elements, scratch2, scratch1, kRAHasNotBeenSaved,
                 kDontSaveFPRegs);
}


// Checks the receiver for special cases (value type, slow case bits).
// Falls through for a regular JS object.
static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
                                           Register receiver, Register map,
                                           Register scratch,
                                           int interceptor_bit, Label* slow) {
  // Check that the object isn't a smi.
  __ JumpIfSmi(receiver, slow);
  // Get the map of the receiver.
  __ ld(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
  // Check bit field.
  __ lbu(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
  __ And(at, scratch,
         Operand((1 << Map::kIsAccessCheckNeeded) | (1 << interceptor_bit)));
  __ Branch(slow, ne, at, Operand(zero_reg));
  // Check that the object is some kind of JS object EXCEPT JS Value type. In
  // the case that the object is a value-wrapper object, we enter the runtime
  // system to make sure that indexing into string objects works as intended.
  DCHECK(JS_OBJECT_TYPE > JS_VALUE_TYPE);
  __ lbu(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
  __ Branch(slow, lt, scratch, Operand(JS_OBJECT_TYPE));
}


// Loads an indexed element from a fast case array.
static void GenerateFastArrayLoad(MacroAssembler* masm, Register receiver,
                                  Register key, Register elements,
                                  Register scratch1, Register scratch2,
                                  Register result, Label* slow) {
  // Register use:
  //
  // receiver - holds the receiver on entry.
  //            Unchanged unless 'result' is the same register.
  //
  // key      - holds the smi key on entry.
  //            Unchanged unless 'result' is the same register.
  //
  // result   - holds the result on exit if the load succeeded.
  //            Allowed to be the same as 'receiver' or 'key'.
  //            Unchanged on bailout so 'receiver' and 'key' can be safely
  //            used by further computation.
  //
  // Scratch registers:
  //
  // elements - holds the elements of the receiver and its prototypes.
  //
  // scratch1 - used to hold elements length, bit fields, base addresses.
  //
  // scratch2 - used to hold maps, prototypes, and the loaded value.
  Label check_prototypes, check_next_prototype;
  Label done, in_bounds, absent;

  __ ld(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ AssertFastElements(elements);

  // Check that the key (index) is within bounds.
  __ ld(scratch1, FieldMemOperand(elements, FixedArray::kLengthOffset));
  __ Branch(&in_bounds, lo, key, Operand(scratch1));
  // Out-of-bounds. Check the prototype chain to see if we can just return
  // 'undefined'.
  // Negative keys can't take the fast OOB path.
  __ Branch(slow, lt, key, Operand(zero_reg));
  __ bind(&check_prototypes);
  __ ld(scratch2, FieldMemOperand(receiver, HeapObject::kMapOffset));
  __ bind(&check_next_prototype);
  __ ld(scratch2, FieldMemOperand(scratch2, Map::kPrototypeOffset));
  // scratch2: current prototype
  __ LoadRoot(at, Heap::kNullValueRootIndex);
  __ Branch(&absent, eq, scratch2, Operand(at));
  __ ld(elements, FieldMemOperand(scratch2, JSObject::kElementsOffset));
  __ ld(scratch2, FieldMemOperand(scratch2, HeapObject::kMapOffset));
  // elements: elements of current prototype
  // scratch2: map of current prototype
  __ lbu(scratch1, FieldMemOperand(scratch2, Map::kInstanceTypeOffset));
  __ Branch(slow, lo, scratch1, Operand(JS_OBJECT_TYPE));
  __ lbu(scratch1, FieldMemOperand(scratch2, Map::kBitFieldOffset));
  __ And(at, scratch1, Operand((1 << Map::kIsAccessCheckNeeded) |
                               (1 << Map::kHasIndexedInterceptor)));
  __ Branch(slow, ne, at, Operand(zero_reg));
  __ LoadRoot(at, Heap::kEmptyFixedArrayRootIndex);
  __ Branch(slow, ne, elements, Operand(at));
  __ Branch(&check_next_prototype);

  __ bind(&absent);
  __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
  __ Branch(&done);

  __ bind(&in_bounds);
  // Fast case: Do the load.
  __ Daddu(scratch1, elements,
           Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  // The key is a smi.
  STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
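  // On MIPS64 a smi keeps its 32-bit payload in the upper word of the
  // register, so SmiScale below both untags the key and multiplies it by
  // kPointerSize with a single shift.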
  __ SmiScale(at, key, kPointerSizeLog2);
  __ daddu(at, at, scratch1);
  __ ld(scratch2, MemOperand(at));

  __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
  // In case the loaded value is the_hole we have to check the prototype chain.
  __ Branch(&check_prototypes, eq, scratch2, Operand(at));
  __ Move(result, scratch2);
  __ bind(&done);
}


// Checks whether a key is an array index string or a unique name.
// Falls through if the key is a unique name.
static void GenerateKeyNameCheck(MacroAssembler* masm, Register key,
                                 Register map, Register hash,
                                 Label* index_string, Label* not_unique) {
  // The key is not a smi.
  Label unique;
  // Is it a name?
  __ GetObjectType(key, map, hash);
  __ Branch(not_unique, hi, hash, Operand(LAST_UNIQUE_NAME_TYPE));
  STATIC_ASSERT(LAST_UNIQUE_NAME_TYPE == FIRST_NONSTRING_TYPE);
  __ Branch(&unique, eq, hash, Operand(LAST_UNIQUE_NAME_TYPE));

  // Is the string an array index, with cached numeric value?
  __ lwu(hash, FieldMemOperand(key, Name::kHashFieldOffset));
  __ And(at, hash, Operand(Name::kContainsCachedArrayIndexMask));
  __ Branch(index_string, eq, at, Operand(zero_reg));

  // Is the string internalized? We know it's a string, so a single
  // bit test is enough.
  // map: key map
  __ lbu(hash, FieldMemOperand(map, Map::kInstanceTypeOffset));
  STATIC_ASSERT(kInternalizedTag == 0);
  __ And(at, hash, Operand(kIsNotInternalizedMask));
  __ Branch(not_unique, ne, at, Operand(zero_reg));

  __ bind(&unique);
}

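// Loads a property from a receiver whose properties live in a NameDictionary;
// a failed probe falls through to the runtime without recording an IC miss.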
void LoadIC::GenerateNormal(MacroAssembler* masm) {
  Register dictionary = a0;
  DCHECK(!dictionary.is(LoadDescriptor::ReceiverRegister()));
  DCHECK(!dictionary.is(LoadDescriptor::NameRegister()));
  Label slow;

  __ ld(dictionary, FieldMemOperand(LoadDescriptor::ReceiverRegister(),
                                    JSObject::kPropertiesOffset));
  GenerateDictionaryLoad(masm, &slow, dictionary,
                         LoadDescriptor::NameRegister(), v0, a3, a4);
  __ Ret();

  // Dictionary load failed, go slow (but don't miss).
  __ bind(&slow);
  GenerateRuntimeGetProperty(masm);
}


// A register that isn't one of the parameters to the load ic.
static const Register LoadIC_TempRegister() { return a3; }


static void LoadIC_PushArgs(MacroAssembler* masm) {
  Register receiver = LoadDescriptor::ReceiverRegister();
  Register name = LoadDescriptor::NameRegister();
  Register slot = LoadDescriptor::SlotRegister();
  Register vector = LoadWithVectorDescriptor::VectorRegister();

  __ Push(receiver, name, slot, vector);
}


void LoadIC::GenerateMiss(MacroAssembler* masm) {
  // The return address is in ra.
  Isolate* isolate = masm->isolate();

  DCHECK(!AreAliased(a4, a5, LoadWithVectorDescriptor::SlotRegister(),
                     LoadWithVectorDescriptor::VectorRegister()));
  __ IncrementCounter(isolate->counters()->ic_load_miss(), 1, a4, a5);

  LoadIC_PushArgs(masm);

  // Perform tail call to the entry.
  __ TailCallRuntime(Runtime::kLoadIC_Miss);
}

void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
  // The return address is in ra.

  __ mov(LoadIC_TempRegister(), LoadDescriptor::ReceiverRegister());
  __ Push(LoadIC_TempRegister(), LoadDescriptor::NameRegister());

  // Do tail-call to runtime routine.
  __ TailCallRuntime(Runtime::kGetProperty);
}


void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
  // The return address is in ra.
  Isolate* isolate = masm->isolate();

  DCHECK(!AreAliased(a4, a5, LoadWithVectorDescriptor::SlotRegister(),
                     LoadWithVectorDescriptor::VectorRegister()));
  __ IncrementCounter(isolate->counters()->ic_keyed_load_miss(), 1, a4, a5);

  LoadIC_PushArgs(masm);

  // Perform tail call to the entry.
  __ TailCallRuntime(Runtime::kKeyedLoadIC_Miss);
}

void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
  // The return address is in ra.

  __ Push(LoadDescriptor::ReceiverRegister(), LoadDescriptor::NameRegister());

  // Do tail-call to runtime routine.
  __ TailCallRuntime(Runtime::kKeyedGetProperty);
}

void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm) {
  // The return address is in ra.
  Label slow, check_name, index_smi, index_name, property_array_property;
  Label probe_dictionary, check_number_dictionary;

  Register key = LoadDescriptor::NameRegister();
  Register receiver = LoadDescriptor::ReceiverRegister();
  DCHECK(key.is(a2));
  DCHECK(receiver.is(a1));

  Isolate* isolate = masm->isolate();

  // Check that the key is a smi.
  __ JumpIfNotSmi(key, &check_name);
  __ bind(&index_smi);
  // Now the key is known to be a smi. This place is also jumped to from below
  // where a numeric string is converted to a smi.

  GenerateKeyedLoadReceiverCheck(masm, receiver, a0, a3,
                                 Map::kHasIndexedInterceptor, &slow);

  // Check the receiver's map to see if it has fast elements.
  __ CheckFastElements(a0, a3, &check_number_dictionary);

  GenerateFastArrayLoad(masm, receiver, key, a0, a3, a4, v0, &slow);
  __ IncrementCounter(isolate->counters()->ic_keyed_load_generic_smi(), 1, a4,
                      a3);
  __ Ret();

  __ bind(&check_number_dictionary);
  __ ld(a4, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ ld(a3, FieldMemOperand(a4, JSObject::kMapOffset));

  // Check whether the elements object is a number dictionary.
  // a3: elements map
  // a4: elements
  __ LoadRoot(at, Heap::kHashTableMapRootIndex);
  __ Branch(&slow, ne, a3, Operand(at));
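  // dsra32 shifts the smi key right by 32 bits, leaving the untagged integer
  // index in a0 for the number dictionary probe below.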
  __ dsra32(a0, key, 0);
  __ LoadFromNumberDictionary(&slow, a4, key, v0, a0, a3, a5);
  __ Ret();

  // Slow case, key and receiver still in a2 and a1.
  __ bind(&slow);
  __ IncrementCounter(isolate->counters()->ic_keyed_load_generic_slow(), 1, a4,
                      a3);
  GenerateRuntimeGetProperty(masm);

  __ bind(&check_name);
  GenerateKeyNameCheck(masm, key, a0, a3, &index_name, &slow);

  GenerateKeyedLoadReceiverCheck(masm, receiver, a0, a3,
                                 Map::kHasNamedInterceptor, &slow);

  // If the receiver is a fast-case object, check the stub cache. Otherwise
  // probe the dictionary.
  __ ld(a3, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
  __ ld(a4, FieldMemOperand(a3, HeapObject::kMapOffset));
  __ LoadRoot(at, Heap::kHashTableMapRootIndex);
  __ Branch(&probe_dictionary, eq, a4, Operand(at));

  // The handlers in the stub cache expect a vector and slot. Since we won't
  // change the IC from any downstream misses, a dummy vector can be used.
  Register vector = LoadWithVectorDescriptor::VectorRegister();
  Register slot = LoadWithVectorDescriptor::SlotRegister();
  DCHECK(!AreAliased(vector, slot, a4, a5, a6, t1));
  Handle<TypeFeedbackVector> dummy_vector =
      TypeFeedbackVector::DummyVector(masm->isolate());
  int slot_index = dummy_vector->GetIndex(
      FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedLoadICSlot));
  __ LoadRoot(vector, Heap::kDummyVectorRootIndex);
  __ li(slot, Operand(Smi::FromInt(slot_index)));

  Code::Flags flags =
      Code::RemoveHolderFromFlags(Code::ComputeHandlerFlags(Code::LOAD_IC));
  masm->isolate()->stub_cache()->GenerateProbe(masm, Code::KEYED_LOAD_IC, flags,
                                               receiver, key, a4, a5, a6, t1);
  // Cache miss.
  GenerateMiss(masm);

  // Do a quick inline probe of the receiver's dictionary, if it
  // exists.
  __ bind(&probe_dictionary);
  // a3: elements
  __ ld(a0, FieldMemOperand(receiver, HeapObject::kMapOffset));
  __ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset));
  GenerateGlobalInstanceTypeCheck(masm, a0, &slow);
  // Load the property to v0.
  GenerateDictionaryLoad(masm, &slow, a3, key, v0, a5, a4);
  __ IncrementCounter(isolate->counters()->ic_keyed_load_generic_symbol(), 1,
                      a4, a3);
  __ Ret();

  __ bind(&index_name);
  __ IndexFromHash(a3, key);
  // Now jump to the place where smi keys are handled.
  __ Branch(&index_smi);
}


static void KeyedStoreGenerateMegamorphicHelper(
    MacroAssembler* masm, Label* fast_object, Label* fast_double, Label* slow,
    KeyedStoreCheckMap check_map, KeyedStoreIncrementLength increment_length,
    Register value, Register key, Register receiver, Register receiver_map,
    Register elements_map, Register elements) {
  Label transition_smi_elements;
  Label finish_object_store, non_double_value, transition_double_elements;
  Label fast_double_without_map_check;

  // Fast case: Do the store, could be either Object or double.
  __ bind(fast_object);
  Register scratch = a4;
  Register scratch2 = t0;
  Register address = a5;
  DCHECK(!AreAliased(value, key, receiver, receiver_map, elements_map, elements,
                     scratch, scratch2, address));

  if (check_map == kCheckMap) {
    __ ld(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
    __ Branch(fast_double, ne, elements_map,
              Operand(masm->isolate()->factory()->fixed_array_map()));
  }

  // HOLECHECK: guards "A[i] = V"
  // We have to go to the runtime if the current value is the hole because
  // there may be a callback on the element.
  Label holecheck_passed1;
  __ Daddu(address, elements, FixedArray::kHeaderSize - kHeapObjectTag);
  __ SmiScale(at, key, kPointerSizeLog2);
  __ daddu(address, address, at);
  __ ld(scratch, MemOperand(address));

  __ Branch(&holecheck_passed1, ne, scratch,
            Operand(masm->isolate()->factory()->the_hole_value()));
  __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch, slow);

  __ bind(&holecheck_passed1);

  // Smi stores don't require further checks.
  Label non_smi_value;
  __ JumpIfNotSmi(value, &non_smi_value);

  if (increment_length == kIncrementLength) {
    // Add 1 to receiver->length.
    __ Daddu(scratch, key, Operand(Smi::FromInt(1)));
    __ sd(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
  }
  // It's irrelevant whether the array is smi-only or not when writing a smi.
  __ Daddu(address, elements,
           Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ SmiScale(scratch, key, kPointerSizeLog2);
  __ Daddu(address, address, scratch);
  __ sd(value, MemOperand(address));
  __ Ret();

  __ bind(&non_smi_value);
  // Escape to the elements kind transition case.
  __ CheckFastObjectElements(receiver_map, scratch, &transition_smi_elements);

  // Fast elements array, store the value to the elements backing store.
  __ bind(&finish_object_store);
  if (increment_length == kIncrementLength) {
    // Add 1 to receiver->length.
    __ Daddu(scratch, key, Operand(Smi::FromInt(1)));
    __ sd(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
  }
  __ Daddu(address, elements,
           Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ SmiScale(scratch, key, kPointerSizeLog2);
  __ Daddu(address, address, scratch);
  __ sd(value, MemOperand(address));
  // Update write barrier for the elements array address.
  __ mov(scratch, value);  // Preserve the value which is returned.
  __ RecordWrite(elements, address, scratch, kRAHasNotBeenSaved,
                 kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
  __ Ret();

  __ bind(fast_double);
  if (check_map == kCheckMap) {
    // Check for fast double array case. If this fails, call through to the
    // runtime.
    __ LoadRoot(at, Heap::kFixedDoubleArrayMapRootIndex);
    __ Branch(slow, ne, elements_map, Operand(at));
  }

  // HOLECHECK: guards "A[i] double hole?"
  // We have to see if the double version of the hole is present. If so,
  // go to the runtime.
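  // The double hole is a NaN with the distinguished upper word
  // kHoleNanUpper32, so a 32-bit load of the exponent word (at
  // Register::kExponentOffset) is enough to detect it below.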
  __ Daddu(address, elements,
           Operand(FixedDoubleArray::kHeaderSize + Register::kExponentOffset -
                   kHeapObjectTag));
  __ SmiScale(at, key, kPointerSizeLog2);
  __ daddu(address, address, at);
  __ lw(scratch, MemOperand(address));
  __ Branch(&fast_double_without_map_check, ne, scratch,
            Operand(static_cast<int32_t>(kHoleNanUpper32)));
  __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch, slow);

  __ bind(&fast_double_without_map_check);
  __ StoreNumberToDoubleElements(value, key, elements, scratch, scratch2,
                                 &transition_double_elements);
  if (increment_length == kIncrementLength) {
    // Add 1 to receiver->length.
    __ Daddu(scratch, key, Operand(Smi::FromInt(1)));
    __ sd(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
  }
  __ Ret();

  __ bind(&transition_smi_elements);
  // Transition the array appropriately depending on the value type.
  __ ld(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
  __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
  __ Branch(&non_double_value, ne, scratch, Operand(at));

  // Value is a double. Transition FAST_SMI_ELEMENTS ->
  // FAST_DOUBLE_ELEMENTS and complete the store.
  __ LoadTransitionedArrayMapConditional(
      FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS, receiver_map, scratch, slow);
  AllocationSiteMode mode =
      AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS);
  ElementsTransitionGenerator::GenerateSmiToDouble(masm, receiver, key, value,
                                                   receiver_map, mode, slow);
  __ ld(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ jmp(&fast_double_without_map_check);

  __ bind(&non_double_value);
  // Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS.
  __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, FAST_ELEMENTS,
                                         receiver_map, scratch, slow);
  mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS);
  ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
      masm, receiver, key, value, receiver_map, mode, slow);
  __ ld(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ jmp(&finish_object_store);

  __ bind(&transition_double_elements);
  // Elements are FAST_DOUBLE_ELEMENTS, but value is an Object that's not a
  // HeapNumber. Make sure that the receiver is an Array with FAST_ELEMENTS
  // and transition the array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS.
  __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS,
                                         receiver_map, scratch, slow);
  mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS);
  ElementsTransitionGenerator::GenerateDoubleToObject(
      masm, receiver, key, value, receiver_map, mode, slow);
  __ ld(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ jmp(&finish_object_store);
}


void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
                                       LanguageMode language_mode) {
  // ---------- S t a t e --------------
  //  -- a0     : value
  //  -- a1     : key
  //  -- a2     : receiver
  //  -- ra     : return address
  // -----------------------------------
  Label slow, fast_object, fast_object_grow;
  Label fast_double, fast_double_grow;
  Label array, extra, check_if_double_array, maybe_name_key, miss;

  // Register usage.
  Register value = StoreDescriptor::ValueRegister();
  Register key = StoreDescriptor::NameRegister();
  Register receiver = StoreDescriptor::ReceiverRegister();
  DCHECK(value.is(a0));
  Register receiver_map = a3;
  Register elements_map = a6;
  Register elements = a7;  // Elements array of the receiver.
  // a4 and a5 are used as general scratch registers.

  // Check that the key is a smi.
  __ JumpIfNotSmi(key, &maybe_name_key);
  // Check that the object isn't a smi.
  __ JumpIfSmi(receiver, &slow);
  // Get the map of the object.
  __ ld(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
  // Check that the receiver does not require access checks.
  // The generic stub does not perform map checks.
  __ lbu(a4, FieldMemOperand(receiver_map, Map::kBitFieldOffset));
  __ And(a4, a4, Operand(1 << Map::kIsAccessCheckNeeded));
  __ Branch(&slow, ne, a4, Operand(zero_reg));
  // Check if the object is a JS array or not.
  __ lbu(a4, FieldMemOperand(receiver_map, Map::kInstanceTypeOffset));
  __ Branch(&array, eq, a4, Operand(JS_ARRAY_TYPE));
  // Check that the object is some kind of JSObject.
  __ Branch(&slow, lt, a4, Operand(FIRST_JS_OBJECT_TYPE));

  // Object case: Check key against length in the elements array.
  __ ld(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  // Check array bounds. Both the key and the length of FixedArray are smis.
  __ ld(a4, FieldMemOperand(elements, FixedArray::kLengthOffset));
  __ Branch(&fast_object, lo, key, Operand(a4));

  // Slow case, handle jump to runtime.
  __ bind(&slow);
  // Entry registers are intact.
  // a0: value.
  // a1: key.
  // a2: receiver.
  PropertyICCompiler::GenerateRuntimeSetProperty(masm, language_mode);
  // Never returns to here.

  __ bind(&maybe_name_key);
  __ ld(a4, FieldMemOperand(key, HeapObject::kMapOffset));
  __ lb(a4, FieldMemOperand(a4, Map::kInstanceTypeOffset));
  __ JumpIfNotUniqueNameInstanceType(a4, &slow);

  // The handlers in the stub cache expect a vector and slot. Since we won't
  // change the IC from any downstream misses, a dummy vector can be used.
  Register vector = VectorStoreICDescriptor::VectorRegister();
  Register slot = VectorStoreICDescriptor::SlotRegister();

  DCHECK(!AreAliased(vector, slot, a5, a6, a7, t0));
  Handle<TypeFeedbackVector> dummy_vector =
      TypeFeedbackVector::DummyVector(masm->isolate());
  int slot_index = dummy_vector->GetIndex(
      FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedStoreICSlot));
  __ LoadRoot(vector, Heap::kDummyVectorRootIndex);
  __ li(slot, Operand(Smi::FromInt(slot_index)));

  Code::Flags flags =
      Code::RemoveHolderFromFlags(Code::ComputeHandlerFlags(Code::STORE_IC));
  masm->isolate()->stub_cache()->GenerateProbe(
      masm, Code::KEYED_STORE_IC, flags, receiver, key, a5, a6, a7, t0);
  // Cache miss.
  __ Branch(&miss);

  // Extra capacity case: Check if there is extra capacity to
  // perform the store and update the length. Used for adding one
  // element to the array by writing to array[array.length].
  __ bind(&extra);
  // The array length from the bounds check above is still in a4.
  // Only support writing to array[array.length].
  __ Branch(&slow, ne, key, Operand(a4));
  // Check for room in the elements backing store.
  // Both the key and the length of FixedArray are smis.
  __ ld(a4, FieldMemOperand(elements, FixedArray::kLengthOffset));
  __ Branch(&slow, hs, key, Operand(a4));
  __ ld(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
  __ Branch(&check_if_double_array, ne, elements_map,
            Heap::kFixedArrayMapRootIndex);

  __ jmp(&fast_object_grow);

  __ bind(&check_if_double_array);
  __ Branch(&slow, ne, elements_map, Heap::kFixedDoubleArrayMapRootIndex);
  __ jmp(&fast_double_grow);

  // Array case: Get the length and the elements array from the JS
  // array. Check that the array is in fast mode (and writable); if it
  // is, the length is always a smi.
  __ bind(&array);
  __ ld(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));

  // Check the key against the length in the array.
  __ ld(a4, FieldMemOperand(receiver, JSArray::kLengthOffset));
  __ Branch(&extra, hs, key, Operand(a4));

  KeyedStoreGenerateMegamorphicHelper(
      masm, &fast_object, &fast_double, &slow, kCheckMap, kDontIncrementLength,
      value, key, receiver, receiver_map, elements_map, elements);
  KeyedStoreGenerateMegamorphicHelper(masm, &fast_object_grow,
                                      &fast_double_grow, &slow, kDontCheckMap,
                                      kIncrementLength, value, key, receiver,
                                      receiver_map, elements_map, elements);

  __ bind(&miss);
  GenerateMiss(masm);
}


static void StoreIC_PushArgs(MacroAssembler* masm) {
  __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
          StoreDescriptor::ValueRegister(),
          VectorStoreICDescriptor::SlotRegister(),
          VectorStoreICDescriptor::VectorRegister());
}


void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
  StoreIC_PushArgs(masm);

  __ TailCallRuntime(Runtime::kKeyedStoreIC_Miss);
}

void StoreIC::GenerateMiss(MacroAssembler* masm) {
  StoreIC_PushArgs(masm);

  // Perform tail call to the entry.
  __ TailCallRuntime(Runtime::kStoreIC_Miss);
}

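// Stores to a receiver whose properties live in a NameDictionary (slow mode).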
void StoreIC::GenerateNormal(MacroAssembler* masm) {
  Label miss;
  Register receiver = StoreDescriptor::ReceiverRegister();
  Register name = StoreDescriptor::NameRegister();
  Register value = StoreDescriptor::ValueRegister();
  Register dictionary = a5;
  DCHECK(!AreAliased(
      value, receiver, name, VectorStoreICDescriptor::VectorRegister(),
      VectorStoreICDescriptor::SlotRegister(), dictionary, a6, a7));

  __ ld(dictionary, FieldMemOperand(receiver, JSObject::kPropertiesOffset));

  GenerateDictionaryStore(masm, &miss, dictionary, name, value, a6, a7);
  Counters* counters = masm->isolate()->counters();
  __ IncrementCounter(counters->ic_store_normal_hit(), 1, a6, a7);
  __ Ret();

  __ bind(&miss);
  __ IncrementCounter(counters->ic_store_normal_miss(), 1, a6, a7);
  GenerateMiss(masm);
}


#undef __


Condition CompareIC::ComputeCondition(Token::Value op) {
  switch (op) {
    case Token::EQ_STRICT:
    case Token::EQ:
      return eq;
    case Token::LT:
      return lt;
    case Token::GT:
      return gt;
    case Token::LTE:
      return le;
    case Token::GTE:
      return ge;
    default:
      UNREACHABLE();
      return kNoCondition;
  }
}


bool CompareIC::HasInlinedSmiCode(Address address) {
  // The address of the instruction following the call.
  Address andi_instruction_address =
      address + Assembler::kCallTargetAddressOffset;

  // If the instruction following the call is not an andi at, rx, #yyy,
  // nothing was inlined.
  Instr instr = Assembler::instr_at(andi_instruction_address);
  return Assembler::IsAndImmediate(instr) &&
         Assembler::GetRt(instr) == static_cast<uint32_t>(zero_reg.code());
}


void PatchInlinedSmiCode(Isolate* isolate, Address address,
                         InlinedSmiCheck check) {
  Address andi_instruction_address =
      address + Assembler::kCallTargetAddressOffset;

  // If the instruction following the call is not an andi at, rx, #yyy,
  // nothing was inlined.
  Instr instr = Assembler::instr_at(andi_instruction_address);
  if (!(Assembler::IsAndImmediate(instr) &&
        Assembler::GetRt(instr) == static_cast<uint32_t>(zero_reg.code()))) {
    return;
  }

  // The andi instruction encodes the delta back to the start of the inlined
  // smi check, whose branch condition is flipped below.
  int delta = Assembler::GetImmediate16(instr);
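  // A delta too large for the 16-bit immediate spills into the rs field,
  // which encodes additional multiples of kImm16Mask.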
  delta += Assembler::GetRs(instr) * kImm16Mask;
  // If the delta is 0 the instruction is andi at, zero_reg, #0 which also
  // signals that nothing was inlined.
  if (delta == 0) {
    return;
  }

  if (FLAG_trace_ic) {
    PrintF("[  patching ic at %p, andi=%p, delta=%d\n",
           static_cast<void*>(address),
           static_cast<void*>(andi_instruction_address), delta);
  }

  Address patch_address =
      andi_instruction_address - delta * Instruction::kInstrSize;
  Instr instr_at_patch = Assembler::instr_at(patch_address);
  // This is patching a conditional "jump if not smi/jump if smi" site.
  // Enabling by changing from
  //   andi at, rx, 0
  //   Branch <target>, eq, at, Operand(zero_reg)
  // to:
  //   andi at, rx, #kSmiTagMask
  //   Branch <target>, ne, at, Operand(zero_reg)
  // and vice-versa to be disabled again.
  CodePatcher patcher(isolate, patch_address, 2);
  Register reg = Register::from_code(Assembler::GetRs(instr_at_patch));
  if (check == ENABLE_INLINED_SMI_CHECK) {
    DCHECK(Assembler::IsAndImmediate(instr_at_patch));
    DCHECK_EQ(0u, Assembler::GetImmediate16(instr_at_patch));
    patcher.masm()->andi(at, reg, kSmiTagMask);
  } else {
    DCHECK_EQ(check, DISABLE_INLINED_SMI_CHECK);
    DCHECK(Assembler::IsAndImmediate(instr_at_patch));
    patcher.masm()->andi(at, reg, 0);
  }
  Instr branch_instr =
      Assembler::instr_at(patch_address + Instruction::kInstrSize);
  DCHECK(Assembler::IsBranch(branch_instr));
  uint32_t opcode = Assembler::GetOpcodeField(branch_instr);
  // Currently only the 'eq' and 'ne' conditions are supported, via the simple
  // branch instructions and their r6 variants (with the opcode encoding the
  // branch type). There are some special cases (see Assembler::IsBranch()) so
  // extending this would be tricky.
  DCHECK(opcode == BEQ ||    // BEQ
         opcode == BNE ||    // BNE
         opcode == POP10 ||  // BEQC
         opcode == POP30 ||  // BNEC
         opcode == POP66 ||  // BEQZC
         opcode == POP76);   // BNEZC
  switch (opcode) {
    case BEQ:
      opcode = BNE;  // change BEQ to BNE.
      break;
    case POP10:
      opcode = POP30;  // change BEQC to BNEC.
      break;
    case POP66:
      opcode = POP76;  // change BEQZC to BNEZC.
      break;
    case BNE:
      opcode = BEQ;  // change BNE to BEQ.
      break;
    case POP30:
      opcode = POP10;  // change BNEC to BEQC.
      break;
    case POP76:
      opcode = POP66;  // change BNEZC to BEQZC.
      break;
    default:
      UNIMPLEMENTED();
  }
  patcher.ChangeBranchCondition(branch_instr, opcode);
}
}  // namespace internal
}  // namespace v8

#endif  // V8_TARGET_ARCH_MIPS64