// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#if V8_TARGET_ARCH_MIPS64

#include "src/codegen.h"
#include "src/ic/ic.h"
#include "src/ic/ic-compiler.h"
#include "src/ic/stub-cache.h"

namespace v8 {
namespace internal {


// ----------------------------------------------------------------------------
// Static IC stub generators.
//

#define __ ACCESS_MASM(masm)

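// Global objects and global proxies require special handling (e.g. named
// properties of globals live behind property cells), so the generic
// dictionary-based fast paths below bail out when either is encountered.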
static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm, Register type,
                                            Label* global_object) {
  // Register usage:
  //   type: holds the receiver instance type on entry.
  __ Branch(global_object, eq, type, Operand(JS_GLOBAL_OBJECT_TYPE));
  __ Branch(global_object, eq, type, Operand(JS_GLOBAL_PROXY_TYPE));
}

// Helper function used from LoadIC::GenerateNormal.
//
// elements: Property dictionary. It is not clobbered if a jump to the miss
//           label is done.
// name:     Property name. It is not clobbered if a jump to the miss label is
//           done.
// result:   Register for the result. It is only updated if a jump to the miss
//           label is not done. It may be the same as elements or name, in
//           which case that register is clobbered when the miss label is not
//           taken.
// The two scratch registers need to be different from elements, name and
// result.
// The generated code assumes that the receiver has slow properties,
// is not a global object and does not have interceptors.
// The address returned from NameDictionaryLookupStub::GeneratePositiveLookup()
// in scratch2 is used.
static void GenerateDictionaryLoad(MacroAssembler* masm, Label* miss,
                                   Register elements, Register name,
                                   Register result, Register scratch1,
                                   Register scratch2) {
  // Main use of the scratch registers.
  // scratch1: Used as temporary and to hold the capacity of the property
  //           dictionary.
  // scratch2: Used as temporary.
  Label done;

  // Probe the dictionary.
  NameDictionaryLookupStub::GeneratePositiveLookup(masm, miss, &done, elements,
                                                   name, scratch1, scratch2);

  // If probing finds an entry check that the value is a normal
  // property.
  __ bind(&done);  // scratch2 == elements + kPointerSize * index.
  const int kElementsStartOffset =
      NameDictionary::kHeaderSize +
      NameDictionary::kElementsStartIndex * kPointerSize;
  const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
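  // Each dictionary entry is a (key, value, details) triple, which is why the
  // details word sits two pointers past the entry start and the value (loaded
  // below) one pointer past it.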
  __ ld(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
  __ And(at, scratch1,
         Operand(Smi::FromInt(PropertyDetails::TypeField::kMask)));
  __ Branch(miss, ne, at, Operand(zero_reg));

  // Get the value at the masked, scaled index and return.
  __ ld(result,
        FieldMemOperand(scratch2, kElementsStartOffset + 1 * kPointerSize));
}


// Helper function used from StoreIC::GenerateNormal.
//
// elements: Property dictionary. It is not clobbered if a jump to the miss
//           label is done.
// name:     Property name. It is not clobbered if a jump to the miss label is
//           done.
// value:    The value to store.
// The two scratch registers need to be different from elements, name and
// value.
// The generated code assumes that the receiver has slow properties,
// is not a global object and does not have interceptors.
// The address returned from NameDictionaryLookupStub::GeneratePositiveLookup()
// in scratch2 is used.
static void GenerateDictionaryStore(MacroAssembler* masm, Label* miss,
                                    Register elements, Register name,
                                    Register value, Register scratch1,
                                    Register scratch2) {
  // Main use of the scratch registers.
  // scratch1: Used as temporary and to hold the capacity of the property
  //           dictionary.
  // scratch2: Used as temporary.
  Label done;

  // Probe the dictionary.
  NameDictionaryLookupStub::GeneratePositiveLookup(masm, miss, &done, elements,
                                                   name, scratch1, scratch2);

  // If probing finds an entry in the dictionary check that the value
  // is a normal property that is not read only.
  __ bind(&done);  // scratch2 == elements + kPointerSize * index.
  const int kElementsStartOffset =
      NameDictionary::kHeaderSize +
      NameDictionary::kElementsStartIndex * kPointerSize;
  const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
  const int kTypeAndReadOnlyMask =
      (PropertyDetails::TypeField::kMask |
       PropertyDetails::AttributesField::encode(READ_ONLY));
  __ ld(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
  __ And(at, scratch1, Operand(Smi::FromInt(kTypeAndReadOnlyMask)));
  __ Branch(miss, ne, at, Operand(zero_reg));

  // Store the value at the masked, scaled index and return.
  const int kValueOffset = kElementsStartOffset + kPointerSize;
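  // Form the raw (untagged) address of the value slot; the write barrier
  // below needs the actual slot address, not a tagged field operand.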
  __ Daddu(scratch2, scratch2, Operand(kValueOffset - kHeapObjectTag));
  __ sd(value, MemOperand(scratch2));

  // Update the write barrier. Make sure not to clobber the value.
  __ mov(scratch1, value);
  __ RecordWrite(elements, scratch2, scratch1, kRAHasNotBeenSaved,
                 kDontSaveFPRegs);
}

// Checks the receiver for special cases (value type, slow case bits).
// Falls through for regular JS objects.
static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
                                           Register receiver, Register map,
                                           Register scratch,
                                           int interceptor_bit, Label* slow) {
  // Check that the object isn't a smi.
  __ JumpIfSmi(receiver, slow);
  // Get the map of the receiver.
  __ ld(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
  // Check bit field.
  __ lbu(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
  __ And(at, scratch,
         Operand((1 << Map::kIsAccessCheckNeeded) | (1 << interceptor_bit)));
  __ Branch(slow, ne, at, Operand(zero_reg));
  // Check that the object is some kind of JS object EXCEPT the JS Value type.
  // In the case that the object is a value-wrapper object, we enter the
  // runtime system to make sure that indexing into string objects works as
  // intended.
  DCHECK(JS_OBJECT_TYPE > JS_VALUE_TYPE);
  __ lbu(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
  __ Branch(slow, lt, scratch, Operand(JS_OBJECT_TYPE));
}


// Loads an indexed element from a fast case array.
static void GenerateFastArrayLoad(MacroAssembler* masm, Register receiver,
                                  Register key, Register elements,
                                  Register scratch1, Register scratch2,
                                  Register result, Label* slow,
                                  LanguageMode language_mode) {
  // Register use:
  //
  // receiver - holds the receiver on entry.
  //            Unchanged unless 'result' is the same register.
  //
  // key      - holds the smi key on entry.
  //            Unchanged unless 'result' is the same register.
  //
  // result   - holds the result on exit if the load succeeded.
  //            Allowed to be the same as 'receiver' or 'key'.
  //            Unchanged on bailout so 'receiver' and 'key' can be safely
  //            used by further computation.
  //
  // Scratch registers:
  //
  // elements - holds the elements of the receiver and its prototypes.
  //
  // scratch1 - used to hold elements length, bit fields, base addresses.
  //
  // scratch2 - used to hold maps, prototypes, and the loaded value.
  Label check_prototypes, check_next_prototype;
  Label done, in_bounds, absent;

  __ ld(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ AssertFastElements(elements);

  // Check that the key (index) is within bounds.
  __ ld(scratch1, FieldMemOperand(elements, FixedArray::kLengthOffset));
  __ Branch(&in_bounds, lo, key, Operand(scratch1));
  // Out-of-bounds. Check the prototype chain to see if we can just return
  // 'undefined'.
  // Negative keys can't take the fast OOB path.
  __ Branch(slow, lt, key, Operand(zero_reg));
  __ bind(&check_prototypes);
  __ ld(scratch2, FieldMemOperand(receiver, HeapObject::kMapOffset));
  __ bind(&check_next_prototype);
  __ ld(scratch2, FieldMemOperand(scratch2, Map::kPrototypeOffset));
  // scratch2: current prototype
  __ LoadRoot(at, Heap::kNullValueRootIndex);
  __ Branch(&absent, eq, scratch2, Operand(at));
  __ ld(elements, FieldMemOperand(scratch2, JSObject::kElementsOffset));
  __ ld(scratch2, FieldMemOperand(scratch2, HeapObject::kMapOffset));
  // elements: elements of current prototype
  // scratch2: map of current prototype
  __ lbu(scratch1, FieldMemOperand(scratch2, Map::kInstanceTypeOffset));
  __ Branch(slow, lo, scratch1, Operand(JS_OBJECT_TYPE));
  __ lbu(scratch1, FieldMemOperand(scratch2, Map::kBitFieldOffset));
  __ And(at, scratch1, Operand((1 << Map::kIsAccessCheckNeeded) |
                               (1 << Map::kHasIndexedInterceptor)));
  __ Branch(slow, ne, at, Operand(zero_reg));
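  // Only prototypes whose elements backing store is the empty fixed array can
  // be skipped safely; anything else could supply the out-of-bounds index.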
  __ LoadRoot(at, Heap::kEmptyFixedArrayRootIndex);
  __ Branch(slow, ne, elements, Operand(at));
  __ Branch(&check_next_prototype);

  __ bind(&absent);
  if (is_strong(language_mode)) {
    __ Branch(slow);
  } else {
    __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
    __ Branch(&done);
  }

  __ bind(&in_bounds);
  // Fast case: Do the load.
  __ Daddu(scratch1, elements,
           Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  // The key is a smi.
  STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
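  // On MIPS64 the smi payload lives in the upper 32 bits, so SmiScale can
  // untag the key and scale it by kPointerSize with a single shift.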
  __ SmiScale(at, key, kPointerSizeLog2);
  __ daddu(at, at, scratch1);
  __ ld(scratch2, MemOperand(at));

  __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
  // In case the loaded value is the_hole we have to check the prototype chain.
  __ Branch(&check_prototypes, eq, scratch2, Operand(at));
  __ Move(result, scratch2);
  __ bind(&done);
}

// Checks whether a key is an array index string or a unique name.
// Falls through if the key is a unique name.
static void GenerateKeyNameCheck(MacroAssembler* masm, Register key,
                                 Register map, Register hash,
                                 Label* index_string, Label* not_unique) {
  // The key is not a smi.
  Label unique;
  // Is it a name?
  __ GetObjectType(key, map, hash);
  __ Branch(not_unique, hi, hash, Operand(LAST_UNIQUE_NAME_TYPE));
  STATIC_ASSERT(LAST_UNIQUE_NAME_TYPE == FIRST_NONSTRING_TYPE);
  __ Branch(&unique, eq, hash, Operand(LAST_UNIQUE_NAME_TYPE));

  // Is the string an array index, with cached numeric value?
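  // If (hash & kContainsCachedArrayIndexMask) is zero, the hash field caches
  // the numeric index; IndexFromHash extracts it at the jump target.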
  __ lwu(hash, FieldMemOperand(key, Name::kHashFieldOffset));
  __ And(at, hash, Operand(Name::kContainsCachedArrayIndexMask));
  __ Branch(index_string, eq, at, Operand(zero_reg));

  // Is the string internalized? We know it's a string, so a single
  // bit test is enough.
  // map: key map
  __ lbu(hash, FieldMemOperand(map, Map::kInstanceTypeOffset));
  STATIC_ASSERT(kInternalizedTag == 0);
  __ And(at, hash, Operand(kIsNotInternalizedMask));
  __ Branch(not_unique, ne, at, Operand(zero_reg));

  __ bind(&unique);
}


void LoadIC::GenerateNormal(MacroAssembler* masm, LanguageMode language_mode) {
  Register dictionary = a0;
  DCHECK(!dictionary.is(LoadDescriptor::ReceiverRegister()));
  DCHECK(!dictionary.is(LoadDescriptor::NameRegister()));
  Label slow;

  __ ld(dictionary, FieldMemOperand(LoadDescriptor::ReceiverRegister(),
                                    JSObject::kPropertiesOffset));
  GenerateDictionaryLoad(masm, &slow, dictionary,
                         LoadDescriptor::NameRegister(), v0, a3, a4);
  __ Ret();

  // Dictionary load failed, go slow (but don't miss).
  __ bind(&slow);
  GenerateRuntimeGetProperty(masm, language_mode);
}

// A register that isn't one of the parameters to the load IC.
static const Register LoadIC_TempRegister() { return a3; }


static void LoadIC_PushArgs(MacroAssembler* masm) {
  Register receiver = LoadDescriptor::ReceiverRegister();
  Register name = LoadDescriptor::NameRegister();
  Register slot = LoadDescriptor::SlotRegister();
  Register vector = LoadWithVectorDescriptor::VectorRegister();

  __ Push(receiver, name, slot, vector);
}


void LoadIC::GenerateMiss(MacroAssembler* masm) {
  // The return address is on the stack.
  Isolate* isolate = masm->isolate();

  DCHECK(!AreAliased(a4, a5, LoadWithVectorDescriptor::SlotRegister(),
                     LoadWithVectorDescriptor::VectorRegister()));
  __ IncrementCounter(isolate->counters()->load_miss(), 1, a4, a5);

  LoadIC_PushArgs(masm);

  // Perform tail call to the entry.
  __ TailCallRuntime(Runtime::kLoadIC_Miss);
}


void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm,
                                        LanguageMode language_mode) {
  // The return address is in ra.

  __ mov(LoadIC_TempRegister(), LoadDescriptor::ReceiverRegister());
  __ Push(LoadIC_TempRegister(), LoadDescriptor::NameRegister());

  // Do tail-call to runtime routine.
  __ TailCallRuntime(is_strong(language_mode) ? Runtime::kGetPropertyStrong
                                              : Runtime::kGetProperty);
}


void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
  // The return address is in ra.
  Isolate* isolate = masm->isolate();

  DCHECK(!AreAliased(a4, a5, LoadWithVectorDescriptor::SlotRegister(),
                     LoadWithVectorDescriptor::VectorRegister()));
  __ IncrementCounter(isolate->counters()->keyed_load_miss(), 1, a4, a5);

  LoadIC_PushArgs(masm);

  // Perform tail call to the entry.
  __ TailCallRuntime(Runtime::kKeyedLoadIC_Miss);
}


void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm,
                                             LanguageMode language_mode) {
  // The return address is in ra.

  __ Push(LoadDescriptor::ReceiverRegister(), LoadDescriptor::NameRegister());

  // Do tail-call to runtime routine.
  __ TailCallRuntime(is_strong(language_mode) ? Runtime::kKeyedGetPropertyStrong
                                              : Runtime::kKeyedGetProperty);
}


void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm,
                                      LanguageMode language_mode) {
  // The return address is in ra.
  Label slow, check_name, index_smi, index_name, property_array_property;
  Label probe_dictionary, check_number_dictionary;

  Register key = LoadDescriptor::NameRegister();
  Register receiver = LoadDescriptor::ReceiverRegister();
  DCHECK(key.is(a2));
  DCHECK(receiver.is(a1));

  Isolate* isolate = masm->isolate();

  // Check that the key is a smi.
  __ JumpIfNotSmi(key, &check_name);
  __ bind(&index_smi);
  // Now the key is known to be a smi. This place is also jumped to from below
  // where a numeric string is converted to a smi.

  GenerateKeyedLoadReceiverCheck(masm, receiver, a0, a3,
                                 Map::kHasIndexedInterceptor, &slow);

  // Check the receiver's map to see if it has fast elements.
  __ CheckFastElements(a0, a3, &check_number_dictionary);

  GenerateFastArrayLoad(masm, receiver, key, a0, a3, a4, v0, &slow,
                        language_mode);
  __ IncrementCounter(isolate->counters()->keyed_load_generic_smi(), 1, a4, a3);
  __ Ret();

  __ bind(&check_number_dictionary);
  __ ld(a4, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ ld(a3, FieldMemOperand(a4, JSObject::kMapOffset));

  // Check whether the elements object is a number dictionary.
  // a3: elements map
  // a4: elements
  __ LoadRoot(at, Heap::kHashTableMapRootIndex);
  __ Branch(&slow, ne, a3, Operand(at));
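  // Untag the smi key: the payload sits in the upper 32 bits on MIPS64, so an
  // arithmetic right shift by 32 recovers the integer index.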
  __ dsra32(a0, key, 0);
  __ LoadFromNumberDictionary(&slow, a4, key, v0, a0, a3, a5);
  __ Ret();

  // Slow case, key and receiver still in a2 and a1.
  __ bind(&slow);
  __ IncrementCounter(isolate->counters()->keyed_load_generic_slow(), 1, a4,
                      a3);
  GenerateRuntimeGetProperty(masm, language_mode);

  __ bind(&check_name);
  GenerateKeyNameCheck(masm, key, a0, a3, &index_name, &slow);

  GenerateKeyedLoadReceiverCheck(masm, receiver, a0, a3,
                                 Map::kHasNamedInterceptor, &slow);

  // If the receiver is a fast-case object, check the stub cache. Otherwise
  // probe the dictionary.
  __ ld(a3, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
  __ ld(a4, FieldMemOperand(a3, HeapObject::kMapOffset));
  __ LoadRoot(at, Heap::kHashTableMapRootIndex);
  __ Branch(&probe_dictionary, eq, a4, Operand(at));
  // The handlers in the stub cache expect a vector and slot. Since we won't
  // change the IC state on any downstream miss, a dummy vector can be used.
  Register vector = LoadWithVectorDescriptor::VectorRegister();
  Register slot = LoadWithVectorDescriptor::SlotRegister();
  DCHECK(!AreAliased(vector, slot, a4, a5, a6, t1));
  Handle<TypeFeedbackVector> dummy_vector =
      TypeFeedbackVector::DummyVector(masm->isolate());
  int slot_index = dummy_vector->GetIndex(
      FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedLoadICSlot));
  __ LoadRoot(vector, Heap::kDummyVectorRootIndex);
  __ li(slot, Operand(Smi::FromInt(slot_index)));

  Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
      Code::ComputeHandlerFlags(Code::LOAD_IC));
  masm->isolate()->stub_cache()->GenerateProbe(masm, Code::LOAD_IC, flags,
                                               receiver, key, a4, a5, a6, t1);
  // Cache miss.
  GenerateMiss(masm);

  // Do a quick inline probe of the receiver's dictionary, if it
  // exists.
  __ bind(&probe_dictionary);
  // a3: elements
  __ ld(a0, FieldMemOperand(receiver, HeapObject::kMapOffset));
  __ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset));
  GenerateGlobalInstanceTypeCheck(masm, a0, &slow);
  // Load the property to v0.
  GenerateDictionaryLoad(masm, &slow, a3, key, v0, a5, a4);
  __ IncrementCounter(isolate->counters()->keyed_load_generic_symbol(), 1, a4,
                      a3);
  __ Ret();

  __ bind(&index_name);
  __ IndexFromHash(a3, key);
  // Now jump to the place where smi keys are handled.
  __ Branch(&index_smi);
}


static void KeyedStoreGenerateMegamorphicHelper(
    MacroAssembler* masm, Label* fast_object, Label* fast_double, Label* slow,
    KeyedStoreCheckMap check_map, KeyedStoreIncrementLength increment_length,
    Register value, Register key, Register receiver, Register receiver_map,
    Register elements_map, Register elements) {
  Label transition_smi_elements;
  Label finish_object_store, non_double_value, transition_double_elements;
  Label fast_double_without_map_check;

  // Fast case: Do the store, could be either Object or double.
  __ bind(fast_object);
  Register scratch = a4;
  Register scratch2 = t0;
  Register address = a5;
  DCHECK(!AreAliased(value, key, receiver, receiver_map, elements_map, elements,
                     scratch, scratch2, address));

  if (check_map == kCheckMap) {
    __ ld(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
    __ Branch(fast_double, ne, elements_map,
              Operand(masm->isolate()->factory()->fixed_array_map()));
  }

  // HOLECHECK: guards "A[i] = V"
  // We have to go to the runtime if the current value is the hole because
  // there may be a callback on the element.
  Label holecheck_passed1;
  __ Daddu(address, elements, FixedArray::kHeaderSize - kHeapObjectTag);
  __ SmiScale(at, key, kPointerSizeLog2);
  __ daddu(address, address, at);
  __ ld(scratch, MemOperand(address));

  __ Branch(&holecheck_passed1, ne, scratch,
            Operand(masm->isolate()->factory()->the_hole_value()));
  __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch, slow);

  __ bind(&holecheck_passed1);

  // Smi stores don't require further checks.
  Label non_smi_value;
  __ JumpIfNotSmi(value, &non_smi_value);

  if (increment_length == kIncrementLength) {
    // Add 1 to receiver->length.
    __ Daddu(scratch, key, Operand(Smi::FromInt(1)));
    __ sd(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
  }
  // It's irrelevant whether the array is smi-only or not when writing a smi.
  __ Daddu(address, elements,
           Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ SmiScale(scratch, key, kPointerSizeLog2);
  __ Daddu(address, address, scratch);
  __ sd(value, MemOperand(address));
  __ Ret();

  __ bind(&non_smi_value);
  // Escape to elements kind transition case.
  __ CheckFastObjectElements(receiver_map, scratch, &transition_smi_elements);

  // Fast elements array, store the value to the elements backing store.
  __ bind(&finish_object_store);
  if (increment_length == kIncrementLength) {
    // Add 1 to receiver->length.
    __ Daddu(scratch, key, Operand(Smi::FromInt(1)));
    __ sd(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
  }
  __ Daddu(address, elements,
           Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ SmiScale(scratch, key, kPointerSizeLog2);
  __ Daddu(address, address, scratch);
  __ sd(value, MemOperand(address));
  // Update write barrier for the elements array address.
  __ mov(scratch, value);  // Preserve the value which is returned.
  __ RecordWrite(elements, address, scratch, kRAHasNotBeenSaved,
                 kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
  __ Ret();

  __ bind(fast_double);
  if (check_map == kCheckMap) {
    // Check for fast double array case. If this fails, call through to the
    // runtime.
    __ LoadRoot(at, Heap::kFixedDoubleArrayMapRootIndex);
    __ Branch(slow, ne, elements_map, Operand(at));
  }

  // HOLECHECK: guards "A[i] double hole?"
  // We have to see if the double version of the hole is present. If so, go
  // to the runtime.
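  // The hole in a double array is a NaN with a distinguished upper 32 bits
  // (kHoleNanUpper32), so loading and comparing just the exponent word of the
  // double is sufficient.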
  __ Daddu(address, elements,
           Operand(FixedDoubleArray::kHeaderSize + Register::kExponentOffset -
                   kHeapObjectTag));
  __ SmiScale(at, key, kPointerSizeLog2);
  __ daddu(address, address, at);
  __ lw(scratch, MemOperand(address));
  __ Branch(&fast_double_without_map_check, ne, scratch,
            Operand(static_cast<int32_t>(kHoleNanUpper32)));
  __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch, slow);

  __ bind(&fast_double_without_map_check);
  __ StoreNumberToDoubleElements(value, key, elements, scratch, scratch2,
                                 &transition_double_elements);
  if (increment_length == kIncrementLength) {
    // Add 1 to receiver->length.
    __ Daddu(scratch, key, Operand(Smi::FromInt(1)));
    __ sd(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
  }
  __ Ret();

  __ bind(&transition_smi_elements);
  // Transition the array appropriately depending on the value type.
  __ ld(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
  __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
  __ Branch(&non_double_value, ne, scratch, Operand(at));

  // Value is a double. Transition FAST_SMI_ELEMENTS ->
  // FAST_DOUBLE_ELEMENTS and complete the store.
  __ LoadTransitionedArrayMapConditional(
      FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS, receiver_map, scratch, slow);
  AllocationSiteMode mode =
      AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS);
  ElementsTransitionGenerator::GenerateSmiToDouble(masm, receiver, key, value,
                                                   receiver_map, mode, slow);
  __ ld(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ jmp(&fast_double_without_map_check);

  __ bind(&non_double_value);
  // Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS.
  __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, FAST_ELEMENTS,
                                         receiver_map, scratch, slow);
  mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS);
  ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
      masm, receiver, key, value, receiver_map, mode, slow);
  __ ld(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ jmp(&finish_object_store);

  __ bind(&transition_double_elements);
  // Elements are FAST_DOUBLE_ELEMENTS, but value is an Object that's not a
  // HeapNumber. Make sure that the receiver is an Array with FAST_ELEMENTS
  // and transition the array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS.
  __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS,
                                         receiver_map, scratch, slow);
  mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS);
  ElementsTransitionGenerator::GenerateDoubleToObject(
      masm, receiver, key, value, receiver_map, mode, slow);
  __ ld(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ jmp(&finish_object_store);
}


void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
                                       LanguageMode language_mode) {
  // ---------- S t a t e --------------
  //  -- a0     : value
  //  -- a1     : key
  //  -- a2     : receiver
  //  -- ra     : return address
  // -----------------------------------
  Label slow, fast_object, fast_object_grow;
  Label fast_double, fast_double_grow;
  Label array, extra, check_if_double_array, maybe_name_key, miss;

  // Register usage.
  Register value = StoreDescriptor::ValueRegister();
  Register key = StoreDescriptor::NameRegister();
  Register receiver = StoreDescriptor::ReceiverRegister();
  DCHECK(value.is(a0));
  Register receiver_map = a3;
  Register elements_map = a6;
  Register elements = a7;  // Elements array of the receiver.
  // a4 and a5 are used as general scratch registers.

  // Check that the key is a smi.
  __ JumpIfNotSmi(key, &maybe_name_key);
  // Check that the object isn't a smi.
  __ JumpIfSmi(receiver, &slow);
  // Get the map of the object.
  __ ld(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
  // Check that the receiver does not require access checks and is not
  // observed. The generic stub does not perform map checks or handle observed
  // objects.
  __ lbu(a4, FieldMemOperand(receiver_map, Map::kBitFieldOffset));
  __ And(a4, a4,
         Operand(1 << Map::kIsAccessCheckNeeded | 1 << Map::kIsObserved));
  __ Branch(&slow, ne, a4, Operand(zero_reg));
  // Check if the object is a JS array or not.
  __ lbu(a4, FieldMemOperand(receiver_map, Map::kInstanceTypeOffset));
  __ Branch(&array, eq, a4, Operand(JS_ARRAY_TYPE));
  // Check that the object is some kind of JSObject.
  __ Branch(&slow, lt, a4, Operand(FIRST_JS_OBJECT_TYPE));

  // Object case: Check key against length in the elements array.
  __ ld(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  // Check array bounds. Both the key and the length of FixedArray are smis.
  __ ld(a4, FieldMemOperand(elements, FixedArray::kLengthOffset));
  __ Branch(&fast_object, lo, key, Operand(a4));

  // Slow case, handle jump to runtime.
  __ bind(&slow);
  // Entry registers are intact.
  // a0: value.
  // a1: key.
  // a2: receiver.
  PropertyICCompiler::GenerateRuntimeSetProperty(masm, language_mode);
  // Never returns to here.

  __ bind(&maybe_name_key);
  __ ld(a4, FieldMemOperand(key, HeapObject::kMapOffset));
  __ lb(a4, FieldMemOperand(a4, Map::kInstanceTypeOffset));
  __ JumpIfNotUniqueNameInstanceType(a4, &slow);

  // The handlers in the stub cache expect a vector and slot. Since we won't
  // change the IC state on any downstream miss, a dummy vector can be used.
  Register vector = VectorStoreICDescriptor::VectorRegister();
  Register slot = VectorStoreICDescriptor::SlotRegister();

  DCHECK(!AreAliased(vector, slot, a5, a6, a7, t0));
  Handle<TypeFeedbackVector> dummy_vector =
      TypeFeedbackVector::DummyVector(masm->isolate());
  int slot_index = dummy_vector->GetIndex(
      FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedStoreICSlot));
  __ LoadRoot(vector, Heap::kDummyVectorRootIndex);
  __ li(slot, Operand(Smi::FromInt(slot_index)));

  Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
      Code::ComputeHandlerFlags(Code::STORE_IC));
  masm->isolate()->stub_cache()->GenerateProbe(masm, Code::STORE_IC, flags,
                                               receiver, key, a5, a6, a7, t0);
  // Cache miss.
  __ Branch(&miss);

  // Extra capacity case: Check if there is extra capacity to
  // perform the store and update the length. Used for adding one
  // element to the array by writing to array[array.length].
  __ bind(&extra);
  // a4 still holds the array length from the comparison above.
  // Only support writing to array[array.length].
  __ Branch(&slow, ne, key, Operand(a4));
  // Check for room in the elements backing store.
  // Both the key and the length of FixedArray are smis.
  __ ld(a4, FieldMemOperand(elements, FixedArray::kLengthOffset));
  __ Branch(&slow, hs, key, Operand(a4));
  __ ld(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
  __ Branch(&check_if_double_array, ne, elements_map,
            Heap::kFixedArrayMapRootIndex);

  __ jmp(&fast_object_grow);

  __ bind(&check_if_double_array);
  __ Branch(&slow, ne, elements_map, Heap::kFixedDoubleArrayMapRootIndex);
  __ jmp(&fast_double_grow);

  // Array case: Get the length and the elements array from the JS
  // array. Check that the array is in fast mode (and writable); if it
  // is, the length is always a smi.
  __ bind(&array);
  __ ld(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));

  // Check the key against the length in the array.
  __ ld(a4, FieldMemOperand(receiver, JSArray::kLengthOffset));
  __ Branch(&extra, hs, key, Operand(a4));

  KeyedStoreGenerateMegamorphicHelper(
      masm, &fast_object, &fast_double, &slow, kCheckMap, kDontIncrementLength,
      value, key, receiver, receiver_map, elements_map, elements);
  KeyedStoreGenerateMegamorphicHelper(masm, &fast_object_grow,
                                      &fast_double_grow, &slow, kDontCheckMap,
                                      kIncrementLength, value, key, receiver,
                                      receiver_map, elements_map, elements);

  __ bind(&miss);
  GenerateMiss(masm);
}


static void StoreIC_PushArgs(MacroAssembler* masm) {
  __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
          StoreDescriptor::ValueRegister(),
          VectorStoreICDescriptor::SlotRegister(),
          VectorStoreICDescriptor::VectorRegister());
}


void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
  StoreIC_PushArgs(masm);

  __ TailCallRuntime(Runtime::kKeyedStoreIC_Miss);
}


void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
  Register receiver = StoreDescriptor::ReceiverRegister();
  Register name = StoreDescriptor::NameRegister();
  DCHECK(receiver.is(a1));
  DCHECK(name.is(a2));
  DCHECK(StoreDescriptor::ValueRegister().is(a0));

  // Get the receiver from the stack and probe the stub cache.
  Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
      Code::ComputeHandlerFlags(Code::STORE_IC));
  masm->isolate()->stub_cache()->GenerateProbe(masm, Code::STORE_IC, flags,
                                               receiver, name, a3, a4, a5, a6);

  // Cache miss: Jump to runtime.
  GenerateMiss(masm);
}


void StoreIC::GenerateMiss(MacroAssembler* masm) {
  StoreIC_PushArgs(masm);

  // Perform tail call to the entry.
  __ TailCallRuntime(Runtime::kStoreIC_Miss);
}


void StoreIC::GenerateNormal(MacroAssembler* masm) {
  Label miss;
  Register receiver = StoreDescriptor::ReceiverRegister();
  Register name = StoreDescriptor::NameRegister();
  Register value = StoreDescriptor::ValueRegister();
  Register dictionary = a5;
  DCHECK(!AreAliased(
      value, receiver, name, VectorStoreICDescriptor::VectorRegister(),
      VectorStoreICDescriptor::SlotRegister(), dictionary, a6, a7));

  __ ld(dictionary, FieldMemOperand(receiver, JSObject::kPropertiesOffset));

  GenerateDictionaryStore(masm, &miss, dictionary, name, value, a6, a7);
  Counters* counters = masm->isolate()->counters();
  __ IncrementCounter(counters->store_normal_hit(), 1, a6, a7);
  __ Ret();

  __ bind(&miss);
  __ IncrementCounter(counters->store_normal_miss(), 1, a6, a7);
  GenerateMiss(masm);
}


#undef __


Condition CompareIC::ComputeCondition(Token::Value op) {
  switch (op) {
    case Token::EQ_STRICT:
    case Token::EQ:
      return eq;
    case Token::LT:
      return lt;
    case Token::GT:
      return gt;
    case Token::LTE:
      return le;
    case Token::GTE:
      return ge;
    default:
      UNREACHABLE();
      return kNoCondition;
  }
}


bool CompareIC::HasInlinedSmiCode(Address address) {
  // The address of the instruction following the call.
  Address andi_instruction_address =
      address + Assembler::kCallTargetAddressOffset;

  // If the instruction following the call is not an andi at, rx, #yyy,
  // nothing was inlined.
  Instr instr = Assembler::instr_at(andi_instruction_address);
  return Assembler::IsAndImmediate(instr) &&
         Assembler::GetRt(instr) == static_cast<uint32_t>(zero_reg.code());
}


void PatchInlinedSmiCode(Isolate* isolate, Address address,
                         InlinedSmiCheck check) {
  Address andi_instruction_address =
      address + Assembler::kCallTargetAddressOffset;

  // If the instruction following the call is not an andi at, rx, #yyy,
  // nothing was inlined.
  Instr instr = Assembler::instr_at(andi_instruction_address);
  if (!(Assembler::IsAndImmediate(instr) &&
        Assembler::GetRt(instr) == static_cast<uint32_t>(zero_reg.code()))) {
    return;
  }

  // The delta back to the start of the inlined smi check; the branch there is
  // the one whose condition gets flipped when patching.
  int delta = Assembler::GetImmediate16(instr);
  delta += Assembler::GetRs(instr) * kImm16Mask;
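  // The low 16 bits of the delta live in the andi's immediate field; the rs
  // field extends the range, each rs increment contributing kImm16Mask.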
  // If the delta is 0, the instruction is andi at, zero_reg, #0, which also
  // signals that nothing was inlined.
  if (delta == 0) {
    return;
  }

  if (FLAG_trace_ic) {
    PrintF("[  patching ic at %p, andi=%p, delta=%d\n", address,
           andi_instruction_address, delta);
  }

  Address patch_address =
      andi_instruction_address - delta * Instruction::kInstrSize;
  Instr instr_at_patch = Assembler::instr_at(patch_address);
  // This is patching a conditional "jump if not smi/jump if smi" site.
  // Enabling by changing from
  //   andi at, rx, 0
  //   Branch <target>, eq, at, Operand(zero_reg)
  // to:
  //   andi at, rx, #kSmiTagMask
  //   Branch <target>, ne, at, Operand(zero_reg)
  // and vice-versa to be disabled again.
  CodePatcher patcher(isolate, patch_address, 2);
  Register reg = Register::from_code(Assembler::GetRs(instr_at_patch));
  if (check == ENABLE_INLINED_SMI_CHECK) {
    DCHECK(Assembler::IsAndImmediate(instr_at_patch));
    DCHECK_EQ(0u, Assembler::GetImmediate16(instr_at_patch));
    patcher.masm()->andi(at, reg, kSmiTagMask);
  } else {
    DCHECK_EQ(check, DISABLE_INLINED_SMI_CHECK);
    DCHECK(Assembler::IsAndImmediate(instr_at_patch));
    patcher.masm()->andi(at, reg, 0);
  }
  Instr branch_instr =
      Assembler::instr_at(patch_address + Instruction::kInstrSize);
  DCHECK(Assembler::IsBranch(branch_instr));

  uint32_t opcode = Assembler::GetOpcodeField(branch_instr);
  // Currently only the 'eq' and 'ne' conditions are supported, via the simple
  // branch instructions and their r6 variants (with the opcode encoding the
  // branch type). There are some special cases (see Assembler::IsBranch()),
  // so extending this would be tricky.
  DCHECK(opcode == BEQ ||    // BEQ
         opcode == BNE ||    // BNE
         opcode == POP10 ||  // BEQC
         opcode == POP30 ||  // BNEC
         opcode == POP66 ||  // BEQZC
         opcode == POP76);   // BNEZC
  switch (opcode) {
    case BEQ:
      opcode = BNE;  // change BEQ to BNE.
      break;
    case POP10:
      opcode = POP30;  // change BEQC to BNEC.
      break;
    case POP66:
      opcode = POP76;  // change BEQZC to BNEZC.
      break;
    case BNE:
      opcode = BEQ;  // change BNE to BEQ.
      break;
    case POP30:
      opcode = POP10;  // change BNEC to BEQC.
      break;
    case POP76:
      opcode = POP66;  // change BNEZC to BEQZC.
      break;
    default:
      UNIMPLEMENTED();
  }
  patcher.ChangeBranchCondition(branch_instr, opcode);
}
}  // namespace internal
}  // namespace v8

#endif  // V8_TARGET_ARCH_MIPS64