code-stubs-mips.cc revision db1b4389239a7132c9cde0915dbd3f775dc1027a
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#if defined(V8_TARGET_ARCH_MIPS)

#include "bootstrapper.h"
#include "code-stubs.h"
#include "codegen.h"
#include "regexp-macro-assembler.h"

namespace v8 {
namespace internal {


#define __ ACCESS_MASM(masm)

static void EmitIdenticalObjectComparison(MacroAssembler* masm,
                                          Label* slow,
                                          Condition cc,
                                          bool never_nan_nan);
static void EmitSmiNonsmiComparison(MacroAssembler* masm,
                                    Register lhs,
                                    Register rhs,
                                    Label* rhs_not_nan,
                                    Label* slow,
                                    bool strict);
static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cc);
static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
                                           Register lhs,
                                           Register rhs);


// Check if the operand is a heap number.
static void EmitCheckForHeapNumber(MacroAssembler* masm, Register operand,
                                   Register scratch1, Register scratch2,
                                   Label* not_a_heap_number) {
  __ lw(scratch1, FieldMemOperand(operand, HeapObject::kMapOffset));
  __ LoadRoot(scratch2, Heap::kHeapNumberMapRootIndex);
  __ Branch(not_a_heap_number, ne, scratch1, Operand(scratch2));
}


void ToNumberStub::Generate(MacroAssembler* masm) {
  // The ToNumber stub takes one argument in a0.
  Label check_heap_number, call_builtin;
  __ JumpIfNotSmi(a0, &check_heap_number);
  __ Ret(USE_DELAY_SLOT);
  __ mov(v0, a0);

  __ bind(&check_heap_number);
  EmitCheckForHeapNumber(masm, a0, a1, t0, &call_builtin);
  __ Ret(USE_DELAY_SLOT);
  __ mov(v0, a0);

  __ bind(&call_builtin);
  __ push(a0);
  __ InvokeBuiltin(Builtins::TO_NUMBER, JUMP_FUNCTION);
}
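
// A note on the USE_DELAY_SLOT idiom above: MIPS executes the instruction
// that immediately follows a jump or branch (the branch delay slot).
// Passing USE_DELAY_SLOT tells the macro assembler not to pad the slot with
// a nop, so the instruction emitted next -- here the move of the result into
// v0 -- executes as part of the return sequence. The same pattern recurs
// throughout this file.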

void FastNewClosureStub::Generate(MacroAssembler* masm) {
  // Create a new closure from the given function info in new
  // space. Set the context to the current context in cp.
  Label gc;

  // Pop the function info from the stack.
  __ pop(a3);

  // Attempt to allocate new JSFunction in new space.
  __ AllocateInNewSpace(JSFunction::kSize,
                        v0,
                        a1,
                        a2,
                        &gc,
                        TAG_OBJECT);

  int map_index = (language_mode_ == CLASSIC_MODE)
      ? Context::FUNCTION_MAP_INDEX
      : Context::STRICT_MODE_FUNCTION_MAP_INDEX;

  // Compute the function map in the current global context and set that
  // as the map of the allocated object.
  __ lw(a2, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
  __ lw(a2, FieldMemOperand(a2, GlobalObject::kGlobalContextOffset));
  __ lw(a2, MemOperand(a2, Context::SlotOffset(map_index)));
  __ sw(a2, FieldMemOperand(v0, HeapObject::kMapOffset));

  // Initialize the rest of the function. We don't have to update the
  // write barrier because the allocated object is in new space.
  __ LoadRoot(a1, Heap::kEmptyFixedArrayRootIndex);
  __ LoadRoot(a2, Heap::kTheHoleValueRootIndex);
  __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
  __ sw(a1, FieldMemOperand(v0, JSObject::kPropertiesOffset));
  __ sw(a1, FieldMemOperand(v0, JSObject::kElementsOffset));
  __ sw(a2, FieldMemOperand(v0, JSFunction::kPrototypeOrInitialMapOffset));
  __ sw(a3, FieldMemOperand(v0, JSFunction::kSharedFunctionInfoOffset));
  __ sw(cp, FieldMemOperand(v0, JSFunction::kContextOffset));
  __ sw(a1, FieldMemOperand(v0, JSFunction::kLiteralsOffset));
  __ sw(t0, FieldMemOperand(v0, JSFunction::kNextFunctionLinkOffset));

  // Initialize the code pointer in the function to be the one
  // found in the shared function info object.
  __ lw(a3, FieldMemOperand(a3, SharedFunctionInfo::kCodeOffset));
  __ Addu(a3, a3, Operand(Code::kHeaderSize - kHeapObjectTag));

  // Return result. The argument function info has been popped already.
  __ sw(a3, FieldMemOperand(v0, JSFunction::kCodeEntryOffset));
  __ Ret();

  // Create a new closure through the slower runtime call.
  __ bind(&gc);
  __ LoadRoot(t0, Heap::kFalseValueRootIndex);
  __ Push(cp, a3, t0);
  __ TailCallRuntime(Runtime::kNewClosure, 3, 1);
}


void FastNewContextStub::Generate(MacroAssembler* masm) {
  // Try to allocate the context in new space.
  Label gc;
  int length = slots_ + Context::MIN_CONTEXT_SLOTS;

  // Attempt to allocate the context in new space.
  __ AllocateInNewSpace(FixedArray::SizeFor(length),
                        v0,
                        a1,
                        a2,
                        &gc,
                        TAG_OBJECT);

  // Load the function from the stack.
  __ lw(a3, MemOperand(sp, 0));

  // Set up the object header.
  __ LoadRoot(a1, Heap::kFunctionContextMapRootIndex);
  __ li(a2, Operand(Smi::FromInt(length)));
  __ sw(a2, FieldMemOperand(v0, FixedArray::kLengthOffset));
  __ sw(a1, FieldMemOperand(v0, HeapObject::kMapOffset));

  // Set up the fixed slots, copy the global object from the previous context.
  __ lw(a2, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
  __ li(a1, Operand(Smi::FromInt(0)));
  __ sw(a3, MemOperand(v0, Context::SlotOffset(Context::CLOSURE_INDEX)));
  __ sw(cp, MemOperand(v0, Context::SlotOffset(Context::PREVIOUS_INDEX)));
  __ sw(a1, MemOperand(v0, Context::SlotOffset(Context::EXTENSION_INDEX)));
  __ sw(a2, MemOperand(v0, Context::SlotOffset(Context::GLOBAL_INDEX)));

  // Initialize the rest of the slots to undefined.
  __ LoadRoot(a1, Heap::kUndefinedValueRootIndex);
  for (int i = Context::MIN_CONTEXT_SLOTS; i < length; i++) {
    __ sw(a1, MemOperand(v0, Context::SlotOffset(i)));
  }

  // Remove the on-stack argument and return.
  __ mov(cp, v0);
  __ DropAndRet(1);

  // Need to collect. Call into runtime system.
  __ bind(&gc);
  __ TailCallRuntime(Runtime::kNewFunctionContext, 1, 1);
}


void FastNewBlockContextStub::Generate(MacroAssembler* masm) {
  // Stack layout on entry:
  //
  // [sp]: function.
  // [sp + kPointerSize]: serialized scope info

  // Try to allocate the context in new space.
  Label gc;
  int length = slots_ + Context::MIN_CONTEXT_SLOTS;
  __ AllocateInNewSpace(FixedArray::SizeFor(length),
                        v0, a1, a2, &gc, TAG_OBJECT);

  // Load the function from the stack.
  __ lw(a3, MemOperand(sp, 0));

  // Load the serialized scope info from the stack.
  __ lw(a1, MemOperand(sp, 1 * kPointerSize));

  // Set up the object header.
  __ LoadRoot(a2, Heap::kBlockContextMapRootIndex);
  __ sw(a2, FieldMemOperand(v0, HeapObject::kMapOffset));
  __ li(a2, Operand(Smi::FromInt(length)));
  __ sw(a2, FieldMemOperand(v0, FixedArray::kLengthOffset));

  // If this block context is nested in the global context we get a smi
  // sentinel instead of a function. The block context should get the
  // canonical empty function of the global context as its closure which
  // we still have to look up.
  Label after_sentinel;
  __ JumpIfNotSmi(a3, &after_sentinel);
  if (FLAG_debug_code) {
    const char* message = "Expected 0 as a Smi sentinel";
    __ Assert(eq, message, a3, Operand(zero_reg));
  }
  __ lw(a3, GlobalObjectOperand());
  __ lw(a3, FieldMemOperand(a3, GlobalObject::kGlobalContextOffset));
  __ lw(a3, ContextOperand(a3, Context::CLOSURE_INDEX));
  __ bind(&after_sentinel);

  // Set up the fixed slots, copy the global object from the previous context.
  __ lw(a2, ContextOperand(cp, Context::GLOBAL_INDEX));
  __ sw(a3, ContextOperand(v0, Context::CLOSURE_INDEX));
  __ sw(cp, ContextOperand(v0, Context::PREVIOUS_INDEX));
  __ sw(a1, ContextOperand(v0, Context::EXTENSION_INDEX));
  __ sw(a2, ContextOperand(v0, Context::GLOBAL_INDEX));

  // Initialize the rest of the slots to the hole value.
  __ LoadRoot(a1, Heap::kTheHoleValueRootIndex);
  for (int i = 0; i < slots_; i++) {
    __ sw(a1, ContextOperand(v0, i + Context::MIN_CONTEXT_SLOTS));
  }

  // Remove the on-stack argument and return.
  __ mov(cp, v0);
  __ DropAndRet(2);

  // Need to collect. Call into runtime system.
  __ bind(&gc);
  __ TailCallRuntime(Runtime::kPushBlockContext, 2, 1);
}


static void GenerateFastCloneShallowArrayCommon(
    MacroAssembler* masm,
    int length,
    FastCloneShallowArrayStub::Mode mode,
    Label* fail) {
  // Registers on entry:
  // a3: boilerplate literal array.
  ASSERT(mode != FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS);

  // All sizes here are multiples of kPointerSize.
  int elements_size = 0;
  if (length > 0) {
    elements_size = mode == FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS
        ? FixedDoubleArray::SizeFor(length)
        : FixedArray::SizeFor(length);
  }
  int size = JSArray::kSize + elements_size;

  // Allocate both the JS array and the elements array in one big
  // allocation. This avoids multiple limit checks.
  __ AllocateInNewSpace(size,
                        v0,
                        a1,
                        a2,
                        fail,
                        TAG_OBJECT);

  // Copy the JS array part.
  for (int i = 0; i < JSArray::kSize; i += kPointerSize) {
    if ((i != JSArray::kElementsOffset) || (length == 0)) {
      __ lw(a1, FieldMemOperand(a3, i));
      __ sw(a1, FieldMemOperand(v0, i));
    }
  }

  if (length > 0) {
    // Get hold of the elements array of the boilerplate and set up the
    // elements pointer in the resulting object.
    __ lw(a3, FieldMemOperand(a3, JSArray::kElementsOffset));
    __ Addu(a2, v0, Operand(JSArray::kSize));
    __ sw(a2, FieldMemOperand(v0, JSArray::kElementsOffset));

    // Copy the elements array.
    ASSERT((elements_size % kPointerSize) == 0);
    __ CopyFields(a2, a3, a1.bit(), elements_size / kPointerSize);
  }
}


void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
  // Stack layout on entry:
  //
  // [sp]: constant elements.
  // [sp + kPointerSize]: literal index.
  // [sp + (2 * kPointerSize)]: literals array.

  // Load boilerplate object into a3 and check if we need to create a
  // boilerplate.
  Label slow_case;
  __ lw(a3, MemOperand(sp, 2 * kPointerSize));
  __ lw(a0, MemOperand(sp, 1 * kPointerSize));
  __ Addu(a3, a3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ sll(t0, a0, kPointerSizeLog2 - kSmiTagSize);
  __ Addu(t0, a3, t0);
  __ lw(a3, MemOperand(t0));
  __ LoadRoot(t1, Heap::kUndefinedValueRootIndex);
  __ Branch(&slow_case, eq, a3, Operand(t1));

  FastCloneShallowArrayStub::Mode mode = mode_;
  if (mode == CLONE_ANY_ELEMENTS) {
    Label double_elements, check_fast_elements;
    __ lw(v0, FieldMemOperand(a3, JSArray::kElementsOffset));
    __ lw(v0, FieldMemOperand(v0, HeapObject::kMapOffset));
    __ LoadRoot(t1, Heap::kFixedCOWArrayMapRootIndex);
    __ Branch(&check_fast_elements, ne, v0, Operand(t1));
    GenerateFastCloneShallowArrayCommon(masm, 0,
                                        COPY_ON_WRITE_ELEMENTS, &slow_case);
    // Return and remove the on-stack parameters.
    __ DropAndRet(3);

    __ bind(&check_fast_elements);
    __ LoadRoot(t1, Heap::kFixedArrayMapRootIndex);
    __ Branch(&double_elements, ne, v0, Operand(t1));
    GenerateFastCloneShallowArrayCommon(masm, length_,
                                        CLONE_ELEMENTS, &slow_case);
    // Return and remove the on-stack parameters.
    __ DropAndRet(3);

    __ bind(&double_elements);
    mode = CLONE_DOUBLE_ELEMENTS;
    // Fall through to generate the code to handle double elements.
  }

  if (FLAG_debug_code) {
    const char* message;
    Heap::RootListIndex expected_map_index;
    if (mode == CLONE_ELEMENTS) {
      message = "Expected (writable) fixed array";
      expected_map_index = Heap::kFixedArrayMapRootIndex;
    } else if (mode == CLONE_DOUBLE_ELEMENTS) {
      message = "Expected (writable) fixed double array";
      expected_map_index = Heap::kFixedDoubleArrayMapRootIndex;
    } else {
      ASSERT(mode == COPY_ON_WRITE_ELEMENTS);
      message = "Expected copy-on-write fixed array";
      expected_map_index = Heap::kFixedCOWArrayMapRootIndex;
    }
    __ push(a3);
    __ lw(a3, FieldMemOperand(a3, JSArray::kElementsOffset));
    __ lw(a3, FieldMemOperand(a3, HeapObject::kMapOffset));
    __ LoadRoot(at, expected_map_index);
    __ Assert(eq, message, a3, Operand(at));
    __ pop(a3);
  }

  GenerateFastCloneShallowArrayCommon(masm, length_, mode, &slow_case);

  // Return and remove the on-stack parameters.
  __ DropAndRet(3);

  __ bind(&slow_case);
  __ TailCallRuntime(Runtime::kCreateArrayLiteralShallow, 3, 1);
}
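
// A note on the smi-index arithmetic in the boilerplate lookup above: on
// 32-bit MIPS a smi stores its value shifted left by kSmiTagSize (1), and
// kPointerSizeLog2 is 2, so shifting the tagged literal index left by
// kPointerSizeLog2 - kSmiTagSize turns it directly into a byte offset.
// For example, literal index 3 is held as the smi bit pattern 6, and
// 6 << 1 == 12 == 3 * kPointerSize. The same trick appears in
// FastCloneShallowObjectStub below.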

void FastCloneShallowObjectStub::Generate(MacroAssembler* masm) {
  // Stack layout on entry:
  //
  // [sp]: object literal flags.
  // [sp + kPointerSize]: constant properties.
  // [sp + (2 * kPointerSize)]: literal index.
  // [sp + (3 * kPointerSize)]: literals array.

  // Load boilerplate object into a3 and check if we need to create a
  // boilerplate.
  Label slow_case;
  __ lw(a3, MemOperand(sp, 3 * kPointerSize));
  __ lw(a0, MemOperand(sp, 2 * kPointerSize));
  __ Addu(a3, a3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ sll(t0, a0, kPointerSizeLog2 - kSmiTagSize);
  __ Addu(a3, t0, a3);
  __ lw(a3, MemOperand(a3));
  __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
  __ Branch(&slow_case, eq, a3, Operand(t0));

  // Check that the boilerplate contains only fast properties and we can
  // statically determine the instance size.
  int size = JSObject::kHeaderSize + length_ * kPointerSize;
  __ lw(a0, FieldMemOperand(a3, HeapObject::kMapOffset));
  __ lbu(a0, FieldMemOperand(a0, Map::kInstanceSizeOffset));
  __ Branch(&slow_case, ne, a0, Operand(size >> kPointerSizeLog2));

  // Allocate the JS object and copy header together with all in-object
  // properties from the boilerplate.
  __ AllocateInNewSpace(size, v0, a1, a2, &slow_case, TAG_OBJECT);
  for (int i = 0; i < size; i += kPointerSize) {
    __ lw(a1, FieldMemOperand(a3, i));
    __ sw(a1, FieldMemOperand(v0, i));
  }

  // Return and remove the on-stack parameters.
  __ DropAndRet(4);

  __ bind(&slow_case);
  __ TailCallRuntime(Runtime::kCreateObjectLiteralShallow, 4, 1);
}


// Takes a Smi and converts to an IEEE 64 bit floating point value in two
// registers. The format is 1 sign bit, 11 exponent bits (biased 1023) and
// 52 fraction bits (20 in the first word, 32 in the second). Zeros is a
// scratch register. Destroys the source register. No GC occurs during this
// stub so you don't have to set up the frame.
class ConvertToDoubleStub : public CodeStub {
 public:
  ConvertToDoubleStub(Register result_reg_1,
                      Register result_reg_2,
                      Register source_reg,
                      Register scratch_reg)
      : result1_(result_reg_1),
        result2_(result_reg_2),
        source_(source_reg),
        zeros_(scratch_reg) { }

 private:
  Register result1_;
  Register result2_;
  Register source_;
  Register zeros_;

  // Minor key encoding in 16 bits.
  class ModeBits: public BitField<OverwriteMode, 0, 2> {};
  class OpBits: public BitField<Token::Value, 2, 14> {};

  Major MajorKey() { return ConvertToDouble; }
  int MinorKey() {
    // Encode the parameters in a unique 16 bit value.
    return result1_.code() +
           (result2_.code() << 4) +
           (source_.code() << 8) +
           (zeros_.code() << 12);
  }

  void Generate(MacroAssembler* masm);
};


void ConvertToDoubleStub::Generate(MacroAssembler* masm) {
#ifndef BIG_ENDIAN_FLOATING_POINT
  Register exponent = result1_;
  Register mantissa = result2_;
#else
  Register exponent = result2_;
  Register mantissa = result1_;
#endif
  Label not_special;
  // Convert from Smi to integer.
  __ sra(source_, source_, kSmiTagSize);
  // Move sign bit from source to destination. This works because the sign bit
  // in the exponent word of the double has the same position and polarity as
  // the 2's complement sign bit in a Smi.
  STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u);
  __ And(exponent, source_, Operand(HeapNumber::kSignMask));
  // Subtract from 0 if source was negative.
  __ subu(at, zero_reg, source_);
  __ Movn(source_, at, exponent);

  // We have -1, 0 or 1, which we treat specially. Register source_ contains
  // absolute value: it is either equal to 1 (special case of -1 and 1),
  // greater than 1 (not a special case) or less than 1 (special case of 0).
  __ Branch(&not_special, gt, source_, Operand(1));

  // For 1 or -1 we need to or in the 0 exponent (biased to 1023).
  const uint32_t exponent_word_for_1 =
      HeapNumber::kExponentBias << HeapNumber::kExponentShift;
  // Safe to use 'at' as dest reg here.
  __ Or(at, exponent, Operand(exponent_word_for_1));
  __ Movn(exponent, at, source_);  // Write exp when source not 0.
  // 1, 0 and -1 all have 0 for the second word.
  __ Ret(USE_DELAY_SLOT);
  __ mov(mantissa, zero_reg);

  __ bind(&not_special);
  // Count leading zeros.
  // Gets the wrong answer for 0, but we already checked for that case above.
  __ Clz(zeros_, source_);
  // Compute exponent and or it into the exponent register.
  // We use mantissa as a scratch register here.
  __ li(mantissa, Operand(31 + HeapNumber::kExponentBias));
  __ subu(mantissa, mantissa, zeros_);
  __ sll(mantissa, mantissa, HeapNumber::kExponentShift);
  __ Or(exponent, exponent, mantissa);

  // Shift up the source chopping the top bit off.
  __ Addu(zeros_, zeros_, Operand(1));
  // This wouldn't work for 1.0 or -1.0 as the shift would be 32 which means 0.
  __ sllv(source_, source_, zeros_);
  // Compute lower part of fraction (last 12 bits).
  __ sll(mantissa, source_, HeapNumber::kMantissaBitsInTopWord);
  // And the top (top 20 bits).
  __ srl(source_, source_, 32 - HeapNumber::kMantissaBitsInTopWord);

  __ Ret(USE_DELAY_SLOT);
  __ or_(exponent, exponent, source_);
}
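
// Worked example for the non-special path above, assuming source_ holds the
// smi 5: after untagging, source_ == 5 (0b101) and Clz gives 29, so the
// biased exponent is 31 + 1023 - 29 == 1025 (0x401). Shifting source_ left
// by 30 chops off the implicit leading 1 and leaves the fraction bits 0b01
// at the top of the word, so the final words are exponent == 0x40140000 and
// mantissa == 0x00000000 -- the IEEE-754 encoding of 5.0.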

void FloatingPointHelper::LoadSmis(MacroAssembler* masm,
                                   FloatingPointHelper::Destination destination,
                                   Register scratch1,
                                   Register scratch2) {
  if (CpuFeatures::IsSupported(FPU)) {
    CpuFeatures::Scope scope(FPU);
    __ sra(scratch1, a0, kSmiTagSize);
    __ mtc1(scratch1, f14);
    __ cvt_d_w(f14, f14);
    __ sra(scratch1, a1, kSmiTagSize);
    __ mtc1(scratch1, f12);
    __ cvt_d_w(f12, f12);
    if (destination == kCoreRegisters) {
      __ Move(a2, a3, f14);
      __ Move(a0, a1, f12);
    }
  } else {
    ASSERT(destination == kCoreRegisters);
    // Write Smi from a0 to a3 and a2 in double format.
    __ mov(scratch1, a0);
    ConvertToDoubleStub stub1(a3, a2, scratch1, scratch2);
    __ push(ra);
    __ Call(stub1.GetCode());
    // Write Smi from a1 to a1 and a0 in double format.
    __ mov(scratch1, a1);
    ConvertToDoubleStub stub2(a1, a0, scratch1, scratch2);
    __ Call(stub2.GetCode());
    __ pop(ra);
  }
}


void FloatingPointHelper::LoadOperands(
    MacroAssembler* masm,
    FloatingPointHelper::Destination destination,
    Register heap_number_map,
    Register scratch1,
    Register scratch2,
    Label* slow) {
  // Load right operand (a0) to f14 or a2/a3.
  LoadNumber(masm, destination,
             a0, f14, a2, a3, heap_number_map, scratch1, scratch2, slow);

  // Load left operand (a1) to f12 or a0/a1.
  LoadNumber(masm, destination,
             a1, f12, a0, a1, heap_number_map, scratch1, scratch2, slow);
}


void FloatingPointHelper::LoadNumber(MacroAssembler* masm,
                                     Destination destination,
                                     Register object,
                                     FPURegister dst,
                                     Register dst1,
                                     Register dst2,
                                     Register heap_number_map,
                                     Register scratch1,
                                     Register scratch2,
                                     Label* not_number) {
  if (FLAG_debug_code) {
    __ AbortIfNotRootValue(heap_number_map,
                           Heap::kHeapNumberMapRootIndex,
                           "HeapNumberMap register clobbered.");
  }

  Label is_smi, done;

  // Smi-check.
  __ UntagAndJumpIfSmi(scratch1, object, &is_smi);
  // Heap number check.
  __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_number);

  // Handle loading a double from a heap number.
  if (CpuFeatures::IsSupported(FPU) &&
      destination == kFPURegisters) {
    CpuFeatures::Scope scope(FPU);
    // Load the double from tagged HeapNumber to double register.

    // ARM uses a workaround here because of the unaligned HeapNumber
    // kValueOffset. On MIPS this workaround is built into ldc1 so there's no
    // point in generating even more instructions.
    __ ldc1(dst, FieldMemOperand(object, HeapNumber::kValueOffset));
  } else {
    ASSERT(destination == kCoreRegisters);
    // Load the double from heap number to dst1 and dst2 in double format.
    __ lw(dst1, FieldMemOperand(object, HeapNumber::kValueOffset));
    __ lw(dst2, FieldMemOperand(object,
                                HeapNumber::kValueOffset + kPointerSize));
  }
  __ Branch(&done);

  // Handle loading a double from a smi.
  __ bind(&is_smi);
  if (CpuFeatures::IsSupported(FPU)) {
    CpuFeatures::Scope scope(FPU);
    // Convert smi to double using FPU instructions.
    __ mtc1(scratch1, dst);
    __ cvt_d_w(dst, dst);
    if (destination == kCoreRegisters) {
      // Load the converted smi to dst1 and dst2 in double format.
      __ Move(dst1, dst2, dst);
    }
  } else {
    ASSERT(destination == kCoreRegisters);
    // Write smi to dst1 and dst2 double format.
    __ mov(scratch1, object);
    ConvertToDoubleStub stub(dst2, dst1, scratch1, scratch2);
    __ push(ra);
    __ Call(stub.GetCode());
    __ pop(ra);
  }

  __ bind(&done);
}


void FloatingPointHelper::ConvertNumberToInt32(MacroAssembler* masm,
                                               Register object,
                                               Register dst,
                                               Register heap_number_map,
                                               Register scratch1,
                                               Register scratch2,
                                               Register scratch3,
                                               FPURegister double_scratch,
                                               Label* not_number) {
  if (FLAG_debug_code) {
    __ AbortIfNotRootValue(heap_number_map,
                           Heap::kHeapNumberMapRootIndex,
                           "HeapNumberMap register clobbered.");
  }
  Label done;
  Label not_in_int32_range;

  __ UntagAndJumpIfSmi(dst, object, &done);
  __ lw(scratch1, FieldMemOperand(object, HeapNumber::kMapOffset));
  __ Branch(not_number, ne, scratch1, Operand(heap_number_map));
  __ ConvertToInt32(object,
                    dst,
                    scratch1,
                    scratch2,
                    double_scratch,
                    &not_in_int32_range);
  __ jmp(&done);

  __ bind(&not_in_int32_range);
  __ lw(scratch1, FieldMemOperand(object, HeapNumber::kExponentOffset));
  __ lw(scratch2, FieldMemOperand(object, HeapNumber::kMantissaOffset));

  __ EmitOutOfInt32RangeTruncate(dst,
                                 scratch1,
                                 scratch2,
                                 scratch3);

  __ bind(&done);
}


void FloatingPointHelper::ConvertIntToDouble(MacroAssembler* masm,
                                             Register int_scratch,
                                             Destination destination,
                                             FPURegister double_dst,
                                             Register dst1,
                                             Register dst2,
                                             Register scratch2,
                                             FPURegister single_scratch) {
  ASSERT(!int_scratch.is(scratch2));
  ASSERT(!int_scratch.is(dst1));
  ASSERT(!int_scratch.is(dst2));

  Label done;

  if (CpuFeatures::IsSupported(FPU)) {
    CpuFeatures::Scope scope(FPU);
    __ mtc1(int_scratch, single_scratch);
    __ cvt_d_w(double_dst, single_scratch);
    if (destination == kCoreRegisters) {
      __ Move(dst1, dst2, double_dst);
    }
  } else {
    Label fewer_than_20_useful_bits;
    // Expected output:
    // |          dst2           |          dst1           |
    // | s |   exp   |                  mantissa           |

    // Check for zero.
    __ mov(dst2, int_scratch);
    __ mov(dst1, int_scratch);
    __ Branch(&done, eq, int_scratch, Operand(zero_reg));

    // Preload the sign of the value.
    __ And(dst2, int_scratch, Operand(HeapNumber::kSignMask));
    // Get the absolute value of the object (as an unsigned integer).
    Label skip_sub;
    __ Branch(&skip_sub, ge, dst2, Operand(zero_reg));
    __ Subu(int_scratch, zero_reg, int_scratch);
    __ bind(&skip_sub);

    // Get mantissa[51:20].

    // Get the position of the first set bit.
    __ Clz(dst1, int_scratch);
    __ li(scratch2, 31);
    __ Subu(dst1, scratch2, dst1);

    // Set the exponent.
    __ Addu(scratch2, dst1, Operand(HeapNumber::kExponentBias));
    __ Ins(dst2, scratch2,
           HeapNumber::kExponentShift, HeapNumber::kExponentBits);

    // Clear the first non null bit.
    __ li(scratch2, Operand(1));
    __ sllv(scratch2, scratch2, dst1);
    __ li(at, -1);
    __ Xor(scratch2, scratch2, at);
    __ And(int_scratch, int_scratch, scratch2);

    // Get the number of bits to set in the lower part of the mantissa.
    __ Subu(scratch2, dst1, Operand(HeapNumber::kMantissaBitsInTopWord));
    __ Branch(&fewer_than_20_useful_bits, lt, scratch2, Operand(zero_reg));
    // Set the higher 20 bits of the mantissa.
    __ srlv(at, int_scratch, scratch2);
    __ or_(dst2, dst2, at);
    __ li(at, 32);
    __ subu(scratch2, at, scratch2);
    __ sllv(dst1, int_scratch, scratch2);
    __ Branch(&done);

    __ bind(&fewer_than_20_useful_bits);
    __ li(at, HeapNumber::kMantissaBitsInTopWord);
    __ subu(scratch2, at, dst1);
    __ sllv(scratch2, int_scratch, scratch2);
    __ Or(dst2, dst2, scratch2);
    // Set dst1 to 0.
    __ mov(dst1, zero_reg);
  }
  __ bind(&done);
}
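
// Worked example for the no-FPU path above, assuming int_scratch holds 10
// (0b1010): Clz gives 28, so the top set bit is at position 3 and the biased
// exponent is 3 + 1023 == 1026 (0x402), inserted at bit 20 of dst2. Clearing
// bit 3 leaves 0b010; since 3 < 20 the fewer_than_20_useful_bits path shifts
// it left by 17, giving dst2 == 0x40240000 and dst1 == 0 -- the IEEE-754
// encoding of 10.0.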

void FloatingPointHelper::LoadNumberAsInt32Double(MacroAssembler* masm,
                                                  Register object,
                                                  Destination destination,
                                                  DoubleRegister double_dst,
                                                  Register dst1,
                                                  Register dst2,
                                                  Register heap_number_map,
                                                  Register scratch1,
                                                  Register scratch2,
                                                  FPURegister single_scratch,
                                                  Label* not_int32) {
  ASSERT(!scratch1.is(object) && !scratch2.is(object));
  ASSERT(!scratch1.is(scratch2));
  ASSERT(!heap_number_map.is(object) &&
         !heap_number_map.is(scratch1) &&
         !heap_number_map.is(scratch2));

  Label done, obj_is_not_smi;

  __ JumpIfNotSmi(object, &obj_is_not_smi);
  __ SmiUntag(scratch1, object);
  ConvertIntToDouble(masm, scratch1, destination, double_dst, dst1, dst2,
                     scratch2, single_scratch);
  __ Branch(&done);

  __ bind(&obj_is_not_smi);
  if (FLAG_debug_code) {
    __ AbortIfNotRootValue(heap_number_map,
                           Heap::kHeapNumberMapRootIndex,
                           "HeapNumberMap register clobbered.");
  }
  __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32);

  // Load the number.
  if (CpuFeatures::IsSupported(FPU)) {
    CpuFeatures::Scope scope(FPU);
    // Load the double value.
    __ ldc1(double_dst, FieldMemOperand(object, HeapNumber::kValueOffset));

    Register except_flag = scratch2;
    __ EmitFPUTruncate(kRoundToZero,
                       single_scratch,
                       double_dst,
                       scratch1,
                       except_flag,
                       kCheckForInexactConversion);

    // Jump to not_int32 if the operation did not succeed.
    __ Branch(not_int32, ne, except_flag, Operand(zero_reg));

    if (destination == kCoreRegisters) {
      __ Move(dst1, dst2, double_dst);
    }
  } else {
    ASSERT(!scratch1.is(object) && !scratch2.is(object));
    // Load the double value in the destination registers.
    __ lw(dst2, FieldMemOperand(object, HeapNumber::kExponentOffset));
    __ lw(dst1, FieldMemOperand(object, HeapNumber::kMantissaOffset));

    // Check for 0 and -0.
    __ And(scratch1, dst1, Operand(~HeapNumber::kSignMask));
    __ Or(scratch1, scratch1, Operand(dst2));
    __ Branch(&done, eq, scratch1, Operand(zero_reg));

    // Check that the value can be exactly represented by a 32-bit integer.
    // Jump to not_int32 if that's not the case.
    DoubleIs32BitInteger(masm, dst1, dst2, scratch1, scratch2, not_int32);

    // dst1 and dst2 were trashed. Reload the double value.
    __ lw(dst2, FieldMemOperand(object, HeapNumber::kExponentOffset));
    __ lw(dst1, FieldMemOperand(object, HeapNumber::kMantissaOffset));
  }

  __ bind(&done);
}


void FloatingPointHelper::LoadNumberAsInt32(MacroAssembler* masm,
                                            Register object,
                                            Register dst,
                                            Register heap_number_map,
                                            Register scratch1,
                                            Register scratch2,
                                            Register scratch3,
                                            DoubleRegister double_scratch,
                                            Label* not_int32) {
  ASSERT(!dst.is(object));
  ASSERT(!scratch1.is(object) && !scratch2.is(object) && !scratch3.is(object));
  ASSERT(!scratch1.is(scratch2) &&
         !scratch1.is(scratch3) &&
         !scratch2.is(scratch3));

  Label done;

  __ UntagAndJumpIfSmi(dst, object, &done);

  if (FLAG_debug_code) {
    __ AbortIfNotRootValue(heap_number_map,
                           Heap::kHeapNumberMapRootIndex,
                           "HeapNumberMap register clobbered.");
  }
  __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32);

  // Object is a heap number.
  // Convert the floating point value to a 32-bit integer.
  if (CpuFeatures::IsSupported(FPU)) {
    CpuFeatures::Scope scope(FPU);
    // Load the double value.
    __ ldc1(double_scratch, FieldMemOperand(object, HeapNumber::kValueOffset));

    FPURegister single_scratch = double_scratch.low();
    Register except_flag = scratch2;
    __ EmitFPUTruncate(kRoundToZero,
                       single_scratch,
                       double_scratch,
                       scratch1,
                       except_flag,
                       kCheckForInexactConversion);

    // Jump to not_int32 if the operation did not succeed.
    __ Branch(not_int32, ne, except_flag, Operand(zero_reg));
    // Get the result in the destination register.
    __ mfc1(dst, single_scratch);
  } else {
    // Load the double value in the destination registers.
    __ lw(scratch2, FieldMemOperand(object, HeapNumber::kExponentOffset));
    __ lw(scratch1, FieldMemOperand(object, HeapNumber::kMantissaOffset));

    // Check for 0 and -0.
    __ And(dst, scratch1, Operand(~HeapNumber::kSignMask));
    __ Or(dst, scratch2, Operand(dst));
    __ Branch(&done, eq, dst, Operand(zero_reg));

    DoubleIs32BitInteger(masm, scratch1, scratch2, dst, scratch3, not_int32);

    // Registers state after DoubleIs32BitInteger.
    // dst: mantissa[51:20].
    // scratch2: 1

    // Shift back the higher bits of the mantissa.
    __ srlv(dst, dst, scratch3);
    // Set the implicit first bit.
    __ li(at, 32);
    __ subu(scratch3, at, scratch3);
    __ sllv(scratch2, scratch2, scratch3);
    __ Or(dst, dst, scratch2);
    // Set the sign.
    __ lw(scratch1, FieldMemOperand(object, HeapNumber::kExponentOffset));
    __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
    Label skip_sub;
    __ Branch(&skip_sub, ge, scratch1, Operand(zero_reg));
    __ Subu(dst, zero_reg, dst);
    __ bind(&skip_sub);
  }

  __ bind(&done);
}


void FloatingPointHelper::DoubleIs32BitInteger(MacroAssembler* masm,
                                               Register src1,
                                               Register src2,
                                               Register dst,
                                               Register scratch,
                                               Label* not_int32) {
  // Get exponent alone in scratch.
  __ Ext(scratch,
         src1,
         HeapNumber::kExponentShift,
         HeapNumber::kExponentBits);

  // Subtract the bias from the exponent.
  __ Subu(scratch, scratch, Operand(HeapNumber::kExponentBias));

  // src1: higher (exponent) part of the double value.
  // src2: lower (mantissa) part of the double value.
  // scratch: unbiased exponent.

  // Fast cases. Check for obvious non 32-bit integer values.
  // Negative exponent cannot yield 32-bit integers.
  __ Branch(not_int32, lt, scratch, Operand(zero_reg));
  // Exponent greater than 31 cannot yield 32-bit integers.
  // Also, a positive value with an exponent equal to 31 is outside of the
  // signed 32-bit integer range.
  // Another way to put it is that if (exponent - signbit) > 30 then the
  // number cannot be represented as an int32.
  Register tmp = dst;
  __ srl(at, src1, 31);
  __ subu(tmp, scratch, at);
  __ Branch(not_int32, gt, tmp, Operand(30));
  // - Bits [21:0] in the mantissa are not null.
  __ And(tmp, src2, 0x3fffff);
  __ Branch(not_int32, ne, tmp, Operand(zero_reg));

  // Otherwise the exponent needs to be big enough to shift left all the
  // non zero bits left. So we need the (30 - exponent) last bits of the
  // 31 higher bits of the mantissa to be null.
  // Because bits [21:0] are null, we can check instead that the
  // (32 - exponent) last bits of the 32 higher bits of the mantissa are null.

  // Get the 32 higher bits of the mantissa in dst.
  __ Ext(dst,
         src2,
         HeapNumber::kMantissaBitsInTopWord,
         32 - HeapNumber::kMantissaBitsInTopWord);
  __ sll(at, src1, HeapNumber::kNonMantissaBitsInTopWord);
  __ or_(dst, dst, at);

  // Create the mask and test the lower bits (of the higher bits).
  __ li(at, 32);
  __ subu(scratch, at, scratch);
  __ li(src2, 1);
  __ sllv(src1, src2, scratch);
  __ Subu(src1, src1, Operand(1));
  __ And(src1, dst, src1);
  __ Branch(not_int32, ne, src1, Operand(zero_reg));
}
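
// An illustrative example of the exponent test above: 2^31 has unbiased
// exponent 31 with sign bit 0, so (exponent - signbit) == 31 > 30 and it is
// rejected, while -2^31 (exponent 31, sign bit 1) gives 31 - 1 == 30 and
// passes on to the mantissa checks -- exactly the asymmetric bounds of the
// signed 32-bit range [-2^31, 2^31 - 1].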

void FloatingPointHelper::CallCCodeForDoubleOperation(
    MacroAssembler* masm,
    Token::Value op,
    Register heap_number_result,
    Register scratch) {
  // Using core registers:
  // a0: Left value (least significant part of mantissa).
  // a1: Left value (sign, exponent, top of mantissa).
  // a2: Right value (least significant part of mantissa).
  // a3: Right value (sign, exponent, top of mantissa).

  // Assert that heap_number_result is saved.
  // We currently always use s0 to pass it.
  ASSERT(heap_number_result.is(s0));

  // Push the current return address before the C call.
  __ push(ra);
  __ PrepareCallCFunction(4, scratch);  // Two doubles are 4 arguments.
  if (!IsMipsSoftFloatABI) {
    CpuFeatures::Scope scope(FPU);
    // We are not using MIPS FPU instructions, and parameters for the runtime
    // function call are prepared in a0-a3 registers, but the function we are
    // calling is compiled with the hard-float flag and expects the hard-float
    // ABI (parameters in f12/f14 registers). We need to copy parameters from
    // a0-a3 registers to f12/f14 register pairs.
    __ Move(f12, a0, a1);
    __ Move(f14, a2, a3);
  }
  {
    AllowExternalCallThatCantCauseGC scope(masm);
    __ CallCFunction(
        ExternalReference::double_fp_operation(op, masm->isolate()), 0, 2);
  }
  // Store answer in the overwritable heap number.
  if (!IsMipsSoftFloatABI) {
    CpuFeatures::Scope scope(FPU);
    // Double returned in register f0.
    __ sdc1(f0, FieldMemOperand(heap_number_result, HeapNumber::kValueOffset));
  } else {
    // Double returned in registers v0 and v1.
    __ sw(v1, FieldMemOperand(heap_number_result, HeapNumber::kExponentOffset));
    __ sw(v0, FieldMemOperand(heap_number_result, HeapNumber::kMantissaOffset));
  }
  // Place heap_number_result in v0 and return to the pushed return address.
  __ pop(ra);
  __ Ret(USE_DELAY_SLOT);
  __ mov(v0, heap_number_result);
}


bool WriteInt32ToHeapNumberStub::IsPregenerated() {
  // These variants are compiled ahead of time. See next method.
  if (the_int_.is(a1) &&
      the_heap_number_.is(v0) &&
      scratch_.is(a2) &&
      sign_.is(a3)) {
    return true;
  }
  if (the_int_.is(a2) &&
      the_heap_number_.is(v0) &&
      scratch_.is(a3) &&
      sign_.is(a0)) {
    return true;
  }
  // Other register combinations are generated as and when they are needed,
  // so it is unsafe to call them from stubs (we can't generate a stub while
  // we are generating a stub).
  return false;
}


void WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime() {
  WriteInt32ToHeapNumberStub stub1(a1, v0, a2, a3);
  WriteInt32ToHeapNumberStub stub2(a2, v0, a3, a0);
  stub1.GetCode()->set_is_pregenerated(true);
  stub2.GetCode()->set_is_pregenerated(true);
}


// See comment for class, this does NOT work for int32's that are in Smi range.
void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) {
  Label max_negative_int;
  // the_int_ has the answer which is a signed int32 but not a Smi.
  // We test for the special value that has a different exponent.
  STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u);
  // Test sign, and save for later conditionals.
  __ And(sign_, the_int_, Operand(0x80000000u));
  __ Branch(&max_negative_int, eq, the_int_, Operand(0x80000000u));

  // Set up the correct exponent in scratch_. All non-Smi int32s have the same.
  // A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased).
  uint32_t non_smi_exponent =
      (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
  __ li(scratch_, Operand(non_smi_exponent));
  // Set the sign bit in scratch_ if the value was negative.
  __ or_(scratch_, scratch_, sign_);
  // Subtract from 0 if the value was negative.
  __ subu(at, zero_reg, the_int_);
  __ Movn(the_int_, at, sign_);
  // We should be masking the implicit first digit of the mantissa away here,
  // but it just ends up combining harmlessly with the last digit of the
  // exponent that happens to be 1. The sign bit is 0 so we shift 10 to get
  // the most significant 1 to hit the last bit of the 12 bit sign and exponent.
  ASSERT(((1 << HeapNumber::kExponentShift) & non_smi_exponent) != 0);
  const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
  __ srl(at, the_int_, shift_distance);
  __ or_(scratch_, scratch_, at);
  __ sw(scratch_, FieldMemOperand(the_heap_number_,
                                  HeapNumber::kExponentOffset));
  __ sll(scratch_, the_int_, 32 - shift_distance);
  __ sw(scratch_, FieldMemOperand(the_heap_number_,
                                  HeapNumber::kMantissaOffset));
  __ Ret();

  __ bind(&max_negative_int);
  // The max negative int32 is stored as a positive number in the mantissa of
  // a double because it uses a sign bit instead of using two's complement.
  // The actual mantissa bits stored are all 0 because the implicit most
  // significant 1 bit is not stored.
  non_smi_exponent += 1 << HeapNumber::kExponentShift;
  __ li(scratch_, Operand(HeapNumber::kSignMask | non_smi_exponent));
  __ sw(scratch_,
        FieldMemOperand(the_heap_number_, HeapNumber::kExponentOffset));
  __ mov(scratch_, zero_reg);
  __ sw(scratch_,
        FieldMemOperand(the_heap_number_, HeapNumber::kMantissaOffset));
  __ Ret();
}
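
// Bit-level check of the special case above: -2^31 is 1.0 * 2^31 negated,
// so its encoding is sign bit 1, biased exponent 1023 + 31 == 1054 (0x41e),
// and an all-zero stored mantissa (the leading 1 is implicit). The exponent
// word written is therefore 0xc1e00000 and the mantissa word 0, i.e. the
// double -2147483648.0.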

// Handle the case where the lhs and rhs are the same object.
// Equality is almost reflexive (everything but NaN), so this is a test
// for "identity and not NaN".
static void EmitIdenticalObjectComparison(MacroAssembler* masm,
                                          Label* slow,
                                          Condition cc,
                                          bool never_nan_nan) {
  Label not_identical;
  Label heap_number, return_equal;
  Register exp_mask_reg = t5;

  __ Branch(&not_identical, ne, a0, Operand(a1));

  // The two objects are identical. If we know that one of them isn't NaN then
  // we now know they test equal.
  if (cc != eq || !never_nan_nan) {
    __ li(exp_mask_reg, Operand(HeapNumber::kExponentMask));

    // Test for NaN. Sadly, we can't just compare to factory->nan_value(),
    // so we do the second best thing - test it ourselves.
    // They are both equal and they are not both Smis so both of them are not
    // Smis. If it's not a heap number, then return equal.
    if (cc == less || cc == greater) {
      __ GetObjectType(a0, t4, t4);
      __ Branch(slow, greater, t4, Operand(FIRST_SPEC_OBJECT_TYPE));
    } else {
      __ GetObjectType(a0, t4, t4);
      __ Branch(&heap_number, eq, t4, Operand(HEAP_NUMBER_TYPE));
      // Comparing JS objects with <=, >= is complicated.
      if (cc != eq) {
        __ Branch(slow, greater, t4, Operand(FIRST_SPEC_OBJECT_TYPE));
        // Normally here we fall through to return_equal, but undefined is
        // special: (undefined == undefined) == true, but
        // (undefined <= undefined) == false! See ECMAScript 11.8.5.
        if (cc == less_equal || cc == greater_equal) {
          __ Branch(&return_equal, ne, t4, Operand(ODDBALL_TYPE));
          __ LoadRoot(t2, Heap::kUndefinedValueRootIndex);
          __ Branch(&return_equal, ne, a0, Operand(t2));
          if (cc == le) {
            // undefined <= undefined should fail.
            __ li(v0, Operand(GREATER));
          } else {
            // undefined >= undefined should fail.
            __ li(v0, Operand(LESS));
          }
          __ Ret();
        }
      }
    }
  }

  __ bind(&return_equal);

  if (cc == less) {
    __ li(v0, Operand(GREATER));  // Things aren't less than themselves.
  } else if (cc == greater) {
    __ li(v0, Operand(LESS));     // Things aren't greater than themselves.
  } else {
    __ mov(v0, zero_reg);         // Things are <=, >=, ==, === themselves.
  }
  __ Ret();

  if (cc != eq || !never_nan_nan) {
    // For less and greater we don't have to check for NaN since the result of
    // x < x is false regardless. For the others here is some code to check
    // for NaN.
    if (cc != lt && cc != gt) {
      __ bind(&heap_number);
      // It is a heap number, so return non-equal if it's NaN and equal if
      // it's not NaN.

      // The representation of NaN values has all exponent bits (52..62) set,
      // and not all mantissa bits (0..51) clear.
      // Read top bits of double representation (second word of value).
      __ lw(t2, FieldMemOperand(a0, HeapNumber::kExponentOffset));
      // Test that exponent bits are all set.
      __ And(t3, t2, Operand(exp_mask_reg));
      // If all bits not set (ne cond), then not a NaN, objects are equal.
      __ Branch(&return_equal, ne, t3, Operand(exp_mask_reg));

      // Shift out flag and all exponent bits, retaining only mantissa.
      __ sll(t2, t2, HeapNumber::kNonMantissaBitsInTopWord);
      // Or with all low-bits of mantissa.
      __ lw(t3, FieldMemOperand(a0, HeapNumber::kMantissaOffset));
      __ Or(v0, t3, Operand(t2));
      // For equal we already have the right value in v0: Return zero (equal)
      // if all bits in mantissa are zero (it's an Infinity) and non-zero if
      // not (it's a NaN). For <= and >= we need to load v0 with the failing
      // value if it's a NaN.
      if (cc != eq) {
        // All-zero means Infinity means equal.
        __ Ret(eq, v0, Operand(zero_reg));
        if (cc == le) {
          __ li(v0, Operand(GREATER));  // NaN <= NaN should fail.
        } else {
          __ li(v0, Operand(LESS));     // NaN >= NaN should fail.
        }
      }
      __ Ret();
    }
    // No fall through here.
  }

  __ bind(&not_identical);
}
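
// The NaN test above distinguishes, for example, +Infinity (exponent word
// 0x7ff00000, mantissa word 0) from a quiet NaN (e.g. exponent word
// 0x7ff80000, mantissa word 0): both have all eleven exponent bits set, but
// only the NaN has a non-zero bit somewhere in the 52 mantissa bits, which
// is what the shift-and-or sequence accumulates into v0.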

static void EmitSmiNonsmiComparison(MacroAssembler* masm,
                                    Register lhs,
                                    Register rhs,
                                    Label* both_loaded_as_doubles,
                                    Label* slow,
                                    bool strict) {
  ASSERT((lhs.is(a0) && rhs.is(a1)) ||
         (lhs.is(a1) && rhs.is(a0)));

  Label lhs_is_smi;
  __ JumpIfSmi(lhs, &lhs_is_smi);
  // Rhs is a Smi.
  // Check whether the non-smi is a heap number.
  __ GetObjectType(lhs, t4, t4);
  if (strict) {
    // If lhs was not a number and rhs was a Smi then strict equality cannot
    // succeed. Return non-equal (lhs is already not zero).
    __ Ret(USE_DELAY_SLOT, ne, t4, Operand(HEAP_NUMBER_TYPE));
    __ mov(v0, lhs);
  } else {
    // Smi compared non-strictly with a non-Smi non-heap-number. Call
    // the runtime.
    __ Branch(slow, ne, t4, Operand(HEAP_NUMBER_TYPE));
  }

  // Rhs is a smi, lhs is a number.
  // Convert smi rhs to double.
  if (CpuFeatures::IsSupported(FPU)) {
    CpuFeatures::Scope scope(FPU);
    __ sra(at, rhs, kSmiTagSize);
    __ mtc1(at, f14);
    __ cvt_d_w(f14, f14);
    __ ldc1(f12, FieldMemOperand(lhs, HeapNumber::kValueOffset));
  } else {
    // Load lhs to a double in a2, a3.
    __ lw(a3, FieldMemOperand(lhs, HeapNumber::kValueOffset + 4));
    __ lw(a2, FieldMemOperand(lhs, HeapNumber::kValueOffset));

    // Write Smi from rhs to a1 and a0 in double format. t5 is scratch.
    __ mov(t6, rhs);
    ConvertToDoubleStub stub1(a1, a0, t6, t5);
    __ push(ra);
    __ Call(stub1.GetCode());

    __ pop(ra);
  }

  // We now have both loaded as doubles.
  __ jmp(both_loaded_as_doubles);

  __ bind(&lhs_is_smi);
  // Lhs is a Smi. Check whether the non-smi is a heap number.
  __ GetObjectType(rhs, t4, t4);
  if (strict) {
    // If lhs was not a number and rhs was a Smi then strict equality cannot
    // succeed. Return non-equal.
    __ Ret(USE_DELAY_SLOT, ne, t4, Operand(HEAP_NUMBER_TYPE));
    __ li(v0, Operand(1));
  } else {
    // Smi compared non-strictly with a non-Smi non-heap-number. Call
    // the runtime.
    __ Branch(slow, ne, t4, Operand(HEAP_NUMBER_TYPE));
  }

  // Lhs is a smi, rhs is a number.
  // Convert smi lhs to double.
  if (CpuFeatures::IsSupported(FPU)) {
    CpuFeatures::Scope scope(FPU);
    __ sra(at, lhs, kSmiTagSize);
    __ mtc1(at, f12);
    __ cvt_d_w(f12, f12);
    __ ldc1(f14, FieldMemOperand(rhs, HeapNumber::kValueOffset));
  } else {
    // Convert lhs to a double format. t5 is scratch.
    __ mov(t6, lhs);
    ConvertToDoubleStub stub2(a3, a2, t6, t5);
    __ push(ra);
    __ Call(stub2.GetCode());
    __ pop(ra);
    // Load rhs to a double in a1, a0.
    if (rhs.is(a0)) {
      __ lw(a1, FieldMemOperand(rhs, HeapNumber::kValueOffset + 4));
      __ lw(a0, FieldMemOperand(rhs, HeapNumber::kValueOffset));
    } else {
      __ lw(a0, FieldMemOperand(rhs, HeapNumber::kValueOffset));
      __ lw(a1, FieldMemOperand(rhs, HeapNumber::kValueOffset + 4));
    }
  }
  // Fall through to both_loaded_as_doubles.
}


void EmitNanCheck(MacroAssembler* masm, Condition cc) {
  bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset);
  if (CpuFeatures::IsSupported(FPU)) {
    CpuFeatures::Scope scope(FPU);
    // Lhs and rhs are already loaded to f12 and f14 register pairs.
    __ Move(t0, t1, f14);
    __ Move(t2, t3, f12);
  } else {
    // Lhs and rhs are already loaded to GP registers.
    __ mov(t0, a0);  // a0 has LS 32 bits of rhs.
    __ mov(t1, a1);  // a1 has MS 32 bits of rhs.
    __ mov(t2, a2);  // a2 has LS 32 bits of lhs.
    __ mov(t3, a3);  // a3 has MS 32 bits of lhs.
  }
  Register rhs_exponent = exp_first ? t0 : t1;
  Register lhs_exponent = exp_first ? t2 : t3;
  Register rhs_mantissa = exp_first ? t1 : t0;
  Register lhs_mantissa = exp_first ? t3 : t2;
  Label one_is_nan, neither_is_nan;
  Label lhs_not_nan_exp_mask_is_loaded;

  Register exp_mask_reg = t4;
  __ li(exp_mask_reg, HeapNumber::kExponentMask);
  __ and_(t5, lhs_exponent, exp_mask_reg);
  __ Branch(&lhs_not_nan_exp_mask_is_loaded, ne, t5, Operand(exp_mask_reg));

  __ sll(t5, lhs_exponent, HeapNumber::kNonMantissaBitsInTopWord);
  __ Branch(&one_is_nan, ne, t5, Operand(zero_reg));

  __ Branch(&one_is_nan, ne, lhs_mantissa, Operand(zero_reg));

  __ li(exp_mask_reg, HeapNumber::kExponentMask);
  __ bind(&lhs_not_nan_exp_mask_is_loaded);
  __ and_(t5, rhs_exponent, exp_mask_reg);

  __ Branch(&neither_is_nan, ne, t5, Operand(exp_mask_reg));

  __ sll(t5, rhs_exponent, HeapNumber::kNonMantissaBitsInTopWord);
  __ Branch(&one_is_nan, ne, t5, Operand(zero_reg));

  __ Branch(&neither_is_nan, eq, rhs_mantissa, Operand(zero_reg));

  __ bind(&one_is_nan);
  // NaN comparisons always fail.
  // Load whatever we need in v0 to make the comparison fail.

  if (cc == lt || cc == le) {
    __ li(v0, Operand(GREATER));
  } else {
    __ li(v0, Operand(LESS));
  }
  __ Ret();

  __ bind(&neither_is_nan);
}


static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cc) {
  // f12 and f14 have the two doubles. Neither is a NaN.
  // Call a native function to do a comparison between two non-NaNs.
  // Call C routine that may not cause GC or other trouble.
  // We call and return manually because we need argument slots to be freed.

  Label return_result_not_equal, return_result_equal;
  if (cc == eq) {
    // Doubles are not equal unless they have the same bit pattern.
    // Exception: 0 and -0.
    bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset);
    if (CpuFeatures::IsSupported(FPU)) {
      CpuFeatures::Scope scope(FPU);
      // Lhs and rhs are already loaded to f12 and f14 register pairs.
      __ Move(t0, t1, f14);
      __ Move(t2, t3, f12);
    } else {
      // Lhs and rhs are already loaded to GP registers.
      __ mov(t0, a0);  // a0 has LS 32 bits of rhs.
      __ mov(t1, a1);  // a1 has MS 32 bits of rhs.
      __ mov(t2, a2);  // a2 has LS 32 bits of lhs.
      __ mov(t3, a3);  // a3 has MS 32 bits of lhs.
    }
    Register rhs_exponent = exp_first ? t0 : t1;
    Register lhs_exponent = exp_first ? t2 : t3;
    Register rhs_mantissa = exp_first ? t1 : t0;
    Register lhs_mantissa = exp_first ? t3 : t2;

    __ xor_(v0, rhs_mantissa, lhs_mantissa);
    __ Branch(&return_result_not_equal, ne, v0, Operand(zero_reg));

    __ subu(v0, rhs_exponent, lhs_exponent);
    __ Branch(&return_result_equal, eq, v0, Operand(zero_reg));
    // 0, -0 case.
    __ sll(rhs_exponent, rhs_exponent, kSmiTagSize);
    __ sll(lhs_exponent, lhs_exponent, kSmiTagSize);
    __ or_(t4, rhs_exponent, lhs_exponent);
    __ or_(t4, t4, rhs_mantissa);

    __ Branch(&return_result_not_equal, ne, t4, Operand(zero_reg));

    __ bind(&return_result_equal);

    __ li(v0, Operand(EQUAL));
    __ Ret();
  }

  __ bind(&return_result_not_equal);

  if (!CpuFeatures::IsSupported(FPU)) {
    __ push(ra);
    __ PrepareCallCFunction(0, 2, t4);
    if (!IsMipsSoftFloatABI) {
      // We are not using MIPS FPU instructions, and parameters for the runtime
      // function call are prepared in a0-a3 registers, but the function we are
      // calling is compiled with the hard-float flag and expects the
      // hard-float ABI (parameters in f12/f14 registers). We need to copy
      // parameters from a0-a3 registers to f12/f14 register pairs.
      __ Move(f12, a0, a1);
      __ Move(f14, a2, a3);
    }

    AllowExternalCallThatCantCauseGC scope(masm);
    __ CallCFunction(ExternalReference::compare_doubles(masm->isolate()),
                     0, 2);
    __ pop(ra);  // Because this function returns int, result is in v0.
    __ Ret();
  } else {
    CpuFeatures::Scope scope(FPU);
    Label equal, less_than;
    __ BranchF(&equal, NULL, eq, f12, f14);
    __ BranchF(&less_than, NULL, lt, f12, f14);

    // Not equal, not less, not NaN, must be greater.

    __ li(v0, Operand(GREATER));
    __ Ret();

    __ bind(&equal);
    __ li(v0, Operand(EQUAL));
    __ Ret();

    __ bind(&less_than);
    __ li(v0, Operand(LESS));
    __ Ret();
  }
}


static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
                                           Register lhs,
                                           Register rhs) {
  // If either operand is a JS object or an oddball value, then they are
  // not equal since their pointers are different.
  // There is no test for undetectability in strict equality.
  STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE);
  Label first_non_object;
  // Get the type of the first operand into a2 and compare it with
  // FIRST_SPEC_OBJECT_TYPE.
  __ GetObjectType(lhs, a2, a2);
  __ Branch(&first_non_object, less, a2, Operand(FIRST_SPEC_OBJECT_TYPE));

  // Return non-zero.
  Label return_not_equal;
  __ bind(&return_not_equal);
  __ Ret(USE_DELAY_SLOT);
  __ li(v0, Operand(1));

  __ bind(&first_non_object);
  // Check for oddballs: true, false, null, undefined.
  __ Branch(&return_not_equal, eq, a2, Operand(ODDBALL_TYPE));

  __ GetObjectType(rhs, a3, a3);
  __ Branch(&return_not_equal, greater, a3, Operand(FIRST_SPEC_OBJECT_TYPE));

  // Check for oddballs: true, false, null, undefined.
  __ Branch(&return_not_equal, eq, a3, Operand(ODDBALL_TYPE));

  // Now that we have the types we might as well check for symbol-symbol.
  // Ensure that no non-strings have the symbol bit set.
  STATIC_ASSERT(LAST_TYPE < kNotStringTag + kIsSymbolMask);
  STATIC_ASSERT(kSymbolTag != 0);
  __ And(t2, a2, Operand(a3));
  __ And(t0, t2, Operand(kIsSymbolMask));
  __ Branch(&return_not_equal, ne, t0, Operand(zero_reg));
}


static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm,
                                       Register lhs,
                                       Register rhs,
                                       Label* both_loaded_as_doubles,
                                       Label* not_heap_numbers,
                                       Label* slow) {
  __ GetObjectType(lhs, a3, a2);
  __ Branch(not_heap_numbers, ne, a2, Operand(HEAP_NUMBER_TYPE));
  __ lw(a2, FieldMemOperand(rhs, HeapObject::kMapOffset));
  // If first was a heap number & second wasn't, go to slow case.
  __ Branch(slow, ne, a3, Operand(a2));

  // Both are heap numbers. Load them up then jump to the code we have
  // for that.
  if (CpuFeatures::IsSupported(FPU)) {
    CpuFeatures::Scope scope(FPU);
    __ ldc1(f12, FieldMemOperand(lhs, HeapNumber::kValueOffset));
    __ ldc1(f14, FieldMemOperand(rhs, HeapNumber::kValueOffset));
  } else {
    __ lw(a2, FieldMemOperand(lhs, HeapNumber::kValueOffset));
    __ lw(a3, FieldMemOperand(lhs, HeapNumber::kValueOffset + 4));
    if (rhs.is(a0)) {
      __ lw(a1, FieldMemOperand(rhs, HeapNumber::kValueOffset + 4));
      __ lw(a0, FieldMemOperand(rhs, HeapNumber::kValueOffset));
    } else {
      __ lw(a0, FieldMemOperand(rhs, HeapNumber::kValueOffset));
      __ lw(a1, FieldMemOperand(rhs, HeapNumber::kValueOffset + 4));
    }
  }
  __ jmp(both_loaded_as_doubles);
}


// Fast negative check for symbol-to-symbol equality.
static void EmitCheckForSymbolsOrObjects(MacroAssembler* masm,
                                         Register lhs,
                                         Register rhs,
                                         Label* possible_strings,
                                         Label* not_both_strings) {
  ASSERT((lhs.is(a0) && rhs.is(a1)) ||
         (lhs.is(a1) && rhs.is(a0)));

  // a2 is object type of lhs.
  // Ensure that no non-strings have the symbol bit set.
  Label object_test;
  STATIC_ASSERT(kSymbolTag != 0);
  __ And(at, a2, Operand(kIsNotStringMask));
  __ Branch(&object_test, ne, at, Operand(zero_reg));
  __ And(at, a2, Operand(kIsSymbolMask));
  __ Branch(possible_strings, eq, at, Operand(zero_reg));
  __ GetObjectType(rhs, a3, a3);
  __ Branch(not_both_strings, ge, a3, Operand(FIRST_NONSTRING_TYPE));
  __ And(at, a3, Operand(kIsSymbolMask));
  __ Branch(possible_strings, eq, at, Operand(zero_reg));

  // Both are symbols. We already checked they weren't the same pointer
  // so they are not equal.
  __ Ret(USE_DELAY_SLOT);
  __ li(v0, Operand(1));  // Non-zero indicates not equal.

  __ bind(&object_test);
  __ Branch(not_both_strings, lt, a2, Operand(FIRST_SPEC_OBJECT_TYPE));
  __ GetObjectType(rhs, a2, a3);
  __ Branch(not_both_strings, lt, a3, Operand(FIRST_SPEC_OBJECT_TYPE));

  // If both objects are undetectable, they are equal. Otherwise, they
  // are not equal, since they are different objects and an object is not
  // equal to undefined.
  __ lw(a3, FieldMemOperand(lhs, HeapObject::kMapOffset));
  __ lbu(a2, FieldMemOperand(a2, Map::kBitFieldOffset));
  __ lbu(a3, FieldMemOperand(a3, Map::kBitFieldOffset));
  __ and_(a0, a2, a3);
  __ And(a0, a0, Operand(1 << Map::kIsUndetectable));
  __ Ret(USE_DELAY_SLOT);
  __ xori(v0, a0, 1 << Map::kIsUndetectable);
}


void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
                                                         Register object,
                                                         Register result,
                                                         Register scratch1,
                                                         Register scratch2,
                                                         Register scratch3,
                                                         bool object_is_smi,
                                                         Label* not_found) {
  // Use of registers. Register result is used as a temporary.
  Register number_string_cache = result;
  Register mask = scratch3;

  // Load the number string cache.
  __ LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);

  // Make the hash mask from the length of the number string cache. It
  // contains two elements (number and string) for each cache entry.
  __ lw(mask, FieldMemOperand(number_string_cache, FixedArray::kLengthOffset));
  // Divide length by two (length is a smi).
  __ sra(mask, mask, kSmiTagSize + 1);
  __ Addu(mask, mask, -1);  // Make mask.

  // Calculate the entry in the number string cache. The hash value in the
  // number string cache for smis is just the smi value, and the hash for
  // doubles is the xor of the upper and lower words. See
  // Heap::GetNumberStringCache.
  Isolate* isolate = masm->isolate();
  Label is_smi;
  Label load_result_from_cache;
  if (!object_is_smi) {
    __ JumpIfSmi(object, &is_smi);
    if (CpuFeatures::IsSupported(FPU)) {
      CpuFeatures::Scope scope(FPU);
      __ CheckMap(object,
                  scratch1,
                  Heap::kHeapNumberMapRootIndex,
                  not_found,
                  DONT_DO_SMI_CHECK);

      STATIC_ASSERT(8 == kDoubleSize);
      __ Addu(scratch1,
              object,
              Operand(HeapNumber::kValueOffset - kHeapObjectTag));
      __ lw(scratch2, MemOperand(scratch1, kPointerSize));
      __ lw(scratch1, MemOperand(scratch1, 0));
      __ Xor(scratch1, scratch1, Operand(scratch2));
      __ And(scratch1, scratch1, Operand(mask));

      // Calculate address of entry in string cache: each entry consists
      // of two pointer sized fields.
      __ sll(scratch1, scratch1, kPointerSizeLog2 + 1);
      __ Addu(scratch1, number_string_cache, scratch1);

      Register probe = mask;
      __ lw(probe,
            FieldMemOperand(scratch1, FixedArray::kHeaderSize));
      __ JumpIfSmi(probe, not_found);
      __ ldc1(f12, FieldMemOperand(object, HeapNumber::kValueOffset));
      __ ldc1(f14, FieldMemOperand(probe, HeapNumber::kValueOffset));
      __ BranchF(&load_result_from_cache, NULL, eq, f12, f14);
      __ Branch(not_found);
    } else {
      // Note that there is no cache check for the non-FPU case, even though
      // it seems there could be. May be a tiny optimization for non-FPU
      // cores.
      __ Branch(not_found);
    }
  }

  __ bind(&is_smi);
  Register scratch = scratch1;
  __ sra(scratch, object, 1);  // Shift away the tag.
  __ And(scratch, mask, Operand(scratch));

  // Calculate address of entry in string cache: each entry consists
  // of two pointer sized fields.
  __ sll(scratch, scratch, kPointerSizeLog2 + 1);
  __ Addu(scratch, number_string_cache, scratch);

  // Check if the entry is the smi we are looking for.
  Register probe = mask;
  __ lw(probe, FieldMemOperand(scratch, FixedArray::kHeaderSize));
  __ Branch(not_found, ne, object, Operand(probe));

  // Get the result from the cache.
  __ bind(&load_result_from_cache);
  __ lw(result,
        FieldMemOperand(scratch, FixedArray::kHeaderSize + kPointerSize));

  __ IncrementCounter(isolate->counters()->number_to_string_native(),
                      1,
                      scratch1,
                      scratch2);
}
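
// A small worked example of the cache indexing above, assuming a
// hypothetical cache with 64 entries (FixedArray length 128, so mask == 63):
// the smi 7 hashes to entry 7 & 63 == 7, i.e. byte offset 7 << 3 == 56 from
// the start of the cache payload, where the first of the two fields holds
// the number and the second the cached string. For heap numbers the hash is
// instead (low word ^ high word) & mask.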
  Register probe = mask;
  __ lw(probe, FieldMemOperand(scratch, FixedArray::kHeaderSize));
  __ Branch(not_found, ne, object, Operand(probe));

  // Get the result from the cache.
  __ bind(&load_result_from_cache);
  __ lw(result,
        FieldMemOperand(scratch, FixedArray::kHeaderSize + kPointerSize));

  __ IncrementCounter(isolate->counters()->number_to_string_native(),
                      1,
                      scratch1,
                      scratch2);
}


void NumberToStringStub::Generate(MacroAssembler* masm) {
  Label runtime;

  __ lw(a1, MemOperand(sp, 0));

  // Generate code to lookup number in the number string cache.
  GenerateLookupNumberStringCache(masm, a1, v0, a2, a3, t0, false, &runtime);
  __ DropAndRet(1);

  __ bind(&runtime);
  // Handle number to string in the runtime system if not found in the cache.
  __ TailCallRuntime(Runtime::kNumberToString, 1, 1);
}


// On entry lhs_ (lhs) and rhs_ (rhs) are the things to be compared.
// On exit, v0 is 0, positive, or negative (smi) to indicate the result
// of the comparison.
void CompareStub::Generate(MacroAssembler* masm) {
  Label slow;  // Call builtin.
  Label not_smis, both_loaded_as_doubles;

  if (include_smi_compare_) {
    Label not_two_smis, smi_done;
    __ Or(a2, a1, a0);
    __ JumpIfNotSmi(a2, &not_two_smis);
    __ sra(a1, a1, 1);
    __ sra(a0, a0, 1);
    __ Ret(USE_DELAY_SLOT);
    __ subu(v0, a1, a0);
    __ bind(&not_two_smis);
  } else if (FLAG_debug_code) {
    __ Or(a2, a1, a0);
    __ And(a2, a2, kSmiTagMask);
    __ Assert(ne, "CompareStub: unexpected smi operands.",
              a2, Operand(zero_reg));
  }

  // NOTICE! This code is only reached after a smi-fast-case check, so
  // it is certain that at least one operand isn't a smi.

  // Handle the case where the objects are identical. Either returns the answer
  // or goes to slow. Only falls through if the objects were not identical.
  EmitIdenticalObjectComparison(masm, &slow, cc_, never_nan_nan_);

  // If either is a Smi (we know that not both are), then they can only
  // be strictly equal if the other is a HeapNumber.
  STATIC_ASSERT(kSmiTag == 0);
  ASSERT_EQ(0, Smi::FromInt(0));
  __ And(t2, lhs_, Operand(rhs_));
  __ JumpIfNotSmi(t2, &not_smis, t0);
  // One operand is a smi. EmitSmiNonsmiComparison generates code that can:
  // 1) Return the answer.
  // 2) Go to slow.
  // 3) Fall through to both_loaded_as_doubles.
  // 4) Jump to rhs_not_nan.
  // In cases 3 and 4 we have found out we were dealing with a number-number
  // comparison and the numbers have been loaded into f12 and f14 as doubles,
  // or in GP registers (a0, a1, a2, a3) depending on the presence of the FPU.
  EmitSmiNonsmiComparison(masm, lhs_, rhs_,
                          &both_loaded_as_doubles, &slow, strict_);

  __ bind(&both_loaded_as_doubles);
  // f12, f14 are the double representations of the left hand side
  // and the right hand side if we have FPU. Otherwise a2, a3 represent
  // left hand side and a0, a1 represent right hand side.

  Isolate* isolate = masm->isolate();
  if (CpuFeatures::IsSupported(FPU)) {
    CpuFeatures::Scope scope(FPU);
    Label nan;
    __ li(t0, Operand(LESS));
    __ li(t1, Operand(GREATER));
    __ li(t2, Operand(EQUAL));

    // Check if either rhs or lhs is NaN.
    __ BranchF(NULL, &nan, eq, f12, f14);

    // Check if LESS condition is satisfied. If true, move conditionally
    // result to v0.
    __ c(OLT, D, f12, f14);
    __ Movt(v0, t0);
    // Use previous check to store conditionally to v0 the opposite condition
    // (GREATER). If rhs is equal to lhs, this will be corrected in the next
    // check.
    __ Movf(v0, t1);
    // Check if EQUAL condition is satisfied. If true, move conditionally
    // result to v0.
    __ c(EQ, D, f12, f14);
    __ Movt(v0, t2);

    __ Ret();

    __ bind(&nan);
    // NaN comparisons always fail.
    // Load whatever we need in v0 to make the comparison fail.
    if (cc_ == lt || cc_ == le) {
      __ li(v0, Operand(GREATER));
    } else {
      __ li(v0, Operand(LESS));
    }
    __ Ret();
  } else {
    // Checks for NaN in the doubles we have loaded. Can return the answer or
    // fall through if neither is a NaN. Also binds rhs_not_nan.
    EmitNanCheck(masm, cc_);

    // Compares two doubles that are not NaNs. Returns the answer.
    // Never falls through.
    EmitTwoNonNanDoubleComparison(masm, cc_);
  }

  __ bind(&not_smis);
  // At this point we know we are dealing with two different objects,
  // and neither of them is a Smi. The objects are in lhs_ and rhs_.
  if (strict_) {
    // This returns non-equal for some object types, or falls through if it
    // was not lucky.
    EmitStrictTwoHeapObjectCompare(masm, lhs_, rhs_);
  }

  Label check_for_symbols;
  Label flat_string_check;
  // Check for heap-number-heap-number comparison. Can jump to slow case,
  // or load both doubles and jump to the code that handles
  // that case. If the inputs are not doubles then jumps to check_for_symbols.
  // In this case a2 will contain the type of lhs_.
  EmitCheckForTwoHeapNumbers(masm,
                             lhs_,
                             rhs_,
                             &both_loaded_as_doubles,
                             &check_for_symbols,
                             &flat_string_check);

  __ bind(&check_for_symbols);
  if (cc_ == eq && !strict_) {
    // Returns an answer for two symbols or two detectable objects.
    // Otherwise jumps to string case or not both strings case.
    // Assumes that a2 is the type of lhs_ on entry.
    EmitCheckForSymbolsOrObjects(masm, lhs_, rhs_, &flat_string_check, &slow);
  }

  // Check for both being sequential ASCII strings, and inline if that is the
  // case.
  __ bind(&flat_string_check);

  __ JumpIfNonSmisNotBothSequentialAsciiStrings(lhs_, rhs_, a2, a3, &slow);

  __ IncrementCounter(isolate->counters()->string_compare_native(), 1, a2, a3);
  if (cc_ == eq) {
    StringCompareStub::GenerateFlatAsciiStringEquals(masm,
                                                     lhs_,
                                                     rhs_,
                                                     a2,
                                                     a3,
                                                     t0);
  } else {
    StringCompareStub::GenerateCompareFlatAsciiStrings(masm,
                                                       lhs_,
                                                       rhs_,
                                                       a2,
                                                       a3,
                                                       t0,
                                                       t1);
  }
  // Never falls through to here.

  __ bind(&slow);
  // Prepare for call to builtin. Push object pointers, a0 (lhs) first,
  // a1 (rhs) second.
  __ Push(lhs_, rhs_);
  // Figure out which native to call and setup the arguments.
  Builtins::JavaScript native;
  if (cc_ == eq) {
    native = strict_ ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
  } else {
    native = Builtins::COMPARE;
    int ncr;  // NaN compare result.
    if (cc_ == lt || cc_ == le) {
      ncr = GREATER;
    } else {
      ASSERT(cc_ == gt || cc_ == ge);  // Remaining cases.
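      // Loading the opposite extreme makes NaN comparisons come out
      // false: for gt/ge, passing LESS as the NaN result guarantees
      // "NaN > x" is false, mirroring the lt/le case above.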
      ncr = LESS;
    }
    __ li(a0, Operand(Smi::FromInt(ncr)));
    __ push(a0);
  }

  // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
  // tagged as a small integer.
  __ InvokeBuiltin(native, JUMP_FUNCTION);
}


// The stub expects its argument in the tos_ register and returns its result in
// it, too: zero for false, and a non-zero value for true.
void ToBooleanStub::Generate(MacroAssembler* masm) {
  // This stub uses FPU instructions.
  CpuFeatures::Scope scope(FPU);

  Label patch;
  const Register map = t5.is(tos_) ? t3 : t5;

  // undefined -> false.
  CheckOddball(masm, UNDEFINED, Heap::kUndefinedValueRootIndex, false);

  // Boolean -> its value.
  CheckOddball(masm, BOOLEAN, Heap::kFalseValueRootIndex, false);
  CheckOddball(masm, BOOLEAN, Heap::kTrueValueRootIndex, true);

  // 'null' -> false.
  CheckOddball(masm, NULL_TYPE, Heap::kNullValueRootIndex, false);

  if (types_.Contains(SMI)) {
    // Smis: 0 -> false, all other -> true.
    __ And(at, tos_, kSmiTagMask);
    // tos_ contains the correct return value already.
    __ Ret(eq, at, Operand(zero_reg));
  } else if (types_.NeedsMap()) {
    // If we need a map later and have a Smi -> patch.
    __ JumpIfSmi(tos_, &patch);
  }

  if (types_.NeedsMap()) {
    __ lw(map, FieldMemOperand(tos_, HeapObject::kMapOffset));

    if (types_.CanBeUndetectable()) {
      __ lbu(at, FieldMemOperand(map, Map::kBitFieldOffset));
      __ And(at, at, Operand(1 << Map::kIsUndetectable));
      // Undetectable -> false.
      __ Movn(tos_, zero_reg, at);
      __ Ret(ne, at, Operand(zero_reg));
    }
  }

  if (types_.Contains(SPEC_OBJECT)) {
    // Spec object -> true.
    __ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset));
    // tos_ contains the correct non-zero return value already.
    __ Ret(ge, at, Operand(FIRST_SPEC_OBJECT_TYPE));
  }

  if (types_.Contains(STRING)) {
    // String value -> false iff empty.
    __ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset));
    Label skip;
    __ Branch(&skip, ge, at, Operand(FIRST_NONSTRING_TYPE));
    __ Ret(USE_DELAY_SLOT);  // The string length is OK as the return value.
    __ lw(tos_, FieldMemOperand(tos_, String::kLengthOffset));
    __ bind(&skip);
  }

  if (types_.Contains(HEAP_NUMBER)) {
    // Heap number -> false iff +0, -0, or NaN.
    Label not_heap_number;
    __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
    __ Branch(&not_heap_number, ne, map, Operand(at));
    Label zero_or_nan, number;
    __ ldc1(f2, FieldMemOperand(tos_, HeapNumber::kValueOffset));
    __ BranchF(&number, &zero_or_nan, ne, f2, kDoubleRegZero);
    // "tos_" is a register, and contains a non-zero value by default.
    // Hence we only need to overwrite "tos_" with zero to return false for
    // FP_ZERO or FP_NAN cases. Otherwise, by default it returns true.
    __ bind(&zero_or_nan);
    __ mov(tos_, zero_reg);
    __ bind(&number);
    __ Ret();
    __ bind(&not_heap_number);
  }

  __ bind(&patch);
  GenerateTypeTransition(masm);
}


void ToBooleanStub::CheckOddball(MacroAssembler* masm,
                                 Type type,
                                 Heap::RootListIndex value,
                                 bool result) {
  if (types_.Contains(type)) {
    // If we see an expected oddball, return its ToBoolean value tos_.
    __ LoadRoot(at, value);
    __ Subu(at, at, tos_);  // This is a check for equality for the movz below.
    // The value of a root is never NULL, so we can avoid loading a non-null
    // value into tos_ when we want to return 'true'.
    if (!result) {
      __ Movz(tos_, zero_reg, at);
    }
    __ Ret(eq, at, Operand(zero_reg));
  }
}


void ToBooleanStub::GenerateTypeTransition(MacroAssembler* masm) {
  __ Move(a3, tos_);
  __ li(a2, Operand(Smi::FromInt(tos_.code())));
  __ li(a1, Operand(Smi::FromInt(types_.ToByte())));
  __ Push(a3, a2, a1);
  // Patch the caller to an appropriate specialized stub and return the
  // operation result to the caller of the stub.
  __ TailCallExternalReference(
      ExternalReference(IC_Utility(IC::kToBoolean_Patch), masm->isolate()),
      3,
      1);
}


void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
  // We don't allow a GC during a store buffer overflow so there is no need to
  // store the registers in any particular way, but we do have to store and
  // restore them.
  __ MultiPush(kJSCallerSaved | ra.bit());
  if (save_doubles_ == kSaveFPRegs) {
    CpuFeatures::Scope scope(FPU);
    __ MultiPushFPU(kCallerSavedFPU);
  }
  const int argument_count = 1;
  const int fp_argument_count = 0;
  const Register scratch = a1;

  AllowExternalCallThatCantCauseGC scope(masm);
  __ PrepareCallCFunction(argument_count, fp_argument_count, scratch);
  __ li(a0, Operand(ExternalReference::isolate_address()));
  __ CallCFunction(
      ExternalReference::store_buffer_overflow_function(masm->isolate()),
      argument_count);
  if (save_doubles_ == kSaveFPRegs) {
    CpuFeatures::Scope scope(FPU);
    __ MultiPopFPU(kCallerSavedFPU);
  }

  __ MultiPop(kJSCallerSaved | ra.bit());
  __ Ret();
}


void UnaryOpStub::PrintName(StringStream* stream) {
  const char* op_name = Token::Name(op_);
  const char* overwrite_name = NULL;  // Make g++ happy.
  switch (mode_) {
    case UNARY_NO_OVERWRITE: overwrite_name = "Alloc"; break;
    case UNARY_OVERWRITE: overwrite_name = "Overwrite"; break;
  }
  stream->Add("UnaryOpStub_%s_%s_%s",
              op_name,
              overwrite_name,
              UnaryOpIC::GetName(operand_type_));
}


// TODO(svenpanne): Use virtual functions instead of switch.
void UnaryOpStub::Generate(MacroAssembler* masm) {
  switch (operand_type_) {
    case UnaryOpIC::UNINITIALIZED:
      GenerateTypeTransition(masm);
      break;
    case UnaryOpIC::SMI:
      GenerateSmiStub(masm);
      break;
    case UnaryOpIC::HEAP_NUMBER:
      GenerateHeapNumberStub(masm);
      break;
    case UnaryOpIC::GENERIC:
      GenerateGenericStub(masm);
      break;
  }
}


void UnaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
  // Argument is in a0 and v0 at this point, so we can overwrite a0.
  __ li(a2, Operand(Smi::FromInt(op_)));
  __ li(a1, Operand(Smi::FromInt(mode_)));
  __ li(a0, Operand(Smi::FromInt(operand_type_)));
  __ Push(v0, a2, a1, a0);

  __ TailCallExternalReference(
      ExternalReference(IC_Utility(IC::kUnaryOp_Patch), masm->isolate()), 4, 1);
}


// TODO(svenpanne): Use virtual functions instead of switch.
void UnaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
  switch (op_) {
    case Token::SUB:
      GenerateSmiStubSub(masm);
      break;
    case Token::BIT_NOT:
      GenerateSmiStubBitNot(masm);
      break;
    default:
      UNREACHABLE();
  }
}


void UnaryOpStub::GenerateSmiStubSub(MacroAssembler* masm) {
  Label non_smi, slow;
  GenerateSmiCodeSub(masm, &non_smi, &slow);
  __ bind(&non_smi);
  __ bind(&slow);
  GenerateTypeTransition(masm);
}


void UnaryOpStub::GenerateSmiStubBitNot(MacroAssembler* masm) {
  Label non_smi;
  GenerateSmiCodeBitNot(masm, &non_smi);
  __ bind(&non_smi);
  GenerateTypeTransition(masm);
}


void UnaryOpStub::GenerateSmiCodeSub(MacroAssembler* masm,
                                     Label* non_smi,
                                     Label* slow) {
  __ JumpIfNotSmi(a0, non_smi);

  // The result of negating zero or the smallest negative smi is not a smi.
  __ And(t0, a0, ~0x80000000);
  __ Branch(slow, eq, t0, Operand(zero_reg));

  // Return '0 - value'.
  __ Ret(USE_DELAY_SLOT);
  __ subu(v0, zero_reg, a0);
}


void UnaryOpStub::GenerateSmiCodeBitNot(MacroAssembler* masm,
                                        Label* non_smi) {
  __ JumpIfNotSmi(a0, non_smi);

  // Flip bits and revert inverted smi-tag.
  __ Neg(v0, a0);
  __ And(v0, v0, ~kSmiTagMask);
  __ Ret();
}


// TODO(svenpanne): Use virtual functions instead of switch.
void UnaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
  switch (op_) {
    case Token::SUB:
      GenerateHeapNumberStubSub(masm);
      break;
    case Token::BIT_NOT:
      GenerateHeapNumberStubBitNot(masm);
      break;
    default:
      UNREACHABLE();
  }
}


void UnaryOpStub::GenerateHeapNumberStubSub(MacroAssembler* masm) {
  Label non_smi, slow, call_builtin;
  GenerateSmiCodeSub(masm, &non_smi, &call_builtin);
  __ bind(&non_smi);
  GenerateHeapNumberCodeSub(masm, &slow);
  __ bind(&slow);
  GenerateTypeTransition(masm);
  __ bind(&call_builtin);
  GenerateGenericCodeFallback(masm);
}


void UnaryOpStub::GenerateHeapNumberStubBitNot(MacroAssembler* masm) {
  Label non_smi, slow;
  GenerateSmiCodeBitNot(masm, &non_smi);
  __ bind(&non_smi);
  GenerateHeapNumberCodeBitNot(masm, &slow);
  __ bind(&slow);
  GenerateTypeTransition(masm);
}


void UnaryOpStub::GenerateHeapNumberCodeSub(MacroAssembler* masm,
                                            Label* slow) {
  EmitCheckForHeapNumber(masm, a0, a1, t2, slow);
  // a0 is a heap number. Get a new heap number in a1.
  if (mode_ == UNARY_OVERWRITE) {
    __ lw(a2, FieldMemOperand(a0, HeapNumber::kExponentOffset));
    __ Xor(a2, a2, Operand(HeapNumber::kSignMask));  // Flip sign.
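    // (kSignMask is the IEEE-754 sign bit in the word at kExponentOffset,
    // so flipping that one bit negates the double without any FPU code.)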
    __ sw(a2, FieldMemOperand(a0, HeapNumber::kExponentOffset));
  } else {
    Label slow_allocate_heapnumber, heapnumber_allocated;
    __ AllocateHeapNumber(a1, a2, a3, t2, &slow_allocate_heapnumber);
    __ jmp(&heapnumber_allocated);

    __ bind(&slow_allocate_heapnumber);
    {
      FrameScope scope(masm, StackFrame::INTERNAL);
      __ push(a0);
      __ CallRuntime(Runtime::kNumberAlloc, 0);
      __ mov(a1, v0);
      __ pop(a0);
    }

    __ bind(&heapnumber_allocated);
    __ lw(a3, FieldMemOperand(a0, HeapNumber::kMantissaOffset));
    __ lw(a2, FieldMemOperand(a0, HeapNumber::kExponentOffset));
    __ sw(a3, FieldMemOperand(a1, HeapNumber::kMantissaOffset));
    __ Xor(a2, a2, Operand(HeapNumber::kSignMask));  // Flip sign.
    __ sw(a2, FieldMemOperand(a1, HeapNumber::kExponentOffset));
    __ mov(v0, a1);
  }
  __ Ret();
}


void UnaryOpStub::GenerateHeapNumberCodeBitNot(
    MacroAssembler* masm,
    Label* slow) {
  Label impossible;

  EmitCheckForHeapNumber(masm, a0, a1, t2, slow);
  // Convert the heap number in a0 to an untagged integer in a1.
  __ ConvertToInt32(a0, a1, a2, a3, f0, slow);

  // Do the bitwise operation and check if the result fits in a smi.
  Label try_float;
  __ Neg(a1, a1);
  __ Addu(a2, a1, Operand(0x40000000));
  __ Branch(&try_float, lt, a2, Operand(zero_reg));

  // Tag the result as a smi and we're done.
  __ SmiTag(v0, a1);
  __ Ret();

  // Try to store the result in a heap number.
  __ bind(&try_float);
  if (mode_ == UNARY_NO_OVERWRITE) {
    Label slow_allocate_heapnumber, heapnumber_allocated;
    // Allocate a new heap number without zapping v0, which we need if it
    // fails.
    __ AllocateHeapNumber(a2, a3, t0, t2, &slow_allocate_heapnumber);
    __ jmp(&heapnumber_allocated);

    __ bind(&slow_allocate_heapnumber);
    {
      FrameScope scope(masm, StackFrame::INTERNAL);
      __ push(v0);  // Push the heap number, not the untagged int32.
      __ CallRuntime(Runtime::kNumberAlloc, 0);
      __ mov(a2, v0);  // Move the new heap number into a2.
      // Get the heap number into v0, now that the new heap number is in a2.
      __ pop(v0);
    }

    // Convert the heap number in v0 to an untagged integer in a1.
    // This can't go slow-case because it's the same number we already
    // converted once.
    __ ConvertToInt32(v0, a1, a3, t0, f0, &impossible);
    // Negate the result.
    __ Xor(a1, a1, -1);

    __ bind(&heapnumber_allocated);
    __ mov(v0, a2);  // Move newly allocated heap number to v0.
  }

  if (CpuFeatures::IsSupported(FPU)) {
    // Convert the int32 in a1 to the heap number in v0. a2 is corrupted.
    CpuFeatures::Scope scope(FPU);
    __ mtc1(a1, f0);
    __ cvt_d_w(f0, f0);
    __ sdc1(f0, FieldMemOperand(v0, HeapNumber::kValueOffset));
    __ Ret();
  } else {
    // WriteInt32ToHeapNumberStub does not trigger GC, so we do not
    // have to set up a frame.
    WriteInt32ToHeapNumberStub stub(a1, v0, a2, a3);
    __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
  }

  __ bind(&impossible);
  if (FLAG_debug_code) {
    __ stop("Incorrect assumption in bit-not stub");
  }
}


// TODO(svenpanne): Use virtual functions instead of switch.
void UnaryOpStub::GenerateGenericStub(MacroAssembler* masm) {
  switch (op_) {
    case Token::SUB:
      GenerateGenericStubSub(masm);
      break;
    case Token::BIT_NOT:
      GenerateGenericStubBitNot(masm);
      break;
    default:
      UNREACHABLE();
  }
}


void UnaryOpStub::GenerateGenericStubSub(MacroAssembler* masm) {
  Label non_smi, slow;
  GenerateSmiCodeSub(masm, &non_smi, &slow);
  __ bind(&non_smi);
  GenerateHeapNumberCodeSub(masm, &slow);
  __ bind(&slow);
  GenerateGenericCodeFallback(masm);
}


void UnaryOpStub::GenerateGenericStubBitNot(MacroAssembler* masm) {
  Label non_smi, slow;
  GenerateSmiCodeBitNot(masm, &non_smi);
  __ bind(&non_smi);
  GenerateHeapNumberCodeBitNot(masm, &slow);
  __ bind(&slow);
  GenerateGenericCodeFallback(masm);
}


void UnaryOpStub::GenerateGenericCodeFallback(
    MacroAssembler* masm) {
  // Handle the slow case by jumping to the JavaScript builtin.
  __ push(a0);
  switch (op_) {
    case Token::SUB:
      __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_FUNCTION);
      break;
    case Token::BIT_NOT:
      __ InvokeBuiltin(Builtins::BIT_NOT, JUMP_FUNCTION);
      break;
    default:
      UNREACHABLE();
  }
}


void BinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
  Label get_result;

  __ Push(a1, a0);

  __ li(a2, Operand(Smi::FromInt(MinorKey())));
  __ li(a1, Operand(Smi::FromInt(op_)));
  __ li(a0, Operand(Smi::FromInt(operands_type_)));
  __ Push(a2, a1, a0);

  __ TailCallExternalReference(
      ExternalReference(IC_Utility(IC::kBinaryOp_Patch),
                        masm->isolate()),
      5,
      1);
}


void BinaryOpStub::GenerateTypeTransitionWithSavedArgs(
    MacroAssembler* masm) {
  UNIMPLEMENTED();
}


void BinaryOpStub::Generate(MacroAssembler* masm) {
  // Explicitly allow generation of nested stubs. It is safe here because
  // generation code does not use any raw pointers.
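  // (Generating a nested stub may allocate and thus move heap objects;
  // handles and root-relative loads stay valid across such moves, while
  // raw pointers would not.)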
  AllowStubCallsScope allow_stub_calls(masm, true);
  switch (operands_type_) {
    case BinaryOpIC::UNINITIALIZED:
      GenerateTypeTransition(masm);
      break;
    case BinaryOpIC::SMI:
      GenerateSmiStub(masm);
      break;
    case BinaryOpIC::INT32:
      GenerateInt32Stub(masm);
      break;
    case BinaryOpIC::HEAP_NUMBER:
      GenerateHeapNumberStub(masm);
      break;
    case BinaryOpIC::ODDBALL:
      GenerateOddballStub(masm);
      break;
    case BinaryOpIC::BOTH_STRING:
      GenerateBothStringStub(masm);
      break;
    case BinaryOpIC::STRING:
      GenerateStringStub(masm);
      break;
    case BinaryOpIC::GENERIC:
      GenerateGeneric(masm);
      break;
    default:
      UNREACHABLE();
  }
}


void BinaryOpStub::PrintName(StringStream* stream) {
  const char* op_name = Token::Name(op_);
  const char* overwrite_name;
  switch (mode_) {
    case NO_OVERWRITE: overwrite_name = "Alloc"; break;
    case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break;
    case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break;
    default: overwrite_name = "UnknownOverwrite"; break;
  }
  stream->Add("BinaryOpStub_%s_%s_%s",
              op_name,
              overwrite_name,
              BinaryOpIC::GetName(operands_type_));
}


void BinaryOpStub::GenerateSmiSmiOperation(MacroAssembler* masm) {
  Register left = a1;
  Register right = a0;

  Register scratch1 = t0;
  Register scratch2 = t1;

  ASSERT(right.is(a0));
  STATIC_ASSERT(kSmiTag == 0);

  Label not_smi_result;
  switch (op_) {
    case Token::ADD:
      __ AdduAndCheckForOverflow(v0, left, right, scratch1);
      __ RetOnNoOverflow(scratch1);
      // No need to revert anything - right and left are intact.
      break;
    case Token::SUB:
      __ SubuAndCheckForOverflow(v0, left, right, scratch1);
      __ RetOnNoOverflow(scratch1);
      // No need to revert anything - right and left are intact.
      break;
    case Token::MUL: {
      // Remove tag from one of the operands. This way the multiplication
      // result will be a smi if it fits the smi range.
      __ SmiUntag(scratch1, right);
      // Do multiplication.
      // lo = lower 32 bits of scratch1 * left.
      // hi = higher 32 bits of scratch1 * left.
      __ Mult(left, scratch1);
      // Check for overflowing the smi range - no overflow if higher 33 bits
      // of the result are identical.
      __ mflo(scratch1);
      __ mfhi(scratch2);
      __ sra(scratch1, scratch1, 31);
      __ Branch(&not_smi_result, ne, scratch1, Operand(scratch2));
      // Go slow on zero result to handle -0.
      __ mflo(v0);
      __ Ret(ne, v0, Operand(zero_reg));
      // We need -0 if we were multiplying a negative number with 0 to get 0.
      // We know one of them was zero.
      __ Addu(scratch2, right, left);
      Label skip;
      // ARM uses the 'pl' condition, which is 'ge'.
      // Negating it results in 'lt'.
      __ Branch(&skip, lt, scratch2, Operand(zero_reg));
      ASSERT(Smi::FromInt(0) == 0);
      __ Ret(USE_DELAY_SLOT);
      __ mov(v0, zero_reg);  // Return smi 0 if the non-zero one was positive.
      __ bind(&skip);
      // We fall through here if we multiplied a negative number with 0,
      // because that would mean we should produce -0.
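      // Worked example: left = -5 (tagged), right = 0. lo == hi == 0, so
      // the overflow check passes; v0 == 0, so the early Ret is skipped;
      // right + left is negative, so the "return smi 0" path above is
      // skipped and we fall through to not_smi_result, where the FP path
      // can produce the required -0.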
      }
      break;
    case Token::DIV: {
      Label done;
      __ SmiUntag(scratch2, right);
      __ SmiUntag(scratch1, left);
      __ Div(scratch1, scratch2);
      // A minor optimization: div may be calculated asynchronously, so we
      // check for division by zero before getting the result.
      __ Branch(&not_smi_result, eq, scratch2, Operand(zero_reg));
      // If the result is 0, we need to make sure the divisor (right) is
      // positive, otherwise it is a -0 case.
      // Quotient is in 'lo', remainder is in 'hi'.
      // Check for no remainder first.
      __ mfhi(scratch1);
      __ Branch(&not_smi_result, ne, scratch1, Operand(zero_reg));
      __ mflo(scratch1);
      __ Branch(&done, ne, scratch1, Operand(zero_reg));
      __ Branch(&not_smi_result, lt, scratch2, Operand(zero_reg));
      __ bind(&done);
      // Check that the signed result fits in a Smi.
      __ Addu(scratch2, scratch1, Operand(0x40000000));
      __ Branch(&not_smi_result, lt, scratch2, Operand(zero_reg));
      __ SmiTag(v0, scratch1);
      __ Ret();
      }
      break;
    case Token::MOD: {
      Label done;
      __ SmiUntag(scratch2, right);
      __ SmiUntag(scratch1, left);
      __ Div(scratch1, scratch2);
      // A minor optimization: div may be calculated asynchronously, so we
      // check for division by 0 before calling mfhi.
      // Check for zero on the right hand side.
      __ Branch(&not_smi_result, eq, scratch2, Operand(zero_reg));
      // If the result is 0, we need to make sure the dividend (left) is
      // positive (or 0), otherwise it is a -0 case.
      // Remainder is in 'hi'.
      __ mfhi(scratch2);
      __ Branch(&done, ne, scratch2, Operand(zero_reg));
      __ Branch(&not_smi_result, lt, scratch1, Operand(zero_reg));
      __ bind(&done);
      // Check that the signed result fits in a Smi.
      __ Addu(scratch1, scratch2, Operand(0x40000000));
      __ Branch(&not_smi_result, lt, scratch1, Operand(zero_reg));
      __ SmiTag(v0, scratch2);
      __ Ret();
      }
      break;
    case Token::BIT_OR:
      __ Ret(USE_DELAY_SLOT);
      __ or_(v0, left, right);
      break;
    case Token::BIT_AND:
      __ Ret(USE_DELAY_SLOT);
      __ and_(v0, left, right);
      break;
    case Token::BIT_XOR:
      __ Ret(USE_DELAY_SLOT);
      __ xor_(v0, left, right);
      break;
    case Token::SAR:
      // Remove tags from right operand.
      __ GetLeastBitsFromSmi(scratch1, right, 5);
      __ srav(scratch1, left, scratch1);
      // Smi tag result.
      __ And(v0, scratch1, ~kSmiTagMask);
      __ Ret();
      break;
    case Token::SHR:
      // Remove tags from operands. We can't do this on a 31 bit number
      // because then the 0s get shifted into bit 30 instead of bit 31.
      __ SmiUntag(scratch1, left);
      __ GetLeastBitsFromSmi(scratch2, right, 5);
      __ srlv(v0, scratch1, scratch2);
      // Unsigned shift is not allowed to produce a negative number, so
      // check the sign bit and the sign bit after Smi tagging.
      __ And(scratch1, v0, Operand(0xc0000000));
      __ Branch(&not_smi_result, ne, scratch1, Operand(zero_reg));
      // Smi tag result.
      __ SmiTag(v0);
      __ Ret();
      break;
    case Token::SHL:
      // Remove tags from operands.
      __ SmiUntag(scratch1, left);
      __ GetLeastBitsFromSmi(scratch2, right, 5);
      __ sllv(scratch1, scratch1, scratch2);
      // Check that the signed result fits in a Smi.
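      // Adding 0x40000000 turns the sign bit on exactly for values
      // outside [-2^30, 2^30 - 1], the range that survives smi tagging;
      // a negative sum therefore means the result is not a smi.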
      __ Addu(scratch2, scratch1, Operand(0x40000000));
      __ Branch(&not_smi_result, lt, scratch2, Operand(zero_reg));
      __ SmiTag(v0, scratch1);
      __ Ret();
      break;
    default:
      UNREACHABLE();
  }
  __ bind(&not_smi_result);
}


void BinaryOpStub::GenerateFPOperation(MacroAssembler* masm,
                                       bool smi_operands,
                                       Label* not_numbers,
                                       Label* gc_required) {
  Register left = a1;
  Register right = a0;
  Register scratch1 = t3;
  Register scratch2 = t5;
  Register scratch3 = t0;

  ASSERT(smi_operands || (not_numbers != NULL));
  if (smi_operands && FLAG_debug_code) {
    __ AbortIfNotSmi(left);
    __ AbortIfNotSmi(right);
  }

  Register heap_number_map = t2;
  __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);

  switch (op_) {
    case Token::ADD:
    case Token::SUB:
    case Token::MUL:
    case Token::DIV:
    case Token::MOD: {
      // Load left and right operands into f12 and f14 or a0/a1 and a2/a3
      // depending on whether FPU is available or not.
      FloatingPointHelper::Destination destination =
          CpuFeatures::IsSupported(FPU) &&
          op_ != Token::MOD ?
              FloatingPointHelper::kFPURegisters :
              FloatingPointHelper::kCoreRegisters;

      // Allocate new heap number for result.
      Register result = s0;
      GenerateHeapResultAllocation(
          masm, result, heap_number_map, scratch1, scratch2, gc_required);

      // Load the operands.
      if (smi_operands) {
        FloatingPointHelper::LoadSmis(masm, destination, scratch1, scratch2);
      } else {
        FloatingPointHelper::LoadOperands(masm,
                                          destination,
                                          heap_number_map,
                                          scratch1,
                                          scratch2,
                                          not_numbers);
      }

      // Calculate the result.
      if (destination == FloatingPointHelper::kFPURegisters) {
        // Using FPU registers:
        // f12: Left value.
        // f14: Right value.
        CpuFeatures::Scope scope(FPU);
        switch (op_) {
          case Token::ADD:
            __ add_d(f10, f12, f14);
            break;
          case Token::SUB:
            __ sub_d(f10, f12, f14);
            break;
          case Token::MUL:
            __ mul_d(f10, f12, f14);
            break;
          case Token::DIV:
            __ div_d(f10, f12, f14);
            break;
          default:
            UNREACHABLE();
        }

        // ARM uses a workaround here because of the unaligned HeapNumber
        // kValueOffset. On MIPS this workaround is built into sdc1 so
        // there's no point in generating even more instructions.
        __ sdc1(f10, FieldMemOperand(result, HeapNumber::kValueOffset));
        __ Ret(USE_DELAY_SLOT);
        __ mov(v0, result);
      } else {
        // Call the C function to handle the double operation.
        FloatingPointHelper::CallCCodeForDoubleOperation(masm,
                                                         op_,
                                                         result,
                                                         scratch1);
        if (FLAG_debug_code) {
          __ stop("Unreachable code.");
        }
      }
      break;
    }
    case Token::BIT_OR:
    case Token::BIT_XOR:
    case Token::BIT_AND:
    case Token::SAR:
    case Token::SHR:
    case Token::SHL: {
      if (smi_operands) {
        __ SmiUntag(a3, left);
        __ SmiUntag(a2, right);
      } else {
        // Convert operands to 32-bit integers. Right in a2 and left in a3.
        FloatingPointHelper::ConvertNumberToInt32(masm,
                                                  left,
                                                  a3,
                                                  heap_number_map,
                                                  scratch1,
                                                  scratch2,
                                                  scratch3,
                                                  f0,
                                                  not_numbers);
        FloatingPointHelper::ConvertNumberToInt32(masm,
                                                  right,
                                                  a2,
                                                  heap_number_map,
                                                  scratch1,
                                                  scratch2,
                                                  scratch3,
                                                  f0,
                                                  not_numbers);
      }
      Label result_not_a_smi;
      switch (op_) {
        case Token::BIT_OR:
          __ Or(a2, a3, Operand(a2));
          break;
        case Token::BIT_XOR:
          __ Xor(a2, a3, Operand(a2));
          break;
        case Token::BIT_AND:
          __ And(a2, a3, Operand(a2));
          break;
        case Token::SAR:
          // Use only the 5 least significant bits of the shift count.
          __ GetLeastBitsFromInt32(a2, a2, 5);
          __ srav(a2, a3, a2);
          break;
        case Token::SHR:
          // Use only the 5 least significant bits of the shift count.
          __ GetLeastBitsFromInt32(a2, a2, 5);
          __ srlv(a2, a3, a2);
          // SHR is special because it is required to produce a positive
          // answer. The code below for writing into heap numbers isn't
          // capable of writing the register as an unsigned int so we go to
          // slow case if we hit this case.
          if (CpuFeatures::IsSupported(FPU)) {
            __ Branch(&result_not_a_smi, lt, a2, Operand(zero_reg));
          } else {
            __ Branch(not_numbers, lt, a2, Operand(zero_reg));
          }
          break;
        case Token::SHL:
          // Use only the 5 least significant bits of the shift count.
          __ GetLeastBitsFromInt32(a2, a2, 5);
          __ sllv(a2, a3, a2);
          break;
        default:
          UNREACHABLE();
      }
      // Check that the *signed* result fits in a smi.
      __ Addu(a3, a2, Operand(0x40000000));
      __ Branch(&result_not_a_smi, lt, a3, Operand(zero_reg));
      __ SmiTag(v0, a2);
      __ Ret();

      // Allocate new heap number for result.
      __ bind(&result_not_a_smi);
      Register result = t1;
      if (smi_operands) {
        __ AllocateHeapNumber(
            result, scratch1, scratch2, heap_number_map, gc_required);
      } else {
        GenerateHeapResultAllocation(
            masm, result, heap_number_map, scratch1, scratch2, gc_required);
      }

      // a2: Answer as signed int32.
      // t1: Heap number to write answer into.

      // Nothing can go wrong now, so move the heap number to v0, which is the
      // result.
      __ mov(v0, t1);

      if (CpuFeatures::IsSupported(FPU)) {
        // Convert the int32 in a2 to the heap number in v0. As
        // mentioned above SHR needs to always produce a positive result.
        CpuFeatures::Scope scope(FPU);
        __ mtc1(a2, f0);
        if (op_ == Token::SHR) {
          __ Cvt_d_uw(f0, f0, f22);
        } else {
          __ cvt_d_w(f0, f0);
        }
        // ARM uses a workaround here because of the unaligned HeapNumber
        // kValueOffset. On MIPS this workaround is built into sdc1 so
        // there's no point in generating even more instructions.
        __ sdc1(f0, FieldMemOperand(v0, HeapNumber::kValueOffset));
        __ Ret();
      } else {
        // Tail call that writes the int32 in a2 to the heap number in v0,
        // using a3 and a0 as scratch. v0 is preserved and returned.
        WriteInt32ToHeapNumberStub stub(a2, v0, a3, a0);
        __ TailCallStub(&stub);
      }
      break;
    }
    default:
      UNREACHABLE();
  }
}


// Generate the smi code. If the operation on smis is successful, a return is
// generated. If the result is not a smi and heap number allocation is not
// requested, the code falls through. If number allocation is requested but a
// heap number cannot be allocated, the code jumps to the label gc_required.
void BinaryOpStub::GenerateSmiCode(
    MacroAssembler* masm,
    Label* use_runtime,
    Label* gc_required,
    SmiCodeGenerateHeapNumberResults allow_heapnumber_results) {
  Label not_smis;

  Register left = a1;
  Register right = a0;
  Register scratch1 = t3;
  Register scratch2 = t5;

  // Perform combined smi check on both operands.
  __ Or(scratch1, left, Operand(right));
  STATIC_ASSERT(kSmiTag == 0);
  __ JumpIfNotSmi(scratch1, &not_smis);

  // If the smi-smi operation results in a smi, a return is generated.
  GenerateSmiSmiOperation(masm);

  // If heap number results are possible generate the result in an allocated
  // heap number.
  if (allow_heapnumber_results == ALLOW_HEAPNUMBER_RESULTS) {
    GenerateFPOperation(masm, true, use_runtime, gc_required);
  }
  __ bind(&not_smis);
}


void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
  Label not_smis, call_runtime;

  if (result_type_ == BinaryOpIC::UNINITIALIZED ||
      result_type_ == BinaryOpIC::SMI) {
    // Only allow smi results.
    GenerateSmiCode(masm, &call_runtime, NULL, NO_HEAPNUMBER_RESULTS);
  } else {
    // Allow heap number result and don't make a transition if a heap number
    // cannot be allocated.
    GenerateSmiCode(masm,
                    &call_runtime,
                    &call_runtime,
                    ALLOW_HEAPNUMBER_RESULTS);
  }

  // Code falls through if the result is not returned as either a smi or heap
  // number.
  GenerateTypeTransition(masm);

  __ bind(&call_runtime);
  GenerateCallRuntime(masm);
}


void BinaryOpStub::GenerateStringStub(MacroAssembler* masm) {
  ASSERT(operands_type_ == BinaryOpIC::STRING);
  // Try to add arguments as strings, otherwise, transition to the generic
  // BinaryOpIC type.
  GenerateAddStrings(masm);
  GenerateTypeTransition(masm);
}


void BinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) {
  Label call_runtime;
  ASSERT(operands_type_ == BinaryOpIC::BOTH_STRING);
  ASSERT(op_ == Token::ADD);
  // If both arguments are strings, call the string add stub.
  // Otherwise, do a transition.

  // Registers containing left and right operands respectively.
  Register left = a1;
  Register right = a0;

  // Test if left operand is a string.
  __ JumpIfSmi(left, &call_runtime);
  __ GetObjectType(left, a2, a2);
  __ Branch(&call_runtime, ge, a2, Operand(FIRST_NONSTRING_TYPE));

  // Test if right operand is a string.
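  // (Same three-instruction test as for the left operand: a string is a
  // non-smi heap object whose instance type is below FIRST_NONSTRING_TYPE.)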
  __ JumpIfSmi(right, &call_runtime);
  __ GetObjectType(right, a2, a2);
  __ Branch(&call_runtime, ge, a2, Operand(FIRST_NONSTRING_TYPE));

  StringAddStub string_add_stub(NO_STRING_CHECK_IN_STUB);
  GenerateRegisterArgsPush(masm);
  __ TailCallStub(&string_add_stub);

  __ bind(&call_runtime);
  GenerateTypeTransition(masm);
}


void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
  ASSERT(operands_type_ == BinaryOpIC::INT32);

  Register left = a1;
  Register right = a0;
  Register scratch1 = t3;
  Register scratch2 = t5;
  FPURegister double_scratch = f0;
  FPURegister single_scratch = f6;

  Register heap_number_result = no_reg;
  Register heap_number_map = t2;
  __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);

  Label call_runtime;
  // Label for type transition, used for wrong input or output types.
  // Both labels are currently bound to the same position. We use two
  // different labels to differentiate the causes leading to type transition.
  Label transition;

  // Smi-smi fast case.
  Label skip;
  __ Or(scratch1, left, right);
  __ JumpIfNotSmi(scratch1, &skip);
  GenerateSmiSmiOperation(masm);
  // Fall through if the result is not a smi.
  __ bind(&skip);

  switch (op_) {
    case Token::ADD:
    case Token::SUB:
    case Token::MUL:
    case Token::DIV:
    case Token::MOD: {
      // Load both operands and check that they are 32-bit integers.
      // Jump to type transition if they are not. The registers a0 and a1
      // (right and left) are preserved for the runtime call.
      FloatingPointHelper::Destination destination =
          (CpuFeatures::IsSupported(FPU) && op_ != Token::MOD)
              ? FloatingPointHelper::kFPURegisters
              : FloatingPointHelper::kCoreRegisters;

      FloatingPointHelper::LoadNumberAsInt32Double(masm,
                                                   right,
                                                   destination,
                                                   f14,
                                                   a2,
                                                   a3,
                                                   heap_number_map,
                                                   scratch1,
                                                   scratch2,
                                                   f2,
                                                   &transition);
      FloatingPointHelper::LoadNumberAsInt32Double(masm,
                                                   left,
                                                   destination,
                                                   f12,
                                                   t0,
                                                   t1,
                                                   heap_number_map,
                                                   scratch1,
                                                   scratch2,
                                                   f2,
                                                   &transition);

      if (destination == FloatingPointHelper::kFPURegisters) {
        CpuFeatures::Scope scope(FPU);
        Label return_heap_number;
        switch (op_) {
          case Token::ADD:
            __ add_d(f10, f12, f14);
            break;
          case Token::SUB:
            __ sub_d(f10, f12, f14);
            break;
          case Token::MUL:
            __ mul_d(f10, f12, f14);
            break;
          case Token::DIV:
            __ div_d(f10, f12, f14);
            break;
          default:
            UNREACHABLE();
        }

        if (op_ != Token::DIV) {
          // These operations produce an integer result.
          // Try to return a smi if we can.
          // Otherwise return a heap number if allowed, or jump to type
          // transition.

          Register except_flag = scratch2;
          __ EmitFPUTruncate(kRoundToZero,
                             single_scratch,
                             f10,
                             scratch1,
                             except_flag);

          if (result_type_ <= BinaryOpIC::INT32) {
            // If except_flag != 0, result does not fit in a 32-bit integer.
            __ Branch(&transition, ne, except_flag, Operand(zero_reg));
          }

          // Check if the result fits in a smi.
          __ mfc1(scratch1, single_scratch);
          __ Addu(scratch2, scratch1, Operand(0x40000000));
          // If not try to return a heap number.
          __ Branch(&return_heap_number, lt, scratch2, Operand(zero_reg));
          // Check for minus zero. Return heap number for minus zero.
          Label not_zero;
          __ Branch(&not_zero, ne, scratch1, Operand(zero_reg));
          __ mfc1(scratch2, f11);
          __ And(scratch2, scratch2, HeapNumber::kSignMask);
          __ Branch(&return_heap_number, ne, scratch2, Operand(zero_reg));
          __ bind(&not_zero);

          // Tag the result and return.
          __ SmiTag(v0, scratch1);
          __ Ret();
        } else {
          // DIV just falls through to allocating a heap number.
        }

        __ bind(&return_heap_number);
        // Return a heap number, or fall through to type transition or runtime
        // call if we can't.
        if (result_type_ >= ((op_ == Token::DIV) ? BinaryOpIC::HEAP_NUMBER
                                                 : BinaryOpIC::INT32)) {
          // We are using FPU registers so s0 is available.
          heap_number_result = s0;
          GenerateHeapResultAllocation(masm,
                                       heap_number_result,
                                       heap_number_map,
                                       scratch1,
                                       scratch2,
                                       &call_runtime);
          __ mov(v0, heap_number_result);
          __ sdc1(f10, FieldMemOperand(v0, HeapNumber::kValueOffset));
          __ Ret();
        }

        // A DIV operation expecting an integer result falls through
        // to type transition.

      } else {
        // We preserved a0 and a1 to be able to call runtime.
        // Save the left value on the stack.
        __ Push(t1, t0);

        Label pop_and_call_runtime;

        // Allocate a heap number to store the result.
        heap_number_result = s0;
        GenerateHeapResultAllocation(masm,
                                     heap_number_result,
                                     heap_number_map,
                                     scratch1,
                                     scratch2,
                                     &pop_and_call_runtime);

        // Load the left value from the value saved on the stack.
        __ Pop(a1, a0);

        // Call the C function to handle the double operation.
        FloatingPointHelper::CallCCodeForDoubleOperation(
            masm, op_, heap_number_result, scratch1);
        if (FLAG_debug_code) {
          __ stop("Unreachable code.");
        }

        __ bind(&pop_and_call_runtime);
        __ Drop(2);
        __ Branch(&call_runtime);
      }

      break;
    }

    case Token::BIT_OR:
    case Token::BIT_XOR:
    case Token::BIT_AND:
    case Token::SAR:
    case Token::SHR:
    case Token::SHL: {
      Label return_heap_number;
      Register scratch3 = t1;
      // Convert operands to 32-bit integers. Right in a2 and left in a3. The
      // registers a0 and a1 (right and left) are preserved for the runtime
      // call.
      FloatingPointHelper::LoadNumberAsInt32(masm,
                                             left,
                                             a3,
                                             heap_number_map,
                                             scratch1,
                                             scratch2,
                                             scratch3,
                                             f0,
                                             &transition);
      FloatingPointHelper::LoadNumberAsInt32(masm,
                                             right,
                                             a2,
                                             heap_number_map,
                                             scratch1,
                                             scratch2,
                                             scratch3,
                                             f0,
                                             &transition);

      // The ECMA-262 standard specifies that, for shift operations, only the
      // 5 least significant bits of the shift value should be used.
      switch (op_) {
        case Token::BIT_OR:
          __ Or(a2, a3, Operand(a2));
          break;
        case Token::BIT_XOR:
          __ Xor(a2, a3, Operand(a2));
          break;
        case Token::BIT_AND:
          __ And(a2, a3, Operand(a2));
          break;
        case Token::SAR:
          __ And(a2, a2, Operand(0x1f));
          __ srav(a2, a3, a2);
          break;
        case Token::SHR:
          __ And(a2, a2, Operand(0x1f));
          __ srlv(a2, a3, a2);
          // SHR is special because it is required to produce a positive
          // answer. We only get a negative result if the shift value (a2)
          // is 0.
          // This result cannot be represented as a signed 32-bit integer, try
          // to return a heap number if we can.
          // The non FPU code does not support this special case, so jump to
          // runtime if we don't support it.
          if (CpuFeatures::IsSupported(FPU)) {
            __ Branch((result_type_ <= BinaryOpIC::INT32)
                          ? &transition
                          : &return_heap_number,
                      lt,
                      a2,
                      Operand(zero_reg));
          } else {
            __ Branch((result_type_ <= BinaryOpIC::INT32)
                          ? &transition
                          : &call_runtime,
                      lt,
                      a2,
                      Operand(zero_reg));
          }
          break;
        case Token::SHL:
          __ And(a2, a2, Operand(0x1f));
          __ sllv(a2, a3, a2);
          break;
        default:
          UNREACHABLE();
      }

      // Check if the result fits in a smi.
      __ Addu(scratch1, a2, Operand(0x40000000));
      // If not try to return a heap number. (We know the result is an int32.)
      __ Branch(&return_heap_number, lt, scratch1, Operand(zero_reg));
      // Tag the result and return.
      __ SmiTag(v0, a2);
      __ Ret();

      __ bind(&return_heap_number);
      heap_number_result = t1;
      GenerateHeapResultAllocation(masm,
                                   heap_number_result,
                                   heap_number_map,
                                   scratch1,
                                   scratch2,
                                   &call_runtime);

      if (CpuFeatures::IsSupported(FPU)) {
        CpuFeatures::Scope scope(FPU);

        if (op_ != Token::SHR) {
          // Convert the result to a floating point value.
          __ mtc1(a2, double_scratch);
          __ cvt_d_w(double_scratch, double_scratch);
        } else {
          // The result must be interpreted as an unsigned 32-bit integer.
          __ mtc1(a2, double_scratch);
          __ Cvt_d_uw(double_scratch, double_scratch, single_scratch);
        }

        // Store the result.
        __ mov(v0, heap_number_result);
        __ sdc1(double_scratch, FieldMemOperand(v0, HeapNumber::kValueOffset));
        __ Ret();
      } else {
        // Tail call that writes the int32 in a2 to the heap number in v0,
        // using a3 and a0 as scratch. v0 is preserved and returned.
        __ mov(a0, t1);
        WriteInt32ToHeapNumberStub stub(a2, v0, a3, a0);
        __ TailCallStub(&stub);
      }

      break;
    }

    default:
      UNREACHABLE();
  }

  // We never expect DIV to yield an integer result, so we always generate
  // type transition code for DIV operations expecting an integer result: the
  // code will fall through to this type transition.
  if (transition.is_linked() ||
      ((op_ == Token::DIV) && (result_type_ <= BinaryOpIC::INT32))) {
    __ bind(&transition);
    GenerateTypeTransition(masm);
  }

  __ bind(&call_runtime);
  GenerateCallRuntime(masm);
}


void BinaryOpStub::GenerateOddballStub(MacroAssembler* masm) {
  Label call_runtime;

  if (op_ == Token::ADD) {
    // Handle string addition here, because it is the only operation
    // that does not do a ToNumber conversion on the operands.
    GenerateAddStrings(masm);
  }

  // Convert oddball arguments to numbers.
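  // undefined converts to NaN for arithmetic operators but to 0 for the
  // bitwise ones (ToInt32(NaN) == 0); the two blocks below apply this to
  // the left (a1) and then the right (a0) operand.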
  Label check, done;
  __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
  __ Branch(&check, ne, a1, Operand(t0));
  if (Token::IsBitOp(op_)) {
    __ li(a1, Operand(Smi::FromInt(0)));
  } else {
    __ LoadRoot(a1, Heap::kNanValueRootIndex);
  }
  __ jmp(&done);
  __ bind(&check);
  __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
  __ Branch(&done, ne, a0, Operand(t0));
  if (Token::IsBitOp(op_)) {
    __ li(a0, Operand(Smi::FromInt(0)));
  } else {
    __ LoadRoot(a0, Heap::kNanValueRootIndex);
  }
  __ bind(&done);

  GenerateHeapNumberStub(masm);
}


void BinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
  Label call_runtime;
  GenerateFPOperation(masm, false, &call_runtime, &call_runtime);

  __ bind(&call_runtime);
  GenerateCallRuntime(masm);
}


void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
  Label call_runtime, call_string_add_or_runtime;

  GenerateSmiCode(masm, &call_runtime, &call_runtime, ALLOW_HEAPNUMBER_RESULTS);

  GenerateFPOperation(masm, false, &call_string_add_or_runtime, &call_runtime);

  __ bind(&call_string_add_or_runtime);
  if (op_ == Token::ADD) {
    GenerateAddStrings(masm);
  }

  __ bind(&call_runtime);
  GenerateCallRuntime(masm);
}


void BinaryOpStub::GenerateAddStrings(MacroAssembler* masm) {
  ASSERT(op_ == Token::ADD);
  Label left_not_string, call_runtime;

  Register left = a1;
  Register right = a0;

  // Check if left argument is a string.
  __ JumpIfSmi(left, &left_not_string);
  __ GetObjectType(left, a2, a2);
  __ Branch(&left_not_string, ge, a2, Operand(FIRST_NONSTRING_TYPE));

  StringAddStub string_add_left_stub(NO_STRING_CHECK_LEFT_IN_STUB);
  GenerateRegisterArgsPush(masm);
  __ TailCallStub(&string_add_left_stub);

  // Left operand is not a string, test right.
  __ bind(&left_not_string);
  __ JumpIfSmi(right, &call_runtime);
  __ GetObjectType(right, a2, a2);
  __ Branch(&call_runtime, ge, a2, Operand(FIRST_NONSTRING_TYPE));

  StringAddStub string_add_right_stub(NO_STRING_CHECK_RIGHT_IN_STUB);
  GenerateRegisterArgsPush(masm);
  __ TailCallStub(&string_add_right_stub);

  // At least one argument is not a string.
  __ bind(&call_runtime);
}


void BinaryOpStub::GenerateCallRuntime(MacroAssembler* masm) {
  GenerateRegisterArgsPush(masm);
  switch (op_) {
    case Token::ADD:
      __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
      break;
    case Token::SUB:
      __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
      break;
    case Token::MUL:
      __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
      break;
    case Token::DIV:
      __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
      break;
    case Token::MOD:
      __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
      break;
    case Token::BIT_OR:
      __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
      break;
    case Token::BIT_AND:
      __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
      break;
    case Token::BIT_XOR:
      __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
      break;
    case Token::SAR:
      __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
      break;
    case Token::SHR:
      __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
      break;
    case Token::SHL:
      __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
      break;
    default:
      UNREACHABLE();
  }
}


void BinaryOpStub::GenerateHeapResultAllocation(
    MacroAssembler* masm,
    Register result,
    Register heap_number_map,
    Register scratch1,
    Register scratch2,
    Label* gc_required) {

  // Code below will scratch result if allocation fails. To keep both arguments
  // intact for the runtime call result cannot be one of these.
  ASSERT(!result.is(a0) && !result.is(a1));

  if (mode_ == OVERWRITE_LEFT || mode_ == OVERWRITE_RIGHT) {
    Label skip_allocation, allocated;
    Register overwritable_operand = mode_ == OVERWRITE_LEFT ? a1 : a0;
    // If the overwritable operand is already an object, we skip the
    // allocation of a heap number.
    __ JumpIfNotSmi(overwritable_operand, &skip_allocation);
    // Allocate a heap number for the result.
    __ AllocateHeapNumber(
        result, scratch1, scratch2, heap_number_map, gc_required);
    __ Branch(&allocated);
    __ bind(&skip_allocation);
    // Use object holding the overwritable operand for result.
    __ mov(result, overwritable_operand);
    __ bind(&allocated);
  } else {
    ASSERT(mode_ == NO_OVERWRITE);
    __ AllocateHeapNumber(
        result, scratch1, scratch2, heap_number_map, gc_required);
  }
}


void BinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
  __ Push(a1, a0);
}


void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
  // Untagged case: double input in f4, double result goes
  //   into f4.
  // Tagged case: tagged input on top of stack and in a0,
  //   tagged result (heap number) goes into v0.

  Label input_not_smi;
  Label loaded;
  Label calculate;
  Label invalid_cache;
  const Register scratch0 = t5;
  const Register scratch1 = t3;
  const Register cache_entry = a0;
  const bool tagged = (argument_type_ == TAGGED);

  if (CpuFeatures::IsSupported(FPU)) {
    CpuFeatures::Scope scope(FPU);

    if (tagged) {
      // Argument is a number and is on stack and in a0.
      // Load argument and check if it is a smi.
      __ JumpIfNotSmi(a0, &input_not_smi);

      // Input is a smi. Convert to double and load the low and high words
      // of the double into a2, a3.
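      // (A smi stores its value shifted left by kSmiTagSize, so one
      // arithmetic right shift recovers the untagged integer.)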
      __ sra(t0, a0, kSmiTagSize);
      __ mtc1(t0, f4);
      __ cvt_d_w(f4, f4);
      __ Move(a2, a3, f4);
      __ Branch(&loaded);

      __ bind(&input_not_smi);
      // Check if input is a HeapNumber.
      __ CheckMap(a0,
                  a1,
                  Heap::kHeapNumberMapRootIndex,
                  &calculate,
                  DONT_DO_SMI_CHECK);
      // Input is a HeapNumber. Store the
      // low and high words into a2, a3.
      __ lw(a2, FieldMemOperand(a0, HeapNumber::kValueOffset));
      __ lw(a3, FieldMemOperand(a0, HeapNumber::kValueOffset + 4));
    } else {
      // Input is untagged double in f4. Output goes to f4.
      __ Move(a2, a3, f4);
    }
    __ bind(&loaded);
    // a2 = low 32 bits of double value.
    // a3 = high 32 bits of double value.
    // Compute hash (the shifts are arithmetic):
    //   h = (low ^ high); h ^= h >> 16; h ^= h >> 8; h = h & (cacheSize - 1);
    __ Xor(a1, a2, a3);
    __ sra(t0, a1, 16);
    __ Xor(a1, a1, t0);
    __ sra(t0, a1, 8);
    __ Xor(a1, a1, t0);
    ASSERT(IsPowerOf2(TranscendentalCache::SubCache::kCacheSize));
    __ And(a1, a1, Operand(TranscendentalCache::SubCache::kCacheSize - 1));

    // a2 = low 32 bits of double value.
    // a3 = high 32 bits of double value.
    // a1 = TranscendentalCache::hash(double value).
    __ li(cache_entry, Operand(
        ExternalReference::transcendental_cache_array_address(
            masm->isolate())));
    // a0 points to cache array.
    __ lw(cache_entry, MemOperand(cache_entry, type_ * sizeof(
        Isolate::Current()->transcendental_cache()->caches_[0])));
    // a0 points to the cache for the type type_.
    // If NULL, the cache hasn't been initialized yet, so go through runtime.
    __ Branch(&invalid_cache, eq, cache_entry, Operand(zero_reg));

#ifdef DEBUG
    // Check that the layout of cache elements matches expectations.
    { TranscendentalCache::SubCache::Element test_elem[2];
      char* elem_start = reinterpret_cast<char*>(&test_elem[0]);
      char* elem2_start = reinterpret_cast<char*>(&test_elem[1]);
      char* elem_in0 = reinterpret_cast<char*>(&(test_elem[0].in[0]));
      char* elem_in1 = reinterpret_cast<char*>(&(test_elem[0].in[1]));
      char* elem_out = reinterpret_cast<char*>(&(test_elem[0].output));
      CHECK_EQ(12, elem2_start - elem_start);  // Two uint_32's and a pointer.
      CHECK_EQ(0, elem_in0 - elem_start);
      CHECK_EQ(kIntSize, elem_in1 - elem_start);
      CHECK_EQ(2 * kIntSize, elem_out - elem_start);
    }
#endif

    // Find the address of the a1'st entry in the cache, i.e., &a0[a1*12].
    __ sll(t0, a1, 1);
    __ Addu(a1, a1, t0);
    __ sll(t0, a1, 2);
    __ Addu(cache_entry, cache_entry, t0);

    // Check if cache matches: Double value is stored in uint32_t[2] array.
    __ lw(t0, MemOperand(cache_entry, 0));
    __ lw(t1, MemOperand(cache_entry, 4));
    __ lw(t2, MemOperand(cache_entry, 8));
    __ Branch(&calculate, ne, a2, Operand(t0));
    __ Branch(&calculate, ne, a3, Operand(t1));
    // Cache hit. Load result, cleanup and return.
    Counters* counters = masm->isolate()->counters();
    __ IncrementCounter(
        counters->transcendental_cache_hit(), 1, scratch0, scratch1);
    if (tagged) {
      // Pop input value from stack and load result into v0.
      __ Drop(1);
      __ mov(v0, t2);
    } else {
      // Load result into f4.
3441 __ ldc1(f4, FieldMemOperand(t2, HeapNumber::kValueOffset)); 3442 } 3443 __ Ret(); 3444 } // if (CpuFeatures::IsSupported(FPU)) 3445 3446 __ bind(&calculate); 3447 Counters* counters = masm->isolate()->counters(); 3448 __ IncrementCounter( 3449 counters->transcendental_cache_miss(), 1, scratch0, scratch1); 3450 if (tagged) { 3451 __ bind(&invalid_cache); 3452 __ TailCallExternalReference(ExternalReference(RuntimeFunction(), 3453 masm->isolate()), 3454 1, 3455 1); 3456 } else { 3457 if (!CpuFeatures::IsSupported(FPU)) UNREACHABLE(); 3458 CpuFeatures::Scope scope(FPU); 3459 3460 Label no_update; 3461 Label skip_cache; 3462 const Register heap_number_map = t2; 3463 3464 // Call C function to calculate the result and update the cache. 3465 // Register a0 holds precalculated cache entry address; preserve 3466 // it on the stack and pop it into register cache_entry after the 3467 // call. 3468 __ Push(cache_entry, a2, a3); 3469 GenerateCallCFunction(masm, scratch0); 3470 __ GetCFunctionDoubleResult(f4); 3471 3472 // Try to update the cache. If we cannot allocate a 3473 // heap number, we return the result without updating. 3474 __ Pop(cache_entry, a2, a3); 3475 __ LoadRoot(t1, Heap::kHeapNumberMapRootIndex); 3476 __ AllocateHeapNumber(t2, scratch0, scratch1, t1, &no_update); 3477 __ sdc1(f4, FieldMemOperand(t2, HeapNumber::kValueOffset)); 3478 3479 __ sw(a2, MemOperand(cache_entry, 0 * kPointerSize)); 3480 __ sw(a3, MemOperand(cache_entry, 1 * kPointerSize)); 3481 __ sw(t2, MemOperand(cache_entry, 2 * kPointerSize)); 3482 3483 __ Ret(USE_DELAY_SLOT); 3484 __ mov(v0, cache_entry); 3485 3486 __ bind(&invalid_cache); 3487 // The cache is invalid. Call runtime which will recreate the 3488 // cache. 3489 __ LoadRoot(t1, Heap::kHeapNumberMapRootIndex); 3490 __ AllocateHeapNumber(a0, scratch0, scratch1, t1, &skip_cache); 3491 __ sdc1(f4, FieldMemOperand(a0, HeapNumber::kValueOffset)); 3492 { 3493 FrameScope scope(masm, StackFrame::INTERNAL); 3494 __ push(a0); 3495 __ CallRuntime(RuntimeFunction(), 1); 3496 } 3497 __ ldc1(f4, FieldMemOperand(v0, HeapNumber::kValueOffset)); 3498 __ Ret(); 3499 3500 __ bind(&skip_cache); 3501 // Call C function to calculate the result and answer directly 3502 // without updating the cache. 3503 GenerateCallCFunction(masm, scratch0); 3504 __ GetCFunctionDoubleResult(f4); 3505 __ bind(&no_update); 3506 3507 // We return the value in f4 without adding it to the cache, but 3508 // we cause a scavenging GC so that future allocations will succeed. 3509 { 3510 FrameScope scope(masm, StackFrame::INTERNAL); 3511 3512 // Allocate an aligned object larger than a HeapNumber. 
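// (The allocated object itself is discarded; requesting 4 * kPointerSize --
// 16 bytes on MIPS32, at least HeapNumber::kSize -- through the runtime
// forces a scavenge when new space is exhausted, so later attempts to cache
// results can allocate again.)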
3513 ASSERT(4 * kPointerSize >= HeapNumber::kSize); 3514 __ li(scratch0, Operand(4 * kPointerSize)); 3515 __ push(scratch0); 3516 __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace); 3517 } 3518 __ Ret(); 3519 } 3520} 3521 3522 3523void TranscendentalCacheStub::GenerateCallCFunction(MacroAssembler* masm, 3524 Register scratch) { 3525 __ push(ra); 3526 __ PrepareCallCFunction(2, scratch); 3527 if (IsMipsSoftFloatABI) { 3528 __ Move(a0, a1, f4); 3529 } else { 3530 __ mov_d(f12, f4); 3531 } 3532 AllowExternalCallThatCantCauseGC scope(masm); 3533 Isolate* isolate = masm->isolate(); 3534 switch (type_) { 3535 case TranscendentalCache::SIN: 3536 __ CallCFunction( 3537 ExternalReference::math_sin_double_function(isolate), 3538 0, 1); 3539 break; 3540 case TranscendentalCache::COS: 3541 __ CallCFunction( 3542 ExternalReference::math_cos_double_function(isolate), 3543 0, 1); 3544 break; 3545 case TranscendentalCache::TAN: 3546 __ CallCFunction(ExternalReference::math_tan_double_function(isolate), 3547 0, 1); 3548 break; 3549 case TranscendentalCache::LOG: 3550 __ CallCFunction( 3551 ExternalReference::math_log_double_function(isolate), 3552 0, 1); 3553 break; 3554 default: 3555 UNIMPLEMENTED(); 3556 break; 3557 } 3558 __ pop(ra); 3559} 3560 3561 3562Runtime::FunctionId TranscendentalCacheStub::RuntimeFunction() { 3563 switch (type_) { 3564 // Add more cases when necessary. 3565 case TranscendentalCache::SIN: return Runtime::kMath_sin; 3566 case TranscendentalCache::COS: return Runtime::kMath_cos; 3567 case TranscendentalCache::TAN: return Runtime::kMath_tan; 3568 case TranscendentalCache::LOG: return Runtime::kMath_log; 3569 default: 3570 UNIMPLEMENTED(); 3571 return Runtime::kAbort; 3572 } 3573} 3574 3575 3576void StackCheckStub::Generate(MacroAssembler* masm) { 3577 __ TailCallRuntime(Runtime::kStackGuard, 0, 1); 3578} 3579 3580 3581void InterruptStub::Generate(MacroAssembler* masm) { 3582 __ TailCallRuntime(Runtime::kInterrupt, 0, 1); 3583} 3584 3585 3586void MathPowStub::Generate(MacroAssembler* masm) { 3587 CpuFeatures::Scope fpu_scope(FPU); 3588 const Register base = a1; 3589 const Register exponent = a2; 3590 const Register heapnumbermap = t1; 3591 const Register heapnumber = v0; 3592 const DoubleRegister double_base = f2; 3593 const DoubleRegister double_exponent = f4; 3594 const DoubleRegister double_result = f0; 3595 const DoubleRegister double_scratch = f6; 3596 const FPURegister single_scratch = f8; 3597 const Register scratch = t5; 3598 const Register scratch2 = t3; 3599 3600 Label call_runtime, done, int_exponent; 3601 if (exponent_type_ == ON_STACK) { 3602 Label base_is_smi, unpack_exponent; 3603 // The exponent and base are supplied as arguments on the stack. 3604 // This can only happen if the stub is called from non-optimized code. 3605 // Load input parameters from stack to double registers. 
3606    __ lw(base, MemOperand(sp, 1 * kPointerSize));
3607    __ lw(exponent, MemOperand(sp, 0 * kPointerSize));
3608
3609    __ LoadRoot(heapnumbermap, Heap::kHeapNumberMapRootIndex);
3610
3611    __ UntagAndJumpIfSmi(scratch, base, &base_is_smi);
3612    __ lw(scratch, FieldMemOperand(base, JSObject::kMapOffset));
3613    __ Branch(&call_runtime, ne, scratch, Operand(heapnumbermap));
3614
3615    __ ldc1(double_base, FieldMemOperand(base, HeapNumber::kValueOffset));
3616    __ jmp(&unpack_exponent);
3617
3618    __ bind(&base_is_smi);
3619    __ mtc1(scratch, single_scratch);
3620    __ cvt_d_w(double_base, single_scratch);
3621    __ bind(&unpack_exponent);
3622
3623    __ UntagAndJumpIfSmi(scratch, exponent, &int_exponent);
3624
3625    __ lw(scratch, FieldMemOperand(exponent, JSObject::kMapOffset));
3626    __ Branch(&call_runtime, ne, scratch, Operand(heapnumbermap));
3627    __ ldc1(double_exponent,
3628            FieldMemOperand(exponent, HeapNumber::kValueOffset));
3629  } else if (exponent_type_ == TAGGED) {
3630    // Base is already in double_base.
3631    __ UntagAndJumpIfSmi(scratch, exponent, &int_exponent);
3632
3633    __ ldc1(double_exponent,
3634            FieldMemOperand(exponent, HeapNumber::kValueOffset));
3635  }
3636
3637  if (exponent_type_ != INTEGER) {
3638    Label int_exponent_convert;
3639    // Detect integer exponents stored as double.
3640    __ EmitFPUTruncate(kRoundToMinusInf,
3641                       single_scratch,
3642                       double_exponent,
3643                       scratch,
3644                       scratch2,
3645                       kCheckForInexactConversion);
3646    // scratch2 == 0 means there was no conversion error.
3647    __ Branch(&int_exponent_convert, eq, scratch2, Operand(zero_reg));
3648
3649    if (exponent_type_ == ON_STACK) {
3650      // Detect square root case. Crankshaft detects constant +/-0.5 at
3651      // compile time and uses DoMathPowHalf instead. We then skip this check
3652      // for non-constant cases of +/-0.5 as these hardly occur.
3653      Label not_plus_half;
3654
3655      // Test for 0.5.
3656      __ Move(double_scratch, 0.5);
3657      __ BranchF(USE_DELAY_SLOT,
3658                 &not_plus_half,
3659                 NULL,
3660                 ne,
3661                 double_exponent,
3662                 double_scratch);
3663      // double_scratch can be overwritten in the delay slot.
3664      // Calculates square root of base. Check for the special case of
3665      // Math.pow(-Infinity, 0.5) == Infinity (ECMA spec, 15.8.2.13).
3666      __ Move(double_scratch, -V8_INFINITY);
3667      __ BranchF(USE_DELAY_SLOT, &done, NULL, eq, double_base, double_scratch);
3668      __ neg_d(double_result, double_scratch);
3669
3670      // Add +0 to convert -0 to +0.
3671      __ add_d(double_scratch, double_base, kDoubleRegZero);
3672      __ sqrt_d(double_result, double_scratch);
3673      __ jmp(&done);
3674
3675      __ bind(&not_plus_half);
3676      __ Move(double_scratch, -0.5);
3677      __ BranchF(USE_DELAY_SLOT,
3678                 &call_runtime,
3679                 NULL,
3680                 ne,
3681                 double_exponent,
3682                 double_scratch);
3683      // double_scratch can be overwritten in the delay slot.
3684      // Calculates square root of base. Check for the special case of
3685      // Math.pow(-Infinity, -0.5) == 0 (ECMA spec, 15.8.2.13).
3686      __ Move(double_scratch, -V8_INFINITY);
3687      __ BranchF(USE_DELAY_SLOT, &done, NULL, eq, double_base, double_scratch);
3688      __ Move(double_result, kDoubleRegZero);
3689
3690      // Add +0 to convert -0 to +0.
3691 __ add_d(double_scratch, double_base, kDoubleRegZero); 3692 __ Move(double_result, 1); 3693 __ sqrt_d(double_scratch, double_scratch); 3694 __ div_d(double_result, double_result, double_scratch); 3695 __ jmp(&done); 3696 } 3697 3698 __ push(ra); 3699 { 3700 AllowExternalCallThatCantCauseGC scope(masm); 3701 __ PrepareCallCFunction(0, 2, scratch); 3702 __ SetCallCDoubleArguments(double_base, double_exponent); 3703 __ CallCFunction( 3704 ExternalReference::power_double_double_function(masm->isolate()), 3705 0, 2); 3706 } 3707 __ pop(ra); 3708 __ GetCFunctionDoubleResult(double_result); 3709 __ jmp(&done); 3710 3711 __ bind(&int_exponent_convert); 3712 __ mfc1(scratch, single_scratch); 3713 } 3714 3715 // Calculate power with integer exponent. 3716 __ bind(&int_exponent); 3717 3718 // Get two copies of exponent in the registers scratch and exponent. 3719 if (exponent_type_ == INTEGER) { 3720 __ mov(scratch, exponent); 3721 } else { 3722 // Exponent has previously been stored into scratch as untagged integer. 3723 __ mov(exponent, scratch); 3724 } 3725 3726 __ mov_d(double_scratch, double_base); // Back up base. 3727 __ Move(double_result, 1.0); 3728 3729 // Get absolute value of exponent. 3730 Label positive_exponent; 3731 __ Branch(&positive_exponent, ge, scratch, Operand(zero_reg)); 3732 __ Subu(scratch, zero_reg, scratch); 3733 __ bind(&positive_exponent); 3734 3735 Label while_true, no_carry, loop_end; 3736 __ bind(&while_true); 3737 3738 __ And(scratch2, scratch, 1); 3739 3740 __ Branch(&no_carry, eq, scratch2, Operand(zero_reg)); 3741 __ mul_d(double_result, double_result, double_scratch); 3742 __ bind(&no_carry); 3743 3744 __ sra(scratch, scratch, 1); 3745 3746 __ Branch(&loop_end, eq, scratch, Operand(zero_reg)); 3747 __ mul_d(double_scratch, double_scratch, double_scratch); 3748 3749 __ Branch(&while_true); 3750 3751 __ bind(&loop_end); 3752 3753 __ Branch(&done, ge, exponent, Operand(zero_reg)); 3754 __ Move(double_scratch, 1.0); 3755 __ div_d(double_result, double_scratch, double_result); 3756 // Test whether result is zero. Bail out to check for subnormal result. 3757 // Due to subnormals, x^-y == (1/x)^y does not hold in all cases. 3758 __ BranchF(&done, NULL, ne, double_result, kDoubleRegZero); 3759 3760 // double_exponent may not contain the exponent value if the input was a 3761 // smi. We set it with exponent value before bailing out. 3762 __ mtc1(exponent, single_scratch); 3763 __ cvt_d_w(double_exponent, single_scratch); 3764 3765 // Returning or bailing out. 3766 Counters* counters = masm->isolate()->counters(); 3767 if (exponent_type_ == ON_STACK) { 3768 // The arguments are still on the stack. 3769 __ bind(&call_runtime); 3770 __ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1); 3771 3772 // The stub is called from non-optimized code, which expects the result 3773 // as heap number in exponent. 
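// (The while_true loop above is standard binary exponentiation. A rough C
// sketch, assuming e already holds the absolute value of the exponent as
// computed above:
//   double result = 1.0, b = base;
//   while (true) {
//     if (e & 1) result *= b;    // low bit set: multiply into result
//     e >>= 1;
//     if (e == 0) break;
//     b *= b;                    // square the base for the next bit
//   }
//   if (exponent < 0) result = 1.0 / result;  // with a subnormal bailout
// For e == 5 (binary 101) this multiplies in b and b^4, yielding b^5.)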
3774 __ bind(&done); 3775 __ AllocateHeapNumber( 3776 heapnumber, scratch, scratch2, heapnumbermap, &call_runtime); 3777 __ sdc1(double_result, 3778 FieldMemOperand(heapnumber, HeapNumber::kValueOffset)); 3779 ASSERT(heapnumber.is(v0)); 3780 __ IncrementCounter(counters->math_pow(), 1, scratch, scratch2); 3781 __ DropAndRet(2); 3782 } else { 3783 __ push(ra); 3784 { 3785 AllowExternalCallThatCantCauseGC scope(masm); 3786 __ PrepareCallCFunction(0, 2, scratch); 3787 __ SetCallCDoubleArguments(double_base, double_exponent); 3788 __ CallCFunction( 3789 ExternalReference::power_double_double_function(masm->isolate()), 3790 0, 2); 3791 } 3792 __ pop(ra); 3793 __ GetCFunctionDoubleResult(double_result); 3794 3795 __ bind(&done); 3796 __ IncrementCounter(counters->math_pow(), 1, scratch, scratch2); 3797 __ Ret(); 3798 } 3799} 3800 3801 3802bool CEntryStub::NeedsImmovableCode() { 3803 return true; 3804} 3805 3806 3807bool CEntryStub::IsPregenerated() { 3808 return (!save_doubles_ || ISOLATE->fp_stubs_generated()) && 3809 result_size_ == 1; 3810} 3811 3812 3813void CodeStub::GenerateStubsAheadOfTime() { 3814 CEntryStub::GenerateAheadOfTime(); 3815 WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime(); 3816 StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(); 3817 RecordWriteStub::GenerateFixedRegStubsAheadOfTime(); 3818} 3819 3820 3821void CodeStub::GenerateFPStubs() { 3822 CEntryStub save_doubles(1, kSaveFPRegs); 3823 Handle<Code> code = save_doubles.GetCode(); 3824 code->set_is_pregenerated(true); 3825 StoreBufferOverflowStub stub(kSaveFPRegs); 3826 stub.GetCode()->set_is_pregenerated(true); 3827 code->GetIsolate()->set_fp_stubs_generated(true); 3828} 3829 3830 3831void CEntryStub::GenerateAheadOfTime() { 3832 CEntryStub stub(1, kDontSaveFPRegs); 3833 Handle<Code> code = stub.GetCode(); 3834 code->set_is_pregenerated(true); 3835} 3836 3837 3838void CEntryStub::GenerateCore(MacroAssembler* masm, 3839 Label* throw_normal_exception, 3840 Label* throw_termination_exception, 3841 Label* throw_out_of_memory_exception, 3842 bool do_gc, 3843 bool always_allocate) { 3844 // v0: result parameter for PerformGC, if any 3845 // s0: number of arguments including receiver (C callee-saved) 3846 // s1: pointer to the first argument (C callee-saved) 3847 // s2: pointer to builtin function (C callee-saved) 3848 3849 Isolate* isolate = masm->isolate(); 3850 3851 if (do_gc) { 3852 // Move result passed in v0 into a0 to call PerformGC. 3853 __ mov(a0, v0); 3854 __ PrepareCallCFunction(1, 0, a1); 3855 __ CallCFunction(ExternalReference::perform_gc_function(isolate), 1, 0); 3856 } 3857 3858 ExternalReference scope_depth = 3859 ExternalReference::heap_always_allocate_scope_depth(isolate); 3860 if (always_allocate) { 3861 __ li(a0, Operand(scope_depth)); 3862 __ lw(a1, MemOperand(a0)); 3863 __ Addu(a1, a1, Operand(1)); 3864 __ sw(a1, MemOperand(a0)); 3865 } 3866 3867 // Prepare arguments for C routine. 3868 // a0 = argc 3869 __ mov(a0, s0); 3870 // a1 = argv (set in the delay slot after find_ra below). 3871 3872 // We are calling compiled C/C++ code. a0 and a1 hold our two arguments. We 3873 // also need to reserve the 4 argument slots on the stack. 3874 3875 __ AssertStackIsAligned(); 3876 3877 __ li(a2, Operand(ExternalReference::isolate_address())); 3878 3879 // To let the GC traverse the return address of the exit frames, we need to 3880 // know where the return address is. 
The CEntryStub is unmovable, so 3881 // we can store the address on the stack to be able to find it again and 3882 // we never have to restore it, because it will not change. 3883 { Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm); 3884 // This branch-and-link sequence is needed to find the current PC on mips, 3885 // saved to the ra register. 3886 // Use masm-> here instead of the double-underscore macro since extra 3887 // coverage code can interfere with the proper calculation of ra. 3888 Label find_ra; 3889 masm->bal(&find_ra); // bal exposes branch delay slot. 3890 masm->mov(a1, s1); 3891 masm->bind(&find_ra); 3892 3893 // Adjust the value in ra to point to the correct return location, 2nd 3894 // instruction past the real call into C code (the jalr(t9)), and push it. 3895 // This is the return address of the exit frame. 3896 const int kNumInstructionsToJump = 5; 3897 masm->Addu(ra, ra, kNumInstructionsToJump * kPointerSize); 3898 masm->sw(ra, MemOperand(sp)); // This spot was reserved in EnterExitFrame. 3899 // Stack space reservation moved to the branch delay slot below. 3900 // Stack is still aligned. 3901 3902 // Call the C routine. 3903 masm->mov(t9, s2); // Function pointer to t9 to conform to ABI for PIC. 3904 masm->jalr(t9); 3905 // Set up sp in the delay slot. 3906 masm->addiu(sp, sp, -kCArgsSlotsSize); 3907 // Make sure the stored 'ra' points to this position. 3908 ASSERT_EQ(kNumInstructionsToJump, 3909 masm->InstructionsGeneratedSince(&find_ra)); 3910 } 3911 3912 if (always_allocate) { 3913 // It's okay to clobber a2 and a3 here. v0 & v1 contain result. 3914 __ li(a2, Operand(scope_depth)); 3915 __ lw(a3, MemOperand(a2)); 3916 __ Subu(a3, a3, Operand(1)); 3917 __ sw(a3, MemOperand(a2)); 3918 } 3919 3920 // Check for failure result. 3921 Label failure_returned; 3922 STATIC_ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0); 3923 __ addiu(a2, v0, 1); 3924 __ andi(t0, a2, kFailureTagMask); 3925 __ Branch(USE_DELAY_SLOT, &failure_returned, eq, t0, Operand(zero_reg)); 3926 // Restore stack (remove arg slots) in branch delay slot. 3927 __ addiu(sp, sp, kCArgsSlotsSize); 3928 3929 3930 // Exit C frame and return. 3931 // v0:v1: result 3932 // sp: stack pointer 3933 // fp: frame pointer 3934 __ LeaveExitFrame(save_doubles_, s0, true); 3935 3936 // Check if we should retry or throw exception. 3937 Label retry; 3938 __ bind(&failure_returned); 3939 STATIC_ASSERT(Failure::RETRY_AFTER_GC == 0); 3940 __ andi(t0, v0, ((1 << kFailureTypeTagSize) - 1) << kFailureTagSize); 3941 __ Branch(&retry, eq, t0, Operand(zero_reg)); 3942 3943 // Special handling of out of memory exceptions. 3944 Failure* out_of_memory = Failure::OutOfMemoryException(); 3945 __ Branch(USE_DELAY_SLOT, 3946 throw_out_of_memory_exception, 3947 eq, 3948 v0, 3949 Operand(reinterpret_cast<int32_t>(out_of_memory))); 3950 // If we throw the OOM exception, the value of a3 doesn't matter. 3951 // Any instruction can be in the delay slot that's not a jump. 3952 3953 // Retrieve the pending exception and clear the variable. 3954 __ LoadRoot(a3, Heap::kTheHoleValueRootIndex); 3955 __ li(t0, Operand(ExternalReference(Isolate::kPendingExceptionAddress, 3956 isolate))); 3957 __ lw(v0, MemOperand(t0)); 3958 __ sw(a3, MemOperand(t0)); 3959 3960 // Special handling of termination exceptions which are uncatchable 3961 // by javascript code. 3962 __ LoadRoot(t0, Heap::kTerminationExceptionRootIndex); 3963 __ Branch(throw_termination_exception, eq, v0, Operand(t0)); 3964 3965 // Handle normal exception. 
3966 __ jmp(throw_normal_exception); 3967 3968 __ bind(&retry); 3969 // Last failure (v0) will be moved to (a0) for parameter when retrying. 3970} 3971 3972 3973void CEntryStub::Generate(MacroAssembler* masm) { 3974 // Called from JavaScript; parameters are on stack as if calling JS function 3975 // s0: number of arguments including receiver 3976 // s1: size of arguments excluding receiver 3977 // s2: pointer to builtin function 3978 // fp: frame pointer (restored after C call) 3979 // sp: stack pointer (restored as callee's sp after C call) 3980 // cp: current context (C callee-saved) 3981 3982 // NOTE: Invocations of builtins may return failure objects 3983 // instead of a proper result. The builtin entry handles 3984 // this by performing a garbage collection and retrying the 3985 // builtin once. 3986 3987 // NOTE: s0-s2 hold the arguments of this function instead of a0-a2. 3988 // The reason for this is that these arguments would need to be saved anyway 3989 // so it's faster to set them up directly. 3990 // See MacroAssembler::PrepareCEntryArgs and PrepareCEntryFunction. 3991 3992 // Compute the argv pointer in a callee-saved register. 3993 __ Addu(s1, sp, s1); 3994 3995 // Enter the exit frame that transitions from JavaScript to C++. 3996 FrameScope scope(masm, StackFrame::MANUAL); 3997 __ EnterExitFrame(save_doubles_); 3998 3999 // s0: number of arguments (C callee-saved) 4000 // s1: pointer to first argument (C callee-saved) 4001 // s2: pointer to builtin function (C callee-saved) 4002 4003 Label throw_normal_exception; 4004 Label throw_termination_exception; 4005 Label throw_out_of_memory_exception; 4006 4007 // Call into the runtime system. 4008 GenerateCore(masm, 4009 &throw_normal_exception, 4010 &throw_termination_exception, 4011 &throw_out_of_memory_exception, 4012 false, 4013 false); 4014 4015 // Do space-specific GC and retry runtime call. 4016 GenerateCore(masm, 4017 &throw_normal_exception, 4018 &throw_termination_exception, 4019 &throw_out_of_memory_exception, 4020 true, 4021 false); 4022 4023 // Do full GC and retry runtime call one final time. 4024 Failure* failure = Failure::InternalError(); 4025 __ li(v0, Operand(reinterpret_cast<int32_t>(failure))); 4026 GenerateCore(masm, 4027 &throw_normal_exception, 4028 &throw_termination_exception, 4029 &throw_out_of_memory_exception, 4030 true, 4031 true); 4032 4033 __ bind(&throw_out_of_memory_exception); 4034 // Set external caught exception to false. 4035 Isolate* isolate = masm->isolate(); 4036 ExternalReference external_caught(Isolate::kExternalCaughtExceptionAddress, 4037 isolate); 4038 __ li(a0, Operand(false, RelocInfo::NONE)); 4039 __ li(a2, Operand(external_caught)); 4040 __ sw(a0, MemOperand(a2)); 4041 4042 // Set pending exception and v0 to out of memory exception. 4043 Failure* out_of_memory = Failure::OutOfMemoryException(); 4044 __ li(v0, Operand(reinterpret_cast<int32_t>(out_of_memory))); 4045 __ li(a2, Operand(ExternalReference(Isolate::kPendingExceptionAddress, 4046 isolate))); 4047 __ sw(v0, MemOperand(a2)); 4048 // Fall through to the next label. 
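// (At this point the OOM failure has been stored as the pending exception,
// so control falls through into the uncatchable-throw path below. The three
// GenerateCore calls above escalate: a plain call, a retry after a GC of
// the failed space, and a final retry with a full GC and always_allocate.)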
4049 4050 __ bind(&throw_termination_exception); 4051 __ ThrowUncatchable(v0); 4052 4053 __ bind(&throw_normal_exception); 4054 __ Throw(v0); 4055} 4056 4057 4058void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) { 4059 Label invoke, handler_entry, exit; 4060 Isolate* isolate = masm->isolate(); 4061 4062 // Registers: 4063 // a0: entry address 4064 // a1: function 4065 // a2: receiver 4066 // a3: argc 4067 // 4068 // Stack: 4069 // 4 args slots 4070 // args 4071 4072 // Save callee saved registers on the stack. 4073 __ MultiPush(kCalleeSaved | ra.bit()); 4074 4075 if (CpuFeatures::IsSupported(FPU)) { 4076 CpuFeatures::Scope scope(FPU); 4077 // Save callee-saved FPU registers. 4078 __ MultiPushFPU(kCalleeSavedFPU); 4079 // Set up the reserved register for 0.0. 4080 __ Move(kDoubleRegZero, 0.0); 4081 } 4082 4083 4084 // Load argv in s0 register. 4085 int offset_to_argv = (kNumCalleeSaved + 1) * kPointerSize; 4086 if (CpuFeatures::IsSupported(FPU)) { 4087 offset_to_argv += kNumCalleeSavedFPU * kDoubleSize; 4088 } 4089 4090 __ InitializeRootRegister(); 4091 __ lw(s0, MemOperand(sp, offset_to_argv + kCArgsSlotsSize)); 4092 4093 // We build an EntryFrame. 4094 __ li(t3, Operand(-1)); // Push a bad frame pointer to fail if it is used. 4095 int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY; 4096 __ li(t2, Operand(Smi::FromInt(marker))); 4097 __ li(t1, Operand(Smi::FromInt(marker))); 4098 __ li(t0, Operand(ExternalReference(Isolate::kCEntryFPAddress, 4099 isolate))); 4100 __ lw(t0, MemOperand(t0)); 4101 __ Push(t3, t2, t1, t0); 4102 // Set up frame pointer for the frame to be pushed. 4103 __ addiu(fp, sp, -EntryFrameConstants::kCallerFPOffset); 4104 4105 // Registers: 4106 // a0: entry_address 4107 // a1: function 4108 // a2: receiver_pointer 4109 // a3: argc 4110 // s0: argv 4111 // 4112 // Stack: 4113 // caller fp | 4114 // function slot | entry frame 4115 // context slot | 4116 // bad fp (0xff...f) | 4117 // callee saved registers + ra 4118 // 4 args slots 4119 // args 4120 4121 // If this is the outermost JS call, set js_entry_sp value. 4122 Label non_outermost_js; 4123 ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress, isolate); 4124 __ li(t1, Operand(ExternalReference(js_entry_sp))); 4125 __ lw(t2, MemOperand(t1)); 4126 __ Branch(&non_outermost_js, ne, t2, Operand(zero_reg)); 4127 __ sw(fp, MemOperand(t1)); 4128 __ li(t0, Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME))); 4129 Label cont; 4130 __ b(&cont); 4131 __ nop(); // Branch delay slot nop. 4132 __ bind(&non_outermost_js); 4133 __ li(t0, Operand(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME))); 4134 __ bind(&cont); 4135 __ push(t0); 4136 4137 // Jump to a faked try block that does the invoke, with a faked catch 4138 // block that sets the pending exception. 4139 __ jmp(&invoke); 4140 __ bind(&handler_entry); 4141 handler_offset_ = handler_entry.pos(); 4142 // Caught exception: Store result (exception) in the pending exception 4143 // field in the JSEnv and return a failure sentinel. Coming in here the 4144 // fp will be invalid because the PushTryHandler below sets it to 0 to 4145 // signal the existence of the JSEntry frame. 4146 __ li(t0, Operand(ExternalReference(Isolate::kPendingExceptionAddress, 4147 isolate))); 4148 __ sw(v0, MemOperand(t0)); // We come back from 'invoke'. result is in v0. 4149 __ li(v0, Operand(reinterpret_cast<int32_t>(Failure::Exception()))); 4150 __ b(&exit); // b exposes branch delay slot. 4151 __ nop(); // Branch delay slot nop. 
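// (Conceptually, handler_entry above and the invoke sequence below behave
// like this loose pseudo-C++ sketch:
//   try { result = trampoline(entry, function, receiver, argc, argv); }
//   catch (e) { pending_exception = e; return Failure::Exception(); }
// with the real handler chain maintained by PushTryHandler/PopTryHandler.)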
4152 4153 // Invoke: Link this frame into the handler chain. There's only one 4154 // handler block in this code object, so its index is 0. 4155 __ bind(&invoke); 4156 __ PushTryHandler(StackHandler::JS_ENTRY, 0); 4157 // If an exception not caught by another handler occurs, this handler 4158 // returns control to the code after the bal(&invoke) above, which 4159 // restores all kCalleeSaved registers (including cp and fp) to their 4160 // saved values before returning a failure to C. 4161 4162 // Clear any pending exceptions. 4163 __ LoadRoot(t1, Heap::kTheHoleValueRootIndex); 4164 __ li(t0, Operand(ExternalReference(Isolate::kPendingExceptionAddress, 4165 isolate))); 4166 __ sw(t1, MemOperand(t0)); 4167 4168 // Invoke the function by calling through JS entry trampoline builtin. 4169 // Notice that we cannot store a reference to the trampoline code directly in 4170 // this stub, because runtime stubs are not traversed when doing GC. 4171 4172 // Registers: 4173 // a0: entry_address 4174 // a1: function 4175 // a2: receiver_pointer 4176 // a3: argc 4177 // s0: argv 4178 // 4179 // Stack: 4180 // handler frame 4181 // entry frame 4182 // callee saved registers + ra 4183 // 4 args slots 4184 // args 4185 4186 if (is_construct) { 4187 ExternalReference construct_entry(Builtins::kJSConstructEntryTrampoline, 4188 isolate); 4189 __ li(t0, Operand(construct_entry)); 4190 } else { 4191 ExternalReference entry(Builtins::kJSEntryTrampoline, masm->isolate()); 4192 __ li(t0, Operand(entry)); 4193 } 4194 __ lw(t9, MemOperand(t0)); // Deref address. 4195 4196 // Call JSEntryTrampoline. 4197 __ addiu(t9, t9, Code::kHeaderSize - kHeapObjectTag); 4198 __ Call(t9); 4199 4200 // Unlink this frame from the handler chain. 4201 __ PopTryHandler(); 4202 4203 __ bind(&exit); // v0 holds result 4204 // Check if the current stack frame is marked as the outermost JS frame. 4205 Label non_outermost_js_2; 4206 __ pop(t1); 4207 __ Branch(&non_outermost_js_2, 4208 ne, 4209 t1, 4210 Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME))); 4211 __ li(t1, Operand(ExternalReference(js_entry_sp))); 4212 __ sw(zero_reg, MemOperand(t1)); 4213 __ bind(&non_outermost_js_2); 4214 4215 // Restore the top frame descriptors from the stack. 4216 __ pop(t1); 4217 __ li(t0, Operand(ExternalReference(Isolate::kCEntryFPAddress, 4218 isolate))); 4219 __ sw(t1, MemOperand(t0)); 4220 4221 // Reset the stack to the callee saved registers. 4222 __ addiu(sp, sp, -EntryFrameConstants::kCallerFPOffset); 4223 4224 if (CpuFeatures::IsSupported(FPU)) { 4225 CpuFeatures::Scope scope(FPU); 4226 // Restore callee-saved fpu registers. 4227 __ MultiPopFPU(kCalleeSavedFPU); 4228 } 4229 4230 // Restore callee saved registers from the stack. 4231 __ MultiPop(kCalleeSaved | ra.bit()); 4232 // Return. 4233 __ Jump(ra); 4234} 4235 4236 4237// Uses registers a0 to t0. 4238// Expected input (depending on whether args are in registers or on the stack): 4239// * object: a0 or at sp + 1 * kPointerSize. 4240// * function: a1 or at sp. 4241// 4242// An inlined call site may have been generated before calling this stub. 4243// In this case the offset to the inline site to patch is passed on the stack, 4244// in the safepoint slot for register t0. 4245void InstanceofStub::Generate(MacroAssembler* masm) { 4246 // Call site inlining and patching implies arguments in registers. 4247 ASSERT(HasArgsInRegisters() || !HasCallSiteInlineCheck()); 4248 // ReturnTrueFalse is only implemented for inlined call sites. 
4249  ASSERT(!ReturnTrueFalseObject() || HasCallSiteInlineCheck());
4250
4251  // Fixed register usage throughout the stub:
4252  const Register object = a0;  // Object (lhs).
4253  Register map = a3;  // Map of the object.
4254  const Register function = a1;  // Function (rhs).
4255  const Register prototype = t0;  // Prototype of the function.
4256  const Register inline_site = t5;
4257  const Register scratch = a2;
4258
4259  const int32_t kDeltaToLoadBoolResult = 5 * kPointerSize;
4260
4261  Label slow, loop, is_instance, is_not_instance, not_js_object;
4262
4263  if (!HasArgsInRegisters()) {
4264    __ lw(object, MemOperand(sp, 1 * kPointerSize));
4265    __ lw(function, MemOperand(sp, 0));
4266  }
4267
4268  // Check that the left-hand side is a JS object and load its map.
4269  __ JumpIfSmi(object, &not_js_object);
4270  __ IsObjectJSObjectType(object, map, scratch, &not_js_object);
4271
4272  // If there is a call site cache don't look in the global cache, but do the
4273  // real lookup and update the call site cache.
4274  if (!HasCallSiteInlineCheck()) {
4275    Label miss;
4276    __ LoadRoot(at, Heap::kInstanceofCacheFunctionRootIndex);
4277    __ Branch(&miss, ne, function, Operand(at));
4278    __ LoadRoot(at, Heap::kInstanceofCacheMapRootIndex);
4279    __ Branch(&miss, ne, map, Operand(at));
4280    __ LoadRoot(v0, Heap::kInstanceofCacheAnswerRootIndex);
4281    __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
4282
4283    __ bind(&miss);
4284  }
4285
4286  // Get the prototype of the function.
4287  __ TryGetFunctionPrototype(function, prototype, scratch, &slow, true);
4288
4289  // Check that the function prototype is a JS object.
4290  __ JumpIfSmi(prototype, &slow);
4291  __ IsObjectJSObjectType(prototype, scratch, scratch, &slow);
4292
4293  // Update the global instanceof or call site inlined cache with the current
4294  // map and function. The cached answer will be set when it is known below.
4295  if (!HasCallSiteInlineCheck()) {
4296    __ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
4297    __ StoreRoot(map, Heap::kInstanceofCacheMapRootIndex);
4298  } else {
4299    ASSERT(HasArgsInRegisters());
4300    // Patch the (relocated) inlined map check.
4301
4302    // The offset was stored in t0 safepoint slot.
4303    // (See LCodeGen::DoDeferredLInstanceOfKnownGlobal).
4304    __ LoadFromSafepointRegisterSlot(scratch, t0);
4305    __ Subu(inline_site, ra, scratch);
4306    // Get the map location in scratch and patch it.
4307    __ GetRelocatedValue(inline_site, scratch, v1);  // v1 used as scratch.
4308    __ sw(map, FieldMemOperand(scratch, JSGlobalPropertyCell::kValueOffset));
4309  }
4310
4311  // Register mapping: a3 is object map and t0 is function prototype.
4312  // Get prototype of object into a2.
4313  __ lw(scratch, FieldMemOperand(map, Map::kPrototypeOffset));
4314
4315  // We don't need map any more. Use it as a scratch register.
4316  Register scratch2 = map;
4317  map = no_reg;
4318
4319  // Loop through the prototype chain looking for the function prototype.
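// (A loose pseudo-C++ sketch of the loop below:
//   for (Object* p = object->map()->prototype();; p = p->map()->prototype()) {
//     if (p == prototype) goto is_instance;
//     if (p == null) goto is_not_instance;
//   }
// null terminates every prototype chain, so the loop cannot run off the end.)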
4320  __ LoadRoot(scratch2, Heap::kNullValueRootIndex);
4321  __ bind(&loop);
4322  __ Branch(&is_instance, eq, scratch, Operand(prototype));
4323  __ Branch(&is_not_instance, eq, scratch, Operand(scratch2));
4324  __ lw(scratch, FieldMemOperand(scratch, HeapObject::kMapOffset));
4325  __ lw(scratch, FieldMemOperand(scratch, Map::kPrototypeOffset));
4326  __ Branch(&loop);
4327
4328  __ bind(&is_instance);
4329  ASSERT(Smi::FromInt(0) == 0);
4330  if (!HasCallSiteInlineCheck()) {
4331    __ mov(v0, zero_reg);
4332    __ StoreRoot(v0, Heap::kInstanceofCacheAnswerRootIndex);
4333  } else {
4334    // Patch the call site to return true.
4335    __ LoadRoot(v0, Heap::kTrueValueRootIndex);
4336    __ Addu(inline_site, inline_site, Operand(kDeltaToLoadBoolResult));
4337    // Get the boolean result location in scratch and patch it.
4338    __ PatchRelocatedValue(inline_site, scratch, v0);
4339
4340    if (!ReturnTrueFalseObject()) {
4341      ASSERT_EQ(Smi::FromInt(0), 0);
4342      __ mov(v0, zero_reg);
4343    }
4344  }
4345  __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
4346
4347  __ bind(&is_not_instance);
4348  if (!HasCallSiteInlineCheck()) {
4349    __ li(v0, Operand(Smi::FromInt(1)));
4350    __ StoreRoot(v0, Heap::kInstanceofCacheAnswerRootIndex);
4351  } else {
4352    // Patch the call site to return false.
4353    __ LoadRoot(v0, Heap::kFalseValueRootIndex);
4354    __ Addu(inline_site, inline_site, Operand(kDeltaToLoadBoolResult));
4355    // Get the boolean result location in scratch and patch it.
4356    __ PatchRelocatedValue(inline_site, scratch, v0);
4357
4358    if (!ReturnTrueFalseObject()) {
4359      __ li(v0, Operand(Smi::FromInt(1)));
4360    }
4361  }
4362
4363  __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
4364
4365  Label object_not_null, object_not_null_or_smi;
4366  __ bind(&not_js_object);
4367  // Before the null, smi and string value checks, check that the rhs is a
4368  // function, since for a non-function rhs an exception needs to be thrown.
4369  __ JumpIfSmi(function, &slow);
4370  __ GetObjectType(function, scratch2, scratch);
4371  __ Branch(&slow, ne, scratch, Operand(JS_FUNCTION_TYPE));
4372
4373  // Null is not an instance of anything.
4374  __ Branch(&object_not_null,
4375            ne,
4376            scratch,
4377            Operand(masm->isolate()->factory()->null_value()));
4378  __ li(v0, Operand(Smi::FromInt(1)));
4379  __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
4380
4381  __ bind(&object_not_null);
4382  // Smi values are not instances of anything.
4383  __ JumpIfNotSmi(object, &object_not_null_or_smi);
4384  __ li(v0, Operand(Smi::FromInt(1)));
4385  __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
4386
4387  __ bind(&object_not_null_or_smi);
4388  // String values are not instances of anything.
4389  __ IsObjectJSStringType(object, scratch, &slow);
4390  __ li(v0, Operand(Smi::FromInt(1)));
4391  __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
4392
4393  // Slow-case. Tail call builtin.
4394  __ bind(&slow);
4395  if (!ReturnTrueFalseObject()) {
4396    if (HasArgsInRegisters()) {
4397      __ Push(a0, a1);
4398    }
4399    __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
4400  } else {
4401    {
4402      FrameScope scope(masm, StackFrame::INTERNAL);
4403      __ Push(a0, a1);
4404      __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION);
4405    }
4406    __ mov(a0, v0);
4407    __ LoadRoot(v0, Heap::kTrueValueRootIndex);
4408    __ DropAndRet(HasArgsInRegisters() ? 0 : 2, eq, a0, Operand(zero_reg));
4409    __ LoadRoot(v0, Heap::kFalseValueRootIndex);
4410    __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
4411  }
4412}
4413
4414
4415Register InstanceofStub::left() { return a0; }
4416
4417
4418Register InstanceofStub::right() { return a1; }
4419
4420
4421void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
4422  // The displacement is the offset of the last parameter (if any)
4423  // relative to the frame pointer.
4424  const int kDisplacement =
4425      StandardFrameConstants::kCallerSPOffset - kPointerSize;
4426
4427  // Check that the key is a smi.
4428  Label slow;
4429  __ JumpIfNotSmi(a1, &slow);
4430
4431  // Check if the calling frame is an arguments adaptor frame.
4432  Label adaptor;
4433  __ lw(a2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
4434  __ lw(a3, MemOperand(a2, StandardFrameConstants::kContextOffset));
4435  __ Branch(&adaptor,
4436            eq,
4437            a3,
4438            Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
4439
4440  // Check index (a1) against formal parameters count limit passed in
4441  // through register a0. Use unsigned comparison to get negative
4442  // check for free.
4443  __ Branch(&slow, hs, a1, Operand(a0));
4444
4445  // Read the argument from the stack and return it.
4446  __ subu(a3, a0, a1);
4447  __ sll(t3, a3, kPointerSizeLog2 - kSmiTagSize);
4448  __ Addu(a3, fp, Operand(t3));
4449  __ lw(v0, MemOperand(a3, kDisplacement));
4450  __ Ret();
4451
4452  // Arguments adaptor case: Check index (a1) against actual arguments
4453  // limit found in the arguments adaptor frame. Use unsigned
4454  // comparison to get negative check for free.
4455  __ bind(&adaptor);
4456  __ lw(a0, MemOperand(a2, ArgumentsAdaptorFrameConstants::kLengthOffset));
4457  __ Branch(&slow, Ugreater_equal, a1, Operand(a0));
4458
4459  // Read the argument from the adaptor frame and return it.
4460  __ subu(a3, a0, a1);
4461  __ sll(t3, a3, kPointerSizeLog2 - kSmiTagSize);
4462  __ Addu(a3, a2, Operand(t3));
4463  __ lw(v0, MemOperand(a3, kDisplacement));
4464  __ Ret();
4465
4466  // Slow-case: Handle non-smi or out-of-bounds access to arguments
4467  // by calling the runtime system.
4468  __ bind(&slow);
4469  __ push(a1);
4470  __ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1);
4471}
4472
4473
4474void ArgumentsAccessStub::GenerateNewNonStrictSlow(MacroAssembler* masm) {
4475  // sp[0] : number of parameters
4476  // sp[4] : receiver displacement
4477  // sp[8] : function
4478  // Check if the calling frame is an arguments adaptor frame.
4479  Label runtime;
4480  __ lw(a3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
4481  __ lw(a2, MemOperand(a3, StandardFrameConstants::kContextOffset));
4482  __ Branch(&runtime,
4483            ne,
4484            a2,
4485            Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
4486
4487  // Patch the arguments.length and the parameters pointer in the current frame.
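// (Address arithmetic note: the adaptor frame's length slot holds a smi,
// i.e. n << 1 for n arguments; shifting it left once more gives n * 4 ==
// n * kPointerSize, so a3 below becomes caller_fp + n * kPointerSize +
// kCallerSPOffset, the patched parameters pointer stored at sp[4].)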
4488  __ lw(a2, MemOperand(a3, ArgumentsAdaptorFrameConstants::kLengthOffset));
4489  __ sw(a2, MemOperand(sp, 0 * kPointerSize));
4490  __ sll(t3, a2, 1);
4491  __ Addu(a3, a3, Operand(t3));
4492  __ addiu(a3, a3, StandardFrameConstants::kCallerSPOffset);
4493  __ sw(a3, MemOperand(sp, 1 * kPointerSize));
4494
4495  __ bind(&runtime);
4496  __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
4497}
4498
4499
4500void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
4501  // Stack layout:
4502  //  sp[0] : number of parameters (tagged)
4503  //  sp[4] : address of receiver argument
4504  //  sp[8] : function
4505  // Registers used over whole function:
4506  //  t2 : allocated object (tagged)
4507  //  t5 : mapped parameter count (tagged)
4508
4509  __ lw(a1, MemOperand(sp, 0 * kPointerSize));
4510  // a1 = parameter count (tagged)
4511
4512  // Check if the calling frame is an arguments adaptor frame.
4513  Label runtime;
4514  Label adaptor_frame, try_allocate;
4515  __ lw(a3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
4516  __ lw(a2, MemOperand(a3, StandardFrameConstants::kContextOffset));
4517  __ Branch(&adaptor_frame,
4518            eq,
4519            a2,
4520            Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
4521
4522  // No adaptor, parameter count = argument count.
4523  __ mov(a2, a1);
4524  __ b(&try_allocate);
4525  __ nop();   // Branch delay slot nop.
4526
4527  // We have an adaptor frame. Patch the parameters pointer.
4528  __ bind(&adaptor_frame);
4529  __ lw(a2, MemOperand(a3, ArgumentsAdaptorFrameConstants::kLengthOffset));
4530  __ sll(t6, a2, 1);
4531  __ Addu(a3, a3, Operand(t6));
4532  __ Addu(a3, a3, Operand(StandardFrameConstants::kCallerSPOffset));
4533  __ sw(a3, MemOperand(sp, 1 * kPointerSize));
4534
4535  // a1 = parameter count (tagged)
4536  // a2 = argument count (tagged)
4537  // Compute the mapped parameter count = min(a1, a2) in a1.
4538  Label skip_min;
4539  __ Branch(&skip_min, lt, a1, Operand(a2));
4540  __ mov(a1, a2);
4541  __ bind(&skip_min);
4542
4543  __ bind(&try_allocate);
4544
4545  // Compute the sizes of backing store, parameter map, and arguments object.
4546  // 1. Parameter map, has 2 extra words containing context and backing store.
4547  const int kParameterMapHeaderSize =
4548      FixedArray::kHeaderSize + 2 * kPointerSize;
4549  // If there are no mapped parameters, we do not need the parameter_map.
4550  Label param_map_size;
4551  ASSERT_EQ(0, Smi::FromInt(0));
4552  __ Branch(USE_DELAY_SLOT, &param_map_size, eq, a1, Operand(zero_reg));
4553  __ mov(t5, zero_reg);  // In delay slot: param map size = 0 when a1 == 0.
4554  __ sll(t5, a1, 1);
4555  __ addiu(t5, t5, kParameterMapHeaderSize);
4556  __ bind(&param_map_size);
4557
4558  // 2. Backing store.
4559  __ sll(t6, a2, 1);
4560  __ Addu(t5, t5, Operand(t6));
4561  __ Addu(t5, t5, Operand(FixedArray::kHeaderSize));
4562
4563  // 3. Arguments object.
4564  __ Addu(t5, t5, Operand(Heap::kArgumentsObjectSize));
4565
4566  // Do the allocation of all three objects in one go.
4567  __ AllocateInNewSpace(t5, v0, a3, t0, &runtime, TAG_OBJECT);
4568
4569  // v0 = address of new object(s) (tagged)
4570  // a2 = argument count (tagged)
4571  // Get the arguments boilerplate from the current (global) context into t0.
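// (There are two boilerplates: the normal arguments object, used when no
// parameters are mapped (a1 == 0), and the aliased one used when a mapped
// parameter count is present; the branches below pick kNormalOffset or
// kAliasedOffset accordingly.)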
4572 const int kNormalOffset = 4573 Context::SlotOffset(Context::ARGUMENTS_BOILERPLATE_INDEX); 4574 const int kAliasedOffset = 4575 Context::SlotOffset(Context::ALIASED_ARGUMENTS_BOILERPLATE_INDEX); 4576 4577 __ lw(t0, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX))); 4578 __ lw(t0, FieldMemOperand(t0, GlobalObject::kGlobalContextOffset)); 4579 Label skip2_ne, skip2_eq; 4580 __ Branch(&skip2_ne, ne, a1, Operand(zero_reg)); 4581 __ lw(t0, MemOperand(t0, kNormalOffset)); 4582 __ bind(&skip2_ne); 4583 4584 __ Branch(&skip2_eq, eq, a1, Operand(zero_reg)); 4585 __ lw(t0, MemOperand(t0, kAliasedOffset)); 4586 __ bind(&skip2_eq); 4587 4588 // v0 = address of new object (tagged) 4589 // a1 = mapped parameter count (tagged) 4590 // a2 = argument count (tagged) 4591 // t0 = address of boilerplate object (tagged) 4592 // Copy the JS object part. 4593 for (int i = 0; i < JSObject::kHeaderSize; i += kPointerSize) { 4594 __ lw(a3, FieldMemOperand(t0, i)); 4595 __ sw(a3, FieldMemOperand(v0, i)); 4596 } 4597 4598 // Set up the callee in-object property. 4599 STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1); 4600 __ lw(a3, MemOperand(sp, 2 * kPointerSize)); 4601 const int kCalleeOffset = JSObject::kHeaderSize + 4602 Heap::kArgumentsCalleeIndex * kPointerSize; 4603 __ sw(a3, FieldMemOperand(v0, kCalleeOffset)); 4604 4605 // Use the length (smi tagged) and set that as an in-object property too. 4606 STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0); 4607 const int kLengthOffset = JSObject::kHeaderSize + 4608 Heap::kArgumentsLengthIndex * kPointerSize; 4609 __ sw(a2, FieldMemOperand(v0, kLengthOffset)); 4610 4611 // Set up the elements pointer in the allocated arguments object. 4612 // If we allocated a parameter map, t0 will point there, otherwise 4613 // it will point to the backing store. 4614 __ Addu(t0, v0, Operand(Heap::kArgumentsObjectSize)); 4615 __ sw(t0, FieldMemOperand(v0, JSObject::kElementsOffset)); 4616 4617 // v0 = address of new object (tagged) 4618 // a1 = mapped parameter count (tagged) 4619 // a2 = argument count (tagged) 4620 // t0 = address of parameter map or backing store (tagged) 4621 // Initialize parameter map. If there are no mapped arguments, we're done. 4622 Label skip_parameter_map; 4623 Label skip3; 4624 __ Branch(&skip3, ne, a1, Operand(Smi::FromInt(0))); 4625 // Move backing store address to a3, because it is 4626 // expected there when filling in the unmapped arguments. 4627 __ mov(a3, t0); 4628 __ bind(&skip3); 4629 4630 __ Branch(&skip_parameter_map, eq, a1, Operand(Smi::FromInt(0))); 4631 4632 __ LoadRoot(t2, Heap::kNonStrictArgumentsElementsMapRootIndex); 4633 __ sw(t2, FieldMemOperand(t0, FixedArray::kMapOffset)); 4634 __ Addu(t2, a1, Operand(Smi::FromInt(2))); 4635 __ sw(t2, FieldMemOperand(t0, FixedArray::kLengthOffset)); 4636 __ sw(cp, FieldMemOperand(t0, FixedArray::kHeaderSize + 0 * kPointerSize)); 4637 __ sll(t6, a1, 1); 4638 __ Addu(t2, t0, Operand(t6)); 4639 __ Addu(t2, t2, Operand(kParameterMapHeaderSize)); 4640 __ sw(t2, FieldMemOperand(t0, FixedArray::kHeaderSize + 1 * kPointerSize)); 4641 4642 // Copy the parameter slots and the holes in the arguments. 4643 // We need to fill in mapped_parameter_count slots. They index the context, 4644 // where parameters are stored in reverse order, at 4645 // MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS+parameter_count-1 4646 // The mapped parameter thus need to get indices 4647 // MIN_CONTEXT_SLOTS+parameter_count-1 .. 4648 // MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count 4649 // We loop from right to left. 
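// (Worked example, assuming parameter_count == 2, mapped count == 2 and
// MIN_CONTEXT_SLOTS == 4: the two map entries receive the context indices
// 5 and 4 -- MIN_CONTEXT_SLOTS + parameter_count - 1 down to
// MIN_CONTEXT_SLOTS + parameter_count - mapped_parameter_count -- while the
// corresponding backing store slots are filled with the hole.)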
4650  Label parameters_loop, parameters_test;
4651  __ mov(t2, a1);
4652  __ lw(t5, MemOperand(sp, 0 * kPointerSize));
4653  __ Addu(t5, t5, Operand(Smi::FromInt(Context::MIN_CONTEXT_SLOTS)));
4654  __ Subu(t5, t5, Operand(a1));
4655  __ LoadRoot(t3, Heap::kTheHoleValueRootIndex);
4656  __ sll(t6, t2, 1);
4657  __ Addu(a3, t0, Operand(t6));
4658  __ Addu(a3, a3, Operand(kParameterMapHeaderSize));
4659
4660  // t2 = loop variable (tagged)
4661  // a1 = mapping index (tagged)
4662  // a3 = address of backing store (tagged)
4663  // t0 = address of parameter map (tagged)
4664  // t1 = temporary scratch (among others, for address calculation)
4665  // t3 = the hole value
4666  __ jmp(&parameters_test);
4667
4668  __ bind(&parameters_loop);
4669  __ Subu(t2, t2, Operand(Smi::FromInt(1)));
4670  __ sll(t1, t2, 1);
4671  __ Addu(t1, t1, Operand(kParameterMapHeaderSize - kHeapObjectTag));
4672  __ Addu(t6, t0, t1);
4673  __ sw(t5, MemOperand(t6));
4674  __ Subu(t1, t1, Operand(kParameterMapHeaderSize - FixedArray::kHeaderSize));
4675  __ Addu(t6, a3, t1);
4676  __ sw(t3, MemOperand(t6));
4677  __ Addu(t5, t5, Operand(Smi::FromInt(1)));
4678  __ bind(&parameters_test);
4679  __ Branch(&parameters_loop, ne, t2, Operand(Smi::FromInt(0)));
4680
4681  __ bind(&skip_parameter_map);
4682  // a2 = argument count (tagged)
4683  // a3 = address of backing store (tagged)
4684  // t1 = scratch
4685  // Copy arguments header and remaining slots (if there are any).
4686  __ LoadRoot(t1, Heap::kFixedArrayMapRootIndex);
4687  __ sw(t1, FieldMemOperand(a3, FixedArray::kMapOffset));
4688  __ sw(a2, FieldMemOperand(a3, FixedArray::kLengthOffset));
4689
4690  Label arguments_loop, arguments_test;
4691  __ mov(t5, a1);
4692  __ lw(t0, MemOperand(sp, 1 * kPointerSize));
4693  __ sll(t6, t5, 1);
4694  __ Subu(t0, t0, Operand(t6));
4695  __ jmp(&arguments_test);
4696
4697  __ bind(&arguments_loop);
4698  __ Subu(t0, t0, Operand(kPointerSize));
4699  __ lw(t2, MemOperand(t0, 0));
4700  __ sll(t6, t5, 1);
4701  __ Addu(t1, a3, Operand(t6));
4702  __ sw(t2, FieldMemOperand(t1, FixedArray::kHeaderSize));
4703  __ Addu(t5, t5, Operand(Smi::FromInt(1)));
4704
4705  __ bind(&arguments_test);
4706  __ Branch(&arguments_loop, lt, t5, Operand(a2));
4707
4708  // Return and remove the on-stack parameters.
4709  __ DropAndRet(3);
4710
4711  // Do the runtime call to allocate the arguments object.
4712  // a2 = argument count (tagged)
4713  __ bind(&runtime);
4714  __ sw(a2, MemOperand(sp, 0 * kPointerSize));  // Patch argument count.
4715  __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
4716}
4717
4718
4719void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
4720  // sp[0] : number of parameters
4721  // sp[4] : receiver displacement
4722  // sp[8] : function
4723  // Check if the calling frame is an arguments adaptor frame.
4724  Label adaptor_frame, try_allocate, runtime;
4725  __ lw(a2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
4726  __ lw(a3, MemOperand(a2, StandardFrameConstants::kContextOffset));
4727  __ Branch(&adaptor_frame,
4728            eq,
4729            a3,
4730            Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
4731
4732  // Get the length from the frame.
4733  __ lw(a1, MemOperand(sp, 0));
4734  __ Branch(&try_allocate);
4735
4736  // Patch the arguments.length and the parameters pointer.
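// (An arguments adaptor frame is interposed whenever the actual argument
// count differs from the formal parameter count; its length slot holds the
// actual count, which is the value arguments.length must report.)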
4737 __ bind(&adaptor_frame); 4738 __ lw(a1, MemOperand(a2, ArgumentsAdaptorFrameConstants::kLengthOffset)); 4739 __ sw(a1, MemOperand(sp, 0)); 4740 __ sll(at, a1, kPointerSizeLog2 - kSmiTagSize); 4741 __ Addu(a3, a2, Operand(at)); 4742 4743 __ Addu(a3, a3, Operand(StandardFrameConstants::kCallerSPOffset)); 4744 __ sw(a3, MemOperand(sp, 1 * kPointerSize)); 4745 4746 // Try the new space allocation. Start out with computing the size 4747 // of the arguments object and the elements array in words. 4748 Label add_arguments_object; 4749 __ bind(&try_allocate); 4750 __ Branch(&add_arguments_object, eq, a1, Operand(zero_reg)); 4751 __ srl(a1, a1, kSmiTagSize); 4752 4753 __ Addu(a1, a1, Operand(FixedArray::kHeaderSize / kPointerSize)); 4754 __ bind(&add_arguments_object); 4755 __ Addu(a1, a1, Operand(Heap::kArgumentsObjectSizeStrict / kPointerSize)); 4756 4757 // Do the allocation of both objects in one go. 4758 __ AllocateInNewSpace(a1, 4759 v0, 4760 a2, 4761 a3, 4762 &runtime, 4763 static_cast<AllocationFlags>(TAG_OBJECT | 4764 SIZE_IN_WORDS)); 4765 4766 // Get the arguments boilerplate from the current (global) context. 4767 __ lw(t0, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX))); 4768 __ lw(t0, FieldMemOperand(t0, GlobalObject::kGlobalContextOffset)); 4769 __ lw(t0, MemOperand(t0, Context::SlotOffset( 4770 Context::STRICT_MODE_ARGUMENTS_BOILERPLATE_INDEX))); 4771 4772 // Copy the JS object part. 4773 __ CopyFields(v0, t0, a3.bit(), JSObject::kHeaderSize / kPointerSize); 4774 4775 // Get the length (smi tagged) and set that as an in-object property too. 4776 STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0); 4777 __ lw(a1, MemOperand(sp, 0 * kPointerSize)); 4778 __ sw(a1, FieldMemOperand(v0, JSObject::kHeaderSize + 4779 Heap::kArgumentsLengthIndex * kPointerSize)); 4780 4781 Label done; 4782 __ Branch(&done, eq, a1, Operand(zero_reg)); 4783 4784 // Get the parameters pointer from the stack. 4785 __ lw(a2, MemOperand(sp, 1 * kPointerSize)); 4786 4787 // Set up the elements pointer in the allocated arguments object and 4788 // initialize the header in the elements fixed array. 4789 __ Addu(t0, v0, Operand(Heap::kArgumentsObjectSizeStrict)); 4790 __ sw(t0, FieldMemOperand(v0, JSObject::kElementsOffset)); 4791 __ LoadRoot(a3, Heap::kFixedArrayMapRootIndex); 4792 __ sw(a3, FieldMemOperand(t0, FixedArray::kMapOffset)); 4793 __ sw(a1, FieldMemOperand(t0, FixedArray::kLengthOffset)); 4794 // Untag the length for the loop. 4795 __ srl(a1, a1, kSmiTagSize); 4796 4797 // Copy the fixed array slots. 4798 Label loop; 4799 // Set up t0 to point to the first array slot. 4800 __ Addu(t0, t0, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); 4801 __ bind(&loop); 4802 // Pre-decrement a2 with kPointerSize on each iteration. 4803 // Pre-decrement in order to skip receiver. 4804 __ Addu(a2, a2, Operand(-kPointerSize)); 4805 __ lw(a3, MemOperand(a2)); 4806 // Post-increment t0 with kPointerSize on each iteration. 4807 __ sw(a3, MemOperand(t0)); 4808 __ Addu(t0, t0, Operand(kPointerSize)); 4809 __ Subu(a1, a1, Operand(1)); 4810 __ Branch(&loop, ne, a1, Operand(zero_reg)); 4811 4812 // Return and remove the on-stack parameters. 4813 __ bind(&done); 4814 __ DropAndRet(3); 4815 4816 // Do the runtime call to allocate the arguments object. 
4817  __ bind(&runtime);
4818  __ TailCallRuntime(Runtime::kNewStrictArgumentsFast, 3, 1);
4819}


4822void RegExpExecStub::Generate(MacroAssembler* masm) {
4823  // Just jump directly to runtime if native RegExp is not selected at compile
4824  // time, or if the regexp entry in generated code has been turned off by a
4825  // runtime switch or at compilation.
4826#ifdef V8_INTERPRETED_REGEXP
4827  __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
4828#else  // V8_INTERPRETED_REGEXP
4829
4830  // Stack frame on entry.
4831  //  sp[0]: last_match_info (expected JSArray)
4832  //  sp[4]: previous index
4833  //  sp[8]: subject string
4834  //  sp[12]: JSRegExp object
4835
4836  const int kLastMatchInfoOffset = 0 * kPointerSize;
4837  const int kPreviousIndexOffset = 1 * kPointerSize;
4838  const int kSubjectOffset = 2 * kPointerSize;
4839  const int kJSRegExpOffset = 3 * kPointerSize;
4840
4841  Isolate* isolate = masm->isolate();
4842
4843  Label runtime, invoke_regexp;
4844
4845  // Allocation of registers for this function. These are in callee save
4846  // registers and will be preserved by the call to the native RegExp code, as
4847  // this code is called using the normal C calling convention. When calling
4848  // directly from generated code the native RegExp code will not do a GC and
4849  // therefore the contents of these registers are safe to use after the call.
4850  // MIPS - using s0..s2, since we are not using CEntry Stub.
4851  Register subject = s0;
4852  Register regexp_data = s1;
4853  Register last_match_info_elements = s2;
4854
4855  // Ensure that a RegExp stack is allocated.
4856  ExternalReference address_of_regexp_stack_memory_address =
4857      ExternalReference::address_of_regexp_stack_memory_address(
4858          isolate);
4859  ExternalReference address_of_regexp_stack_memory_size =
4860      ExternalReference::address_of_regexp_stack_memory_size(isolate);
4861  __ li(a0, Operand(address_of_regexp_stack_memory_size));
4862  __ lw(a0, MemOperand(a0, 0));
4863  __ Branch(&runtime, eq, a0, Operand(zero_reg));
4864
4865  // Check that the first argument is a JSRegExp object.
4866  __ lw(a0, MemOperand(sp, kJSRegExpOffset));
4867  STATIC_ASSERT(kSmiTag == 0);
4868  __ JumpIfSmi(a0, &runtime);
4869  __ GetObjectType(a0, a1, a1);
4870  __ Branch(&runtime, ne, a1, Operand(JS_REGEXP_TYPE));
4871
4872  // Check that the RegExp has been compiled (data contains a fixed array).
4873  __ lw(regexp_data, FieldMemOperand(a0, JSRegExp::kDataOffset));
4874  if (FLAG_debug_code) {
4875    __ And(t0, regexp_data, Operand(kSmiTagMask));
4876    __ Check(nz,
4877             "Unexpected type for RegExp data, FixedArray expected",
4878             t0,
4879             Operand(zero_reg));
4880    __ GetObjectType(regexp_data, a0, a0);
4881    __ Check(eq,
4882             "Unexpected type for RegExp data, FixedArray expected",
4883             a0,
4884             Operand(FIXED_ARRAY_TYPE));
4885  }

4887  // regexp_data: RegExp data (FixedArray)
4888  // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
4889  __ lw(a0, FieldMemOperand(regexp_data, JSRegExp::kDataTagOffset));
4890  __ Branch(&runtime, ne, a0, Operand(Smi::FromInt(JSRegExp::IRREGEXP)));

4892  // regexp_data: RegExp data (FixedArray)
4893  // Check that the number of captures fits in the static offsets vector buffer.
4894  __ lw(a2,
4895        FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
4896  // Calculate number of capture registers (number_of_captures + 1) * 2. This
4897  // uses the assumption that smis are 2 * their untagged value.
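// (Example: number_of_captures == 1 gives (1 + 1) * 2 == 4 capture
// registers. The tagged count loaded above is already 2 * captures because
// of the smi encoding, so adding the untagged constant 2 below produces
// (captures + 1) * 2 directly, with no explicit untagging needed.)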
4898  STATIC_ASSERT(kSmiTag == 0);
4899  STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
4900  __ Addu(a2, a2, Operand(2));  // a2 was a smi.
4901  // Check that the static offsets vector buffer is large enough.
4902  __ Branch(&runtime, hi, a2, Operand(OffsetsVector::kStaticOffsetsVectorSize));

4904  // a2: Number of capture registers
4905  // regexp_data: RegExp data (FixedArray)
4906  // Check that the second argument is a string.
4907  __ lw(subject, MemOperand(sp, kSubjectOffset));
4908  __ JumpIfSmi(subject, &runtime);
4909  __ GetObjectType(subject, a0, a0);
4910  __ And(a0, a0, Operand(kIsNotStringMask));
4911  STATIC_ASSERT(kStringTag == 0);
4912  __ Branch(&runtime, ne, a0, Operand(zero_reg));

4914  // Get the length of the string into a3.
4915  __ lw(a3, FieldMemOperand(subject, String::kLengthOffset));

4917  // a2: Number of capture registers
4918  // a3: Length of subject string as a smi
4919  // subject: Subject string
4920  // regexp_data: RegExp data (FixedArray)
4921  // Check that the third argument is a positive smi less than the subject
4922  // string length. A negative value will be greater (unsigned comparison).
4923  __ lw(a0, MemOperand(sp, kPreviousIndexOffset));
4924  __ JumpIfNotSmi(a0, &runtime);
4925  __ Branch(&runtime, ls, a3, Operand(a0));

4927  // a2: Number of capture registers
4928  // subject: Subject string
4929  // regexp_data: RegExp data (FixedArray)
4930  // Check that the fourth object is a JSArray object.
4931  __ lw(a0, MemOperand(sp, kLastMatchInfoOffset));
4932  __ JumpIfSmi(a0, &runtime);
4933  __ GetObjectType(a0, a1, a1);
4934  __ Branch(&runtime, ne, a1, Operand(JS_ARRAY_TYPE));
4935  // Check that the JSArray is in fast case.
4936  __ lw(last_match_info_elements,
4937        FieldMemOperand(a0, JSArray::kElementsOffset));
4938  __ lw(a0, FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset));
4939  __ Branch(&runtime, ne, a0, Operand(
4940      isolate->factory()->fixed_array_map()));
4941  // Check that the last match info has space for the capture registers and the
4942  // additional information.
4943  __ lw(a0,
4944        FieldMemOperand(last_match_info_elements, FixedArray::kLengthOffset));
4945  __ Addu(a2, a2, Operand(RegExpImpl::kLastMatchOverhead));
4946  __ sra(at, a0, kSmiTagSize);  // Untag length for comparison.
4947  __ Branch(&runtime, gt, a2, Operand(at));

4949  // Reset offset for possibly sliced string.
4950  __ mov(t0, zero_reg);
4951  // subject: Subject string
4952  // regexp_data: RegExp data (FixedArray)
4953  // Check the representation and encoding of the subject string.
4954  Label seq_string;
4955  __ lw(a0, FieldMemOperand(subject, HeapObject::kMapOffset));
4956  __ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset));
4957  // First check for flat string. None of the following string type tests will
4958  // succeed if subject is not a string or a short external string.
4959  __ And(a1,
4960         a0,
4961         Operand(kIsNotStringMask |
4962                 kStringRepresentationMask |
4963                 kShortExternalStringMask));
4964  STATIC_ASSERT((kStringTag | kSeqStringTag) == 0);
4965  __ Branch(&seq_string, eq, a1, Operand(zero_reg));

4967  // subject: Subject string
4968  // a0: instance type of subject string
4969  // regexp_data: RegExp data (FixedArray)
4970  // a1: whether subject is a string and if yes, its string representation
4971  // Check for flat cons string or sliced string.
4972  // A flat cons string is a cons string where the second part is the empty
4973  // string. In that case the subject string is just the first part of the cons
4974  // string. Also in this case the first part of the cons string is known to be
4975 // a sequential string or an external string.
4976 // In the case of a sliced string its offset has to be taken into account.
4977 Label cons_string, external_string, check_encoding;
4978 STATIC_ASSERT(kConsStringTag < kExternalStringTag);
4979 STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
4980 STATIC_ASSERT(kIsNotStringMask > kExternalStringTag);
4981 STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag);
4982 __ Branch(&cons_string, lt, a1, Operand(kExternalStringTag));
4983 __ Branch(&external_string, eq, a1, Operand(kExternalStringTag));
4984
4985 // Catch non-string subject or short external string.
4986 STATIC_ASSERT(kNotStringTag != 0 && kShortExternalStringTag != 0);
4987 __ And(at, a1, Operand(kIsNotStringMask | kShortExternalStringMask));
4988 __ Branch(&runtime, ne, at, Operand(zero_reg));
4989
4990 // String is sliced.
4991 __ lw(t0, FieldMemOperand(subject, SlicedString::kOffsetOffset));
4992 __ sra(t0, t0, kSmiTagSize);
4993 __ lw(subject, FieldMemOperand(subject, SlicedString::kParentOffset));
4994 // t0: offset of sliced string, untagged.
4995 __ jmp(&check_encoding);
4996 // String is a cons string, check whether it is flat.
4997 __ bind(&cons_string);
4998 __ lw(a0, FieldMemOperand(subject, ConsString::kSecondOffset));
4999 __ LoadRoot(a1, Heap::kEmptyStringRootIndex);
5000 __ Branch(&runtime, ne, a0, Operand(a1));
5001 __ lw(subject, FieldMemOperand(subject, ConsString::kFirstOffset));
5002 // Is first part of cons or parent of slice a flat string?
5003 __ bind(&check_encoding);
5004 __ lw(a0, FieldMemOperand(subject, HeapObject::kMapOffset));
5005 __ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset));
5006 STATIC_ASSERT(kSeqStringTag == 0);
5007 __ And(at, a0, Operand(kStringRepresentationMask));
5008 __ Branch(&external_string, ne, at, Operand(zero_reg));
5009
5010 __ bind(&seq_string);
5011 // subject: Subject string
5012 // regexp_data: RegExp data (FixedArray)
5013 // a0: Instance type of subject string
5014 STATIC_ASSERT(kStringEncodingMask == 4);
5015 STATIC_ASSERT(kAsciiStringTag == 4);
5016 STATIC_ASSERT(kTwoByteStringTag == 0);
5017 // Find the code object based on the assumptions above.
5018 __ And(a0, a0, Operand(kStringEncodingMask)); // Non-zero for ASCII.
5019 __ lw(t9, FieldMemOperand(regexp_data, JSRegExp::kDataAsciiCodeOffset));
5020 __ sra(a3, a0, 2); // a3 is 1 for ASCII, 0 for UC16 (used below).
5021 __ lw(t1, FieldMemOperand(regexp_data, JSRegExp::kDataUC16CodeOffset));
5022 __ Movz(t9, t1, a0); // If UC16 (a0 is 0), replace t9 with the UC16 code in t1.
5023
5024 // Check that the irregexp code has been generated for the actual string
5025 // encoding. If it has, the field contains a code object; otherwise it
5026 // contains a smi (code flushing support).
5027 __ JumpIfSmi(t9, &runtime);
5028
5029 // a3: encoding of subject string (1 if ASCII, 0 if two_byte);
5030 // t9: code
5031 // subject: Subject string
5032 // regexp_data: RegExp data (FixedArray)
5033 // Load used arguments before starting to push arguments for call to native
5034 // RegExp code to avoid handling changing stack height.
5035 __ lw(a1, MemOperand(sp, kPreviousIndexOffset));
5036 __ sra(a1, a1, kSmiTagSize); // Untag the Smi.
5037
5038 // a1: previous index
5039 // a3: encoding of subject string (1 if ASCII, 0 if two_byte);
5040 // t9: code
5041 // subject: Subject string
5042 // regexp_data: RegExp data (FixedArray)
5043 // All checks done. Now push arguments for native regexp code.
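 // For orientation only: the eight arguments set up below correspond
 // roughly to a C entry point of the following shape (a hedged sketch of
 // the calling convention, not the authoritative declaration; see the
 // native RegExp macro assembler for the real contract):
 //
 //   int Match(String* subject,       // argument 1, in a0
 //             int previous_index,    // argument 2, in a1
 //             byte* input_start,     // argument 3, in a2
 //             byte* input_end,       // argument 4, in a3
 //             int* static_offsets,   // argument 5, on the stack
 //             byte* stack_high_end,  // argument 6, backtrack stack limit
 //             int direct_call,       // argument 7, 1 for direct calls
 //             Isolate* isolate);     // argument 8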
5044 __ IncrementCounter(isolate->counters()->regexp_entry_native(), 5045 1, a0, a2); 5046 5047 // Isolates: note we add an additional parameter here (isolate pointer). 5048 const int kRegExpExecuteArguments = 8; 5049 const int kParameterRegisters = 4; 5050 __ EnterExitFrame(false, kRegExpExecuteArguments - kParameterRegisters); 5051 5052 // Stack pointer now points to cell where return address is to be written. 5053 // Arguments are before that on the stack or in registers, meaning we 5054 // treat the return address as argument 5. Thus every argument after that 5055 // needs to be shifted back by 1. Since DirectCEntryStub will handle 5056 // allocating space for the c argument slots, we don't need to calculate 5057 // that into the argument positions on the stack. This is how the stack will 5058 // look (sp meaning the value of sp at this moment): 5059 // [sp + 4] - Argument 8 5060 // [sp + 3] - Argument 7 5061 // [sp + 2] - Argument 6 5062 // [sp + 1] - Argument 5 5063 // [sp + 0] - saved ra 5064 5065 // Argument 8: Pass current isolate address. 5066 // CFunctionArgumentOperand handles MIPS stack argument slots. 5067 __ li(a0, Operand(ExternalReference::isolate_address())); 5068 __ sw(a0, MemOperand(sp, 4 * kPointerSize)); 5069 5070 // Argument 7: Indicate that this is a direct call from JavaScript. 5071 __ li(a0, Operand(1)); 5072 __ sw(a0, MemOperand(sp, 3 * kPointerSize)); 5073 5074 // Argument 6: Start (high end) of backtracking stack memory area. 5075 __ li(a0, Operand(address_of_regexp_stack_memory_address)); 5076 __ lw(a0, MemOperand(a0, 0)); 5077 __ li(a2, Operand(address_of_regexp_stack_memory_size)); 5078 __ lw(a2, MemOperand(a2, 0)); 5079 __ addu(a0, a0, a2); 5080 __ sw(a0, MemOperand(sp, 2 * kPointerSize)); 5081 5082 // Argument 5: static offsets vector buffer. 5083 __ li(a0, Operand( 5084 ExternalReference::address_of_static_offsets_vector(isolate))); 5085 __ sw(a0, MemOperand(sp, 1 * kPointerSize)); 5086 5087 // For arguments 4 and 3 get string length, calculate start of string data 5088 // and calculate the shift of the index (0 for ASCII and 1 for two byte). 5089 __ Addu(t2, subject, Operand(SeqString::kHeaderSize - kHeapObjectTag)); 5090 __ Xor(a3, a3, Operand(1)); // 1 for 2-byte str, 0 for 1-byte. 5091 // Load the length from the original subject string from the previous stack 5092 // frame. Therefore we have to use fp, which points exactly to two pointer 5093 // sizes below the previous sp. (Because creating a new stack frame pushes 5094 // the previous fp onto the stack and moves up sp by 2 * kPointerSize.) 5095 __ lw(subject, MemOperand(fp, kSubjectOffset + 2 * kPointerSize)); 5096 // If slice offset is not 0, load the length from the original sliced string. 5097 // Argument 4, a3: End of string data 5098 // Argument 3, a2: Start of string data 5099 // Prepare start and end index of the input. 5100 __ sllv(t1, t0, a3); 5101 __ addu(t0, t2, t1); 5102 __ sllv(t1, a1, a3); 5103 __ addu(a2, t0, t1); 5104 5105 __ lw(t2, FieldMemOperand(subject, String::kLengthOffset)); 5106 __ sra(t2, t2, kSmiTagSize); 5107 __ sllv(t1, t2, a3); 5108 __ addu(a3, t0, t1); 5109 // Argument 2 (a1): Previous index. 5110 // Already there 5111 5112 // Argument 1 (a0): Subject string. 5113 __ mov(a0, subject); 5114 5115 // Locate the code entry and call it. 
5116 __ Addu(t9, t9, Operand(Code::kHeaderSize - kHeapObjectTag));
5117 DirectCEntryStub stub;
5118 stub.GenerateCall(masm, t9);
5119
5120 __ LeaveExitFrame(false, no_reg);
5121
5122 // v0: result
5123 // subject: subject string (callee saved)
5124 // regexp_data: RegExp data (callee saved)
5125 // last_match_info_elements: Last match info elements (callee saved)
5126
5127 // Check the result.
5128
5129 Label success;
5130 __ Branch(&success, eq, v0, Operand(NativeRegExpMacroAssembler::SUCCESS));
5131 Label failure;
5132 __ Branch(&failure, eq, v0, Operand(NativeRegExpMacroAssembler::FAILURE));
5133 // If not an exception, it can only be retry. Handle that in the runtime
5134 // system.
5134 __ Branch(&runtime, ne, v0, Operand(NativeRegExpMacroAssembler::EXCEPTION));
5135 // Result must now be exception. If there is no pending exception already, a
5136 // stack overflow (on the backtrack stack) was detected in RegExp code but
5137 // the exception has not been created yet. Handle that in the runtime system.
5138 // TODO(592): Rerunning the RegExp to get the stack overflow exception.
5139 __ li(a1, Operand(isolate->factory()->the_hole_value()));
5140 __ li(a2, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
5141 isolate)));
5142 __ lw(v0, MemOperand(a2, 0));
5143 __ Branch(&runtime, eq, v0, Operand(a1));
5144
5145 __ sw(a1, MemOperand(a2, 0)); // Clear pending exception.
5146
5147 // Check if the exception is a termination. If so, throw as uncatchable.
5148 __ LoadRoot(a0, Heap::kTerminationExceptionRootIndex);
5149 Label termination_exception;
5150 __ Branch(&termination_exception, eq, v0, Operand(a0));
5151
5152 __ Throw(v0);
5153
5154 __ bind(&termination_exception);
5155 __ ThrowUncatchable(v0);
5156
5157 __ bind(&failure);
5158 // For failure and exception return null.
5159 __ li(v0, Operand(isolate->factory()->null_value()));
5160 __ DropAndRet(4);
5161
5162 // Process the result from the native regexp code.
5163 __ bind(&success);
5164 __ lw(a1,
5165 FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
5166 // Calculate number of capture registers (number_of_captures + 1) * 2.
5167 STATIC_ASSERT(kSmiTag == 0);
5168 STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
5169 __ Addu(a1, a1, Operand(2)); // a1 was a smi.
5170
5171 // a1: number of capture registers
5172 // subject: subject string
5173 // Store the capture count.
5174 __ sll(a2, a1, kSmiTagSize + kSmiShiftSize); // To smi.
5175 __ sw(a2, FieldMemOperand(last_match_info_elements,
5176 RegExpImpl::kLastCaptureCountOffset));
5177 // Store last subject and last input.
5178 __ sw(subject,
5179 FieldMemOperand(last_match_info_elements,
5180 RegExpImpl::kLastSubjectOffset));
5181 __ mov(a2, subject);
5182 __ RecordWriteField(last_match_info_elements,
5183 RegExpImpl::kLastSubjectOffset,
5184 a2,
5185 t3,
5186 kRAHasNotBeenSaved,
5187 kDontSaveFPRegs);
5188 __ sw(subject,
5189 FieldMemOperand(last_match_info_elements,
5190 RegExpImpl::kLastInputOffset));
5191 __ RecordWriteField(last_match_info_elements,
5192 RegExpImpl::kLastInputOffset,
5193 subject,
5194 t3,
5195 kRAHasNotBeenSaved,
5196 kDontSaveFPRegs);
5197
5198 // Get the static offsets vector filled by the native regexp code.
5199 ExternalReference address_of_static_offsets_vector =
5200 ExternalReference::address_of_static_offsets_vector(isolate);
5201 __ li(a2, Operand(address_of_static_offsets_vector));
5202
5203 // a1: number of capture registers
5204 // a2: offsets vector
5205 Label next_capture, done;
5206 // Capture register counter starts from number of capture registers and
5207 // counts down until wrapping after zero.
5208 __ Addu(a0,
5209 last_match_info_elements,
5210 Operand(RegExpImpl::kFirstCaptureOffset - kHeapObjectTag));
5211 __ bind(&next_capture);
5212 __ Subu(a1, a1, Operand(1));
5213 __ Branch(&done, lt, a1, Operand(zero_reg));
5214 // Read the value from the static offsets vector buffer.
5215 __ lw(a3, MemOperand(a2, 0));
5216 __ addiu(a2, a2, kPointerSize);
5217 // Store the smi value in the last match info.
5218 __ sll(a3, a3, kSmiTagSize); // Convert to Smi.
5219 __ sw(a3, MemOperand(a0, 0));
5220 __ Branch(&next_capture, USE_DELAY_SLOT);
5221 __ addiu(a0, a0, kPointerSize); // In branch delay slot.
5222
5223 __ bind(&done);
5224
5225 // Return last match info.
5226 __ lw(v0, MemOperand(sp, kLastMatchInfoOffset));
5227 __ DropAndRet(4);
5228
5229 // External string. Short external strings have already been ruled out.
5230 // a0: scratch
5231 __ bind(&external_string);
5232 __ lw(a0, FieldMemOperand(subject, HeapObject::kMapOffset));
5233 __ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset));
5234 if (FLAG_debug_code) {
5235 // Assert that we do not have a cons or slice (indirect strings) here.
5236 // Sequential strings have already been ruled out.
5237 __ And(at, a0, Operand(kIsIndirectStringMask));
5238 __ Assert(eq,
5239 "external string expected, but not found",
5240 at,
5241 Operand(zero_reg));
5242 }
5243 __ lw(subject,
5244 FieldMemOperand(subject, ExternalString::kResourceDataOffset));
5245 // Move the pointer so that offset-wise, it looks like a sequential string.
5246 STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize);
5247 __ Subu(subject,
5248 subject,
5249 SeqTwoByteString::kHeaderSize - kHeapObjectTag);
5250 __ jmp(&seq_string);
5251
5252 // Do the runtime call to execute the regexp.
5253 __ bind(&runtime);
5254 __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
5255#endif // V8_INTERPRETED_REGEXP
5256}
5257
5258
5259void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
5260 const int kMaxInlineLength = 100;
5261 Label slowcase;
5262 Label done;
5263 __ lw(a1, MemOperand(sp, kPointerSize * 2));
5264 STATIC_ASSERT(kSmiTag == 0);
5265 STATIC_ASSERT(kSmiTagSize == 1);
5266 __ JumpIfNotSmi(a1, &slowcase);
5267 __ Branch(&slowcase, hi, a1, Operand(Smi::FromInt(kMaxInlineLength)));
5268 // Smi-tagging is equivalent to multiplying by 2.
5269 // Allocate RegExpResult followed by FixedArray with size in a2.
5270 // JSArray: [Map][empty properties][Elements][Length-smi][index][input]
5271 // Elements: [Map][Length][..elements..]
5272 // Size of JSArray with two in-object properties and the header of a
5273 // FixedArray.
5274 int objects_size =
5275 (JSRegExpResult::kSize + FixedArray::kHeaderSize) / kPointerSize;
5276 __ srl(t1, a1, kSmiTagSize + kSmiShiftSize);
5277 __ Addu(a2, t1, Operand(objects_size));
5278 __ AllocateInNewSpace(
5279 a2, // In: Size, in words.
5280 v0, // Out: Start of allocation (tagged).
5281 a3, // Scratch register.
5282 t0, // Scratch register.
5283 &slowcase,
5284 static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
5285 // v0: Start of allocated area, object-tagged.
5286 // a1: Number of elements in array, as smi. 5287 // t1: Number of elements, untagged. 5288 5289 // Set JSArray map to global.regexp_result_map(). 5290 // Set empty properties FixedArray. 5291 // Set elements to point to FixedArray allocated right after the JSArray. 5292 // Interleave operations for better latency. 5293 __ lw(a2, ContextOperand(cp, Context::GLOBAL_INDEX)); 5294 __ Addu(a3, v0, Operand(JSRegExpResult::kSize)); 5295 __ li(t0, Operand(masm->isolate()->factory()->empty_fixed_array())); 5296 __ lw(a2, FieldMemOperand(a2, GlobalObject::kGlobalContextOffset)); 5297 __ sw(a3, FieldMemOperand(v0, JSObject::kElementsOffset)); 5298 __ lw(a2, ContextOperand(a2, Context::REGEXP_RESULT_MAP_INDEX)); 5299 __ sw(t0, FieldMemOperand(v0, JSObject::kPropertiesOffset)); 5300 __ sw(a2, FieldMemOperand(v0, HeapObject::kMapOffset)); 5301 5302 // Set input, index and length fields from arguments. 5303 __ lw(a1, MemOperand(sp, kPointerSize * 0)); 5304 __ lw(a2, MemOperand(sp, kPointerSize * 1)); 5305 __ lw(t2, MemOperand(sp, kPointerSize * 2)); 5306 __ sw(a1, FieldMemOperand(v0, JSRegExpResult::kInputOffset)); 5307 __ sw(a2, FieldMemOperand(v0, JSRegExpResult::kIndexOffset)); 5308 __ sw(t2, FieldMemOperand(v0, JSArray::kLengthOffset)); 5309 5310 // Fill out the elements FixedArray. 5311 // v0: JSArray, tagged. 5312 // a3: FixedArray, tagged. 5313 // t1: Number of elements in array, untagged. 5314 5315 // Set map. 5316 __ li(a2, Operand(masm->isolate()->factory()->fixed_array_map())); 5317 __ sw(a2, FieldMemOperand(a3, HeapObject::kMapOffset)); 5318 // Set FixedArray length. 5319 __ sll(t2, t1, kSmiTagSize); 5320 __ sw(t2, FieldMemOperand(a3, FixedArray::kLengthOffset)); 5321 // Fill contents of fixed-array with the-hole. 5322 __ li(a2, Operand(masm->isolate()->factory()->the_hole_value())); 5323 __ Addu(a3, a3, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); 5324 // Fill fixed array elements with hole. 5325 // v0: JSArray, tagged. 5326 // a2: the hole. 5327 // a3: Start of elements in FixedArray. 5328 // t1: Number of elements to fill. 5329 Label loop; 5330 __ sll(t1, t1, kPointerSizeLog2); // Convert num elements to num bytes. 5331 __ addu(t1, t1, a3); // Point past last element to store. 5332 __ bind(&loop); 5333 __ Branch(&done, ge, a3, Operand(t1)); // Break when a3 past end of elem. 5334 __ sw(a2, MemOperand(a3)); 5335 __ Branch(&loop, USE_DELAY_SLOT); 5336 __ addiu(a3, a3, kPointerSize); // In branch delay slot. 5337 5338 __ bind(&done); 5339 __ DropAndRet(3); 5340 5341 __ bind(&slowcase); 5342 __ TailCallRuntime(Runtime::kRegExpConstructResult, 3, 1); 5343} 5344 5345 5346static void GenerateRecordCallTarget(MacroAssembler* masm) { 5347 // Cache the called function in a global property cell. Cache states 5348 // are uninitialized, monomorphic (indicated by a JSFunction), and 5349 // megamorphic. 5350 // a1 : the function to call 5351 // a2 : cache cell for call target 5352 Label done; 5353 5354 ASSERT_EQ(*TypeFeedbackCells::MegamorphicSentinel(masm->isolate()), 5355 masm->isolate()->heap()->undefined_value()); 5356 ASSERT_EQ(*TypeFeedbackCells::UninitializedSentinel(masm->isolate()), 5357 masm->isolate()->heap()->the_hole_value()); 5358 5359 // Load the cache state into a3. 5360 __ lw(a3, FieldMemOperand(a2, JSGlobalPropertyCell::kValueOffset)); 5361 5362 // A monomorphic cache hit or an already megamorphic state: invoke the 5363 // function without changing the state. 
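 // (Illustrative summary of the cache-cell state machine implemented below,
 // not emitted code:
 //   the_hole (uninitialized)  --first call-->        callee JSFunction
 //   JSFunction (monomorphic)  --different callee-->  undefined (megamorphic)
 // undefined is a terminal state; a matching monomorphic call or a
 // megamorphic call leaves the cell unchanged.)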
5364 __ Branch(&done, eq, a3, Operand(a1)); 5365 __ LoadRoot(at, Heap::kUndefinedValueRootIndex); 5366 __ Branch(&done, eq, a3, Operand(at)); 5367 5368 // A monomorphic miss (i.e, here the cache is not uninitialized) goes 5369 // megamorphic. 5370 __ LoadRoot(at, Heap::kTheHoleValueRootIndex); 5371 5372 __ Branch(USE_DELAY_SLOT, &done, eq, a3, Operand(at)); 5373 // An uninitialized cache is patched with the function. 5374 // Store a1 in the delay slot. This may or may not get overwritten depending 5375 // on the result of the comparison. 5376 __ sw(a1, FieldMemOperand(a2, JSGlobalPropertyCell::kValueOffset)); 5377 // No need for a write barrier here - cells are rescanned. 5378 5379 // MegamorphicSentinel is an immortal immovable object (undefined) so no 5380 // write-barrier is needed. 5381 __ LoadRoot(at, Heap::kUndefinedValueRootIndex); 5382 __ sw(at, FieldMemOperand(a2, JSGlobalPropertyCell::kValueOffset)); 5383 5384 __ bind(&done); 5385} 5386 5387 5388void CallFunctionStub::Generate(MacroAssembler* masm) { 5389 // a1 : the function to call 5390 // a2 : cache cell for call target 5391 Label slow, non_function; 5392 5393 // The receiver might implicitly be the global object. This is 5394 // indicated by passing the hole as the receiver to the call 5395 // function stub. 5396 if (ReceiverMightBeImplicit()) { 5397 Label call; 5398 // Get the receiver from the stack. 5399 // function, receiver [, arguments] 5400 __ lw(t0, MemOperand(sp, argc_ * kPointerSize)); 5401 // Call as function is indicated with the hole. 5402 __ LoadRoot(at, Heap::kTheHoleValueRootIndex); 5403 __ Branch(&call, ne, t0, Operand(at)); 5404 // Patch the receiver on the stack with the global receiver object. 5405 __ lw(a2, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX))); 5406 __ lw(a2, FieldMemOperand(a2, GlobalObject::kGlobalReceiverOffset)); 5407 __ sw(a2, MemOperand(sp, argc_ * kPointerSize)); 5408 __ bind(&call); 5409 } 5410 5411 // Check that the function is really a JavaScript function. 5412 // a1: pushed function (to be verified) 5413 __ JumpIfSmi(a1, &non_function); 5414 // Get the map of the function object. 5415 __ GetObjectType(a1, a2, a2); 5416 __ Branch(&slow, ne, a2, Operand(JS_FUNCTION_TYPE)); 5417 5418 // Fast-case: Invoke the function now. 5419 // a1: pushed function 5420 ParameterCount actual(argc_); 5421 5422 if (ReceiverMightBeImplicit()) { 5423 Label call_as_function; 5424 __ LoadRoot(at, Heap::kTheHoleValueRootIndex); 5425 __ Branch(&call_as_function, eq, t0, Operand(at)); 5426 __ InvokeFunction(a1, 5427 actual, 5428 JUMP_FUNCTION, 5429 NullCallWrapper(), 5430 CALL_AS_METHOD); 5431 __ bind(&call_as_function); 5432 } 5433 __ InvokeFunction(a1, 5434 actual, 5435 JUMP_FUNCTION, 5436 NullCallWrapper(), 5437 CALL_AS_FUNCTION); 5438 5439 // Slow-case: Non-function called. 5440 __ bind(&slow); 5441 // Check for function proxy. 5442 __ Branch(&non_function, ne, a2, Operand(JS_FUNCTION_PROXY_TYPE)); 5443 __ push(a1); // Put proxy as additional argument. 5444 __ li(a0, Operand(argc_ + 1, RelocInfo::NONE)); 5445 __ li(a2, Operand(0, RelocInfo::NONE)); 5446 __ GetBuiltinEntry(a3, Builtins::CALL_FUNCTION_PROXY); 5447 __ SetCallKind(t1, CALL_AS_METHOD); 5448 { 5449 Handle<Code> adaptor = 5450 masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(); 5451 __ Jump(adaptor, RelocInfo::CODE_TARGET); 5452 } 5453 5454 // CALL_NON_FUNCTION expects the non-function callee as receiver (instead 5455 // of the original receiver from the call site). 
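 // (E.g. for a call like notAFunction(x), the receiver slot on the stack
 // is overwritten below with notAFunction itself before jumping to the
 // builtin; notAFunction is only an illustrative name.)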
5456 __ bind(&non_function);
5457 __ sw(a1, MemOperand(sp, argc_ * kPointerSize));
5458 __ li(a0, Operand(argc_)); // Set up the number of arguments.
5459 __ mov(a2, zero_reg);
5460 __ GetBuiltinEntry(a3, Builtins::CALL_NON_FUNCTION);
5461 __ SetCallKind(t1, CALL_AS_METHOD);
5462 __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
5463 RelocInfo::CODE_TARGET);
5464}
5465
5466
5467void CallConstructStub::Generate(MacroAssembler* masm) {
5468 // a0 : number of arguments
5469 // a1 : the function to call
5470 // a2 : cache cell for call target
5471 Label slow, non_function_call;
5472
5473 // Check that the function is not a smi.
5474 __ JumpIfSmi(a1, &non_function_call);
5475 // Check that the function is a JSFunction.
5476 __ GetObjectType(a1, a3, a3);
5477 __ Branch(&slow, ne, a3, Operand(JS_FUNCTION_TYPE));
5478
5479 if (RecordCallTarget()) {
5480 GenerateRecordCallTarget(masm);
5481 }
5482
5483 // Jump to the function-specific construct stub.
5484 __ lw(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
5485 __ lw(a2, FieldMemOperand(a2, SharedFunctionInfo::kConstructStubOffset));
5486 __ Addu(at, a2, Operand(Code::kHeaderSize - kHeapObjectTag));
5487 __ Jump(at);
5488
5489 // a0: number of arguments
5490 // a1: called object
5491 // a3: object type
5492 Label do_call;
5493 __ bind(&slow);
5494 __ Branch(&non_function_call, ne, a3, Operand(JS_FUNCTION_PROXY_TYPE));
5495 __ GetBuiltinEntry(a3, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR);
5496 __ jmp(&do_call);
5497
5498 __ bind(&non_function_call);
5499 __ GetBuiltinEntry(a3, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
5500 __ bind(&do_call);
5501 // Set expected number of arguments to zero (not changing a0).
5502 __ li(a2, Operand(0, RelocInfo::NONE));
5503 __ SetCallKind(t1, CALL_AS_METHOD);
5504 __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
5505 RelocInfo::CODE_TARGET);
5506}
5507
5508
5509// Unfortunately you have to run without snapshots to see most of these
5510// names in the profile since most compare stubs end up in the snapshot.
5511void CompareStub::PrintName(StringStream* stream) {
5512 ASSERT((lhs_.is(a0) && rhs_.is(a1)) ||
5513 (lhs_.is(a1) && rhs_.is(a0)));
5514 const char* cc_name;
5515 switch (cc_) {
5516 case lt: cc_name = "LT"; break;
5517 case gt: cc_name = "GT"; break;
5518 case le: cc_name = "LE"; break;
5519 case ge: cc_name = "GE"; break;
5520 case eq: cc_name = "EQ"; break;
5521 case ne: cc_name = "NE"; break;
5522 default: cc_name = "UnknownCondition"; break;
5523 }
5524 bool is_equality = cc_ == eq || cc_ == ne;
5525 stream->Add("CompareStub_%s", cc_name);
5526 stream->Add(lhs_.is(a0) ? "_a0" : "_a1");
5527 stream->Add(rhs_.is(a0) ? "_a0" : "_a1");
5528 if (strict_ && is_equality) stream->Add("_STRICT");
5529 if (never_nan_nan_ && is_equality) stream->Add("_NO_NAN");
5530 if (!include_number_compare_) stream->Add("_NO_NUMBER");
5531 if (!include_smi_compare_) stream->Add("_NO_SMI");
5532}
5533
5534
5535int CompareStub::MinorKey() {
5536 // Encode the parameters in a unique 16-bit value.
5537 ASSERT(static_cast<unsigned>(cc_) < (1 << 14));
5538 ASSERT((lhs_.is(a0) && rhs_.is(a1)) ||
5539 (lhs_.is(a1) && rhs_.is(a0)));
5540 return ConditionField::encode(static_cast<unsigned>(cc_))
5541 | RegisterField::encode(lhs_.is(a0))
5542 | StrictField::encode(strict_)
5543 | NeverNanNanField::encode(cc_ == eq ? never_nan_nan_ : false)
5544 | IncludeSmiCompareField::encode(include_smi_compare_);
5545}
5546
5547
5548// StringCharCodeAtGenerator.
5549void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) { 5550 Label flat_string; 5551 Label ascii_string; 5552 Label got_char_code; 5553 Label sliced_string; 5554 5555 ASSERT(!t0.is(index_)); 5556 ASSERT(!t0.is(result_)); 5557 ASSERT(!t0.is(object_)); 5558 5559 // If the receiver is a smi trigger the non-string case. 5560 __ JumpIfSmi(object_, receiver_not_string_); 5561 5562 // Fetch the instance type of the receiver into result register. 5563 __ lw(result_, FieldMemOperand(object_, HeapObject::kMapOffset)); 5564 __ lbu(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset)); 5565 // If the receiver is not a string trigger the non-string case. 5566 __ And(t0, result_, Operand(kIsNotStringMask)); 5567 __ Branch(receiver_not_string_, ne, t0, Operand(zero_reg)); 5568 5569 // If the index is non-smi trigger the non-smi case. 5570 __ JumpIfNotSmi(index_, &index_not_smi_); 5571 5572 __ bind(&got_smi_index_); 5573 5574 // Check for index out of range. 5575 __ lw(t0, FieldMemOperand(object_, String::kLengthOffset)); 5576 __ Branch(index_out_of_range_, ls, t0, Operand(index_)); 5577 5578 __ sra(index_, index_, kSmiTagSize); 5579 5580 StringCharLoadGenerator::Generate(masm, 5581 object_, 5582 index_, 5583 result_, 5584 &call_runtime_); 5585 5586 __ sll(result_, result_, kSmiTagSize); 5587 __ bind(&exit_); 5588} 5589 5590 5591void StringCharCodeAtGenerator::GenerateSlow( 5592 MacroAssembler* masm, 5593 const RuntimeCallHelper& call_helper) { 5594 __ Abort("Unexpected fallthrough to CharCodeAt slow case"); 5595 5596 // Index is not a smi. 5597 __ bind(&index_not_smi_); 5598 // If index is a heap number, try converting it to an integer. 5599 __ CheckMap(index_, 5600 result_, 5601 Heap::kHeapNumberMapRootIndex, 5602 index_not_number_, 5603 DONT_DO_SMI_CHECK); 5604 call_helper.BeforeCall(masm); 5605 // Consumed by runtime conversion function: 5606 __ Push(object_, index_); 5607 if (index_flags_ == STRING_INDEX_IS_NUMBER) { 5608 __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1); 5609 } else { 5610 ASSERT(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX); 5611 // NumberToSmi discards numbers that are not exact integers. 5612 __ CallRuntime(Runtime::kNumberToSmi, 1); 5613 } 5614 5615 // Save the conversion result before the pop instructions below 5616 // have a chance to overwrite it. 5617 5618 __ Move(index_, v0); 5619 __ pop(object_); 5620 // Reload the instance type. 5621 __ lw(result_, FieldMemOperand(object_, HeapObject::kMapOffset)); 5622 __ lbu(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset)); 5623 call_helper.AfterCall(masm); 5624 // If index is still not a smi, it must be out of range. 5625 __ JumpIfNotSmi(index_, index_out_of_range_); 5626 // Otherwise, return to the fast path. 5627 __ Branch(&got_smi_index_); 5628 5629 // Call runtime. We get here when the receiver is a string and the 5630 // index is a number, but the code of getting the actual character 5631 // is too complex (e.g., when the string needs to be flattened). 
5632 __ bind(&call_runtime_); 5633 call_helper.BeforeCall(masm); 5634 __ sll(index_, index_, kSmiTagSize); 5635 __ Push(object_, index_); 5636 __ CallRuntime(Runtime::kStringCharCodeAt, 2); 5637 5638 __ Move(result_, v0); 5639 5640 call_helper.AfterCall(masm); 5641 __ jmp(&exit_); 5642 5643 __ Abort("Unexpected fallthrough from CharCodeAt slow case"); 5644} 5645 5646 5647// ------------------------------------------------------------------------- 5648// StringCharFromCodeGenerator 5649 5650void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) { 5651 // Fast case of Heap::LookupSingleCharacterStringFromCode. 5652 5653 ASSERT(!t0.is(result_)); 5654 ASSERT(!t0.is(code_)); 5655 5656 STATIC_ASSERT(kSmiTag == 0); 5657 STATIC_ASSERT(kSmiShiftSize == 0); 5658 ASSERT(IsPowerOf2(String::kMaxAsciiCharCode + 1)); 5659 __ And(t0, 5660 code_, 5661 Operand(kSmiTagMask | 5662 ((~String::kMaxAsciiCharCode) << kSmiTagSize))); 5663 __ Branch(&slow_case_, ne, t0, Operand(zero_reg)); 5664 5665 __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex); 5666 // At this point code register contains smi tagged ASCII char code. 5667 STATIC_ASSERT(kSmiTag == 0); 5668 __ sll(t0, code_, kPointerSizeLog2 - kSmiTagSize); 5669 __ Addu(result_, result_, t0); 5670 __ lw(result_, FieldMemOperand(result_, FixedArray::kHeaderSize)); 5671 __ LoadRoot(t0, Heap::kUndefinedValueRootIndex); 5672 __ Branch(&slow_case_, eq, result_, Operand(t0)); 5673 __ bind(&exit_); 5674} 5675 5676 5677void StringCharFromCodeGenerator::GenerateSlow( 5678 MacroAssembler* masm, 5679 const RuntimeCallHelper& call_helper) { 5680 __ Abort("Unexpected fallthrough to CharFromCode slow case"); 5681 5682 __ bind(&slow_case_); 5683 call_helper.BeforeCall(masm); 5684 __ push(code_); 5685 __ CallRuntime(Runtime::kCharFromCode, 1); 5686 __ Move(result_, v0); 5687 5688 call_helper.AfterCall(masm); 5689 __ Branch(&exit_); 5690 5691 __ Abort("Unexpected fallthrough from CharFromCode slow case"); 5692} 5693 5694 5695// ------------------------------------------------------------------------- 5696// StringCharAtGenerator 5697 5698void StringCharAtGenerator::GenerateFast(MacroAssembler* masm) { 5699 char_code_at_generator_.GenerateFast(masm); 5700 char_from_code_generator_.GenerateFast(masm); 5701} 5702 5703 5704void StringCharAtGenerator::GenerateSlow( 5705 MacroAssembler* masm, 5706 const RuntimeCallHelper& call_helper) { 5707 char_code_at_generator_.GenerateSlow(masm, call_helper); 5708 char_from_code_generator_.GenerateSlow(masm, call_helper); 5709} 5710 5711 5712void StringHelper::GenerateCopyCharacters(MacroAssembler* masm, 5713 Register dest, 5714 Register src, 5715 Register count, 5716 Register scratch, 5717 bool ascii) { 5718 Label loop; 5719 Label done; 5720 // This loop just copies one character at a time, as it is only used for 5721 // very short strings. 5722 if (!ascii) { 5723 __ addu(count, count, count); 5724 } 5725 __ Branch(&done, eq, count, Operand(zero_reg)); 5726 __ addu(count, dest, count); // Count now points to the last dest byte. 
5727 5728 __ bind(&loop); 5729 __ lbu(scratch, MemOperand(src)); 5730 __ addiu(src, src, 1); 5731 __ sb(scratch, MemOperand(dest)); 5732 __ addiu(dest, dest, 1); 5733 __ Branch(&loop, lt, dest, Operand(count)); 5734 5735 __ bind(&done); 5736} 5737 5738 5739enum CopyCharactersFlags { 5740 COPY_ASCII = 1, 5741 DEST_ALWAYS_ALIGNED = 2 5742}; 5743 5744 5745void StringHelper::GenerateCopyCharactersLong(MacroAssembler* masm, 5746 Register dest, 5747 Register src, 5748 Register count, 5749 Register scratch1, 5750 Register scratch2, 5751 Register scratch3, 5752 Register scratch4, 5753 Register scratch5, 5754 int flags) { 5755 bool ascii = (flags & COPY_ASCII) != 0; 5756 bool dest_always_aligned = (flags & DEST_ALWAYS_ALIGNED) != 0; 5757 5758 if (dest_always_aligned && FLAG_debug_code) { 5759 // Check that destination is actually word aligned if the flag says 5760 // that it is. 5761 __ And(scratch4, dest, Operand(kPointerAlignmentMask)); 5762 __ Check(eq, 5763 "Destination of copy not aligned.", 5764 scratch4, 5765 Operand(zero_reg)); 5766 } 5767 5768 const int kReadAlignment = 4; 5769 const int kReadAlignmentMask = kReadAlignment - 1; 5770 // Ensure that reading an entire aligned word containing the last character 5771 // of a string will not read outside the allocated area (because we pad up 5772 // to kObjectAlignment). 5773 STATIC_ASSERT(kObjectAlignment >= kReadAlignment); 5774 // Assumes word reads and writes are little endian. 5775 // Nothing to do for zero characters. 5776 Label done; 5777 5778 if (!ascii) { 5779 __ addu(count, count, count); 5780 } 5781 __ Branch(&done, eq, count, Operand(zero_reg)); 5782 5783 Label byte_loop; 5784 // Must copy at least eight bytes, otherwise just do it one byte at a time. 5785 __ Subu(scratch1, count, Operand(8)); 5786 __ Addu(count, dest, Operand(count)); 5787 Register limit = count; // Read until src equals this. 5788 __ Branch(&byte_loop, lt, scratch1, Operand(zero_reg)); 5789 5790 if (!dest_always_aligned) { 5791 // Align dest by byte copying. Copies between zero and three bytes. 5792 __ And(scratch4, dest, Operand(kReadAlignmentMask)); 5793 Label dest_aligned; 5794 __ Branch(&dest_aligned, eq, scratch4, Operand(zero_reg)); 5795 Label aligned_loop; 5796 __ bind(&aligned_loop); 5797 __ lbu(scratch1, MemOperand(src)); 5798 __ addiu(src, src, 1); 5799 __ sb(scratch1, MemOperand(dest)); 5800 __ addiu(dest, dest, 1); 5801 __ addiu(scratch4, scratch4, 1); 5802 __ Branch(&aligned_loop, le, scratch4, Operand(kReadAlignmentMask)); 5803 __ bind(&dest_aligned); 5804 } 5805 5806 Label simple_loop; 5807 5808 __ And(scratch4, src, Operand(kReadAlignmentMask)); 5809 __ Branch(&simple_loop, eq, scratch4, Operand(zero_reg)); 5810 5811 // Loop for src/dst that are not aligned the same way. 5812 // This loop uses lwl and lwr instructions. These instructions 5813 // depend on the endianness, and the implementation assumes little-endian. 5814 { 5815 Label loop; 5816 __ bind(&loop); 5817 __ lwr(scratch1, MemOperand(src)); 5818 __ Addu(src, src, Operand(kReadAlignment)); 5819 __ lwl(scratch1, MemOperand(src, -1)); 5820 __ sw(scratch1, MemOperand(dest)); 5821 __ Addu(dest, dest, Operand(kReadAlignment)); 5822 __ Subu(scratch2, limit, dest); 5823 __ Branch(&loop, ge, scratch2, Operand(kReadAlignment)); 5824 } 5825 5826 __ Branch(&byte_loop); 5827 5828 // Simple loop. 5829 // Copy words from src to dest, until less than four bytes left. 5830 // Both src and dest are word aligned. 
5831 __ bind(&simple_loop);
5832 {
5833 Label loop;
5834 __ bind(&loop);
5835 __ lw(scratch1, MemOperand(src));
5836 __ Addu(src, src, Operand(kReadAlignment));
5837 __ sw(scratch1, MemOperand(dest));
5838 __ Addu(dest, dest, Operand(kReadAlignment));
5839 __ Subu(scratch2, limit, dest);
5840 __ Branch(&loop, ge, scratch2, Operand(kReadAlignment));
5841 }
5842
5843 // Copy bytes from src to dest until dest hits limit.
5844 __ bind(&byte_loop);
5845 // Test if dest has already reached the limit.
5846 __ Branch(&done, ge, dest, Operand(limit));
5847 __ lbu(scratch1, MemOperand(src));
5848 __ addiu(src, src, 1);
5849 __ sb(scratch1, MemOperand(dest));
5850 __ addiu(dest, dest, 1);
5851 __ Branch(&byte_loop);
5852
5853 __ bind(&done);
5854}
5855
5856
5857void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
5858 Register c1,
5859 Register c2,
5860 Register scratch1,
5861 Register scratch2,
5862 Register scratch3,
5863 Register scratch4,
5864 Register scratch5,
5865 Label* not_found) {
5866 // Register scratch3 is the general scratch register in this function.
5867 Register scratch = scratch3;
5868
5869 // Make sure that both characters are not digits, as such strings have a
5870 // different hash algorithm. Don't try to look for these in the symbol table.
5871 Label not_array_index;
5872 __ Subu(scratch, c1, Operand(static_cast<int>('0')));
5873 __ Branch(&not_array_index,
5874 Ugreater,
5875 scratch,
5876 Operand(static_cast<int>('9' - '0')));
5877 __ Subu(scratch, c2, Operand(static_cast<int>('0')));
5878
5879 // If the check failed, combine both characters into a single halfword.
5880 // This is required by the contract of the method: code at the
5881 // not_found branch expects this combination in c1 register.
5882 Label tmp;
5883 __ sll(scratch1, c2, kBitsPerByte);
5884 __ Branch(&tmp, Ugreater, scratch, Operand(static_cast<int>('9' - '0')));
5885 __ Or(c1, c1, scratch1);
5886 __ bind(&tmp);
5887 __ Branch(
5888 not_found, Uless_equal, scratch, Operand(static_cast<int>('9' - '0')));
5889
5890 __ bind(&not_array_index);
5891 // Calculate the two character string hash.
5892 Register hash = scratch1;
5893 StringHelper::GenerateHashInit(masm, hash, c1);
5894 StringHelper::GenerateHashAddCharacter(masm, hash, c2);
5895 StringHelper::GenerateHashGetHash(masm, hash);
5896
5897 // Collect the two characters in a register.
5898 Register chars = c1;
5899 __ sll(scratch, c2, kBitsPerByte);
5900 __ Or(chars, chars, scratch);
5901
5902 // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
5903 // hash: hash of two character string.
5904
5905 // Load symbol table.
5906 // Load address of first element of the symbol table.
5907 Register symbol_table = c2;
5908 __ LoadRoot(symbol_table, Heap::kSymbolTableRootIndex);
5909
5910 Register undefined = scratch4;
5911 __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);
5912
5913 // Calculate capacity mask from the symbol table capacity.
5914 Register mask = scratch2;
5915 __ lw(mask, FieldMemOperand(symbol_table, SymbolTable::kCapacityOffset));
5916 __ sra(mask, mask, 1);
5917 __ Addu(mask, mask, -1);
5918
5919 // Calculate untagged address of the first element of the symbol table.
5920 Register first_symbol_table_element = symbol_table;
5921 __ Addu(first_symbol_table_element, symbol_table,
5922 Operand(SymbolTable::kElementsStartOffset - kHeapObjectTag));
5923
5924 // Registers.
5925 // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
5926 // hash: hash of two character string
5927 // mask: capacity mask
5928 // first_symbol_table_element: address of the first element of
5929 // the symbol table
5930 // undefined: the undefined object
5931 // scratch: -
5932
5933 // Perform a number of probes in the symbol table.
5934 const int kProbes = 4;
5935 Label found_in_symbol_table;
5936 Label next_probe[kProbes];
5937 Register candidate = scratch5; // Scratch register contains candidate.
5938 for (int i = 0; i < kProbes; i++) {
5939 // Calculate entry in symbol table.
5940 if (i > 0) {
5941 __ Addu(candidate, hash, Operand(SymbolTable::GetProbeOffset(i)));
5942 } else {
5943 __ mov(candidate, hash);
5944 }
5945
5946 __ And(candidate, candidate, Operand(mask));
5947
5948 // Load the entry from the symbol table.
5949 STATIC_ASSERT(SymbolTable::kEntrySize == 1);
5950 __ sll(scratch, candidate, kPointerSizeLog2);
5951 __ Addu(scratch, scratch, first_symbol_table_element);
5952 __ lw(candidate, MemOperand(scratch));
5953
5954 // If the entry is undefined, no string with this hash can be found.
5955 Label is_string;
5956 __ GetObjectType(candidate, scratch, scratch);
5957 __ Branch(&is_string, ne, scratch, Operand(ODDBALL_TYPE));
5958
5959 __ Branch(not_found, eq, undefined, Operand(candidate));
5960 // Must be the hole (deleted entry).
5961 if (FLAG_debug_code) {
5962 __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
5963 __ Assert(eq, "oddball in symbol table is not undefined or the hole",
5964 scratch, Operand(candidate));
5965 }
5966 __ jmp(&next_probe[i]);
5967
5968 __ bind(&is_string);
5969
5970 // Check that the candidate is a non-external ASCII string. The instance
5971 // type is still in the scratch register from the GetObjectType
5972 // operation.
5973 __ JumpIfInstanceTypeIsNotSequentialAscii(scratch, scratch, &next_probe[i]);
5974
5975 // If length is not 2 the string is not a candidate.
5976 __ lw(scratch, FieldMemOperand(candidate, String::kLengthOffset));
5977 __ Branch(&next_probe[i], ne, scratch, Operand(Smi::FromInt(2)));
5978
5979 // Check if the two characters match.
5980 // Assumes that word load is little endian.
5981 __ lhu(scratch, FieldMemOperand(candidate, SeqAsciiString::kHeaderSize));
5982 __ Branch(&found_in_symbol_table, eq, chars, Operand(scratch));
5983 __ bind(&next_probe[i]);
5984 }
5985
5986 // No matching 2 character string found by probing.
5987 __ jmp(not_found);
5988
5989 // Scratch register contains result when we fall through to here.
5990 Register result = candidate;
5991 __ bind(&found_in_symbol_table);
5992 __ mov(v0, result);
5993}
5994
5995
5996void StringHelper::GenerateHashInit(MacroAssembler* masm,
5997 Register hash,
5998 Register character) {
5999 // hash = seed + character + ((seed + character) << 10);
6000 __ LoadRoot(hash, Heap::kHashSeedRootIndex);
6001 // Untag smi seed and add the character.
6002 __ SmiUntag(hash); 6003 __ addu(hash, hash, character); 6004 __ sll(at, hash, 10); 6005 __ addu(hash, hash, at); 6006 // hash ^= hash >> 6; 6007 __ srl(at, hash, 6); 6008 __ xor_(hash, hash, at); 6009} 6010 6011 6012void StringHelper::GenerateHashAddCharacter(MacroAssembler* masm, 6013 Register hash, 6014 Register character) { 6015 // hash += character; 6016 __ addu(hash, hash, character); 6017 // hash += hash << 10; 6018 __ sll(at, hash, 10); 6019 __ addu(hash, hash, at); 6020 // hash ^= hash >> 6; 6021 __ srl(at, hash, 6); 6022 __ xor_(hash, hash, at); 6023} 6024 6025 6026void StringHelper::GenerateHashGetHash(MacroAssembler* masm, 6027 Register hash) { 6028 // hash += hash << 3; 6029 __ sll(at, hash, 3); 6030 __ addu(hash, hash, at); 6031 // hash ^= hash >> 11; 6032 __ srl(at, hash, 11); 6033 __ xor_(hash, hash, at); 6034 // hash += hash << 15; 6035 __ sll(at, hash, 15); 6036 __ addu(hash, hash, at); 6037 6038 __ li(at, Operand(String::kHashBitMask)); 6039 __ and_(hash, hash, at); 6040 6041 // if (hash == 0) hash = 27; 6042 __ ori(at, zero_reg, StringHasher::kZeroHash); 6043 __ Movz(hash, at, hash); 6044} 6045 6046 6047void SubStringStub::Generate(MacroAssembler* masm) { 6048 Label runtime; 6049 // Stack frame on entry. 6050 // ra: return address 6051 // sp[0]: to 6052 // sp[4]: from 6053 // sp[8]: string 6054 6055 // This stub is called from the native-call %_SubString(...), so 6056 // nothing can be assumed about the arguments. It is tested that: 6057 // "string" is a sequential string, 6058 // both "from" and "to" are smis, and 6059 // 0 <= from <= to <= string.length. 6060 // If any of these assumptions fail, we call the runtime system. 6061 6062 const int kToOffset = 0 * kPointerSize; 6063 const int kFromOffset = 1 * kPointerSize; 6064 const int kStringOffset = 2 * kPointerSize; 6065 6066 __ lw(a2, MemOperand(sp, kToOffset)); 6067 __ lw(a3, MemOperand(sp, kFromOffset)); 6068 STATIC_ASSERT(kFromOffset == kToOffset + 4); 6069 STATIC_ASSERT(kSmiTag == 0); 6070 STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1); 6071 6072 // Utilize delay slots. SmiUntag doesn't emit a jump, everything else is 6073 // safe in this case. 6074 __ UntagAndJumpIfNotSmi(a2, a2, &runtime); 6075 __ UntagAndJumpIfNotSmi(a3, a3, &runtime); 6076 // Both a2 and a3 are untagged integers. 6077 6078 __ Branch(&runtime, lt, a3, Operand(zero_reg)); // From < 0. 6079 6080 __ Branch(&runtime, gt, a3, Operand(a2)); // Fail if from > to. 6081 __ Subu(a2, a2, a3); 6082 6083 // Make sure first argument is a string. 6084 __ lw(v0, MemOperand(sp, kStringOffset)); 6085 __ JumpIfSmi(v0, &runtime); 6086 __ lw(a1, FieldMemOperand(v0, HeapObject::kMapOffset)); 6087 __ lbu(a1, FieldMemOperand(a1, Map::kInstanceTypeOffset)); 6088 __ And(t0, a1, Operand(kIsNotStringMask)); 6089 6090 __ Branch(&runtime, ne, t0, Operand(zero_reg)); 6091 6092 // Short-cut for the case of trivial substring. 6093 Label return_v0; 6094 // v0: original string 6095 // a2: result string length 6096 __ lw(t0, FieldMemOperand(v0, String::kLengthOffset)); 6097 __ sra(t0, t0, 1); 6098 __ Branch(&return_v0, eq, a2, Operand(t0)); 6099 6100 6101 Label result_longer_than_two; 6102 // Check for special case of two character ASCII string, in which case 6103 // we do a lookup in the symbol table first. 6104 __ li(t0, 2); 6105 __ Branch(&result_longer_than_two, gt, a2, Operand(t0)); 6106 __ Branch(&runtime, lt, a2, Operand(t0)); 6107 6108 __ JumpIfInstanceTypeIsNotSequentialAscii(a1, a1, &runtime); 6109 6110 // Get the two characters forming the sub string. 
6111 __ Addu(v0, v0, Operand(a3));
6112 __ lbu(a3, FieldMemOperand(v0, SeqAsciiString::kHeaderSize));
6113 __ lbu(t0, FieldMemOperand(v0, SeqAsciiString::kHeaderSize + 1));
6114
6115 // Try to look up the two character string in the symbol table.
6116 Label make_two_character_string;
6117 StringHelper::GenerateTwoCharacterSymbolTableProbe(
6118 masm, a3, t0, a1, t1, t2, t3, t4, &make_two_character_string);
6119 __ jmp(&return_v0);
6120
6121 // a2: result string length.
6122 // a3: two characters combined into halfword in little endian byte order.
6123 __ bind(&make_two_character_string);
6124 __ AllocateAsciiString(v0, a2, t0, t1, t4, &runtime);
6125 __ sh(a3, FieldMemOperand(v0, SeqAsciiString::kHeaderSize));
6126 __ jmp(&return_v0);
6127
6128 __ bind(&result_longer_than_two);
6129
6130 // Deal with different string types: update the index if necessary
6131 // and put the underlying string into t1.
6132 // v0: original string
6133 // a1: instance type
6134 // a2: length
6135 // a3: from index (untagged)
6136 Label underlying_unpacked, sliced_string, seq_or_external_string;
6137 // If the string is not indirect, it can only be sequential or external.
6138 STATIC_ASSERT(kIsIndirectStringMask == (kSlicedStringTag & kConsStringTag));
6139 STATIC_ASSERT(kIsIndirectStringMask != 0);
6140 __ And(t0, a1, Operand(kIsIndirectStringMask));
6141 __ Branch(USE_DELAY_SLOT, &seq_or_external_string, eq, t0, Operand(zero_reg));
6142 // t0 is used as a scratch register and can be overwritten in either case.
6143 __ And(t0, a1, Operand(kSlicedNotConsMask));
6144 __ Branch(&sliced_string, ne, t0, Operand(zero_reg));
6145 // Cons string. Check whether it is flat, then fetch first part.
6146 __ lw(t1, FieldMemOperand(v0, ConsString::kSecondOffset));
6147 __ LoadRoot(t0, Heap::kEmptyStringRootIndex);
6148 __ Branch(&runtime, ne, t1, Operand(t0));
6149 __ lw(t1, FieldMemOperand(v0, ConsString::kFirstOffset));
6150 // Update instance type.
6151 __ lw(a1, FieldMemOperand(t1, HeapObject::kMapOffset));
6152 __ lbu(a1, FieldMemOperand(a1, Map::kInstanceTypeOffset));
6153 __ jmp(&underlying_unpacked);
6154
6155 __ bind(&sliced_string);
6156 // Sliced string. Fetch parent and correct start index by offset.
6157 __ lw(t1, FieldMemOperand(v0, SlicedString::kParentOffset));
6158 __ lw(t0, FieldMemOperand(v0, SlicedString::kOffsetOffset));
6159 __ sra(t0, t0, 1); // Add offset to index.
6160 __ Addu(a3, a3, t0);
6161 // Update instance type.
6162 __ lw(a1, FieldMemOperand(t1, HeapObject::kMapOffset));
6163 __ lbu(a1, FieldMemOperand(a1, Map::kInstanceTypeOffset));
6164 __ jmp(&underlying_unpacked);
6165
6166 __ bind(&seq_or_external_string);
6167 // Sequential or external string. Just move string to the expected register.
6168 __ mov(t1, v0);
6169
6170 __ bind(&underlying_unpacked);
6171
6172 if (FLAG_string_slices) {
6173 Label copy_routine;
6174 // t1: underlying subject string
6175 // a1: instance type of underlying subject string
6176 // a2: length
6177 // a3: adjusted start index (untagged)
6178 // Short slice. Copy instead of slicing.
6179 __ Branch(&copy_routine, lt, a2, Operand(SlicedString::kMinLength));
6180 // Allocate new sliced string. At this point we do not reload the instance
6181 // type including the string encoding because we simply rely on the info
6182 // provided by the original string. It does not matter if the original
6183 // string's encoding is wrong because we always have to recheck encoding of
6184 // the newly created string's parent anyway due to externalized strings.
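 // (Illustrative, not emitted code: the slice written below is conceptually
 //   SlicedString { parent: t1, offset: a3 << 1 (as smi), length: a2 }
 // with map, length and hash filled in by the allocation routine; no
 // characters are copied, the result simply points into the parent.)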
6185 Label two_byte_slice, set_slice_header;
6186 STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0);
6187 STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
6188 __ And(t0, a1, Operand(kStringEncodingMask));
6189 __ Branch(&two_byte_slice, eq, t0, Operand(zero_reg));
6190 __ AllocateAsciiSlicedString(v0, a2, t2, t3, &runtime);
6191 __ jmp(&set_slice_header);
6192 __ bind(&two_byte_slice);
6193 __ AllocateTwoByteSlicedString(v0, a2, t2, t3, &runtime);
6194 __ bind(&set_slice_header);
6195 __ sll(a3, a3, 1);
6196 __ sw(t1, FieldMemOperand(v0, SlicedString::kParentOffset));
6197 __ sw(a3, FieldMemOperand(v0, SlicedString::kOffsetOffset));
6198 __ jmp(&return_v0);
6199
6200 __ bind(&copy_routine);
6201 }
6202
6203 // t1: underlying subject string
6204 // a1: instance type of underlying subject string
6205 // a2: length
6206 // a3: adjusted start index (untagged)
6207 Label two_byte_sequential, sequential_string, allocate_result;
6208 STATIC_ASSERT(kExternalStringTag != 0);
6209 STATIC_ASSERT(kSeqStringTag == 0);
6210 __ And(t0, a1, Operand(kExternalStringTag));
6211 __ Branch(&sequential_string, eq, t0, Operand(zero_reg));
6212
6213 // Handle external string.
6214 // Rule out short external strings.
6215 STATIC_CHECK(kShortExternalStringTag != 0);
6216 __ And(t0, a1, Operand(kShortExternalStringTag));
6217 __ Branch(&runtime, ne, t0, Operand(zero_reg));
6218 __ lw(t1, FieldMemOperand(t1, ExternalString::kResourceDataOffset));
6219 // t1 already points to the first character of underlying string.
6220 __ jmp(&allocate_result);
6221
6222 __ bind(&sequential_string);
6223 // Locate first character of underlying subject string.
6224 STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize);
6225 __ Addu(t1, t1, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
6226
6227 __ bind(&allocate_result);
6228 // Allocate the result string: ASCII or two-byte based on the encoding.
6229 STATIC_ASSERT((kAsciiStringTag & kStringEncodingMask) != 0);
6230 __ And(t0, a1, Operand(kStringEncodingMask));
6231 __ Branch(&two_byte_sequential, eq, t0, Operand(zero_reg));
6232
6233 // Allocate and copy the resulting ASCII string.
6234 __ AllocateAsciiString(v0, a2, t0, t2, t3, &runtime);
6235
6236 // Locate first character of substring to copy.
6237 __ Addu(t1, t1, a3);
6238
6239 // Locate first character of result.
6240 __ Addu(a1, v0, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
6241
6242 // v0: result string
6243 // a1: first character of result string
6244 // a2: result string length
6245 // t1: first character of substring to copy
6246 STATIC_ASSERT((SeqAsciiString::kHeaderSize & kObjectAlignmentMask) == 0);
6247 StringHelper::GenerateCopyCharactersLong(
6248 masm, a1, t1, a2, a3, t0, t2, t3, t4, COPY_ASCII | DEST_ALWAYS_ALIGNED);
6249 __ jmp(&return_v0);
6250
6251 // Allocate and copy the resulting two-byte string.
6252 __ bind(&two_byte_sequential);
6253 __ AllocateTwoByteString(v0, a2, t0, t2, t3, &runtime);
6254
6255 // Locate first character of substring to copy.
6256 STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
6257 __ sll(t0, a3, 1);
6258 __ Addu(t1, t1, t0);
6259 // Locate first character of result.
6260 __ Addu(a1, v0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
6261
6262 // v0: result string.
6263 // a1: first character of result.
6264 // a2: result length.
6265 // t1: first character of substring to copy.
6266 STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
6267 StringHelper::GenerateCopyCharactersLong(
6268 masm, a1, t1, a2, a3, t0, t2, t3, t4, DEST_ALWAYS_ALIGNED);
6269
6270 __ bind(&return_v0);
6271 Counters* counters = masm->isolate()->counters();
6272 __ IncrementCounter(counters->sub_string_native(), 1, a3, t0);
6273 __ DropAndRet(3);
6274
6275 // Just jump to runtime to create the sub string.
6276 __ bind(&runtime);
6277 __ TailCallRuntime(Runtime::kSubString, 3, 1);
6278}
6279
6280
6281void StringCompareStub::GenerateFlatAsciiStringEquals(MacroAssembler* masm,
6282 Register left,
6283 Register right,
6284 Register scratch1,
6285 Register scratch2,
6286 Register scratch3) {
6287 Register length = scratch1;
6288
6289 // Compare lengths.
6290 Label strings_not_equal, check_zero_length;
6291 __ lw(length, FieldMemOperand(left, String::kLengthOffset));
6292 __ lw(scratch2, FieldMemOperand(right, String::kLengthOffset));
6293 __ Branch(&check_zero_length, eq, length, Operand(scratch2));
6294 __ bind(&strings_not_equal);
6295 __ li(v0, Operand(Smi::FromInt(NOT_EQUAL)));
6296 __ Ret();
6297
6298 // Check if the length is zero.
6299 Label compare_chars;
6300 __ bind(&check_zero_length);
6301 STATIC_ASSERT(kSmiTag == 0);
6302 __ Branch(&compare_chars, ne, length, Operand(zero_reg));
6303 __ li(v0, Operand(Smi::FromInt(EQUAL)));
6304 __ Ret();
6305
6306 // Compare characters.
6307 __ bind(&compare_chars);
6308
6309 GenerateAsciiCharsCompareLoop(masm,
6310 left, right, length, scratch2, scratch3, v0,
6311 &strings_not_equal);
6312
6313 // Characters are equal.
6314 __ li(v0, Operand(Smi::FromInt(EQUAL)));
6315 __ Ret();
6316}
6317
6318
6319void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
6320 Register left,
6321 Register right,
6322 Register scratch1,
6323 Register scratch2,
6324 Register scratch3,
6325 Register scratch4) {
6326 Label result_not_equal, compare_lengths;
6327 // Find minimum length and length difference.
6328 __ lw(scratch1, FieldMemOperand(left, String::kLengthOffset));
6329 __ lw(scratch2, FieldMemOperand(right, String::kLengthOffset));
6330 __ Subu(scratch3, scratch1, Operand(scratch2));
6331 Register length_delta = scratch3;
6332 __ slt(scratch4, scratch2, scratch1);
6333 __ Movn(scratch1, scratch2, scratch4);
6334 Register min_length = scratch1;
6335 STATIC_ASSERT(kSmiTag == 0);
6336 __ Branch(&compare_lengths, eq, min_length, Operand(zero_reg));
6337
6338 // Compare loop.
6339 GenerateAsciiCharsCompareLoop(masm,
6340 left, right, min_length, scratch2, scratch4, v0,
6341 &result_not_equal);
6342
6343 // Compare lengths - strings up to min-length are equal.
6344 __ bind(&compare_lengths);
6345 ASSERT(Smi::FromInt(EQUAL) == static_cast<Smi*>(0));
6346 // Use length_delta as result if it's zero.
6347 __ mov(scratch2, length_delta);
6348 __ mov(scratch4, zero_reg);
6349 __ mov(v0, zero_reg);
6350
6351 __ bind(&result_not_equal);
6352 // Conditionally update the result based either on length_delta or
6353 // the last comparison performed in the loop above.
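 // (Worked example, not emitted code: comparing "abd" with "abc", the loop
 // exits with the differing characters 'd' in scratch2 and 'c' in scratch4,
 // so GREATER is returned; comparing "ab" with "abc", the loop completes
 // and the negative length_delta yields LESS.)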
6354 Label ret;
6355 __ Branch(&ret, eq, scratch2, Operand(scratch4));
6356 __ li(v0, Operand(Smi::FromInt(GREATER)));
6357 __ Branch(&ret, gt, scratch2, Operand(scratch4));
6358 __ li(v0, Operand(Smi::FromInt(LESS)));
6359 __ bind(&ret);
6360 __ Ret();
6361}
6362
6363
6364void StringCompareStub::GenerateAsciiCharsCompareLoop(
6365 MacroAssembler* masm,
6366 Register left,
6367 Register right,
6368 Register length,
6369 Register scratch1,
6370 Register scratch2,
6371 Register scratch3,
6372 Label* chars_not_equal) {
6373 // Change index to run from -length to -1 by adding length to string
6374 // start. This means that loop ends when index reaches zero, which
6375 // doesn't need an additional compare.
6376 __ SmiUntag(length);
6377 __ Addu(scratch1, length,
6378 Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
6379 __ Addu(left, left, Operand(scratch1));
6380 __ Addu(right, right, Operand(scratch1));
6381 __ Subu(length, zero_reg, length);
6382 Register index = length; // index = -length;
6383
6384
6385 // Compare loop.
6386 Label loop;
6387 __ bind(&loop);
6388 __ Addu(scratch3, left, index);
6389 __ lbu(scratch1, MemOperand(scratch3));
6390 __ Addu(scratch3, right, index);
6391 __ lbu(scratch2, MemOperand(scratch3));
6392 __ Branch(chars_not_equal, ne, scratch1, Operand(scratch2));
6393 __ Addu(index, index, 1);
6394 __ Branch(&loop, ne, index, Operand(zero_reg));
6395}
6396
6397
6398void StringCompareStub::Generate(MacroAssembler* masm) {
6399 Label runtime;
6400
6401 Counters* counters = masm->isolate()->counters();
6402
6403 // Stack frame on entry.
6404 // sp[0]: right string
6405 // sp[4]: left string
6406 __ lw(a1, MemOperand(sp, 1 * kPointerSize)); // Left.
6407 __ lw(a0, MemOperand(sp, 0 * kPointerSize)); // Right.
6408
6409 Label not_same;
6410 __ Branch(&not_same, ne, a0, Operand(a1));
6411 STATIC_ASSERT(EQUAL == 0);
6412 STATIC_ASSERT(kSmiTag == 0);
6413 __ li(v0, Operand(Smi::FromInt(EQUAL)));
6414 __ IncrementCounter(counters->string_compare_native(), 1, a1, a2);
6415 __ DropAndRet(2);
6416
6417 __ bind(&not_same);
6418
6419 // Check that both objects are sequential ASCII strings.
6420 __ JumpIfNotBothSequentialAsciiStrings(a1, a0, a2, a3, &runtime);
6421
6422 // Compare flat ASCII strings natively. Remove arguments from stack first.
6423 __ IncrementCounter(counters->string_compare_native(), 1, a2, a3);
6424 __ Addu(sp, sp, Operand(2 * kPointerSize));
6425 GenerateCompareFlatAsciiStrings(masm, a1, a0, a2, a3, t0, t1);
6426
6427 __ bind(&runtime);
6428 __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
6429}
6430
6431
6432void StringAddStub::Generate(MacroAssembler* masm) {
6433 Label call_runtime, call_builtin;
6434 Builtins::JavaScript builtin_id = Builtins::ADD;
6435
6436 Counters* counters = masm->isolate()->counters();
6437
6438 // Stack on entry:
6439 // sp[0]: second argument (right).
6440 // sp[4]: first argument (left).
6441
6442 // Load the two arguments.
6443 __ lw(a0, MemOperand(sp, 1 * kPointerSize)); // First argument.
6444 __ lw(a1, MemOperand(sp, 0 * kPointerSize)); // Second argument.
6445
6446 // Make sure that both arguments are strings if not known in advance.
6447 if (flags_ == NO_STRING_ADD_FLAGS) {
6448 __ JumpIfEitherSmi(a0, a1, &call_runtime);
6449 // Load instance types.
6450 __ lw(t0, FieldMemOperand(a0, HeapObject::kMapOffset));
6451 __ lw(t1, FieldMemOperand(a1, HeapObject::kMapOffset));
6452 __ lbu(t0, FieldMemOperand(t0, Map::kInstanceTypeOffset));
6453 __ lbu(t1, FieldMemOperand(t1, Map::kInstanceTypeOffset));
6454 STATIC_ASSERT(kStringTag == 0);
6455 // If either is not a string, go to runtime.
6456 __ Or(t4, t0, Operand(t1));
6457 __ And(t4, t4, Operand(kIsNotStringMask));
6458 __ Branch(&call_runtime, ne, t4, Operand(zero_reg));
6459 } else {
6460 // Here at least one of the arguments is definitely a string.
6461 // We convert the one that is not known to be a string.
6462 if ((flags_ & NO_STRING_CHECK_LEFT_IN_STUB) == 0) {
6463 ASSERT((flags_ & NO_STRING_CHECK_RIGHT_IN_STUB) != 0);
6464 GenerateConvertArgument(
6465 masm, 1 * kPointerSize, a0, a2, a3, t0, t1, &call_builtin);
6466 builtin_id = Builtins::STRING_ADD_RIGHT;
6467 } else if ((flags_ & NO_STRING_CHECK_RIGHT_IN_STUB) == 0) {
6468 ASSERT((flags_ & NO_STRING_CHECK_LEFT_IN_STUB) != 0);
6469 GenerateConvertArgument(
6470 masm, 0 * kPointerSize, a1, a2, a3, t0, t1, &call_builtin);
6471 builtin_id = Builtins::STRING_ADD_LEFT;
6472 }
6473 }
6474
6475 // Both arguments are strings.
6476 // a0: first string
6477 // a1: second string
6478 // t0: first string instance type (if flags_ == NO_STRING_ADD_FLAGS)
6479 // t1: second string instance type (if flags_ == NO_STRING_ADD_FLAGS)
6480 {
6481 Label strings_not_empty;
6482 // Check if either of the strings is empty. In that case return the other.
6483 // These tests use a zero-length check on the string length, which is a
6484 // smi. Assert that Smi::FromInt(0) is really 0.
6485 STATIC_ASSERT(kSmiTag == 0);
6486 ASSERT(Smi::FromInt(0) == 0);
6487 __ lw(a2, FieldMemOperand(a0, String::kLengthOffset));
6488 __ lw(a3, FieldMemOperand(a1, String::kLengthOffset));
6489 __ mov(v0, a0); // Assume we'll return first string (from a0).
6490 __ Movz(v0, a1, a2); // If first is empty, return second (from a1).
6491 __ slt(t4, zero_reg, a2); // if (a2 > 0) t4 = 1.
6492 __ slt(t5, zero_reg, a3); // if (a3 > 0) t5 = 1.
6493 __ and_(t4, t4, t5); // Branch if both strings were non-empty.
6494 __ Branch(&strings_not_empty, ne, t4, Operand(zero_reg));
6495
6496 __ IncrementCounter(counters->string_add_native(), 1, a2, a3);
6497 __ DropAndRet(2);
6498
6499 __ bind(&strings_not_empty);
6500 }
6501
6502 // Untag both string-lengths.
6503 __ sra(a2, a2, kSmiTagSize);
6504 __ sra(a3, a3, kSmiTagSize);
6505
6506 // Both strings are non-empty.
6507 // a0: first string
6508 // a1: second string
6509 // a2: length of first string
6510 // a3: length of second string
6511 // t0: first string instance type (if flags_ == NO_STRING_ADD_FLAGS)
6512 // t1: second string instance type (if flags_ == NO_STRING_ADD_FLAGS)
6513 // Look at the length of the result of adding the two strings.
6514 Label string_add_flat_result, longer_than_two;
6515 // Adding two lengths can't overflow.
6516 STATIC_ASSERT(String::kMaxLength < String::kMaxLength * 2);
6517 __ Addu(t2, a2, Operand(a3));
6518 // Use the symbol table when adding two one-character strings, as it
6519 // helps later optimizations to return a symbol here.
6520 __ Branch(&longer_than_two, ne, t2, Operand(2));
6521
6522 // Check that both strings are non-external ASCII strings.
6523  if (flags_ != NO_STRING_ADD_FLAGS) {
6524    __ lw(t0, FieldMemOperand(a0, HeapObject::kMapOffset));
6525    __ lw(t1, FieldMemOperand(a1, HeapObject::kMapOffset));
6526    __ lbu(t0, FieldMemOperand(t0, Map::kInstanceTypeOffset));
6527    __ lbu(t1, FieldMemOperand(t1, Map::kInstanceTypeOffset));
6528  }
6529  __ JumpIfBothInstanceTypesAreNotSequentialAscii(t0, t1, t2, t3,
6530                                                  &call_runtime);
6531
6532  // Get the two characters forming the sub string.
6533  __ lbu(a2, FieldMemOperand(a0, SeqAsciiString::kHeaderSize));
6534  __ lbu(a3, FieldMemOperand(a1, SeqAsciiString::kHeaderSize));
6535
6536  // Try to look up the two-character string in the symbol table. If it is
6537  // not found, just allocate a new one.
6538  Label make_two_character_string;
6539  StringHelper::GenerateTwoCharacterSymbolTableProbe(
6540      masm, a2, a3, t2, t3, t0, t1, t5, &make_two_character_string);
6541  __ IncrementCounter(counters->string_add_native(), 1, a2, a3);
6542  __ DropAndRet(2);
6543
6544  __ bind(&make_two_character_string);
6545  // The resulting string has length 2, and the first characters of the
6546  // two input strings are combined into a single halfword in register a2.
6547  // This lets us fill the resulting string with a single halfword store
6548  // instruction instead of two loops (which assumes that the processor is
6549  // in little-endian mode).
6550  __ li(t2, Operand(2));
6551  __ AllocateAsciiString(v0, t2, t0, t1, t5, &call_runtime);
6552  __ sh(a2, FieldMemOperand(v0, SeqAsciiString::kHeaderSize));
6553  __ IncrementCounter(counters->string_add_native(), 1, a2, a3);
6554  __ DropAndRet(2);
6555
6556  __ bind(&longer_than_two);
6557  // Check if resulting string will be flat.
6558  __ Branch(&string_add_flat_result, lt, t2, Operand(ConsString::kMinLength));
6559  // Handle exceptionally long strings in the runtime system.
6560  STATIC_ASSERT((String::kMaxLength & 0x80000000) == 0);
6561  ASSERT(IsPowerOf2(String::kMaxLength + 1));
6562  // kMaxLength + 1 is representable as a shifted literal, kMaxLength is not.
6563  __ Branch(&call_runtime, hs, t2, Operand(String::kMaxLength + 1));
6564
6565  // If result is not supposed to be flat, allocate a cons string object.
6566  // If both strings are ASCII the result is an ASCII cons string.
6567  if (flags_ != NO_STRING_ADD_FLAGS) {
6568    __ lw(t0, FieldMemOperand(a0, HeapObject::kMapOffset));
6569    __ lw(t1, FieldMemOperand(a1, HeapObject::kMapOffset));
6570    __ lbu(t0, FieldMemOperand(t0, Map::kInstanceTypeOffset));
6571    __ lbu(t1, FieldMemOperand(t1, Map::kInstanceTypeOffset));
6572  }
6573  Label non_ascii, allocated, ascii_data;
6574  STATIC_ASSERT(kTwoByteStringTag == 0);
6575  // Branch to non_ascii if either string-encoding field is zero (non-ASCII).
6576  __ And(t4, t0, Operand(t1));
6577  __ And(t4, t4, Operand(kStringEncodingMask));
6578  __ Branch(&non_ascii, eq, t4, Operand(zero_reg));
6579
6580  // Allocate an ASCII cons string.
6581  __ bind(&ascii_data);
6582  __ AllocateAsciiConsString(v0, t2, t0, t1, &call_runtime);
6583  __ bind(&allocated);
6584  // Fill the fields of the cons string.
6585  __ sw(a0, FieldMemOperand(v0, ConsString::kFirstOffset));
6586  __ sw(a1, FieldMemOperand(v0, ConsString::kSecondOffset));
6587  __ IncrementCounter(counters->string_add_native(), 1, a2, a3);
6588  __ DropAndRet(2);
6589
6590  __ bind(&non_ascii);
6591  // At least one of the strings is two-byte. Check whether it happens
6592  // to contain only ASCII characters.
6593  // t0: first instance type.
6594  // t1: second instance type.
6595  // Branch to ascii_data if _both_ instances have kAsciiDataHintMask set.
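  // The hint bit marks two-byte strings that are known to contain only
  // ASCII data. In rough C-like terms the two tests below are (a sketch of
  // the emitted logic, not actual V8 code):
  //
  //   if (first_type & second_type & kAsciiDataHintMask) goto ascii_data;
  //   if (((first_type ^ second_type) & (kAsciiStringTag | kAsciiDataHintTag))
  //       == (kAsciiStringTag | kAsciiDataHintTag))
  //     goto ascii_data;  // One ASCII string, one ASCII-hinted two-byte one.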
6596  __ And(at, t0, Operand(kAsciiDataHintMask));
6597  __ and_(at, at, t1);
6598  __ Branch(&ascii_data, ne, at, Operand(zero_reg));
6599
6600  __ xor_(t0, t0, t1);
6601  STATIC_ASSERT(kAsciiStringTag != 0 && kAsciiDataHintTag != 0);
6602  __ And(t0, t0, Operand(kAsciiStringTag | kAsciiDataHintTag));
6603  __ Branch(&ascii_data, eq, t0, Operand(kAsciiStringTag | kAsciiDataHintTag));
6604
6605  // Allocate a two-byte cons string.
6606  __ AllocateTwoByteConsString(v0, t2, t0, t1, &call_runtime);
6607  __ Branch(&allocated);
6608
6609  // We cannot encounter sliced strings or cons strings here since:
6610  STATIC_ASSERT(SlicedString::kMinLength >= ConsString::kMinLength);
6611  // Handle creating a flat result from either external or sequential strings.
6612  // Locate the first characters' locations.
6613  // a0: first string
6614  // a1: second string
6615  // a2: length of first string
6616  // a3: length of second string
6617  // t0: first string instance type (if flags_ == NO_STRING_ADD_FLAGS)
6618  // t1: second string instance type (if flags_ == NO_STRING_ADD_FLAGS)
6619  // t2: sum of lengths.
6620  Label first_prepared, second_prepared;
6621  __ bind(&string_add_flat_result);
6622  if (flags_ != NO_STRING_ADD_FLAGS) {
6623    __ lw(t0, FieldMemOperand(a0, HeapObject::kMapOffset));
6624    __ lw(t1, FieldMemOperand(a1, HeapObject::kMapOffset));
6625    __ lbu(t0, FieldMemOperand(t0, Map::kInstanceTypeOffset));
6626    __ lbu(t1, FieldMemOperand(t1, Map::kInstanceTypeOffset));
6627  }
6628  // Check whether both strings have the same encoding.
6629  __ Xor(t3, t0, Operand(t1));
6630  __ And(t3, t3, Operand(kStringEncodingMask));
6631  __ Branch(&call_runtime, ne, t3, Operand(zero_reg));
6632
6633  STATIC_ASSERT(kSeqStringTag == 0);
6634  __ And(t4, t0, Operand(kStringRepresentationMask));
6635
6636  STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize);
6637  Label skip_first_add;
6638  __ Branch(&skip_first_add, ne, t4, Operand(zero_reg));
6639  __ Branch(USE_DELAY_SLOT, &first_prepared);
6640  __ addiu(t3, a0, SeqAsciiString::kHeaderSize - kHeapObjectTag);
6641  __ bind(&skip_first_add);
6642  // External string: rule out short external string and load string resource.
6643  STATIC_ASSERT(kShortExternalStringTag != 0);
6644  __ And(t4, t0, Operand(kShortExternalStringMask));
6645  __ Branch(&call_runtime, ne, t4, Operand(zero_reg));
6646  __ lw(t3, FieldMemOperand(a0, ExternalString::kResourceDataOffset));
6647  __ bind(&first_prepared);
6648
6649  STATIC_ASSERT(kSeqStringTag == 0);
6650  __ And(t4, t1, Operand(kStringRepresentationMask));
6651  STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize);
6652  Label skip_second_add;
6653  __ Branch(&skip_second_add, ne, t4, Operand(zero_reg));
6654  __ Branch(USE_DELAY_SLOT, &second_prepared);
6655  __ addiu(a1, a1, SeqAsciiString::kHeaderSize - kHeapObjectTag);
6656  __ bind(&skip_second_add);
6657  // External string: rule out short external string and load string resource.
6658  STATIC_ASSERT(kShortExternalStringTag != 0);
6659  __ And(t4, t1, Operand(kShortExternalStringMask));
6660  __ Branch(&call_runtime, ne, t4, Operand(zero_reg));
6661  __ lw(a1, FieldMemOperand(a1, ExternalString::kResourceDataOffset));
6662  __ bind(&second_prepared);
6663
6664  Label non_ascii_string_add_flat_result;
6665  // t3: first character of first string
6666  // a1: first character of second string
6667  // a2: length of first string
6668  // a3: length of second string
6669  // t2: sum of lengths.
6670  // Both strings have the same encoding.
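  // Both encodings are known to be equal at this point, so testing the
  // second string's encoding bit below (with kTwoByteStringTag == 0) decides
  // the encoding of the flat result for both inputs.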
6671  STATIC_ASSERT(kTwoByteStringTag == 0);
6672  __ And(t4, t1, Operand(kStringEncodingMask));
6673  __ Branch(&non_ascii_string_add_flat_result, eq, t4, Operand(zero_reg));
6674
6675  __ AllocateAsciiString(v0, t2, t0, t1, t5, &call_runtime);
6676  __ Addu(t2, v0, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
6677  // v0: result string.
6678  // t3: first character of first string.
6679  // a1: first character of second string
6680  // a2: length of first string.
6681  // a3: length of second string.
6682  // t2: first character of result.
6683
6684  StringHelper::GenerateCopyCharacters(masm, t2, t3, a2, t0, true);
6685  // t2: next character of result.
6686  StringHelper::GenerateCopyCharacters(masm, t2, a1, a3, t0, true);
6687  __ IncrementCounter(counters->string_add_native(), 1, a2, a3);
6688  __ DropAndRet(2);
6689
6690  __ bind(&non_ascii_string_add_flat_result);
6691  __ AllocateTwoByteString(v0, t2, t0, t1, t5, &call_runtime);
6692  __ Addu(t2, v0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
6693  // v0: result string.
6694  // t3: first character of first string.
6695  // a1: first character of second string.
6696  // a2: length of first string.
6697  // a3: length of second string.
6698  // t2: first character of result.
6699  StringHelper::GenerateCopyCharacters(masm, t2, t3, a2, t0, false);
6700  // t2: next character of result.
6701  StringHelper::GenerateCopyCharacters(masm, t2, a1, a3, t0, false);
6702
6703  __ IncrementCounter(counters->string_add_native(), 1, a2, a3);
6704  __ DropAndRet(2);
6705
6706  // Just jump to runtime to add the two strings.
6707  __ bind(&call_runtime);
6708  __ TailCallRuntime(Runtime::kStringAdd, 2, 1);
6709
6710  if (call_builtin.is_linked()) {
6711    __ bind(&call_builtin);
6712    __ InvokeBuiltin(builtin_id, JUMP_FUNCTION);
6713  }
6714}
6715
6716
6717void StringAddStub::GenerateConvertArgument(MacroAssembler* masm,
6718                                            int stack_offset,
6719                                            Register arg,
6720                                            Register scratch1,
6721                                            Register scratch2,
6722                                            Register scratch3,
6723                                            Register scratch4,
6724                                            Label* slow) {
6725  // First check if the argument is already a string.
6726  Label not_string, done;
6727  __ JumpIfSmi(arg, &not_string);
6728  __ GetObjectType(arg, scratch1, scratch1);
6729  __ Branch(&done, lt, scratch1, Operand(FIRST_NONSTRING_TYPE));
6730
6731  // Check the number to string cache.
6732  Label not_cached;
6733  __ bind(&not_string);
6734  // Puts the cached result into scratch1.
6735  NumberToStringStub::GenerateLookupNumberStringCache(masm,
6736                                                      arg,
6737                                                      scratch1,
6738                                                      scratch2,
6739                                                      scratch3,
6740                                                      scratch4,
6741                                                      false,
6742                                                      &not_cached);
6743  __ mov(arg, scratch1);
6744  __ sw(arg, MemOperand(sp, stack_offset));
6745  __ jmp(&done);
6746
6747  // Check if the argument is a safe string wrapper.
6748  __ bind(&not_cached);
6749  __ JumpIfSmi(arg, slow);
6750  __ GetObjectType(arg, scratch1, scratch2);  // map -> scratch1.
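  // Only a JSValue wrapper may be unwrapped here, and only when its map
  // still has the kStringWrapperSafeForDefaultValueOf bit set (checked
  // below), i.e. when the wrapper's default valueOf behaviour is known to
  // be intact; everything else goes to the slow path.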
6751  __ Branch(slow, ne, scratch2, Operand(JS_VALUE_TYPE));
6752  __ lbu(scratch2, FieldMemOperand(scratch1, Map::kBitField2Offset));
6753  __ li(scratch4, 1 << Map::kStringWrapperSafeForDefaultValueOf);
6754  __ And(scratch2, scratch2, scratch4);
6755  __ Branch(slow, ne, scratch2, Operand(scratch4));
6756  __ lw(arg, FieldMemOperand(arg, JSValue::kValueOffset));
6757  __ sw(arg, MemOperand(sp, stack_offset));
6758
6759  __ bind(&done);
6760}
6761
6762
6763void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
6764  ASSERT(state_ == CompareIC::SMIS);
6765  Label miss;
6766  __ Or(a2, a1, a0);
6767  __ JumpIfNotSmi(a2, &miss);
6768
6769  if (GetCondition() == eq) {
6770    // For equality we do not care about the sign of the result.
6771    __ Subu(v0, a0, a1);
6772  } else {
6773    // Untag before subtracting to avoid handling overflow.
6774    __ SmiUntag(a1);
6775    __ SmiUntag(a0);
6776    __ Subu(v0, a1, a0);
6777  }
6778  __ Ret();
6779
6780  __ bind(&miss);
6781  GenerateMiss(masm);
6782}
6783
6784
6785void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) {
6786  ASSERT(state_ == CompareIC::HEAP_NUMBERS);
6787
6788  Label generic_stub;
6789  Label unordered, maybe_undefined1, maybe_undefined2;
6790  Label miss;
6791  __ And(a2, a1, Operand(a0));
6792  __ JumpIfSmi(a2, &generic_stub);
6793
6794  __ GetObjectType(a0, a2, a2);
6795  __ Branch(&maybe_undefined1, ne, a2, Operand(HEAP_NUMBER_TYPE));
6796  __ GetObjectType(a1, a2, a2);
6797  __ Branch(&maybe_undefined2, ne, a2, Operand(HEAP_NUMBER_TYPE));
6798
6799  // Inline the double comparison and fall back to the general compare
6800  // stub if NaN is involved or the FPU is unsupported.
6801  if (CpuFeatures::IsSupported(FPU)) {
6802    CpuFeatures::Scope scope(FPU);
6803
6804    // Load left and right operand.
6805    __ Subu(a2, a1, Operand(kHeapObjectTag));
6806    __ ldc1(f0, MemOperand(a2, HeapNumber::kValueOffset));
6807    __ Subu(a2, a0, Operand(kHeapObjectTag));
6808    __ ldc1(f2, MemOperand(a2, HeapNumber::kValueOffset));
6809
6810    // Return a result of -1, 0, or 1, or use CompareStub for NaNs.
6811    Label fpu_eq, fpu_lt;
6812    // Test if equal, and also handle the unordered/NaN case.
6813    __ BranchF(&fpu_eq, &unordered, eq, f0, f2);
6814
6815    // Test if less (unordered case is already handled).
6816    __ BranchF(&fpu_lt, NULL, lt, f0, f2);
6817
6818    // Otherwise it's greater, so just fall through and return.
6819    __ li(v0, Operand(GREATER));
6820    __ Ret();
6821
6822    __ bind(&fpu_eq);
6823    __ li(v0, Operand(EQUAL));
6824    __ Ret();
6825
6826    __ bind(&fpu_lt);
6827    __ li(v0, Operand(LESS));
6828    __ Ret();
6829  }
6830
6831  __ bind(&unordered);
6832
6833  CompareStub stub(GetCondition(), strict(), NO_COMPARE_FLAGS, a1, a0);
6834  __ bind(&generic_stub);
6835  __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
6836
6837  __ bind(&maybe_undefined1);
6838  if (Token::IsOrderedRelationalCompareOp(op_)) {
6839    __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
6840    __ Branch(&miss, ne, a0, Operand(at));
6841    __ GetObjectType(a1, a2, a2);
6842    __ Branch(&maybe_undefined2, ne, a2, Operand(HEAP_NUMBER_TYPE));
6843    __ jmp(&unordered);
6844  }
6845
6846  __ bind(&maybe_undefined2);
6847  if (Token::IsOrderedRelationalCompareOp(op_)) {
6848    __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
6849    __ Branch(&unordered, eq, a1, Operand(at));
6850  }
6851
6852  __ bind(&miss);
6853  GenerateMiss(masm);
6854}
6855
6856
6857void ICCompareStub::GenerateSymbols(MacroAssembler* masm) {
6858  ASSERT(state_ == CompareIC::SYMBOLS);
6859  Label miss;
6860
6861  // Registers containing left and right operands respectively.
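  // Symbols are interned, so two symbol operands are equal exactly when
  // they are the same heap object; the code below can therefore compare
  // them by pointer identity without inspecting any characters.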
6862  Register left = a1;
6863  Register right = a0;
6864  Register tmp1 = a2;
6865  Register tmp2 = a3;
6866
6867  // Check that both operands are heap objects.
6868  __ JumpIfEitherSmi(left, right, &miss);
6869
6870  // Check that both operands are symbols.
6871  __ lw(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
6872  __ lw(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
6873  __ lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
6874  __ lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
6875  STATIC_ASSERT(kSymbolTag != 0);
6876  __ And(tmp1, tmp1, Operand(tmp2));
6877  __ And(tmp1, tmp1, kIsSymbolMask);
6878  __ Branch(&miss, eq, tmp1, Operand(zero_reg));
6879  // Make sure a0 is non-zero. At this point input operands are
6880  // guaranteed to be non-zero.
6881  ASSERT(right.is(a0));
6882  STATIC_ASSERT(EQUAL == 0);
6883  STATIC_ASSERT(kSmiTag == 0);
6884  __ mov(v0, right);
6885  // Symbols are compared by identity.
6886  __ Ret(ne, left, Operand(right));
6887  __ li(v0, Operand(Smi::FromInt(EQUAL)));
6888  __ Ret();
6889
6890  __ bind(&miss);
6891  GenerateMiss(masm);
6892}
6893
6894
6895void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
6896  ASSERT(state_ == CompareIC::STRINGS);
6897  Label miss;
6898
6899  bool equality = Token::IsEqualityOp(op_);
6900
6901  // Registers containing left and right operands respectively.
6902  Register left = a1;
6903  Register right = a0;
6904  Register tmp1 = a2;
6905  Register tmp2 = a3;
6906  Register tmp3 = t0;
6907  Register tmp4 = t1;
6908  Register tmp5 = t2;
6909
6910  // Check that both operands are heap objects.
6911  __ JumpIfEitherSmi(left, right, &miss);
6912
6913  // Check that both operands are strings. This leaves the instance
6914  // types loaded in tmp1 and tmp2.
6915  __ lw(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
6916  __ lw(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
6917  __ lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
6918  __ lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
6919  STATIC_ASSERT(kNotStringTag != 0);
6920  __ Or(tmp3, tmp1, tmp2);
6921  __ And(tmp5, tmp3, Operand(kIsNotStringMask));
6922  __ Branch(&miss, ne, tmp5, Operand(zero_reg));
6923
6924  // Fast check for identical strings.
6925  Label left_ne_right;
6926  STATIC_ASSERT(EQUAL == 0);
6927  STATIC_ASSERT(kSmiTag == 0);
6928  __ Branch(&left_ne_right, ne, left, Operand(right));
6929  __ Ret(USE_DELAY_SLOT);
6930  __ mov(v0, zero_reg);  // In the delay slot.
6931  __ bind(&left_ne_right);
6932
6933  // Handle not identical strings.
6934
6935  // Check that both strings are symbols. If they are, we're done
6936  // because we already know they are not identical.
6937  if (equality) {
6938    ASSERT(GetCondition() == eq);
6939    STATIC_ASSERT(kSymbolTag != 0);
6940    __ And(tmp3, tmp1, Operand(tmp2));
6941    __ And(tmp5, tmp3, Operand(kIsSymbolMask));
6942    Label is_symbol;
6943    __ Branch(&is_symbol, eq, tmp5, Operand(zero_reg));
6944    // Make sure a0 is non-zero. At this point input operands are
6945    // guaranteed to be non-zero.
6946    ASSERT(right.is(a0));
6947    __ Ret(USE_DELAY_SLOT);
6948    __ mov(v0, a0);  // In the delay slot.
6949    __ bind(&is_symbol);
6950  }
6951
6952  // Check that both strings are sequential ASCII.
6953  Label runtime;
6954  __ JumpIfBothInstanceTypesAreNotSequentialAscii(
6955      tmp1, tmp2, tmp3, tmp4, &runtime);
6956
6957  // Compare flat ASCII strings. Returns when done.
6958  if (equality) {
6959    StringCompareStub::GenerateFlatAsciiStringEquals(
6960        masm, left, right, tmp1, tmp2, tmp3);
6961  } else {
6962    StringCompareStub::GenerateCompareFlatAsciiStrings(
6963        masm, left, right, tmp1, tmp2, tmp3, tmp4);
6964  }
6965
6966  // Handle more complex cases in the runtime.
6967  __ bind(&runtime);
6968  __ Push(left, right);
6969  if (equality) {
6970    __ TailCallRuntime(Runtime::kStringEquals, 2, 1);
6971  } else {
6972    __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
6973  }
6974
6975  __ bind(&miss);
6976  GenerateMiss(masm);
6977}
6978
6979
6980void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
6981  ASSERT(state_ == CompareIC::OBJECTS);
6982  Label miss;
6983  __ And(a2, a1, Operand(a0));
6984  __ JumpIfSmi(a2, &miss);
6985
6986  __ GetObjectType(a0, a2, a2);
6987  __ Branch(&miss, ne, a2, Operand(JS_OBJECT_TYPE));
6988  __ GetObjectType(a1, a2, a2);
6989  __ Branch(&miss, ne, a2, Operand(JS_OBJECT_TYPE));
6990
6991  ASSERT(GetCondition() == eq);
6992  __ Ret(USE_DELAY_SLOT);
6993  __ subu(v0, a0, a1);
6994
6995  __ bind(&miss);
6996  GenerateMiss(masm);
6997}
6998
6999
7000void ICCompareStub::GenerateKnownObjects(MacroAssembler* masm) {
7001  Label miss;
7002  __ And(a2, a1, a0);
7003  __ JumpIfSmi(a2, &miss);
7004  __ lw(a2, FieldMemOperand(a0, HeapObject::kMapOffset));
7005  __ lw(a3, FieldMemOperand(a1, HeapObject::kMapOffset));
7006  __ Branch(&miss, ne, a2, Operand(known_map_));
7007  __ Branch(&miss, ne, a3, Operand(known_map_));
7008
7009  __ Ret(USE_DELAY_SLOT);
7010  __ subu(v0, a0, a1);
7011
7012  __ bind(&miss);
7013  GenerateMiss(masm);
7014}
7015
7016void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
7017  {
7018    // Call the runtime system in a fresh internal frame.
7019    ExternalReference miss =
7020        ExternalReference(IC_Utility(IC::kCompareIC_Miss), masm->isolate());
7021    FrameScope scope(masm, StackFrame::INTERNAL);
7022    __ Push(a1, a0);
7023    __ push(ra);
7024    __ Push(a1, a0);
7025    __ li(t0, Operand(Smi::FromInt(op_)));
7026    __ addiu(sp, sp, -kPointerSize);
7027    __ CallExternalReference(miss, 3, USE_DELAY_SLOT);
7028    __ sw(t0, MemOperand(sp));  // In the delay slot.
7029    // Compute the entry point of the rewritten stub.
7030    __ Addu(a2, v0, Operand(Code::kHeaderSize - kHeapObjectTag));
7031    // Restore registers.
7032    __ Pop(a1, a0, ra);
7033  }
7034  __ Jump(a2);
7035}
7036
7037
7038void DirectCEntryStub::Generate(MacroAssembler* masm) {
7039  // No need to pop or drop anything; LeaveExitFrame will restore the old
7040  // stack, thus dropping the allocated space for the return value.
7041  // The saved ra is after the reserved stack space for the 4 args.
7042  __ lw(t9, MemOperand(sp, kCArgsSlotsSize));
7043
7044  if (FLAG_debug_code && FLAG_enable_slow_asserts) {
7045    // In case of an error the return address may point to a memory area
7046    // filled with kZapValue by the GC.
7047    // Dereference the address and check for this.
7048    __ lw(t0, MemOperand(t9));
7049    __ Assert(ne, "Received invalid return address.", t0,
7050        Operand(reinterpret_cast<uint32_t>(kZapValue)));
7051  }
7052  __ Jump(t9);
7053}
7054
7055
7056void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
7057                                    ExternalReference function) {
7058  __ li(t9, Operand(function));
7059  this->GenerateCall(masm, t9);
7060}
7061
7062
7063void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
7064                                    Register target) {
7065  __ Move(t9, target);
7066  __ AssertStackIsAligned();
7067  // Allocate space for arg slots.
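  // The MIPS o32 calling convention requires the caller to reserve stack
  // slots for the four argument registers (a0-a3); kCArgsSlotsSize is the
  // size of those slots, and the saved ra is stored just above them (see
  // DirectCEntryStub::Generate above).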
7068  __ Subu(sp, sp, kCArgsSlotsSize);
7069
7070  // Block the trampoline pool through the whole function to make sure the
7071  // number of generated instructions is constant.
7072  Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm);
7073
7074  // We need to get the current 'pc' value, which is not available on MIPS.
7075  Label find_ra;
7076  masm->bal(&find_ra);  // ra = pc + 8.
7077  masm->nop();  // Branch delay slot nop.
7078  masm->bind(&find_ra);
7079
7080  const int kNumInstructionsToJump = 6;
7081  masm->addiu(ra, ra, kNumInstructionsToJump * kPointerSize);
7082  // Push return address (accessible to GC through exit frame pc).
7083  // This spot for ra was reserved in EnterExitFrame.
7084  masm->sw(ra, MemOperand(sp, kCArgsSlotsSize));
7085  masm->li(ra,
7086           Operand(reinterpret_cast<intptr_t>(GetCode().location()),
7087                   RelocInfo::CODE_TARGET),
7088           CONSTANT_SIZE);
7089  // Call the function.
7090  masm->Jump(t9);
7091  // Make sure the stored 'ra' points to this position.
7092  ASSERT_EQ(kNumInstructionsToJump, masm->InstructionsGeneratedSince(&find_ra));
7093}
7094
7095
7096void StringDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
7097                                                        Label* miss,
7098                                                        Label* done,
7099                                                        Register receiver,
7100                                                        Register properties,
7101                                                        Handle<String> name,
7102                                                        Register scratch0) {
7103  // If the names of the slots in the range from 1 to kProbes - 1 for the
7104  // hash value are not equal to the given name, and the kProbes-th slot is
7105  // unused (its name is the undefined value), then the hash table cannot
7106  // contain the property. This holds even if some slots represent deleted
7107  // properties (their names are the hole value).
7108  for (int i = 0; i < kInlinedProbes; i++) {
7109    // scratch0 points to properties hash.
7110    // Compute the masked index: (hash + i + i * i) & mask.
7111    Register index = scratch0;
7112    // The capacity is a power of two, stored as a smi.
7113    __ lw(index, FieldMemOperand(properties, kCapacityOffset));
7114    __ Subu(index, index, Operand(1));
7115    __ And(index, index, Operand(
7116        Smi::FromInt(name->Hash() + StringDictionary::GetProbeOffset(i))));
7117
7118    // Scale the index by multiplying by the entry size.
7119    ASSERT(StringDictionary::kEntrySize == 3);
7120    __ sll(at, index, 1);
7121    __ Addu(index, index, at);
7122
7123    Register entity_name = scratch0;
7124    // Finding undefined in this slot means the name is not in the table.
7125    ASSERT_EQ(kSmiTagSize, 1);
7126    Register tmp = properties;
7127    __ sll(scratch0, index, 1);
7128    __ Addu(tmp, properties, scratch0);
7129    __ lw(entity_name, FieldMemOperand(tmp, kElementsStartOffset));
7130
7131    ASSERT(!tmp.is(entity_name));
7132    __ LoadRoot(tmp, Heap::kUndefinedValueRootIndex);
7133    __ Branch(done, eq, entity_name, Operand(tmp));
7134
7135    if (i != kInlinedProbes - 1) {
7136      // Load the hole ready for use below:
7137      __ LoadRoot(tmp, Heap::kTheHoleValueRootIndex);
7138
7139      // Stop if we find the property.
7140      __ Branch(miss, eq, entity_name, Operand(Handle<String>(name)));
7141
7142      Label the_hole;
7143      __ Branch(&the_hole, eq, entity_name, Operand(tmp));
7144
7145      // Check if the entry name is not a symbol.
7146      __ lw(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset));
7147      __ lbu(entity_name,
7148             FieldMemOperand(entity_name, Map::kInstanceTypeOffset));
7149      __ And(scratch0, entity_name, Operand(kIsSymbolMask));
7150      __ Branch(miss, eq, scratch0, Operand(zero_reg));
7151
7152      __ bind(&the_hole);
7153
7154      // Restore the properties.
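      // tmp aliases the properties register (see "Register tmp = properties"
      // above), so the dictionary pointer has been clobbered by the root
      // loads and must be reloaded from the receiver.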
7155      __ lw(properties,
7156            FieldMemOperand(receiver, JSObject::kPropertiesOffset));
7157    }
7158  }
7159
7160  const int spill_mask =
7161      (ra.bit() | t2.bit() | t1.bit() | t0.bit() | a3.bit() |
7162       a2.bit() | a1.bit() | a0.bit() | v0.bit());
7163
7164  __ MultiPush(spill_mask);
7165  __ lw(a0, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
7166  __ li(a1, Operand(Handle<String>(name)));
7167  StringDictionaryLookupStub stub(NEGATIVE_LOOKUP);
7168  __ CallStub(&stub);
7169  __ mov(at, v0);
7170  __ MultiPop(spill_mask);
7171
7172  __ Branch(done, eq, at, Operand(zero_reg));
7173  __ Branch(miss, ne, at, Operand(zero_reg));
7174}
7175
7176
7177// Probe the string dictionary in the |elements| register. Jump to the
7178// |done| label if a property with the given name is found. Jump to
7179// the |miss| label otherwise.
7180// If lookup was successful |scratch2| will be equal to elements + 4 * index.
7181void StringDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
7182                                                        Label* miss,
7183                                                        Label* done,
7184                                                        Register elements,
7185                                                        Register name,
7186                                                        Register scratch1,
7187                                                        Register scratch2) {
7188  ASSERT(!elements.is(scratch1));
7189  ASSERT(!elements.is(scratch2));
7190  ASSERT(!name.is(scratch1));
7191  ASSERT(!name.is(scratch2));
7192
7193  // Assert that name contains a string.
7194  if (FLAG_debug_code) __ AbortIfNotString(name);
7195
7196  // Compute the capacity mask.
7197  __ lw(scratch1, FieldMemOperand(elements, kCapacityOffset));
7198  __ sra(scratch1, scratch1, kSmiTagSize);  // Convert smi to int.
7199  __ Subu(scratch1, scratch1, Operand(1));
7200
7201  // Generate an unrolled loop that performs a few probes before
7202  // giving up. Measurements done on Gmail indicate that 2 probes
7203  // cover ~93% of loads from dictionaries.
7204  for (int i = 0; i < kInlinedProbes; i++) {
7205    // Compute the masked index: (hash + i + i * i) & mask.
7206    __ lw(scratch2, FieldMemOperand(name, String::kHashFieldOffset));
7207    if (i > 0) {
7208      // Add the probe offset (i + i * i) left shifted to avoid right shifting
7209      // the hash in a separate instruction. The value hash + i + i * i is
7210      // right shifted in the following and instruction.
7211      ASSERT(StringDictionary::GetProbeOffset(i) <
7212             1 << (32 - String::kHashFieldOffset));
7213      __ Addu(scratch2, scratch2, Operand(
7214          StringDictionary::GetProbeOffset(i) << String::kHashShift));
7215    }
7216    __ srl(scratch2, scratch2, String::kHashShift);
7217    __ And(scratch2, scratch1, scratch2);
7218
7219    // Scale the index by multiplying by the element size.
7220    ASSERT(StringDictionary::kEntrySize == 3);
7221    // scratch2 = scratch2 * 3.
7222
7223    __ sll(at, scratch2, 1);
7224    __ Addu(scratch2, scratch2, at);
7225
7226    // Check if the key is identical to the name.
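    // scratch2 now holds index * kEntrySize. As a C-like sketch of the
    // probe emitted below (not actual V8 code):
    //
    //   entry = elements + (index * kEntrySize) * kPointerSize
    //           + kElementsStartOffset - kHeapObjectTag;
    //   if (*entry == name) goto done;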
7227    __ sll(at, scratch2, 2);
7228    __ Addu(scratch2, elements, at);
7229    __ lw(at, FieldMemOperand(scratch2, kElementsStartOffset));
7230    __ Branch(done, eq, name, Operand(at));
7231  }
7232
7233  const int spill_mask =
7234      (ra.bit() | t2.bit() | t1.bit() | t0.bit() |
7235       a3.bit() | a2.bit() | a1.bit() | a0.bit() | v0.bit()) &
7236      ~(scratch1.bit() | scratch2.bit());
7237
7238  __ MultiPush(spill_mask);
7239  if (name.is(a0)) {
7240    ASSERT(!elements.is(a1));
7241    __ Move(a1, name);
7242    __ Move(a0, elements);
7243  } else {
7244    __ Move(a0, elements);
7245    __ Move(a1, name);
7246  }
7247  StringDictionaryLookupStub stub(POSITIVE_LOOKUP);
7248  __ CallStub(&stub);
7249  __ mov(scratch2, a2);
7250  __ mov(at, v0);
7251  __ MultiPop(spill_mask);
7252
7253  __ Branch(done, ne, at, Operand(zero_reg));
7254  __ Branch(miss, eq, at, Operand(zero_reg));
7255}
7256
7257
7258void StringDictionaryLookupStub::Generate(MacroAssembler* masm) {
7259  // This stub overrides SometimesSetsUpAFrame() to return false. That means
7260  // we cannot call anything that could cause a GC from this stub.
7261  // Registers:
7262  //  result (v0): holds the result of the lookup.
7263  //  dictionary (a0): the StringDictionary to probe.
7264  //  key (a1): the key as a string.
7265  //  index (a2): will hold the index of the entry if the lookup is
7266  //              successful; may alias result.
7267  // Returns:
7268  //  result is zero if the lookup failed, non-zero otherwise.
7269
7270  Register result = v0;
7271  Register dictionary = a0;
7272  Register key = a1;
7273  Register index = a2;
7274  Register mask = a3;
7275  Register hash = t0;
7276  Register undefined = t1;
7277  Register entry_key = t2;
7278
7279  Label in_dictionary, maybe_in_dictionary, not_in_dictionary;
7280
7281  __ lw(mask, FieldMemOperand(dictionary, kCapacityOffset));
7282  __ sra(mask, mask, kSmiTagSize);
7283  __ Subu(mask, mask, Operand(1));
7284
7285  __ lw(hash, FieldMemOperand(key, String::kHashFieldOffset));
7286
7287  __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);
7288
7289  for (int i = kInlinedProbes; i < kTotalProbes; i++) {
7290    // Compute the masked index: (hash + i + i * i) & mask.
7291    // The capacity is a power of two, stored as a smi.
7292    if (i > 0) {
7293      // Add the probe offset (i + i * i) left shifted to avoid right shifting
7294      // the hash in a separate instruction. The value hash + i + i * i is
7295      // right shifted in the following and instruction.
7296      ASSERT(StringDictionary::GetProbeOffset(i) <
7297             1 << (32 - String::kHashFieldOffset));
7298      __ Addu(index, hash, Operand(
7299          StringDictionary::GetProbeOffset(i) << String::kHashShift));
7300    } else {
7301      __ mov(index, hash);
7302    }
7303    __ srl(index, index, String::kHashShift);
7304    __ And(index, mask, index);
7305
7306    // Scale the index by multiplying by the entry size.
7307    ASSERT(StringDictionary::kEntrySize == 3);
7308    // index *= 3.
7309    __ mov(at, index);
7310    __ sll(index, index, 1);
7311    __ Addu(index, index, at);
7312
7313
7314    ASSERT_EQ(kSmiTagSize, 1);
7315    __ sll(index, index, 2);
7316    __ Addu(index, index, dictionary);
7317    __ lw(entry_key, FieldMemOperand(index, kElementsStartOffset));
7318
7319    // Finding undefined in this slot means the name is not in the table.
7320    __ Branch(&not_in_dictionary, eq, entry_key, Operand(undefined));
7321
7322    // Stop if we find the property.
7323    __ Branch(&in_dictionary, eq, entry_key, Operand(key));
7324
7325    if (i != kTotalProbes - 1 && mode_ == NEGATIVE_LOOKUP) {
7326      // Check if the entry name is not a symbol.
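      // For a negative lookup a non-symbol key cannot be ruled out by the
      // pointer comparison above, so probing gives up; the
      // maybe_in_dictionary path below then conservatively reports the name
      // as possibly present.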
7327      __ lw(entry_key, FieldMemOperand(entry_key, HeapObject::kMapOffset));
7328      __ lbu(entry_key,
7329             FieldMemOperand(entry_key, Map::kInstanceTypeOffset));
7330      __ And(result, entry_key, Operand(kIsSymbolMask));
7331      __ Branch(&maybe_in_dictionary, eq, result, Operand(zero_reg));
7332    }
7333  }
7334
7335  __ bind(&maybe_in_dictionary);
7336  // If we are doing negative lookup then probing failure should be
7337  // treated as a lookup success. For positive lookup probing failure
7338  // should be treated as lookup failure.
7339  if (mode_ == POSITIVE_LOOKUP) {
7340    __ Ret(USE_DELAY_SLOT);
7341    __ mov(result, zero_reg);
7342  }
7343
7344  __ bind(&in_dictionary);
7345  __ Ret(USE_DELAY_SLOT);
7346  __ li(result, 1);
7347
7348  __ bind(&not_in_dictionary);
7349  __ Ret(USE_DELAY_SLOT);
7350  __ mov(result, zero_reg);
7351}
7352
7353
7354struct AheadOfTimeWriteBarrierStubList {
7355  Register object, value, address;
7356  RememberedSetAction action;
7357};
7358
7359#define REG(Name) { kRegister_ ## Name ## _Code }
7360
7361static const AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
7362  // Used in RegExpExecStub.
7363  { REG(s2), REG(s0), REG(t3), EMIT_REMEMBERED_SET },
7364  { REG(s2), REG(a2), REG(t3), EMIT_REMEMBERED_SET },
7365  // Used in CompileArrayPushCall.
7366  // Also used in StoreIC::GenerateNormal via GenerateDictionaryStore.
7367  // Also used in KeyedStoreIC::GenerateGeneric.
7368  { REG(a3), REG(t0), REG(t1), EMIT_REMEMBERED_SET },
7369  // Used in CompileStoreGlobal.
7370  { REG(t0), REG(a1), REG(a2), OMIT_REMEMBERED_SET },
7371  // Used in StoreStubCompiler::CompileStoreField via GenerateStoreField.
7372  { REG(a1), REG(a2), REG(a3), EMIT_REMEMBERED_SET },
7373  { REG(a3), REG(a2), REG(a1), EMIT_REMEMBERED_SET },
7374  // Used in KeyedStoreStubCompiler::CompileStoreField via GenerateStoreField.
7375  { REG(a2), REG(a1), REG(a3), EMIT_REMEMBERED_SET },
7376  { REG(a3), REG(a1), REG(a2), EMIT_REMEMBERED_SET },
7377  // KeyedStoreStubCompiler::GenerateStoreFastElement.
7378  { REG(a3), REG(a2), REG(t0), EMIT_REMEMBERED_SET },
7379  { REG(a2), REG(a3), REG(t0), EMIT_REMEMBERED_SET },
7380  // ElementsTransitionGenerator::GenerateSmiOnlyToObject
7381  // and ElementsTransitionGenerator::GenerateSmiOnlyToDouble
7382  // and ElementsTransitionGenerator::GenerateDoubleToObject
7383  { REG(a2), REG(a3), REG(t5), EMIT_REMEMBERED_SET },
7384  { REG(a2), REG(a3), REG(t5), OMIT_REMEMBERED_SET },
7385  // ElementsTransitionGenerator::GenerateDoubleToObject
7386  { REG(t2), REG(a2), REG(a0), EMIT_REMEMBERED_SET },
7387  { REG(a2), REG(t2), REG(t5), EMIT_REMEMBERED_SET },
7388  // StoreArrayLiteralElementStub::Generate
7389  { REG(t1), REG(a0), REG(t2), EMIT_REMEMBERED_SET },
7390  // Null termination.
7391  { REG(no_reg), REG(no_reg), REG(no_reg), EMIT_REMEMBERED_SET}
7392};
7393
7394#undef REG
7395
7396
7397bool RecordWriteStub::IsPregenerated() {
7398  for (const AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
7399       !entry->object.is(no_reg);
7400       entry++) {
7401    if (object_.is(entry->object) &&
7402        value_.is(entry->value) &&
7403        address_.is(entry->address) &&
7404        remembered_set_action_ == entry->action &&
7405        save_fp_regs_mode_ == kDontSaveFPRegs) {
7406      return true;
7407    }
7408  }
7409  return false;
7410}
7411
7412
7413bool StoreBufferOverflowStub::IsPregenerated() {
7414  return save_doubles_ == kDontSaveFPRegs || ISOLATE->fp_stubs_generated();
7415}
7416
7417
7418void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime() {
7419  StoreBufferOverflowStub stub1(kDontSaveFPRegs);
7420  stub1.GetCode()->set_is_pregenerated(true);
7421}
7422
7423
7424void RecordWriteStub::GenerateFixedRegStubsAheadOfTime() {
7425  for (const AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
7426       !entry->object.is(no_reg);
7427       entry++) {
7428    RecordWriteStub stub(entry->object,
7429                         entry->value,
7430                         entry->address,
7431                         entry->action,
7432                         kDontSaveFPRegs);
7433    stub.GetCode()->set_is_pregenerated(true);
7434  }
7435}
7436
7437
7438// Takes the input in 3 registers: address_, value_, and object_. A pointer
7439// to the value has just been written into the object; this stub now makes
7440// sure the GC is kept informed. The word in the object where the value has
7441// been written is in the address register.
7442void RecordWriteStub::Generate(MacroAssembler* masm) {
7443  Label skip_to_incremental_noncompacting;
7444  Label skip_to_incremental_compacting;
7445
7446  // The first two branch+nop instructions are generated with labels so as to
7447  // get the offset fixed up correctly by the bind(Label*) call. We patch them
7448  // back and forth between a "bne zero_reg, zero_reg, ..." (a nop in this
7449  // position) and the "beq zero_reg, zero_reg, ..." when we start and stop
7450  // incremental heap marking.
7451  // See RecordWriteStub::Patch for details.
7452  __ beq(zero_reg, zero_reg, &skip_to_incremental_noncompacting);
7453  __ nop();
7454  __ beq(zero_reg, zero_reg, &skip_to_incremental_compacting);
7455  __ nop();
7456
7457  if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
7458    __ RememberedSetHelper(object_,
7459                           address_,
7460                           value_,
7461                           save_fp_regs_mode_,
7462                           MacroAssembler::kReturnAtEnd);
7463  }
7464  __ Ret();
7465
7466  __ bind(&skip_to_incremental_noncompacting);
7467  GenerateIncremental(masm, INCREMENTAL);
7468
7469  __ bind(&skip_to_incremental_compacting);
7470  GenerateIncremental(masm, INCREMENTAL_COMPACTION);
7471
7472  // Initial mode of the stub is expected to be STORE_BUFFER_ONLY.
7473  // Will be checked in IncrementalMarking::ActivateGeneratedStub.
7474
7475  PatchBranchIntoNop(masm, 0);
7476  PatchBranchIntoNop(masm, 2 * Assembler::kInstrSize);
7477}
7478
7479
7480void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
7481  regs_.Save(masm);
7482
7483  if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
7484    Label dont_need_remembered_set;
7485
7486    __ lw(regs_.scratch0(), MemOperand(regs_.address(), 0));
7487    __ JumpIfNotInNewSpace(regs_.scratch0(),  // Value.
7488                           regs_.scratch0(),
7489                           &dont_need_remembered_set);
7490
7491    __ CheckPageFlag(regs_.object(),
7492                     regs_.scratch0(),
7493                     1 << MemoryChunk::SCAN_ON_SCAVENGE,
7494                     ne,
7495                     &dont_need_remembered_set);
7496
7497    // First notify the incremental marker if necessary, then update the
7498    // remembered set.
7499    CheckNeedsToInformIncrementalMarker(
7500        masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, mode);
7501    InformIncrementalMarker(masm, mode);
7502    regs_.Restore(masm);
7503    __ RememberedSetHelper(object_,
7504                           address_,
7505                           value_,
7506                           save_fp_regs_mode_,
7507                           MacroAssembler::kReturnAtEnd);
7508
7509    __ bind(&dont_need_remembered_set);
7510  }
7511
7512  CheckNeedsToInformIncrementalMarker(
7513      masm, kReturnOnNoNeedToInformIncrementalMarker, mode);
7514  InformIncrementalMarker(masm, mode);
7515  regs_.Restore(masm);
7516  __ Ret();
7517}
7518
7519
7520void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm, Mode mode) {
7521  regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode_);
7522  int argument_count = 3;
7523  __ PrepareCallCFunction(argument_count, regs_.scratch0());
7524  Register address =
7525      a0.is(regs_.address()) ? regs_.scratch0() : regs_.address();
7526  ASSERT(!address.is(regs_.object()));
7527  ASSERT(!address.is(a0));
7528  __ Move(address, regs_.address());
7529  __ Move(a0, regs_.object());
7530  if (mode == INCREMENTAL_COMPACTION) {
7531    __ Move(a1, address);
7532  } else {
7533    ASSERT(mode == INCREMENTAL);
7534    __ lw(a1, MemOperand(address, 0));
7535  }
7536  __ li(a2, Operand(ExternalReference::isolate_address()));
7537
7538  AllowExternalCallThatCantCauseGC scope(masm);
7539  if (mode == INCREMENTAL_COMPACTION) {
7540    __ CallCFunction(
7541        ExternalReference::incremental_evacuation_record_write_function(
7542            masm->isolate()),
7543        argument_count);
7544  } else {
7545    ASSERT(mode == INCREMENTAL);
7546    __ CallCFunction(
7547        ExternalReference::incremental_marking_record_write_function(
7548            masm->isolate()),
7549        argument_count);
7550  }
7551  regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode_);
7552}
7553
7554
7555void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
7556    MacroAssembler* masm,
7557    OnNoNeedToInformIncrementalMarker on_no_need,
7558    Mode mode) {
7559  Label on_black;
7560  Label need_incremental;
7561  Label need_incremental_pop_scratch;
7562
7563  // Let's look at the color of the object: if it is not black we don't have
7564  // to inform the incremental marker.
7565  __ JumpIfBlack(regs_.object(), regs_.scratch0(), regs_.scratch1(), &on_black);
7566
7567  regs_.Restore(masm);
7568  if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
7569    __ RememberedSetHelper(object_,
7570                           address_,
7571                           value_,
7572                           save_fp_regs_mode_,
7573                           MacroAssembler::kReturnAtEnd);
7574  } else {
7575    __ Ret();
7576  }
7577
7578  __ bind(&on_black);
7579
7580  // Get the value from the slot.
7581  __ lw(regs_.scratch0(), MemOperand(regs_.address(), 0));
7582
7583  if (mode == INCREMENTAL_COMPACTION) {
7584    Label ensure_not_white;
7585
7586    __ CheckPageFlag(regs_.scratch0(),  // Contains value.
7587                     regs_.scratch1(),  // Scratch.
7588                     MemoryChunk::kEvacuationCandidateMask,
7589                     eq,
7590                     &ensure_not_white);
7591
7592    __ CheckPageFlag(regs_.object(),
7593                     regs_.scratch1(),  // Scratch.
7594                     MemoryChunk::kSkipEvacuationSlotsRecordingMask,
7595                     eq,
7596                     &need_incremental);
7597
7598    __ bind(&ensure_not_white);
7599  }
7600
7601  // We need extra registers for this, so we push the object and the address
7602  // register temporarily.
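  // Incremental marking is tri-colour: white objects are unvisited, grey
  // objects are queued for scanning, black objects are done. A black object
  // must not point to a white one, so EnsureNotWhite below makes sure the
  // value is no longer white, jumping to need_incremental_pop_scratch when
  // the incremental marker has to deal with it.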
7603  __ Push(regs_.object(), regs_.address());
7604  __ EnsureNotWhite(regs_.scratch0(),  // The value.
7605                    regs_.scratch1(),  // Scratch.
7606                    regs_.object(),  // Scratch.
7607                    regs_.address(),  // Scratch.
7608                    &need_incremental_pop_scratch);
7609  __ Pop(regs_.object(), regs_.address());
7610
7611  regs_.Restore(masm);
7612  if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
7613    __ RememberedSetHelper(object_,
7614                           address_,
7615                           value_,
7616                           save_fp_regs_mode_,
7617                           MacroAssembler::kReturnAtEnd);
7618  } else {
7619    __ Ret();
7620  }
7621
7622  __ bind(&need_incremental_pop_scratch);
7623  __ Pop(regs_.object(), regs_.address());
7624
7625  __ bind(&need_incremental);
7626
7627  // Fall through when we need to inform the incremental marker.
7628}
7629
7630
7631void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
7632  // ----------- S t a t e -------------
7633  //  -- a0    : element value to store
7634  //  -- a1    : array literal
7635  //  -- a2    : map of array literal
7636  //  -- a3    : element index as smi
7637  //  -- t0    : array literal index in function as smi
7638  // -----------------------------------
7639
7640  Label element_done;
7641  Label double_elements;
7642  Label smi_element;
7643  Label slow_elements;
7644  Label fast_elements;
7645
7646  __ CheckFastElements(a2, t1, &double_elements);
7647  // FAST_SMI_ONLY_ELEMENTS or FAST_ELEMENTS
7648  __ JumpIfSmi(a0, &smi_element);
7649  __ CheckFastSmiOnlyElements(a2, t1, &fast_elements);
7650
7651  // Store into the array literal requires an elements transition. Call into
7652  // the runtime.
7653  __ bind(&slow_elements);
7654  // Push the arguments for the runtime call.
7655  __ Push(a1, a3, a0);
7656  __ lw(t1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
7657  __ lw(t1, FieldMemOperand(t1, JSFunction::kLiteralsOffset));
7658  __ Push(t1, t0);
7659  __ TailCallRuntime(Runtime::kStoreArrayLiteralElement, 5, 1);
7660
7661  // Array literal has ElementsKind of FAST_ELEMENTS and value is an object.
7662  __ bind(&fast_elements);
7663  __ lw(t1, FieldMemOperand(a1, JSObject::kElementsOffset));
7664  __ sll(t2, a3, kPointerSizeLog2 - kSmiTagSize);
7665  __ Addu(t2, t1, t2);
7666  __ Addu(t2, t2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
7667  __ sw(a0, MemOperand(t2, 0));
7668  // Update the write barrier for the array store.
7669  __ RecordWrite(t1, t2, a0, kRAHasNotBeenSaved, kDontSaveFPRegs,
7670                 EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
7671  __ Ret(USE_DELAY_SLOT);
7672  __ mov(v0, a0);
7673
7674  // Array literal has ElementsKind of FAST_SMI_ONLY_ELEMENTS or
7675  // FAST_ELEMENTS, and value is Smi.
7676  __ bind(&smi_element);
7677  __ lw(t1, FieldMemOperand(a1, JSObject::kElementsOffset));
7678  __ sll(t2, a3, kPointerSizeLog2 - kSmiTagSize);
7679  __ Addu(t2, t1, t2);
7680  __ sw(a0, FieldMemOperand(t2, FixedArray::kHeaderSize));
7681  __ Ret(USE_DELAY_SLOT);
7682  __ mov(v0, a0);
7683
7684  // Array literal has ElementsKind of FAST_DOUBLE_ELEMENTS.
7685  __ bind(&double_elements);
7686  __ lw(t1, FieldMemOperand(a1, JSObject::kElementsOffset));
7687  __ StoreNumberToDoubleElements(a0, a3, a1, t1, t2, t3, t5, a2,
7688                                 &slow_elements);
7689  __ Ret(USE_DELAY_SLOT);
7690  __ mov(v0, a0);
7691}
7692
7693
7694#undef __
7695
7696} }  // namespace v8::internal
7697
7698#endif  // V8_TARGET_ARCH_MIPS
7699