// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "test/unittests/compiler/instruction-selector-unittest.h"

namespace v8 {
namespace internal {
namespace compiler {

namespace {

// Immediates (random subset).
const int32_t kImmediates[] = {kMinInt, -42, -1,   0,      1,          2,
                               3,       4,   5,    6,      7,          8,
                               16,      42,  0xff, 0xffff, 0x0f0f0f0f, kMaxInt};

}  // namespace


TEST_F(InstructionSelectorTest, Int32AddWithParameter) {
  StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
                  MachineType::Int32());
  m.Return(m.Int32Add(m.Parameter(0), m.Parameter(1)));
  Stream s = m.Build();
  ASSERT_EQ(1U, s.size());
  EXPECT_EQ(kIA32Lea, s[0]->arch_opcode());
}


TEST_F(InstructionSelectorTest, Int32AddWithImmediate) {
  TRACED_FOREACH(int32_t, imm, kImmediates) {
    {
      StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
      m.Return(m.Int32Add(m.Parameter(0), m.Int32Constant(imm)));
      Stream s = m.Build();
      ASSERT_EQ(1U, s.size());
      EXPECT_EQ(kIA32Lea, s[0]->arch_opcode());
      if (imm == 0) {
        ASSERT_EQ(1U, s[0]->InputCount());
      } else {
        ASSERT_EQ(2U, s[0]->InputCount());
        EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
      }
    }
    {
      StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
      m.Return(m.Int32Add(m.Int32Constant(imm), m.Parameter(0)));
      Stream s = m.Build();
      ASSERT_EQ(1U, s.size());
      EXPECT_EQ(kIA32Lea, s[0]->arch_opcode());
      if (imm == 0) {
        ASSERT_EQ(1U, s[0]->InputCount());
      } else {
        ASSERT_EQ(2U, s[0]->InputCount());
        EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
      }
    }
  }
}


TEST_F(InstructionSelectorTest, Int32SubWithParameter) {
  StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
                  MachineType::Int32());
  m.Return(m.Int32Sub(m.Parameter(0), m.Parameter(1)));
  Stream s = m.Build();
  ASSERT_EQ(1U, s.size());
  EXPECT_EQ(kIA32Sub, s[0]->arch_opcode());
  EXPECT_EQ(1U, s[0]->OutputCount());
}


TEST_F(InstructionSelectorTest, Int32SubWithImmediate) {
  TRACED_FOREACH(int32_t, imm, kImmediates) {
    StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
    m.Return(m.Int32Sub(m.Parameter(0), m.Int32Constant(imm)));
    Stream s = m.Build();
    ASSERT_EQ(1U, s.size());
    EXPECT_EQ(kIA32Sub, s[0]->arch_opcode());
    ASSERT_EQ(2U, s[0]->InputCount());
    EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
  }
}


// -----------------------------------------------------------------------------
// Conversions.
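//
// The float32 <-> float64 conversions below should each be selected as a
// single SSE conversion instruction with exactly one input and one output.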


TEST_F(InstructionSelectorTest, ChangeFloat32ToFloat64WithParameter) {
  StreamBuilder m(this, MachineType::Float32(), MachineType::Float64());
  m.Return(m.ChangeFloat32ToFloat64(m.Parameter(0)));
  Stream s = m.Build();
  ASSERT_EQ(1U, s.size());
  EXPECT_EQ(kSSEFloat32ToFloat64, s[0]->arch_opcode());
  EXPECT_EQ(1U, s[0]->InputCount());
  EXPECT_EQ(1U, s[0]->OutputCount());
}


TEST_F(InstructionSelectorTest, TruncateFloat64ToFloat32WithParameter) {
  StreamBuilder m(this, MachineType::Float64(), MachineType::Float32());
  m.Return(m.TruncateFloat64ToFloat32(m.Parameter(0)));
  Stream s = m.Build();
  ASSERT_EQ(1U, s.size());
  EXPECT_EQ(kSSEFloat64ToFloat32, s[0]->arch_opcode());
  EXPECT_EQ(1U, s[0]->InputCount());
  EXPECT_EQ(1U, s[0]->OutputCount());
}


// -----------------------------------------------------------------------------
// Better left operand for commutative binops


TEST_F(InstructionSelectorTest, BetterLeftOperandTestAddBinop) {
  StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
                  MachineType::Int32());
  Node* param1 = m.Parameter(0);
  Node* param2 = m.Parameter(1);
  Node* add = m.Int32Add(param1, param2);
  m.Return(m.Int32Add(add, param1));
  Stream s = m.Build();
  ASSERT_EQ(2U, s.size());
  EXPECT_EQ(kIA32Lea, s[0]->arch_opcode());
  ASSERT_EQ(2U, s[0]->InputCount());
  ASSERT_TRUE(s[0]->InputAt(0)->IsUnallocated());
  EXPECT_EQ(s.ToVreg(param1), s.ToVreg(s[0]->InputAt(0)));
  EXPECT_EQ(s.ToVreg(param2), s.ToVreg(s[0]->InputAt(1)));
  ASSERT_EQ(2U, s[1]->InputCount());
  EXPECT_EQ(s.ToVreg(param1), s.ToVreg(s[0]->InputAt(0)));
}


TEST_F(InstructionSelectorTest, BetterLeftOperandTestMulBinop) {
  StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
                  MachineType::Int32());
  Node* param1 = m.Parameter(0);
  Node* param2 = m.Parameter(1);
  Node* mul = m.Int32Mul(param1, param2);
  m.Return(m.Int32Mul(mul, param1));
  Stream s = m.Build();
  ASSERT_EQ(2U, s.size());
  EXPECT_EQ(kIA32Imul, s[0]->arch_opcode());
  ASSERT_EQ(2U, s[0]->InputCount());
  ASSERT_TRUE(s[0]->InputAt(0)->IsUnallocated());
  EXPECT_EQ(s.ToVreg(param2), s.ToVreg(s[0]->InputAt(0)));
  EXPECT_EQ(s.ToVreg(param1), s.ToVreg(s[0]->InputAt(1)));
}


// -----------------------------------------------------------------------------
// Conversions.
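//
// ChangeUint32ToFloat64 should select the dedicated kSSEUint32ToFloat64
// opcode, since IA-32 has no instruction that converts an unsigned 32-bit
// integer directly to a double.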


TEST_F(InstructionSelectorTest, ChangeUint32ToFloat64WithParameter) {
  StreamBuilder m(this, MachineType::Float64(), MachineType::Uint32());
  m.Return(m.ChangeUint32ToFloat64(m.Parameter(0)));
  Stream s = m.Build();
  ASSERT_EQ(1U, s.size());
  EXPECT_EQ(kSSEUint32ToFloat64, s[0]->arch_opcode());
}


// -----------------------------------------------------------------------------
// Loads and stores


namespace {

struct MemoryAccess {
  MachineType type;
  ArchOpcode load_opcode;
  ArchOpcode store_opcode;
};


std::ostream& operator<<(std::ostream& os, const MemoryAccess& memacc) {
  return os << memacc.type;
}


static const MemoryAccess kMemoryAccesses[] = {
    {MachineType::Int8(), kIA32Movsxbl, kIA32Movb},
    {MachineType::Uint8(), kIA32Movzxbl, kIA32Movb},
    {MachineType::Int16(), kIA32Movsxwl, kIA32Movw},
    {MachineType::Uint16(), kIA32Movzxwl, kIA32Movw},
    {MachineType::Int32(), kIA32Movl, kIA32Movl},
    {MachineType::Uint32(), kIA32Movl, kIA32Movl},
    {MachineType::Float32(), kIA32Movss, kIA32Movss},
    {MachineType::Float64(), kIA32Movsd, kIA32Movsd}};

}  // namespace


typedef InstructionSelectorTestWithParam<MemoryAccess>
    InstructionSelectorMemoryAccessTest;


TEST_P(InstructionSelectorMemoryAccessTest, LoadWithParameters) {
  const MemoryAccess memacc = GetParam();
  StreamBuilder m(this, memacc.type, MachineType::Pointer(),
                  MachineType::Int32());
  m.Return(m.Load(memacc.type, m.Parameter(0), m.Parameter(1)));
  Stream s = m.Build();
  ASSERT_EQ(1U, s.size());
  EXPECT_EQ(memacc.load_opcode, s[0]->arch_opcode());
  EXPECT_EQ(2U, s[0]->InputCount());
  EXPECT_EQ(1U, s[0]->OutputCount());
}


TEST_P(InstructionSelectorMemoryAccessTest, LoadWithImmediateBase) {
  const MemoryAccess memacc = GetParam();
  TRACED_FOREACH(int32_t, base, kImmediates) {
    StreamBuilder m(this, memacc.type, MachineType::Pointer());
    m.Return(m.Load(memacc.type, m.Int32Constant(base), m.Parameter(0)));
    Stream s = m.Build();
    ASSERT_EQ(1U, s.size());
    EXPECT_EQ(memacc.load_opcode, s[0]->arch_opcode());
    if (base == 0) {
      ASSERT_EQ(1U, s[0]->InputCount());
    } else {
      ASSERT_EQ(2U, s[0]->InputCount());
      ASSERT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
      EXPECT_EQ(base, s.ToInt32(s[0]->InputAt(1)));
    }
    EXPECT_EQ(1U, s[0]->OutputCount());
  }
}


TEST_P(InstructionSelectorMemoryAccessTest, LoadWithImmediateIndex) {
  const MemoryAccess memacc = GetParam();
  TRACED_FOREACH(int32_t, index, kImmediates) {
    StreamBuilder m(this, memacc.type, MachineType::Pointer());
    m.Return(m.Load(memacc.type, m.Parameter(0), m.Int32Constant(index)));
    Stream s = m.Build();
    ASSERT_EQ(1U, s.size());
    EXPECT_EQ(memacc.load_opcode, s[0]->arch_opcode());
    if (index == 0) {
      ASSERT_EQ(1U, s[0]->InputCount());
    } else {
      ASSERT_EQ(2U, s[0]->InputCount());
      ASSERT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
      EXPECT_EQ(index, s.ToInt32(s[0]->InputAt(1)));
    }
    EXPECT_EQ(1U, s[0]->OutputCount());
  }
}


TEST_P(InstructionSelectorMemoryAccessTest, StoreWithParameters) {
  const MemoryAccess memacc = GetParam();
  StreamBuilder m(this, MachineType::Int32(), MachineType::Pointer(),
                  MachineType::Int32(), memacc.type);
  m.Store(memacc.type.representation(), m.Parameter(0), m.Parameter(1),
          m.Parameter(2), kNoWriteBarrier);
  m.Return(m.Int32Constant(0));
  Stream s = m.Build();
  ASSERT_EQ(1U, s.size());
  EXPECT_EQ(memacc.store_opcode, s[0]->arch_opcode());
  EXPECT_EQ(3U, s[0]->InputCount());
  EXPECT_EQ(0U, s[0]->OutputCount());
}


TEST_P(InstructionSelectorMemoryAccessTest, StoreWithImmediateBase) {
  const MemoryAccess memacc = GetParam();
  TRACED_FOREACH(int32_t, base, kImmediates) {
    StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
                    memacc.type);
    m.Store(memacc.type.representation(), m.Int32Constant(base),
            m.Parameter(0), m.Parameter(1), kNoWriteBarrier);
    m.Return(m.Int32Constant(0));
    Stream s = m.Build();
    ASSERT_EQ(1U, s.size());
    EXPECT_EQ(memacc.store_opcode, s[0]->arch_opcode());
    if (base == 0) {
      ASSERT_EQ(2U, s[0]->InputCount());
    } else {
      ASSERT_EQ(3U, s[0]->InputCount());
      ASSERT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
      EXPECT_EQ(base, s.ToInt32(s[0]->InputAt(1)));
    }
    EXPECT_EQ(0U, s[0]->OutputCount());
  }
}


TEST_P(InstructionSelectorMemoryAccessTest, StoreWithImmediateIndex) {
  const MemoryAccess memacc = GetParam();
  TRACED_FOREACH(int32_t, index, kImmediates) {
    StreamBuilder m(this, MachineType::Int32(), MachineType::Pointer(),
                    memacc.type);
    m.Store(memacc.type.representation(), m.Parameter(0),
            m.Int32Constant(index), m.Parameter(1), kNoWriteBarrier);
    m.Return(m.Int32Constant(0));
    Stream s = m.Build();
    ASSERT_EQ(1U, s.size());
    EXPECT_EQ(memacc.store_opcode, s[0]->arch_opcode());
    if (index == 0) {
      ASSERT_EQ(2U, s[0]->InputCount());
    } else {
      ASSERT_EQ(3U, s[0]->InputCount());
      ASSERT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
      EXPECT_EQ(index, s.ToInt32(s[0]->InputAt(1)));
    }
    EXPECT_EQ(0U, s[0]->OutputCount());
  }
}


INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
                        InstructionSelectorMemoryAccessTest,
                        ::testing::ValuesIn(kMemoryAccesses));


// -----------------------------------------------------------------------------
// AddressingMode for loads and stores.
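//
// The mode names below follow the IA32 instruction code convention: M is a
// memory operand, R a base register, a digit N an index register scaled by N
// (1, 2, 4 or 8), and I an immediate displacement. For example, kMode_MR4I
// addresses [base + index * 4 + displacement].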


class AddressingModeUnitTest : public InstructionSelectorTest {
 public:
  AddressingModeUnitTest() : m(NULL) { Reset(); }
  ~AddressingModeUnitTest() { delete m; }

  void Run(Node* base, Node* load_index, Node* store_index,
           AddressingMode mode) {
    Node* load = m->Load(MachineType::Int32(), base, load_index);
    m->Store(MachineRepresentation::kWord32, base, store_index, load,
             kNoWriteBarrier);
    m->Return(m->Int32Constant(0));
    Stream s = m->Build();
    ASSERT_EQ(2U, s.size());
    EXPECT_EQ(mode, s[0]->addressing_mode());
    EXPECT_EQ(mode, s[1]->addressing_mode());
  }

  Node* zero;
  Node* null_ptr;
  Node* non_zero;
  Node* base_reg;   // opaque value to generate base as register
  Node* index_reg;  // opaque value to generate index as register
  Node* scales[4];
  StreamBuilder* m;

  void Reset() {
    delete m;
    m = new StreamBuilder(this, MachineType::Int32(), MachineType::Int32(),
                          MachineType::Int32());
    zero = m->Int32Constant(0);
    null_ptr = m->Int32Constant(0);
    non_zero = m->Int32Constant(127);
    base_reg = m->Parameter(0);
    index_reg = m->Parameter(0);

    scales[0] = m->Int32Constant(1);
    scales[1] = m->Int32Constant(2);
    scales[2] = m->Int32Constant(4);
    scales[3] = m->Int32Constant(8);
  }
};


TEST_F(AddressingModeUnitTest, AddressingMode_MR) {
  Node* base = base_reg;
  Node* index = zero;
  Run(base, index, index, kMode_MR);
}


TEST_F(AddressingModeUnitTest, AddressingMode_MRI) {
  Node* base = base_reg;
  Node* index = non_zero;
  Run(base, index, index, kMode_MRI);
}


TEST_F(AddressingModeUnitTest, AddressingMode_MR1) {
  Node* base = base_reg;
  Node* index = index_reg;
  Run(base, index, index, kMode_MR1);
}


TEST_F(AddressingModeUnitTest, AddressingMode_MRN) {
  AddressingMode expected[] = {kMode_MR1, kMode_MR2, kMode_MR4, kMode_MR8};
  for (size_t i = 0; i < arraysize(scales); ++i) {
    Reset();
    Node* base = base_reg;
    Node* load_index = m->Int32Mul(index_reg, scales[i]);
    Node* store_index = m->Int32Mul(index_reg, scales[i]);
    Run(base, load_index, store_index, expected[i]);
  }
}


TEST_F(AddressingModeUnitTest, AddressingMode_MR1I) {
  Node* base = base_reg;
  Node* load_index = m->Int32Add(index_reg, non_zero);
  Node* store_index = m->Int32Add(index_reg, non_zero);
  Run(base, load_index, store_index, kMode_MR1I);
}


TEST_F(AddressingModeUnitTest, AddressingMode_MRNI) {
  AddressingMode expected[] = {kMode_MR1I, kMode_MR2I, kMode_MR4I, kMode_MR8I};
  for (size_t i = 0; i < arraysize(scales); ++i) {
    Reset();
    Node* base = base_reg;
    Node* load_index = m->Int32Add(m->Int32Mul(index_reg, scales[i]), non_zero);
    Node* store_index =
        m->Int32Add(m->Int32Mul(index_reg, scales[i]), non_zero);
    Run(base, load_index, store_index, expected[i]);
  }
}


TEST_F(AddressingModeUnitTest, AddressingMode_M1ToMR) {
  Node* base = null_ptr;
  Node* index = index_reg;
  // M1 maps to MR
  Run(base, index, index, kMode_MR);
}


TEST_F(AddressingModeUnitTest, AddressingMode_MN) {
  AddressingMode expected[] = {kMode_MR, kMode_M2, kMode_M4, kMode_M8};
  for (size_t i = 0; i < arraysize(scales); ++i) {
    Reset();
    Node* base = null_ptr;
    Node* load_index = m->Int32Mul(index_reg, scales[i]);
    Node* store_index = m->Int32Mul(index_reg, scales[i]);
    Run(base, load_index, store_index, expected[i]);
  }
}


TEST_F(AddressingModeUnitTest, AddressingMode_M1IToMRI) {
  Node* base = null_ptr;
  Node* load_index = m->Int32Add(index_reg, non_zero);
  Node* store_index = m->Int32Add(index_reg, non_zero);
  // M1I maps to MRI
  Run(base, load_index, store_index, kMode_MRI);
}


TEST_F(AddressingModeUnitTest, AddressingMode_MNI) {
  AddressingMode expected[] = {kMode_MRI, kMode_M2I, kMode_M4I, kMode_M8I};
  for (size_t i = 0; i < arraysize(scales); ++i) {
    Reset();
    Node* base = null_ptr;
    Node* load_index = m->Int32Add(m->Int32Mul(index_reg, scales[i]), non_zero);
    Node* store_index =
        m->Int32Add(m->Int32Mul(index_reg, scales[i]), non_zero);
    Run(base, load_index, store_index, expected[i]);
  }
}


TEST_F(AddressingModeUnitTest, AddressingMode_MI) {
  Node* bases[] = {null_ptr, non_zero};
  Node* indices[] = {zero, non_zero};
  for (size_t i = 0; i < arraysize(bases); ++i) {
    for (size_t j = 0; j < arraysize(indices); ++j) {
      Reset();
      Node* base = bases[i];
      Node* index = indices[j];
      Run(base, index, index, kMode_MI);
    }
  }
}


// -----------------------------------------------------------------------------
// Multiplication.


namespace {

struct MultParam {
  int value;
  bool lea_expected;
  AddressingMode addressing_mode;
};


std::ostream& operator<<(std::ostream& os, const MultParam& m) {
  return os << m.value << "." << m.lea_expected << "." << m.addressing_mode;
}


const MultParam kMultParams[] = {{-1, false, kMode_None},
                                 {0, false, kMode_None},
                                 {1, true, kMode_MR},
                                 {2, true, kMode_M2},
                                 {3, true, kMode_MR2},
                                 {4, true, kMode_M4},
                                 {5, true, kMode_MR4},
                                 {6, false, kMode_None},
                                 {7, false, kMode_None},
                                 {8, true, kMode_M8},
                                 {9, true, kMode_MR8},
                                 {10, false, kMode_None},
                                 {11, false, kMode_None}};

}  // namespace


typedef InstructionSelectorTestWithParam<MultParam> InstructionSelectorMultTest;


static unsigned InputCountForLea(AddressingMode mode) {
  switch (mode) {
    case kMode_MR1I:
    case kMode_MR2I:
    case kMode_MR4I:
    case kMode_MR8I:
      return 3U;
    case kMode_M1I:
    case kMode_M2I:
    case kMode_M4I:
    case kMode_M8I:
      return 2U;
    case kMode_MR1:
    case kMode_MR2:
    case kMode_MR4:
    case kMode_MR8:
    case kMode_MRI:
      return 2U;
    case kMode_M1:
    case kMode_M2:
    case kMode_M4:
    case kMode_M8:
    case kMode_MI:
    case kMode_MR:
      return 1U;
    default:
      UNREACHABLE();
      return 0U;
  }
}


static AddressingMode AddressingModeForAddMult(int32_t imm,
                                               const MultParam& m) {
  if (imm == 0) return m.addressing_mode;
  switch (m.addressing_mode) {
    case kMode_MR1:
      return kMode_MR1I;
    case kMode_MR2:
      return kMode_MR2I;
    case kMode_MR4:
      return kMode_MR4I;
    case kMode_MR8:
      return kMode_MR8I;
    case kMode_M1:
      return kMode_M1I;
    case kMode_M2:
      return kMode_M2I;
    case kMode_M4:
      return kMode_M4I;
    case kMode_M8:
      return kMode_M8I;
    case kMode_MR:
      return kMode_MRI;
    default:
      UNREACHABLE();
      return kMode_None;
  }
}


TEST_P(InstructionSelectorMultTest, Mult32) {
  const MultParam m_param = GetParam();
  StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
  Node* param = m.Parameter(0);
  Node* mult = m.Int32Mul(param, m.Int32Constant(m_param.value));
  m.Return(mult);
  Stream s = m.Build();
  ASSERT_EQ(1U, s.size());
  EXPECT_EQ(m_param.addressing_mode, s[0]->addressing_mode());
  if (m_param.lea_expected) {
    EXPECT_EQ(kIA32Lea, s[0]->arch_opcode());
    ASSERT_EQ(InputCountForLea(s[0]->addressing_mode()), s[0]->InputCount());
  } else {
    EXPECT_EQ(kIA32Imul, s[0]->arch_opcode());
    ASSERT_EQ(2U, s[0]->InputCount());
  }
  EXPECT_EQ(s.ToVreg(param), s.ToVreg(s[0]->InputAt(0)));
}


TEST_P(InstructionSelectorMultTest, MultAdd32) {
  TRACED_FOREACH(int32_t, imm, kImmediates) {
    const MultParam m_param = GetParam();
    StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
    Node* param = m.Parameter(0);
    Node* mult = m.Int32Add(m.Int32Mul(param, m.Int32Constant(m_param.value)),
                            m.Int32Constant(imm));
    m.Return(mult);
    Stream s = m.Build();
    if (m_param.lea_expected) {
      ASSERT_EQ(1U, s.size());
      EXPECT_EQ(kIA32Lea, s[0]->arch_opcode());
      EXPECT_EQ(AddressingModeForAddMult(imm, m_param),
                s[0]->addressing_mode());
      unsigned input_count = InputCountForLea(s[0]->addressing_mode());
      ASSERT_EQ(input_count, s[0]->InputCount());
      if (imm != 0) {
        ASSERT_EQ(InstructionOperand::IMMEDIATE,
                  s[0]->InputAt(input_count - 1)->kind());
        EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(input_count - 1)));
      }
    } else {
      ASSERT_EQ(2U, s.size());
      EXPECT_EQ(kIA32Imul, s[0]->arch_opcode());
      EXPECT_EQ(kIA32Lea, s[1]->arch_opcode());
    }
  }
}


INSTANTIATE_TEST_CASE_P(InstructionSelectorTest, InstructionSelectorMultTest,
                        ::testing::ValuesIn(kMultParams));


TEST_F(InstructionSelectorTest, Int32MulHigh) {
  StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
                  MachineType::Int32());
  Node* const p0 = m.Parameter(0);
  Node* const p1 = m.Parameter(1);
  Node* const n = m.Int32MulHigh(p0, p1);
  m.Return(n);
  Stream s = m.Build();
  ASSERT_EQ(1U, s.size());
  EXPECT_EQ(kIA32ImulHigh, s[0]->arch_opcode());
  ASSERT_EQ(2U, s[0]->InputCount());
  EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
  EXPECT_TRUE(s.IsFixed(s[0]->InputAt(0), eax));
  EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
  EXPECT_TRUE(!s.IsUsedAtStart(s[0]->InputAt(1)));
  ASSERT_EQ(1U, s[0]->OutputCount());
  EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
  EXPECT_TRUE(s.IsFixed(s[0]->OutputAt(0), edx));
}


// -----------------------------------------------------------------------------
// Floating point operations.
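//
// Each of the floating point tests below is built twice: once with the
// default feature set, expecting the SSE opcodes, and once with Build(AVX),
// expecting the corresponding AVX opcodes.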


TEST_F(InstructionSelectorTest, Float32Abs) {
  {
    StreamBuilder m(this, MachineType::Float32(), MachineType::Float32());
    Node* const p0 = m.Parameter(0);
    Node* const n = m.Float32Abs(p0);
    m.Return(n);
    Stream s = m.Build();
    ASSERT_EQ(1U, s.size());
    EXPECT_EQ(kSSEFloat32Abs, s[0]->arch_opcode());
    ASSERT_EQ(1U, s[0]->InputCount());
    EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
    ASSERT_EQ(1U, s[0]->OutputCount());
    EXPECT_TRUE(s.IsSameAsFirst(s[0]->Output()));
    EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
    EXPECT_EQ(kFlags_none, s[0]->flags_mode());
  }
  {
    StreamBuilder m(this, MachineType::Float32(), MachineType::Float32());
    Node* const p0 = m.Parameter(0);
    Node* const n = m.Float32Abs(p0);
    m.Return(n);
    Stream s = m.Build(AVX);
    ASSERT_EQ(1U, s.size());
    EXPECT_EQ(kAVXFloat32Abs, s[0]->arch_opcode());
    ASSERT_EQ(1U, s[0]->InputCount());
    EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
    ASSERT_EQ(1U, s[0]->OutputCount());
    EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
    EXPECT_EQ(kFlags_none, s[0]->flags_mode());
  }
}


TEST_F(InstructionSelectorTest, Float64Abs) {
  {
    StreamBuilder m(this, MachineType::Float64(), MachineType::Float64());
    Node* const p0 = m.Parameter(0);
    Node* const n = m.Float64Abs(p0);
    m.Return(n);
    Stream s = m.Build();
    ASSERT_EQ(1U, s.size());
    EXPECT_EQ(kSSEFloat64Abs, s[0]->arch_opcode());
    ASSERT_EQ(1U, s[0]->InputCount());
    EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
    ASSERT_EQ(1U, s[0]->OutputCount());
    EXPECT_TRUE(s.IsSameAsFirst(s[0]->Output()));
    EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
    EXPECT_EQ(kFlags_none, s[0]->flags_mode());
  }
  {
    StreamBuilder m(this, MachineType::Float64(), MachineType::Float64());
    Node* const p0 = m.Parameter(0);
    Node* const n = m.Float64Abs(p0);
    m.Return(n);
    Stream s = m.Build(AVX);
    ASSERT_EQ(1U, s.size());
    EXPECT_EQ(kAVXFloat64Abs, s[0]->arch_opcode());
    ASSERT_EQ(1U, s[0]->InputCount());
    EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
    ASSERT_EQ(1U, s[0]->OutputCount());
    EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
    EXPECT_EQ(kFlags_none, s[0]->flags_mode());
  }
}


TEST_F(InstructionSelectorTest, Float64BinopArithmetic) {
  {
    StreamBuilder m(this, MachineType::Float64(), MachineType::Float64(),
                    MachineType::Float64());
    Node* add = m.Float64Add(m.Parameter(0), m.Parameter(1));
    Node* mul = m.Float64Mul(add, m.Parameter(1));
    Node* sub = m.Float64Sub(mul, add);
    Node* ret = m.Float64Div(mul, sub);
    m.Return(ret);
    Stream s = m.Build(AVX);
    ASSERT_EQ(4U, s.size());
    EXPECT_EQ(kAVXFloat64Add, s[0]->arch_opcode());
    EXPECT_EQ(kAVXFloat64Mul, s[1]->arch_opcode());
    EXPECT_EQ(kAVXFloat64Sub, s[2]->arch_opcode());
    EXPECT_EQ(kAVXFloat64Div, s[3]->arch_opcode());
  }
  {
    StreamBuilder m(this, MachineType::Float64(), MachineType::Float64(),
                    MachineType::Float64());
    Node* add = m.Float64Add(m.Parameter(0), m.Parameter(1));
    Node* mul = m.Float64Mul(add, m.Parameter(1));
    Node* sub = m.Float64Sub(mul, add);
    Node* ret = m.Float64Div(mul, sub);
    m.Return(ret);
    Stream s = m.Build();
    ASSERT_EQ(4U, s.size());
    EXPECT_EQ(kSSEFloat64Add, s[0]->arch_opcode());
    EXPECT_EQ(kSSEFloat64Mul, s[1]->arch_opcode());
    EXPECT_EQ(kSSEFloat64Sub, s[2]->arch_opcode());
    EXPECT_EQ(kSSEFloat64Div, s[3]->arch_opcode());
  }
}


TEST_F(InstructionSelectorTest, Float32SubWithMinusZeroAndParameter) {
  {
    StreamBuilder m(this, MachineType::Float32(), MachineType::Float32());
    Node* const p0 = m.Parameter(0);
    Node* const n = m.Float32Sub(m.Float32Constant(-0.0f), p0);
    m.Return(n);
    Stream s = m.Build();
    ASSERT_EQ(1U, s.size());
    EXPECT_EQ(kSSEFloat32Neg, s[0]->arch_opcode());
    ASSERT_EQ(1U, s[0]->InputCount());
    EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
    ASSERT_EQ(1U, s[0]->OutputCount());
    EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
    EXPECT_EQ(kFlags_none, s[0]->flags_mode());
  }
  {
    StreamBuilder m(this, MachineType::Float32(), MachineType::Float32());
    Node* const p0 = m.Parameter(0);
    Node* const n = m.Float32Sub(m.Float32Constant(-0.0f), p0);
    m.Return(n);
    Stream s = m.Build(AVX);
    ASSERT_EQ(1U, s.size());
    EXPECT_EQ(kAVXFloat32Neg, s[0]->arch_opcode());
    ASSERT_EQ(1U, s[0]->InputCount());
    EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
    ASSERT_EQ(1U, s[0]->OutputCount());
    EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
    EXPECT_EQ(kFlags_none, s[0]->flags_mode());
  }
}


TEST_F(InstructionSelectorTest, Float64SubWithMinusZeroAndParameter) {
  {
    StreamBuilder m(this, MachineType::Float64(), MachineType::Float64());
    Node* const p0 = m.Parameter(0);
    Node* const n = m.Float64Sub(m.Float64Constant(-0.0), p0);
    m.Return(n);
    Stream s = m.Build();
    ASSERT_EQ(1U, s.size());
    EXPECT_EQ(kSSEFloat64Neg, s[0]->arch_opcode());
    ASSERT_EQ(1U, s[0]->InputCount());
    EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
    ASSERT_EQ(1U, s[0]->OutputCount());
    EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
    EXPECT_EQ(kFlags_none, s[0]->flags_mode());
  }
  {
    StreamBuilder m(this, MachineType::Float64(), MachineType::Float64());
    Node* const p0 = m.Parameter(0);
    Node* const n = m.Float64Sub(m.Float64Constant(-0.0), p0);
    m.Return(n);
    Stream s = m.Build(AVX);
    ASSERT_EQ(1U, s.size());
    EXPECT_EQ(kAVXFloat64Neg, s[0]->arch_opcode());
    ASSERT_EQ(1U, s[0]->InputCount());
    EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
    ASSERT_EQ(1U, s[0]->OutputCount());
    EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
    EXPECT_EQ(kFlags_none, s[0]->flags_mode());
  }
}


// -----------------------------------------------------------------------------
// Miscellaneous.
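//
// Uint32LessThan(stack limit load, LoadStackPointer) is the stack check
// pattern; it should be matched into a single kIA32StackCheck instruction
// whose result is produced through the flags (kFlags_set) with the operands
// swapped, i.e. an unsigned-greater-than condition.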


TEST_F(InstructionSelectorTest, Uint32LessThanWithLoadAndLoadStackPointer) {
  StreamBuilder m(this, MachineType::Bool());
  Node* const sl = m.Load(
      MachineType::Pointer(),
      m.ExternalConstant(ExternalReference::address_of_stack_limit(isolate())));
  Node* const sp = m.LoadStackPointer();
  Node* const n = m.Uint32LessThan(sl, sp);
  m.Return(n);
  Stream s = m.Build();
  ASSERT_EQ(1U, s.size());
  EXPECT_EQ(kIA32StackCheck, s[0]->arch_opcode());
  ASSERT_EQ(0U, s[0]->InputCount());
  ASSERT_EQ(1U, s[0]->OutputCount());
  EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
  EXPECT_EQ(kFlags_set, s[0]->flags_mode());
  EXPECT_EQ(kUnsignedGreaterThan, s[0]->flags_condition());
}


TEST_F(InstructionSelectorTest, Word32Clz) {
  StreamBuilder m(this, MachineType::Uint32(), MachineType::Uint32());
  Node* const p0 = m.Parameter(0);
  Node* const n = m.Word32Clz(p0);
  m.Return(n);
  Stream s = m.Build();
  ASSERT_EQ(1U, s.size());
  EXPECT_EQ(kIA32Lzcnt, s[0]->arch_opcode());
  ASSERT_EQ(1U, s[0]->InputCount());
  EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
  ASSERT_EQ(1U, s[0]->OutputCount());
  EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
}

}  // namespace compiler
}  // namespace internal
}  // namespace v8