1// Copyright 2014 the V8 project authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5#include "src/crankshaft/ppc/lithium-codegen-ppc.h"
6
7#include "src/base/bits.h"
8#include "src/builtins/builtins-constructor.h"
9#include "src/code-factory.h"
10#include "src/code-stubs.h"
11#include "src/crankshaft/hydrogen-osr.h"
12#include "src/crankshaft/ppc/lithium-gap-resolver-ppc.h"
13#include "src/ic/ic.h"
14#include "src/ic/stub-cache.h"
15
16namespace v8 {
17namespace internal {
18
19
20class SafepointGenerator final : public CallWrapper {
21 public:
22  SafepointGenerator(LCodeGen* codegen, LPointerMap* pointers,
23                     Safepoint::DeoptMode mode)
24      : codegen_(codegen), pointers_(pointers), deopt_mode_(mode) {}
25  virtual ~SafepointGenerator() {}
26
27  void BeforeCall(int call_size) const override {}
28
29  void AfterCall() const override {
30    codegen_->RecordSafepoint(pointers_, deopt_mode_);
31  }
32
33 private:
34  LCodeGen* codegen_;
35  LPointerMap* pointers_;
36  Safepoint::DeoptMode deopt_mode_;
37};
38
39LCodeGen::PushSafepointRegistersScope::PushSafepointRegistersScope(
40    LCodeGen* codegen)
41    : codegen_(codegen) {
42  DCHECK(codegen_->info()->is_calling());
43  DCHECK(codegen_->expected_safepoint_kind_ == Safepoint::kSimple);
44  codegen_->expected_safepoint_kind_ = Safepoint::kWithRegisters;
45  StoreRegistersStateStub stub(codegen_->isolate());
46  codegen_->masm_->CallStub(&stub);
47}
48
49LCodeGen::PushSafepointRegistersScope::~PushSafepointRegistersScope() {
50  DCHECK(codegen_->expected_safepoint_kind_ == Safepoint::kWithRegisters);
51  RestoreRegistersStateStub stub(codegen_->isolate());
52  codegen_->masm_->CallStub(&stub);
53  codegen_->expected_safepoint_kind_ = Safepoint::kSimple;
54}
55
56#define __ masm()->
57
58bool LCodeGen::GenerateCode() {
59  LPhase phase("Z_Code generation", chunk());
60  DCHECK(is_unused());
61  status_ = GENERATING;
62
63  // Open a frame scope to indicate that there is a frame on the stack.  The
64  // NONE indicates that the scope shouldn't actually generate code to set up
65  // the frame (that is done in GeneratePrologue).
66  FrameScope frame_scope(masm_, StackFrame::NONE);
67
68  bool rc = GeneratePrologue() && GenerateBody() && GenerateDeferredCode() &&
69            GenerateJumpTable() && GenerateSafepointTable();
70  if (FLAG_enable_embedded_constant_pool && !rc) {
71    masm()->AbortConstantPoolBuilding();
72  }
73  return rc;
74}
75
76
77void LCodeGen::FinishCode(Handle<Code> code) {
78  DCHECK(is_done());
79  code->set_stack_slots(GetTotalFrameSlotCount());
80  code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
81  PopulateDeoptimizationData(code);
82}
83
84
85void LCodeGen::SaveCallerDoubles() {
86  DCHECK(info()->saves_caller_doubles());
87  DCHECK(NeedsEagerFrame());
88  Comment(";;; Save clobbered callee double registers");
89  int count = 0;
90  BitVector* doubles = chunk()->allocated_double_registers();
91  BitVector::Iterator save_iterator(doubles);
92  while (!save_iterator.Done()) {
93    __ stfd(DoubleRegister::from_code(save_iterator.Current()),
94            MemOperand(sp, count * kDoubleSize));
95    save_iterator.Advance();
96    count++;
97  }
98}
99
100
101void LCodeGen::RestoreCallerDoubles() {
102  DCHECK(info()->saves_caller_doubles());
103  DCHECK(NeedsEagerFrame());
104  Comment(";;; Restore clobbered callee double registers");
105  BitVector* doubles = chunk()->allocated_double_registers();
106  BitVector::Iterator save_iterator(doubles);
107  int count = 0;
108  while (!save_iterator.Done()) {
109    __ lfd(DoubleRegister::from_code(save_iterator.Current()),
110           MemOperand(sp, count * kDoubleSize));
111    save_iterator.Advance();
112    count++;
113  }
114}
115
116
117bool LCodeGen::GeneratePrologue() {
118  DCHECK(is_generating());
119
120  if (info()->IsOptimizing()) {
121    ProfileEntryHookStub::MaybeCallEntryHook(masm_);
122
123    // r4: Callee's JS function.
124    // cp: Callee's context.
125    // pp: Callee's constant pool pointer (if enabled)
126    // fp: Caller's frame pointer.
127    // lr: Caller's pc.
128    // ip: Our own function entry (required by the prologue)
129  }
130
131  int prologue_offset = masm_->pc_offset();
132
133  if (prologue_offset) {
134    // Prologue logic requires it's starting address in ip and the
135    // corresponding offset from the function entry.
136    prologue_offset += Instruction::kInstrSize;
137    __ addi(ip, ip, Operand(prologue_offset));
138  }
139  info()->set_prologue_offset(prologue_offset);
140  if (NeedsEagerFrame()) {
141    if (info()->IsStub()) {
142      __ StubPrologue(StackFrame::STUB, ip, prologue_offset);
143    } else {
144      __ Prologue(info()->GeneratePreagedPrologue(), ip, prologue_offset);
145    }
146    frame_is_built_ = true;
147  }
148
149  // Reserve space for the stack slots needed by the code.
150  int slots = GetStackSlotCount();
151  if (slots > 0) {
152    __ subi(sp, sp, Operand(slots * kPointerSize));
153    if (FLAG_debug_code) {
154      __ Push(r3, r4);
155      __ li(r0, Operand(slots));
156      __ mtctr(r0);
157      __ addi(r3, sp, Operand((slots + 2) * kPointerSize));
158      __ mov(r4, Operand(kSlotsZapValue));
159      Label loop;
160      __ bind(&loop);
161      __ StorePU(r4, MemOperand(r3, -kPointerSize));
162      __ bdnz(&loop);
163      __ Pop(r3, r4);
164    }
165  }
166
167  if (info()->saves_caller_doubles()) {
168    SaveCallerDoubles();
169  }
170  return !is_aborted();
171}
172
173
174void LCodeGen::DoPrologue(LPrologue* instr) {
175  Comment(";;; Prologue begin");
176
177  // Possibly allocate a local context.
178  if (info()->scope()->NeedsContext()) {
179    Comment(";;; Allocate local context");
180    bool need_write_barrier = true;
181    // Argument to NewContext is the function, which is in r4.
182    int slots = info()->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
183    Safepoint::DeoptMode deopt_mode = Safepoint::kNoLazyDeopt;
184    if (info()->scope()->is_script_scope()) {
185      __ push(r4);
186      __ Push(info()->scope()->scope_info());
187      __ CallRuntime(Runtime::kNewScriptContext);
188      deopt_mode = Safepoint::kLazyDeopt;
189    } else {
190      if (slots <=
191          ConstructorBuiltinsAssembler::MaximumFunctionContextSlots()) {
192        Callable callable = CodeFactory::FastNewFunctionContext(
193            isolate(), info()->scope()->scope_type());
194        __ mov(FastNewFunctionContextDescriptor::SlotsRegister(),
195               Operand(slots));
196        __ Call(callable.code(), RelocInfo::CODE_TARGET);
197        // Result of the FastNewFunctionContext builtin is always in new space.
198        need_write_barrier = false;
199      } else {
200        __ push(r4);
201        __ Push(Smi::FromInt(info()->scope()->scope_type()));
202        __ CallRuntime(Runtime::kNewFunctionContext);
203      }
204    }
205    RecordSafepoint(deopt_mode);
206
207    // Context is returned in both r3 and cp.  It replaces the context
208    // passed to us.  It's saved in the stack and kept live in cp.
209    __ mr(cp, r3);
210    __ StoreP(r3, MemOperand(fp, StandardFrameConstants::kContextOffset));
211    // Copy any necessary parameters into the context.
212    int num_parameters = info()->scope()->num_parameters();
213    int first_parameter = info()->scope()->has_this_declaration() ? -1 : 0;
214    for (int i = first_parameter; i < num_parameters; i++) {
215      Variable* var = (i == -1) ? info()->scope()->receiver()
216                                : info()->scope()->parameter(i);
217      if (var->IsContextSlot()) {
218        int parameter_offset = StandardFrameConstants::kCallerSPOffset +
219                               (num_parameters - 1 - i) * kPointerSize;
220        // Load parameter from stack.
221        __ LoadP(r3, MemOperand(fp, parameter_offset));
222        // Store it in the context.
223        MemOperand target = ContextMemOperand(cp, var->index());
224        __ StoreP(r3, target, r0);
225        // Update the write barrier. This clobbers r6 and r3.
226        if (need_write_barrier) {
227          __ RecordWriteContextSlot(cp, target.offset(), r3, r6,
228                                    GetLinkRegisterState(), kSaveFPRegs);
229        } else if (FLAG_debug_code) {
230          Label done;
231          __ JumpIfInNewSpace(cp, r3, &done);
232          __ Abort(kExpectedNewSpaceObject);
233          __ bind(&done);
234        }
235      }
236    }
237    Comment(";;; End allocate local context");
238  }
239
240  Comment(";;; Prologue end");
241}
242
243
244void LCodeGen::GenerateOsrPrologue() {
245  // Generate the OSR entry prologue at the first unknown OSR value, or if there
246  // are none, at the OSR entrypoint instruction.
247  if (osr_pc_offset_ >= 0) return;
248
249  osr_pc_offset_ = masm()->pc_offset();
250
251  // Adjust the frame size, subsuming the unoptimized frame into the
252  // optimized frame.
253  int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
254  DCHECK(slots >= 0);
255  __ subi(sp, sp, Operand(slots * kPointerSize));
256}
257
258
259void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) {
260  if (instr->IsCall()) {
261    EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
262  }
263  if (!instr->IsLazyBailout() && !instr->IsGap()) {
264    safepoints_.BumpLastLazySafepointIndex();
265  }
266}
267
268
269bool LCodeGen::GenerateDeferredCode() {
270  DCHECK(is_generating());
271  if (deferred_.length() > 0) {
272    for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
273      LDeferredCode* code = deferred_[i];
274
275      HValue* value =
276          instructions_->at(code->instruction_index())->hydrogen_value();
277      RecordAndWritePosition(value->position());
278
279      Comment(
280          ";;; <@%d,#%d> "
281          "-------------------- Deferred %s --------------------",
282          code->instruction_index(), code->instr()->hydrogen_value()->id(),
283          code->instr()->Mnemonic());
284      __ bind(code->entry());
285      if (NeedsDeferredFrame()) {
286        Comment(";;; Build frame");
287        DCHECK(!frame_is_built_);
288        DCHECK(info()->IsStub());
289        frame_is_built_ = true;
290        __ mov(scratch0(), Operand(StackFrame::TypeToMarker(StackFrame::STUB)));
291        __ PushCommonFrame(scratch0());
292        Comment(";;; Deferred code");
293      }
294      code->Generate();
295      if (NeedsDeferredFrame()) {
296        Comment(";;; Destroy frame");
297        DCHECK(frame_is_built_);
298        __ PopCommonFrame(scratch0());
299        frame_is_built_ = false;
300      }
301      __ b(code->exit());
302    }
303  }
304
305  return !is_aborted();
306}
307
308
309bool LCodeGen::GenerateJumpTable() {
310  // Check that the jump table is accessible from everywhere in the function
311  // code, i.e. that offsets to the table can be encoded in the 24bit signed
312  // immediate of a branch instruction.
313  // To simplify we consider the code size from the first instruction to the
314  // end of the jump table. We also don't consider the pc load delta.
315  // Each entry in the jump table generates one instruction and inlines one
316  // 32bit data after it.
317  if (!is_int24((masm()->pc_offset() / Assembler::kInstrSize) +
318                jump_table_.length() * 7)) {
319    Abort(kGeneratedCodeIsTooLarge);
320  }
321
322  if (jump_table_.length() > 0) {
323    Label needs_frame, call_deopt_entry;
324
325    Comment(";;; -------------------- Jump table --------------------");
326    Address base = jump_table_[0].address;
327
328    Register entry_offset = scratch0();
329
330    int length = jump_table_.length();
331    for (int i = 0; i < length; i++) {
332      Deoptimizer::JumpTableEntry* table_entry = &jump_table_[i];
333      __ bind(&table_entry->label);
334
335      DCHECK_EQ(jump_table_[0].bailout_type, table_entry->bailout_type);
336      Address entry = table_entry->address;
337      DeoptComment(table_entry->deopt_info);
338
339      // Second-level deopt table entries are contiguous and small, so instead
340      // of loading the full, absolute address of each one, load an immediate
341      // offset which will be added to the base address later.
342      __ mov(entry_offset, Operand(entry - base));
343
344      if (table_entry->needs_frame) {
345        DCHECK(!info()->saves_caller_doubles());
346        Comment(";;; call deopt with frame");
347        __ PushCommonFrame();
348        __ b(&needs_frame, SetLK);
349      } else {
350        __ b(&call_deopt_entry, SetLK);
351      }
352    }
353
354    if (needs_frame.is_linked()) {
355      __ bind(&needs_frame);
356      // This variant of deopt can only be used with stubs. Since we don't
357      // have a function pointer to install in the stack frame that we're
358      // building, install a special marker there instead.
359      __ mov(ip, Operand(StackFrame::TypeToMarker(StackFrame::STUB)));
360      __ push(ip);
361      DCHECK(info()->IsStub());
362    }
363
364    Comment(";;; call deopt");
365    __ bind(&call_deopt_entry);
366
367    if (info()->saves_caller_doubles()) {
368      DCHECK(info()->IsStub());
369      RestoreCallerDoubles();
370    }
371
372    // Add the base address to the offset previously loaded in entry_offset.
373    __ mov(ip, Operand(ExternalReference::ForDeoptEntry(base)));
374    __ add(ip, entry_offset, ip);
375    __ Jump(ip);
376  }
377
378  // The deoptimization jump table is the last part of the instruction
379  // sequence. Mark the generated code as done unless we bailed out.
380  if (!is_aborted()) status_ = DONE;
381  return !is_aborted();
382}
383
384
385bool LCodeGen::GenerateSafepointTable() {
386  DCHECK(is_done());
387  safepoints_.Emit(masm(), GetTotalFrameSlotCount());
388  return !is_aborted();
389}
390
391
392Register LCodeGen::ToRegister(int code) const {
393  return Register::from_code(code);
394}
395
396
397DoubleRegister LCodeGen::ToDoubleRegister(int code) const {
398  return DoubleRegister::from_code(code);
399}
400
401
402Register LCodeGen::ToRegister(LOperand* op) const {
403  DCHECK(op->IsRegister());
404  return ToRegister(op->index());
405}
406
407
408Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) {
409  if (op->IsRegister()) {
410    return ToRegister(op->index());
411  } else if (op->IsConstantOperand()) {
412    LConstantOperand* const_op = LConstantOperand::cast(op);
413    HConstant* constant = chunk_->LookupConstant(const_op);
414    Handle<Object> literal = constant->handle(isolate());
415    Representation r = chunk_->LookupLiteralRepresentation(const_op);
416    if (r.IsInteger32()) {
417      AllowDeferredHandleDereference get_number;
418      DCHECK(literal->IsNumber());
419      __ LoadIntLiteral(scratch, static_cast<int32_t>(literal->Number()));
420    } else if (r.IsDouble()) {
421      Abort(kEmitLoadRegisterUnsupportedDoubleImmediate);
422    } else {
423      DCHECK(r.IsSmiOrTagged());
424      __ Move(scratch, literal);
425    }
426    return scratch;
427  } else if (op->IsStackSlot()) {
428    __ LoadP(scratch, ToMemOperand(op));
429    return scratch;
430  }
431  UNREACHABLE();
432  return scratch;
433}
434
435
436void LCodeGen::EmitLoadIntegerConstant(LConstantOperand* const_op,
437                                       Register dst) {
438  DCHECK(IsInteger32(const_op));
439  HConstant* constant = chunk_->LookupConstant(const_op);
440  int32_t value = constant->Integer32Value();
441  if (IsSmi(const_op)) {
442    __ LoadSmiLiteral(dst, Smi::FromInt(value));
443  } else {
444    __ LoadIntLiteral(dst, value);
445  }
446}
447
448
449DoubleRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
450  DCHECK(op->IsDoubleRegister());
451  return ToDoubleRegister(op->index());
452}
453
454
455Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
456  HConstant* constant = chunk_->LookupConstant(op);
457  DCHECK(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
458  return constant->handle(isolate());
459}
460
461
462bool LCodeGen::IsInteger32(LConstantOperand* op) const {
463  return chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32();
464}
465
466
467bool LCodeGen::IsSmi(LConstantOperand* op) const {
468  return chunk_->LookupLiteralRepresentation(op).IsSmi();
469}
470
471
472int32_t LCodeGen::ToInteger32(LConstantOperand* op) const {
473  return ToRepresentation(op, Representation::Integer32());
474}
475
476
477intptr_t LCodeGen::ToRepresentation(LConstantOperand* op,
478                                    const Representation& r) const {
479  HConstant* constant = chunk_->LookupConstant(op);
480  int32_t value = constant->Integer32Value();
481  if (r.IsInteger32()) return value;
482  DCHECK(r.IsSmiOrTagged());
483  return reinterpret_cast<intptr_t>(Smi::FromInt(value));
484}
485
486
487Smi* LCodeGen::ToSmi(LConstantOperand* op) const {
488  HConstant* constant = chunk_->LookupConstant(op);
489  return Smi::FromInt(constant->Integer32Value());
490}
491
492
493double LCodeGen::ToDouble(LConstantOperand* op) const {
494  HConstant* constant = chunk_->LookupConstant(op);
495  DCHECK(constant->HasDoubleValue());
496  return constant->DoubleValue();
497}
498
499
500Operand LCodeGen::ToOperand(LOperand* op) {
501  if (op->IsConstantOperand()) {
502    LConstantOperand* const_op = LConstantOperand::cast(op);
503    HConstant* constant = chunk()->LookupConstant(const_op);
504    Representation r = chunk_->LookupLiteralRepresentation(const_op);
505    if (r.IsSmi()) {
506      DCHECK(constant->HasSmiValue());
507      return Operand(Smi::FromInt(constant->Integer32Value()));
508    } else if (r.IsInteger32()) {
509      DCHECK(constant->HasInteger32Value());
510      return Operand(constant->Integer32Value());
511    } else if (r.IsDouble()) {
512      Abort(kToOperandUnsupportedDoubleImmediate);
513    }
514    DCHECK(r.IsTagged());
515    return Operand(constant->handle(isolate()));
516  } else if (op->IsRegister()) {
517    return Operand(ToRegister(op));
518  } else if (op->IsDoubleRegister()) {
519    Abort(kToOperandIsDoubleRegisterUnimplemented);
520    return Operand::Zero();
521  }
522  // Stack slots not implemented, use ToMemOperand instead.
523  UNREACHABLE();
524  return Operand::Zero();
525}
526
527
528static int ArgumentsOffsetWithoutFrame(int index) {
529  DCHECK(index < 0);
530  return -(index + 1) * kPointerSize;
531}
532
533
534MemOperand LCodeGen::ToMemOperand(LOperand* op) const {
535  DCHECK(!op->IsRegister());
536  DCHECK(!op->IsDoubleRegister());
537  DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
538  if (NeedsEagerFrame()) {
539    return MemOperand(fp, FrameSlotToFPOffset(op->index()));
540  } else {
541    // Retrieve parameter without eager stack-frame relative to the
542    // stack-pointer.
543    return MemOperand(sp, ArgumentsOffsetWithoutFrame(op->index()));
544  }
545}
546
547
548MemOperand LCodeGen::ToHighMemOperand(LOperand* op) const {
549  DCHECK(op->IsDoubleStackSlot());
550  if (NeedsEagerFrame()) {
551    return MemOperand(fp, FrameSlotToFPOffset(op->index()) + kPointerSize);
552  } else {
553    // Retrieve parameter without eager stack-frame relative to the
554    // stack-pointer.
555    return MemOperand(sp,
556                      ArgumentsOffsetWithoutFrame(op->index()) + kPointerSize);
557  }
558}
559
560
561void LCodeGen::WriteTranslation(LEnvironment* environment,
562                                Translation* translation) {
563  if (environment == NULL) return;
564
565  // The translation includes one command per value in the environment.
566  int translation_size = environment->translation_size();
567
568  WriteTranslation(environment->outer(), translation);
569  WriteTranslationFrame(environment, translation);
570
571  int object_index = 0;
572  int dematerialized_index = 0;
573  for (int i = 0; i < translation_size; ++i) {
574    LOperand* value = environment->values()->at(i);
575    AddToTranslation(
576        environment, translation, value, environment->HasTaggedValueAt(i),
577        environment->HasUint32ValueAt(i), &object_index, &dematerialized_index);
578  }
579}
580
581
582void LCodeGen::AddToTranslation(LEnvironment* environment,
583                                Translation* translation, LOperand* op,
584                                bool is_tagged, bool is_uint32,
585                                int* object_index_pointer,
586                                int* dematerialized_index_pointer) {
587  if (op == LEnvironment::materialization_marker()) {
588    int object_index = (*object_index_pointer)++;
589    if (environment->ObjectIsDuplicateAt(object_index)) {
590      int dupe_of = environment->ObjectDuplicateOfAt(object_index);
591      translation->DuplicateObject(dupe_of);
592      return;
593    }
594    int object_length = environment->ObjectLengthAt(object_index);
595    if (environment->ObjectIsArgumentsAt(object_index)) {
596      translation->BeginArgumentsObject(object_length);
597    } else {
598      translation->BeginCapturedObject(object_length);
599    }
600    int dematerialized_index = *dematerialized_index_pointer;
601    int env_offset = environment->translation_size() + dematerialized_index;
602    *dematerialized_index_pointer += object_length;
603    for (int i = 0; i < object_length; ++i) {
604      LOperand* value = environment->values()->at(env_offset + i);
605      AddToTranslation(environment, translation, value,
606                       environment->HasTaggedValueAt(env_offset + i),
607                       environment->HasUint32ValueAt(env_offset + i),
608                       object_index_pointer, dematerialized_index_pointer);
609    }
610    return;
611  }
612
613  if (op->IsStackSlot()) {
614    int index = op->index();
615    if (is_tagged) {
616      translation->StoreStackSlot(index);
617    } else if (is_uint32) {
618      translation->StoreUint32StackSlot(index);
619    } else {
620      translation->StoreInt32StackSlot(index);
621    }
622  } else if (op->IsDoubleStackSlot()) {
623    int index = op->index();
624    translation->StoreDoubleStackSlot(index);
625  } else if (op->IsRegister()) {
626    Register reg = ToRegister(op);
627    if (is_tagged) {
628      translation->StoreRegister(reg);
629    } else if (is_uint32) {
630      translation->StoreUint32Register(reg);
631    } else {
632      translation->StoreInt32Register(reg);
633    }
634  } else if (op->IsDoubleRegister()) {
635    DoubleRegister reg = ToDoubleRegister(op);
636    translation->StoreDoubleRegister(reg);
637  } else if (op->IsConstantOperand()) {
638    HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
639    int src_index = DefineDeoptimizationLiteral(constant->handle(isolate()));
640    translation->StoreLiteral(src_index);
641  } else {
642    UNREACHABLE();
643  }
644}
645
646
647void LCodeGen::CallCode(Handle<Code> code, RelocInfo::Mode mode,
648                        LInstruction* instr) {
649  CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT);
650}
651
652
653void LCodeGen::CallCodeGeneric(Handle<Code> code, RelocInfo::Mode mode,
654                               LInstruction* instr,
655                               SafepointMode safepoint_mode) {
656  DCHECK(instr != NULL);
657  __ Call(code, mode);
658  RecordSafepointWithLazyDeopt(instr, safepoint_mode);
659
660  // Signal that we don't inline smi code before these stubs in the
661  // optimizing code generator.
662  if (code->kind() == Code::BINARY_OP_IC || code->kind() == Code::COMPARE_IC) {
663    __ nop();
664  }
665}
666
667
668void LCodeGen::CallRuntime(const Runtime::Function* function, int num_arguments,
669                           LInstruction* instr, SaveFPRegsMode save_doubles) {
670  DCHECK(instr != NULL);
671
672  __ CallRuntime(function, num_arguments, save_doubles);
673
674  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
675}
676
677
678void LCodeGen::LoadContextFromDeferred(LOperand* context) {
679  if (context->IsRegister()) {
680    __ Move(cp, ToRegister(context));
681  } else if (context->IsStackSlot()) {
682    __ LoadP(cp, ToMemOperand(context));
683  } else if (context->IsConstantOperand()) {
684    HConstant* constant =
685        chunk_->LookupConstant(LConstantOperand::cast(context));
686    __ Move(cp, Handle<Object>::cast(constant->handle(isolate())));
687  } else {
688    UNREACHABLE();
689  }
690}
691
692
693void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id, int argc,
694                                       LInstruction* instr, LOperand* context) {
695  LoadContextFromDeferred(context);
696  __ CallRuntimeSaveDoubles(id);
697  RecordSafepointWithRegisters(instr->pointer_map(), argc,
698                               Safepoint::kNoLazyDeopt);
699}
700
701
702void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
703                                                    Safepoint::DeoptMode mode) {
704  environment->set_has_been_used();
705  if (!environment->HasBeenRegistered()) {
706    // Physical stack frame layout:
707    // -x ............. -4  0 ..................................... y
708    // [incoming arguments] [spill slots] [pushed outgoing arguments]
709
710    // Layout of the environment:
711    // 0 ..................................................... size-1
712    // [parameters] [locals] [expression stack including arguments]
713
714    // Layout of the translation:
715    // 0 ........................................................ size - 1 + 4
716    // [expression stack including arguments] [locals] [4 words] [parameters]
717    // |>------------  translation_size ------------<|
718
719    int frame_count = 0;
720    int jsframe_count = 0;
721    for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
722      ++frame_count;
723      if (e->frame_type() == JS_FUNCTION) {
724        ++jsframe_count;
725      }
726    }
727    Translation translation(&translations_, frame_count, jsframe_count, zone());
728    WriteTranslation(environment, &translation);
729    int deoptimization_index = deoptimizations_.length();
730    int pc_offset = masm()->pc_offset();
731    environment->Register(deoptimization_index, translation.index(),
732                          (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
733    deoptimizations_.Add(environment, zone());
734  }
735}
736
737void LCodeGen::DeoptimizeIf(Condition cond, LInstruction* instr,
738                            DeoptimizeReason deopt_reason,
739                            Deoptimizer::BailoutType bailout_type,
740                            CRegister cr) {
741  LEnvironment* environment = instr->environment();
742  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
743  DCHECK(environment->HasBeenRegistered());
744  int id = environment->deoptimization_index();
745  Address entry =
746      Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
747  if (entry == NULL) {
748    Abort(kBailoutWasNotPrepared);
749    return;
750  }
751
752  if (FLAG_deopt_every_n_times != 0 && !info()->IsStub()) {
753    CRegister alt_cr = cr6;
754    Register scratch = scratch0();
755    ExternalReference count = ExternalReference::stress_deopt_count(isolate());
756    Label no_deopt;
757    DCHECK(!alt_cr.is(cr));
758    __ Push(r4, scratch);
759    __ mov(scratch, Operand(count));
760    __ lwz(r4, MemOperand(scratch));
761    __ subi(r4, r4, Operand(1));
762    __ cmpi(r4, Operand::Zero(), alt_cr);
763    __ bne(&no_deopt, alt_cr);
764    __ li(r4, Operand(FLAG_deopt_every_n_times));
765    __ stw(r4, MemOperand(scratch));
766    __ Pop(r4, scratch);
767
768    __ Call(entry, RelocInfo::RUNTIME_ENTRY);
769    __ bind(&no_deopt);
770    __ stw(r4, MemOperand(scratch));
771    __ Pop(r4, scratch);
772  }
773
774  if (info()->ShouldTrapOnDeopt()) {
775    __ stop("trap_on_deopt", cond, kDefaultStopCode, cr);
776  }
777
778  Deoptimizer::DeoptInfo deopt_info = MakeDeoptInfo(instr, deopt_reason, id);
779
780  DCHECK(info()->IsStub() || frame_is_built_);
781  // Go through jump table if we need to handle condition, build frame, or
782  // restore caller doubles.
783  if (cond == al && frame_is_built_ && !info()->saves_caller_doubles()) {
784    DeoptComment(deopt_info);
785    __ Call(entry, RelocInfo::RUNTIME_ENTRY);
786  } else {
787    Deoptimizer::JumpTableEntry table_entry(entry, deopt_info, bailout_type,
788                                            !frame_is_built_);
789    // We often have several deopts to the same entry, reuse the last
790    // jump entry if this is the case.
791    if (FLAG_trace_deopt || isolate()->is_profiling() ||
792        jump_table_.is_empty() ||
793        !table_entry.IsEquivalentTo(jump_table_.last())) {
794      jump_table_.Add(table_entry, zone());
795    }
796    __ b(cond, &jump_table_.last().label, cr);
797  }
798}
799
800void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
801                            DeoptimizeReason deopt_reason, CRegister cr) {
802  Deoptimizer::BailoutType bailout_type =
803      info()->IsStub() ? Deoptimizer::LAZY : Deoptimizer::EAGER;
804  DeoptimizeIf(condition, instr, deopt_reason, bailout_type, cr);
805}
806
807
808void LCodeGen::RecordSafepointWithLazyDeopt(LInstruction* instr,
809                                            SafepointMode safepoint_mode) {
810  if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
811    RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
812  } else {
813    DCHECK(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
814    RecordSafepointWithRegisters(instr->pointer_map(), 0,
815                                 Safepoint::kLazyDeopt);
816  }
817}
818
819
820void LCodeGen::RecordSafepoint(LPointerMap* pointers, Safepoint::Kind kind,
821                               int arguments, Safepoint::DeoptMode deopt_mode) {
822  DCHECK(expected_safepoint_kind_ == kind);
823
824  const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
825  Safepoint safepoint =
826      safepoints_.DefineSafepoint(masm(), kind, arguments, deopt_mode);
827  for (int i = 0; i < operands->length(); i++) {
828    LOperand* pointer = operands->at(i);
829    if (pointer->IsStackSlot()) {
830      safepoint.DefinePointerSlot(pointer->index(), zone());
831    } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
832      safepoint.DefinePointerRegister(ToRegister(pointer), zone());
833    }
834  }
835}
836
837
838void LCodeGen::RecordSafepoint(LPointerMap* pointers,
839                               Safepoint::DeoptMode deopt_mode) {
840  RecordSafepoint(pointers, Safepoint::kSimple, 0, deopt_mode);
841}
842
843
844void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) {
845  LPointerMap empty_pointers(zone());
846  RecordSafepoint(&empty_pointers, deopt_mode);
847}
848
849
850void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
851                                            int arguments,
852                                            Safepoint::DeoptMode deopt_mode) {
853  RecordSafepoint(pointers, Safepoint::kWithRegisters, arguments, deopt_mode);
854}
855
856
857static const char* LabelType(LLabel* label) {
858  if (label->is_loop_header()) return " (loop header)";
859  if (label->is_osr_entry()) return " (OSR entry)";
860  return "";
861}
862
863
864void LCodeGen::DoLabel(LLabel* label) {
865  Comment(";;; <@%d,#%d> -------------------- B%d%s --------------------",
866          current_instruction_, label->hydrogen_value()->id(),
867          label->block_id(), LabelType(label));
868  __ bind(label->label());
869  current_block_ = label->block_id();
870  DoGap(label);
871}
872
873
874void LCodeGen::DoParallelMove(LParallelMove* move) { resolver_.Resolve(move); }
875
876
877void LCodeGen::DoGap(LGap* gap) {
878  for (int i = LGap::FIRST_INNER_POSITION; i <= LGap::LAST_INNER_POSITION;
879       i++) {
880    LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
881    LParallelMove* move = gap->GetParallelMove(inner_pos);
882    if (move != NULL) DoParallelMove(move);
883  }
884}
885
886
887void LCodeGen::DoInstructionGap(LInstructionGap* instr) { DoGap(instr); }
888
889
890void LCodeGen::DoParameter(LParameter* instr) {
891  // Nothing to do.
892}
893
894
895void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
896  GenerateOsrPrologue();
897}
898
899
900void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
901  Register dividend = ToRegister(instr->dividend());
902  int32_t divisor = instr->divisor();
903  DCHECK(dividend.is(ToRegister(instr->result())));
904
905  // Theoretically, a variation of the branch-free code for integer division by
906  // a power of 2 (calculating the remainder via an additional multiplication
907  // (which gets simplified to an 'and') and subtraction) should be faster, and
908  // this is exactly what GCC and clang emit. Nevertheless, benchmarks seem to
909  // indicate that positive dividends are heavily favored, so the branching
910  // version performs better.
911  HMod* hmod = instr->hydrogen();
912  int32_t shift = WhichPowerOf2Abs(divisor);
913  Label dividend_is_not_negative, done;
914  if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) {
915    __ cmpwi(dividend, Operand::Zero());
916    __ bge(&dividend_is_not_negative);
917    if (shift) {
918      // Note that this is correct even for kMinInt operands.
919      __ neg(dividend, dividend);
920      __ ExtractBitRange(dividend, dividend, shift - 1, 0);
921      __ neg(dividend, dividend, LeaveOE, SetRC);
922      if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
923        DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero, cr0);
924      }
925    } else if (!hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
926      __ li(dividend, Operand::Zero());
927    } else {
928      DeoptimizeIf(al, instr, DeoptimizeReason::kMinusZero);
929    }
930    __ b(&done);
931  }
932
933  __ bind(&dividend_is_not_negative);
934  if (shift) {
935    __ ExtractBitRange(dividend, dividend, shift - 1, 0);
936  } else {
937    __ li(dividend, Operand::Zero());
938  }
939  __ bind(&done);
940}
941
942
943void LCodeGen::DoModByConstI(LModByConstI* instr) {
944  Register dividend = ToRegister(instr->dividend());
945  int32_t divisor = instr->divisor();
946  Register result = ToRegister(instr->result());
947  DCHECK(!dividend.is(result));
948
949  if (divisor == 0) {
950    DeoptimizeIf(al, instr, DeoptimizeReason::kDivisionByZero);
951    return;
952  }
953
954  __ TruncatingDiv(result, dividend, Abs(divisor));
955  __ mov(ip, Operand(Abs(divisor)));
956  __ mullw(result, result, ip);
957  __ sub(result, dividend, result, LeaveOE, SetRC);
958
959  // Check for negative zero.
960  HMod* hmod = instr->hydrogen();
961  if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
962    Label remainder_not_zero;
963    __ bne(&remainder_not_zero, cr0);
964    __ cmpwi(dividend, Operand::Zero());
965    DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero);
966    __ bind(&remainder_not_zero);
967  }
968}
969
970
971void LCodeGen::DoModI(LModI* instr) {
972  HMod* hmod = instr->hydrogen();
973  Register left_reg = ToRegister(instr->left());
974  Register right_reg = ToRegister(instr->right());
975  Register result_reg = ToRegister(instr->result());
976  Register scratch = scratch0();
977  bool can_overflow = hmod->CheckFlag(HValue::kCanOverflow);
978  Label done;
979
980  if (can_overflow) {
981    __ li(r0, Operand::Zero());  // clear xer
982    __ mtxer(r0);
983  }
984
985  __ divw(scratch, left_reg, right_reg, SetOE, SetRC);
986
987  // Check for x % 0.
988  if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
989    __ cmpwi(right_reg, Operand::Zero());
990    DeoptimizeIf(eq, instr, DeoptimizeReason::kDivisionByZero);
991  }
992
993  // Check for kMinInt % -1, divw will return undefined, which is not what we
994  // want. We have to deopt if we care about -0, because we can't return that.
995  if (can_overflow) {
996    if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
997      DeoptimizeIf(overflow, instr, DeoptimizeReason::kMinusZero, cr0);
998    } else {
999      if (CpuFeatures::IsSupported(ISELECT)) {
1000        __ isel(overflow, result_reg, r0, result_reg, cr0);
1001        __ boverflow(&done, cr0);
1002      } else {
1003        Label no_overflow_possible;
1004        __ bnooverflow(&no_overflow_possible, cr0);
1005        __ li(result_reg, Operand::Zero());
1006        __ b(&done);
1007        __ bind(&no_overflow_possible);
1008      }
1009    }
1010  }
1011
1012  __ mullw(scratch, right_reg, scratch);
1013  __ sub(result_reg, left_reg, scratch, LeaveOE, SetRC);
1014
1015  // If we care about -0, test if the dividend is <0 and the result is 0.
1016  if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
1017    __ bne(&done, cr0);
1018    __ cmpwi(left_reg, Operand::Zero());
1019    DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero);
1020  }
1021
1022  __ bind(&done);
1023}
1024
1025
1026void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
1027  Register dividend = ToRegister(instr->dividend());
1028  int32_t divisor = instr->divisor();
1029  Register result = ToRegister(instr->result());
1030  DCHECK(divisor == kMinInt || base::bits::IsPowerOfTwo32(Abs(divisor)));
1031  DCHECK(!result.is(dividend));
1032
1033  // Check for (0 / -x) that will produce negative zero.
1034  HDiv* hdiv = instr->hydrogen();
1035  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
1036    __ cmpwi(dividend, Operand::Zero());
1037    DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero);
1038  }
1039  // Check for (kMinInt / -1).
1040  if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
1041    __ lis(r0, Operand(SIGN_EXT_IMM16(0x8000)));
1042    __ cmpw(dividend, r0);
1043    DeoptimizeIf(eq, instr, DeoptimizeReason::kOverflow);
1044  }
1045
1046  int32_t shift = WhichPowerOf2Abs(divisor);
1047
1048  // Deoptimize if remainder will not be 0.
1049  if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) && shift) {
1050    __ TestBitRange(dividend, shift - 1, 0, r0);
1051    DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecision, cr0);
1052  }
1053
1054  if (divisor == -1) {  // Nice shortcut, not needed for correctness.
1055    __ neg(result, dividend);
1056    return;
1057  }
1058  if (shift == 0) {
1059    __ mr(result, dividend);
1060  } else {
1061    if (shift == 1) {
1062      __ srwi(result, dividend, Operand(31));
1063    } else {
1064      __ srawi(result, dividend, 31);
1065      __ srwi(result, result, Operand(32 - shift));
1066    }
1067    __ add(result, dividend, result);
1068    __ srawi(result, result, shift);
1069  }
1070  if (divisor < 0) __ neg(result, result);
1071}
1072
1073
1074void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
1075  Register dividend = ToRegister(instr->dividend());
1076  int32_t divisor = instr->divisor();
1077  Register result = ToRegister(instr->result());
1078  DCHECK(!dividend.is(result));
1079
1080  if (divisor == 0) {
1081    DeoptimizeIf(al, instr, DeoptimizeReason::kDivisionByZero);
1082    return;
1083  }
1084
1085  // Check for (0 / -x) that will produce negative zero.
1086  HDiv* hdiv = instr->hydrogen();
1087  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
1088    __ cmpwi(dividend, Operand::Zero());
1089    DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero);
1090  }
1091
1092  __ TruncatingDiv(result, dividend, Abs(divisor));
1093  if (divisor < 0) __ neg(result, result);
1094
1095  if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
1096    Register scratch = scratch0();
1097    __ mov(ip, Operand(divisor));
1098    __ mullw(scratch, result, ip);
1099    __ cmpw(scratch, dividend);
1100    DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecision);
1101  }
1102}
1103
1104
1105// TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI.
1106void LCodeGen::DoDivI(LDivI* instr) {
1107  HBinaryOperation* hdiv = instr->hydrogen();
1108  const Register dividend = ToRegister(instr->dividend());
1109  const Register divisor = ToRegister(instr->divisor());
1110  Register result = ToRegister(instr->result());
1111  bool can_overflow = hdiv->CheckFlag(HValue::kCanOverflow);
1112
1113  DCHECK(!dividend.is(result));
1114  DCHECK(!divisor.is(result));
1115
1116  if (can_overflow) {
1117    __ li(r0, Operand::Zero());  // clear xer
1118    __ mtxer(r0);
1119  }
1120
1121  __ divw(result, dividend, divisor, SetOE, SetRC);
1122
1123  // Check for x / 0.
1124  if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
1125    __ cmpwi(divisor, Operand::Zero());
1126    DeoptimizeIf(eq, instr, DeoptimizeReason::kDivisionByZero);
1127  }
1128
1129  // Check for (0 / -x) that will produce negative zero.
1130  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
1131    Label dividend_not_zero;
1132    __ cmpwi(dividend, Operand::Zero());
1133    __ bne(&dividend_not_zero);
1134    __ cmpwi(divisor, Operand::Zero());
1135    DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero);
1136    __ bind(&dividend_not_zero);
1137  }
1138
1139  // Check for (kMinInt / -1).
1140  if (can_overflow) {
1141    if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
1142      DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow, cr0);
1143    } else {
1144      // When truncating, we want kMinInt / -1 = kMinInt.
1145      if (CpuFeatures::IsSupported(ISELECT)) {
1146        __ isel(overflow, result, dividend, result, cr0);
1147      } else {
1148        Label no_overflow_possible;
1149        __ bnooverflow(&no_overflow_possible, cr0);
1150        __ mr(result, dividend);
1151        __ bind(&no_overflow_possible);
1152      }
1153    }
1154  }
1155
1156#if V8_TARGET_ARCH_PPC64
1157  __ extsw(result, result);
1158#endif
1159
1160  if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
1161    // Deoptimize if remainder is not 0.
1162    Register scratch = scratch0();
1163    __ mullw(scratch, divisor, result);
1164    __ cmpw(dividend, scratch);
1165    DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecision);
1166  }
1167}
1168
1169
1170void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
1171  HBinaryOperation* hdiv = instr->hydrogen();
1172  Register dividend = ToRegister(instr->dividend());
1173  Register result = ToRegister(instr->result());
1174  int32_t divisor = instr->divisor();
1175  bool can_overflow = hdiv->CheckFlag(HValue::kLeftCanBeMinInt);
1176
1177  // If the divisor is positive, things are easy: There can be no deopts and we
1178  // can simply do an arithmetic right shift.
1179  int32_t shift = WhichPowerOf2Abs(divisor);
1180  if (divisor > 0) {
1181    if (shift || !result.is(dividend)) {
1182      __ srawi(result, dividend, shift);
1183    }
1184    return;
1185  }
1186
1187  // If the divisor is negative, we have to negate and handle edge cases.
1188  OEBit oe = LeaveOE;
1189#if V8_TARGET_ARCH_PPC64
1190  if (divisor == -1 && can_overflow) {
1191    __ lis(r0, Operand(SIGN_EXT_IMM16(0x8000)));
1192    __ cmpw(dividend, r0);
1193    DeoptimizeIf(eq, instr, DeoptimizeReason::kOverflow);
1194  }
1195#else
1196  if (can_overflow) {
1197    __ li(r0, Operand::Zero());  // clear xer
1198    __ mtxer(r0);
1199    oe = SetOE;
1200  }
1201#endif
1202
1203  __ neg(result, dividend, oe, SetRC);
1204  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
1205    DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero, cr0);
1206  }
1207
1208// If the negation could not overflow, simply shifting is OK.
1209#if !V8_TARGET_ARCH_PPC64
1210  if (!can_overflow) {
1211#endif
1212    if (shift) {
1213      __ ShiftRightArithImm(result, result, shift);
1214    }
1215    return;
1216#if !V8_TARGET_ARCH_PPC64
1217  }
1218
1219  // Dividing by -1 is basically negation, unless we overflow.
1220  if (divisor == -1) {
1221    DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow, cr0);
1222    return;
1223  }
1224
1225  Label overflow, done;
1226  __ boverflow(&overflow, cr0);
1227  __ srawi(result, result, shift);
1228  __ b(&done);
1229  __ bind(&overflow);
1230  __ mov(result, Operand(kMinInt / divisor));
1231  __ bind(&done);
1232#endif
1233}
1234
1235
1236void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
1237  Register dividend = ToRegister(instr->dividend());
1238  int32_t divisor = instr->divisor();
1239  Register result = ToRegister(instr->result());
1240  DCHECK(!dividend.is(result));
1241
1242  if (divisor == 0) {
1243    DeoptimizeIf(al, instr, DeoptimizeReason::kDivisionByZero);
1244    return;
1245  }
1246
1247  // Check for (0 / -x) that will produce negative zero.
1248  HMathFloorOfDiv* hdiv = instr->hydrogen();
1249  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
1250    __ cmpwi(dividend, Operand::Zero());
1251    DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero);
1252  }
1253
1254  // Easy case: We need no dynamic check for the dividend and the flooring
1255  // division is the same as the truncating division.
1256  if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) ||
1257      (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) {
1258    __ TruncatingDiv(result, dividend, Abs(divisor));
1259    if (divisor < 0) __ neg(result, result);
1260    return;
1261  }
1262
1263  // In the general case we may need to adjust before and after the truncating
1264  // division to get a flooring division.
1265  Register temp = ToRegister(instr->temp());
1266  DCHECK(!temp.is(dividend) && !temp.is(result));
1267  Label needs_adjustment, done;
1268  __ cmpwi(dividend, Operand::Zero());
1269  __ b(divisor > 0 ? lt : gt, &needs_adjustment);
1270  __ TruncatingDiv(result, dividend, Abs(divisor));
1271  if (divisor < 0) __ neg(result, result);
1272  __ b(&done);
1273  __ bind(&needs_adjustment);
1274  __ addi(temp, dividend, Operand(divisor > 0 ? 1 : -1));
1275  __ TruncatingDiv(result, temp, Abs(divisor));
1276  if (divisor < 0) __ neg(result, result);
1277  __ subi(result, result, Operand(1));
1278  __ bind(&done);
1279}
1280
1281
1282// TODO(svenpanne) Refactor this to avoid code duplication with DoDivI.
1283void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
1284  HBinaryOperation* hdiv = instr->hydrogen();
1285  const Register dividend = ToRegister(instr->dividend());
1286  const Register divisor = ToRegister(instr->divisor());
1287  Register result = ToRegister(instr->result());
1288  bool can_overflow = hdiv->CheckFlag(HValue::kCanOverflow);
1289
1290  DCHECK(!dividend.is(result));
1291  DCHECK(!divisor.is(result));
1292
1293  if (can_overflow) {
1294    __ li(r0, Operand::Zero());  // clear xer
1295    __ mtxer(r0);
1296  }
1297
1298  __ divw(result, dividend, divisor, SetOE, SetRC);
1299
1300  // Check for x / 0.
1301  if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
1302    __ cmpwi(divisor, Operand::Zero());
1303    DeoptimizeIf(eq, instr, DeoptimizeReason::kDivisionByZero);
1304  }
1305
1306  // Check for (0 / -x) that will produce negative zero.
1307  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
1308    Label dividend_not_zero;
1309    __ cmpwi(dividend, Operand::Zero());
1310    __ bne(&dividend_not_zero);
1311    __ cmpwi(divisor, Operand::Zero());
1312    DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero);
1313    __ bind(&dividend_not_zero);
1314  }
1315
1316  // Check for (kMinInt / -1).
1317  if (can_overflow) {
1318    if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
1319      DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow, cr0);
1320    } else {
1321      // When truncating, we want kMinInt / -1 = kMinInt.
1322      if (CpuFeatures::IsSupported(ISELECT)) {
1323        __ isel(overflow, result, dividend, result, cr0);
1324      } else {
1325        Label no_overflow_possible;
1326        __ bnooverflow(&no_overflow_possible, cr0);
1327        __ mr(result, dividend);
1328        __ bind(&no_overflow_possible);
1329      }
1330    }
1331  }
1332
1333  Label done;
1334  Register scratch = scratch0();
1335// If both operands have the same sign then we are done.
1336#if V8_TARGET_ARCH_PPC64
1337  __ xor_(scratch, dividend, divisor);
1338  __ cmpwi(scratch, Operand::Zero());
1339  __ bge(&done);
1340#else
1341  __ xor_(scratch, dividend, divisor, SetRC);
1342  __ bge(&done, cr0);
1343#endif
1344
1345  // If there is no remainder then we are done.
1346  __ mullw(scratch, divisor, result);
1347  __ cmpw(dividend, scratch);
1348  __ beq(&done);
1349
1350  // We performed a truncating division. Correct the result.
1351  __ subi(result, result, Operand(1));
1352  __ bind(&done);
1353#if V8_TARGET_ARCH_PPC64
1354  __ extsw(result, result);
1355#endif
1356}
1357
1358
1359void LCodeGen::DoMultiplyAddD(LMultiplyAddD* instr) {
1360  DoubleRegister addend = ToDoubleRegister(instr->addend());
1361  DoubleRegister multiplier = ToDoubleRegister(instr->multiplier());
1362  DoubleRegister multiplicand = ToDoubleRegister(instr->multiplicand());
1363  DoubleRegister result = ToDoubleRegister(instr->result());
1364
1365  __ fmadd(result, multiplier, multiplicand, addend);
1366}
1367
1368
1369void LCodeGen::DoMultiplySubD(LMultiplySubD* instr) {
1370  DoubleRegister minuend = ToDoubleRegister(instr->minuend());
1371  DoubleRegister multiplier = ToDoubleRegister(instr->multiplier());
1372  DoubleRegister multiplicand = ToDoubleRegister(instr->multiplicand());
1373  DoubleRegister result = ToDoubleRegister(instr->result());
1374
1375  __ fmsub(result, multiplier, multiplicand, minuend);
1376}
1377
1378
1379void LCodeGen::DoMulI(LMulI* instr) {
1380  Register scratch = scratch0();
1381  Register result = ToRegister(instr->result());
1382  // Note that result may alias left.
1383  Register left = ToRegister(instr->left());
1384  LOperand* right_op = instr->right();
1385
1386  bool bailout_on_minus_zero =
1387      instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);
1388  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
1389
1390  if (right_op->IsConstantOperand()) {
1391    int32_t constant = ToInteger32(LConstantOperand::cast(right_op));
1392
1393    if (bailout_on_minus_zero && (constant < 0)) {
1394      // The case of a null constant will be handled separately.
1395      // If constant is negative and left is null, the result should be -0.
1396      __ cmpi(left, Operand::Zero());
1397      DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero);
1398    }
1399
1400    switch (constant) {
1401      case -1:
1402        if (can_overflow) {
1403#if V8_TARGET_ARCH_PPC64
1404          if (instr->hydrogen()->representation().IsSmi()) {
1405#endif
1406            __ li(r0, Operand::Zero());  // clear xer
1407            __ mtxer(r0);
1408            __ neg(result, left, SetOE, SetRC);
1409            DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow, cr0);
1410#if V8_TARGET_ARCH_PPC64
1411          } else {
1412            __ neg(result, left);
1413            __ TestIfInt32(result, r0);
1414            DeoptimizeIf(ne, instr, DeoptimizeReason::kOverflow);
1415          }
1416#endif
1417        } else {
1418          __ neg(result, left);
1419        }
1420        break;
1421      case 0:
1422        if (bailout_on_minus_zero) {
1423// If left is strictly negative and the constant is null, the
1424// result is -0. Deoptimize if required, otherwise return 0.
1425#if V8_TARGET_ARCH_PPC64
1426          if (instr->hydrogen()->representation().IsSmi()) {
1427#endif
1428            __ cmpi(left, Operand::Zero());
1429#if V8_TARGET_ARCH_PPC64
1430          } else {
1431            __ cmpwi(left, Operand::Zero());
1432          }
1433#endif
1434          DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero);
1435        }
1436        __ li(result, Operand::Zero());
1437        break;
1438      case 1:
1439        __ Move(result, left);
1440        break;
1441      default:
1442        // Multiplying by powers of two and powers of two plus or minus
1443        // one can be done faster with shifted operands.
1444        // For other constants we emit standard code.
1445        int32_t mask = constant >> 31;
1446        uint32_t constant_abs = (constant + mask) ^ mask;
1447
1448        if (base::bits::IsPowerOfTwo32(constant_abs)) {
1449          int32_t shift = WhichPowerOf2(constant_abs);
1450          __ ShiftLeftImm(result, left, Operand(shift));
1451          // Correct the sign of the result if the constant is negative.
1452          if (constant < 0) __ neg(result, result);
1453        } else if (base::bits::IsPowerOfTwo32(constant_abs - 1)) {
1454          int32_t shift = WhichPowerOf2(constant_abs - 1);
1455          __ ShiftLeftImm(scratch, left, Operand(shift));
1456          __ add(result, scratch, left);
1457          // Correct the sign of the result if the constant is negative.
1458          if (constant < 0) __ neg(result, result);
1459        } else if (base::bits::IsPowerOfTwo32(constant_abs + 1)) {
1460          int32_t shift = WhichPowerOf2(constant_abs + 1);
1461          __ ShiftLeftImm(scratch, left, Operand(shift));
1462          __ sub(result, scratch, left);
1463          // Correct the sign of the result if the constant is negative.
1464          if (constant < 0) __ neg(result, result);
1465        } else {
1466          // Generate standard code.
1467          __ mov(ip, Operand(constant));
1468          __ Mul(result, left, ip);
1469        }
1470    }
1471
1472  } else {
1473    DCHECK(right_op->IsRegister());
1474    Register right = ToRegister(right_op);
1475
1476    if (can_overflow) {
1477#if V8_TARGET_ARCH_PPC64
1478      // result = left * right.
1479      if (instr->hydrogen()->representation().IsSmi()) {
1480        __ SmiUntag(result, left);
1481        __ SmiUntag(scratch, right);
1482        __ Mul(result, result, scratch);
1483      } else {
1484        __ Mul(result, left, right);
1485      }
1486      __ TestIfInt32(result, r0);
1487      DeoptimizeIf(ne, instr, DeoptimizeReason::kOverflow);
1488      if (instr->hydrogen()->representation().IsSmi()) {
1489        __ SmiTag(result);
1490      }
1491#else
1492      // scratch:result = left * right.
1493      if (instr->hydrogen()->representation().IsSmi()) {
1494        __ SmiUntag(result, left);
1495        __ mulhw(scratch, result, right);
1496        __ mullw(result, result, right);
1497      } else {
1498        __ mulhw(scratch, left, right);
1499        __ mullw(result, left, right);
1500      }
1501      __ TestIfInt32(scratch, result, r0);
1502      DeoptimizeIf(ne, instr, DeoptimizeReason::kOverflow);
1503#endif
1504    } else {
1505      if (instr->hydrogen()->representation().IsSmi()) {
1506        __ SmiUntag(result, left);
1507        __ Mul(result, result, right);
1508      } else {
1509        __ Mul(result, left, right);
1510      }
1511    }
1512
1513    if (bailout_on_minus_zero) {
1514      Label done;
1515#if V8_TARGET_ARCH_PPC64
1516      if (instr->hydrogen()->representation().IsSmi()) {
1517#endif
1518        __ xor_(r0, left, right, SetRC);
1519        __ bge(&done, cr0);
1520#if V8_TARGET_ARCH_PPC64
1521      } else {
1522        __ xor_(r0, left, right);
1523        __ cmpwi(r0, Operand::Zero());
1524        __ bge(&done);
1525      }
1526#endif
1527      // Bail out if the result is minus zero.
1528      __ cmpi(result, Operand::Zero());
1529      DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero);
1530      __ bind(&done);
1531    }
1532  }
1533}
1534
1535
1536void LCodeGen::DoBitI(LBitI* instr) {
1537  LOperand* left_op = instr->left();
1538  LOperand* right_op = instr->right();
1539  DCHECK(left_op->IsRegister());
1540  Register left = ToRegister(left_op);
1541  Register result = ToRegister(instr->result());
1542  Operand right(no_reg);
1543
1544  if (right_op->IsStackSlot()) {
1545    right = Operand(EmitLoadRegister(right_op, ip));
1546  } else {
1547    DCHECK(right_op->IsRegister() || right_op->IsConstantOperand());
1548    right = ToOperand(right_op);
1549
1550    if (right_op->IsConstantOperand() && is_uint16(right.immediate())) {
1551      switch (instr->op()) {
1552        case Token::BIT_AND:
1553          __ andi(result, left, right);
1554          break;
1555        case Token::BIT_OR:
1556          __ ori(result, left, right);
1557          break;
1558        case Token::BIT_XOR:
1559          __ xori(result, left, right);
1560          break;
1561        default:
1562          UNREACHABLE();
1563          break;
1564      }
1565      return;
1566    }
1567  }
1568
1569  switch (instr->op()) {
1570    case Token::BIT_AND:
1571      __ And(result, left, right);
1572      break;
1573    case Token::BIT_OR:
1574      __ Or(result, left, right);
1575      break;
1576    case Token::BIT_XOR:
1577      if (right_op->IsConstantOperand() && right.immediate() == int32_t(~0)) {
1578        __ notx(result, left);
1579      } else {
1580        __ Xor(result, left, right);
1581      }
1582      break;
1583    default:
1584      UNREACHABLE();
1585      break;
1586  }
1587}
1588
1589
1590void LCodeGen::DoShiftI(LShiftI* instr) {
1591  // Both 'left' and 'right' are "used at start" (see LCodeGen::DoShift), so
1592  // result may alias either of them.
1593  LOperand* right_op = instr->right();
1594  Register left = ToRegister(instr->left());
1595  Register result = ToRegister(instr->result());
1596  Register scratch = scratch0();
1597  if (right_op->IsRegister()) {
1598    // Mask the right_op operand.
1599    __ andi(scratch, ToRegister(right_op), Operand(0x1F));
1600    switch (instr->op()) {
1601      case Token::ROR:
1602        // rotate_right(a, b) == rotate_left(a, 32 - b)
1603        __ subfic(scratch, scratch, Operand(32));
1604        __ rotlw(result, left, scratch);
1605        break;
1606      case Token::SAR:
1607        __ sraw(result, left, scratch);
1608        break;
1609      case Token::SHR:
1610        if (instr->can_deopt()) {
1611          __ srw(result, left, scratch, SetRC);
1612#if V8_TARGET_ARCH_PPC64
1613          __ extsw(result, result, SetRC);
1614#endif
1615          DeoptimizeIf(lt, instr, DeoptimizeReason::kNegativeValue, cr0);
1616        } else {
1617          __ srw(result, left, scratch);
1618        }
1619        break;
1620      case Token::SHL:
1621        __ slw(result, left, scratch);
1622#if V8_TARGET_ARCH_PPC64
1623        __ extsw(result, result);
1624#endif
1625        break;
1626      default:
1627        UNREACHABLE();
1628        break;
1629    }
1630  } else {
1631    // Mask the right_op operand.
1632    int value = ToInteger32(LConstantOperand::cast(right_op));
1633    uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
1634    switch (instr->op()) {
1635      case Token::ROR:
1636        if (shift_count != 0) {
1637          __ rotrwi(result, left, shift_count);
1638        } else {
1639          __ Move(result, left);
1640        }
1641        break;
1642      case Token::SAR:
1643        if (shift_count != 0) {
1644          __ srawi(result, left, shift_count);
1645        } else {
1646          __ Move(result, left);
1647        }
1648        break;
1649      case Token::SHR:
1650        if (shift_count != 0) {
1651          __ srwi(result, left, Operand(shift_count));
1652        } else {
1653          if (instr->can_deopt()) {
1654            __ cmpwi(left, Operand::Zero());
1655            DeoptimizeIf(lt, instr, DeoptimizeReason::kNegativeValue);
1656          }
1657          __ Move(result, left);
1658        }
1659        break;
1660      case Token::SHL:
1661        if (shift_count != 0) {
1662#if V8_TARGET_ARCH_PPC64
1663          if (instr->hydrogen_value()->representation().IsSmi()) {
1664            __ sldi(result, left, Operand(shift_count));
1665#else
1666          if (instr->hydrogen_value()->representation().IsSmi() &&
1667              instr->can_deopt()) {
1668            if (shift_count != 1) {
1669              __ slwi(result, left, Operand(shift_count - 1));
1670              __ SmiTagCheckOverflow(result, result, scratch);
1671            } else {
1672              __ SmiTagCheckOverflow(result, left, scratch);
1673            }
1674            DeoptimizeIf(lt, instr, DeoptimizeReason::kOverflow, cr0);
1675#endif
1676          } else {
1677            __ slwi(result, left, Operand(shift_count));
1678#if V8_TARGET_ARCH_PPC64
1679            __ extsw(result, result);
1680#endif
1681          }
1682        } else {
1683          __ Move(result, left);
1684        }
1685        break;
1686      default:
1687        UNREACHABLE();
1688        break;
1689    }
1690  }
1691}
1692
1693
1694void LCodeGen::DoSubI(LSubI* instr) {
1695  LOperand* right = instr->right();
1696  Register left = ToRegister(instr->left());
1697  Register result = ToRegister(instr->result());
1698  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
1699#if V8_TARGET_ARCH_PPC64
1700  const bool isInteger = !instr->hydrogen()->representation().IsSmi();
1701#else
1702  const bool isInteger = false;
1703#endif
1704  if (!can_overflow || isInteger) {
1705    if (right->IsConstantOperand()) {
1706      __ Add(result, left, -(ToOperand(right).immediate()), r0);
1707    } else {
1708      __ sub(result, left, EmitLoadRegister(right, ip));
1709    }
1710    if (can_overflow) {
1711#if V8_TARGET_ARCH_PPC64
1712      __ TestIfInt32(result, r0);
1713#else
1714      __ TestIfInt32(scratch0(), result, r0);
1715#endif
1716      DeoptimizeIf(ne, instr, DeoptimizeReason::kOverflow);
1717    }
1718
1719  } else {
1720    if (right->IsConstantOperand()) {
1721      __ AddAndCheckForOverflow(result, left, -(ToOperand(right).immediate()),
1722                                scratch0(), r0);
1723    } else {
1724      __ SubAndCheckForOverflow(result, left, EmitLoadRegister(right, ip),
1725                                scratch0(), r0);
1726    }
1727    DeoptimizeIf(lt, instr, DeoptimizeReason::kOverflow, cr0);
1728  }
1729}
1730
1731
1732void LCodeGen::DoRSubI(LRSubI* instr) {
1733  LOperand* left = instr->left();
1734  LOperand* right = instr->right();
1735  LOperand* result = instr->result();
1736
1737  DCHECK(!instr->hydrogen()->CheckFlag(HValue::kCanOverflow) &&
1738         right->IsConstantOperand());
1739
1740  Operand right_operand = ToOperand(right);
1741  if (is_int16(right_operand.immediate())) {
1742    __ subfic(ToRegister(result), ToRegister(left), right_operand);
1743  } else {
1744    __ mov(r0, right_operand);
1745    __ sub(ToRegister(result), r0, ToRegister(left));
1746  }
1747}
1748
1749
1750void LCodeGen::DoConstantI(LConstantI* instr) {
1751  __ mov(ToRegister(instr->result()), Operand(instr->value()));
1752}
1753
1754
1755void LCodeGen::DoConstantS(LConstantS* instr) {
1756  __ LoadSmiLiteral(ToRegister(instr->result()), instr->value());
1757}
1758
1759
1760void LCodeGen::DoConstantD(LConstantD* instr) {
1761  DCHECK(instr->result()->IsDoubleRegister());
1762  DoubleRegister result = ToDoubleRegister(instr->result());
1763#if V8_HOST_ARCH_IA32
1764  // Need some crappy work-around for x87 sNaN -> qNaN breakage in simulator
1765  // builds.
1766  uint64_t bits = instr->bits();
1767  if ((bits & V8_UINT64_C(0x7FF8000000000000)) ==
1768      V8_UINT64_C(0x7FF0000000000000)) {
1769    uint32_t lo = static_cast<uint32_t>(bits);
1770    uint32_t hi = static_cast<uint32_t>(bits >> 32);
1771    __ mov(ip, Operand(lo));
1772    __ mov(scratch0(), Operand(hi));
1773    __ MovInt64ToDouble(result, scratch0(), ip);
1774    return;
1775  }
1776#endif
1777  double v = instr->value();
1778  __ LoadDoubleLiteral(result, v, scratch0());
1779}
1780
1781
1782void LCodeGen::DoConstantE(LConstantE* instr) {
1783  __ mov(ToRegister(instr->result()), Operand(instr->value()));
1784}
1785
1786
1787void LCodeGen::DoConstantT(LConstantT* instr) {
1788  Handle<Object> object = instr->value(isolate());
1789  AllowDeferredHandleDereference smi_check;
1790  __ Move(ToRegister(instr->result()), object);
1791}
1792
1793
1794MemOperand LCodeGen::BuildSeqStringOperand(Register string, LOperand* index,
1795                                           String::Encoding encoding) {
1796  if (index->IsConstantOperand()) {
1797    int offset = ToInteger32(LConstantOperand::cast(index));
1798    if (encoding == String::TWO_BYTE_ENCODING) {
1799      offset *= kUC16Size;
1800    }
1801    STATIC_ASSERT(kCharSize == 1);
1802    return FieldMemOperand(string, SeqString::kHeaderSize + offset);
1803  }
1804  Register scratch = scratch0();
1805  DCHECK(!scratch.is(string));
1806  DCHECK(!scratch.is(ToRegister(index)));
1807  if (encoding == String::ONE_BYTE_ENCODING) {
1808    __ add(scratch, string, ToRegister(index));
1809  } else {
1810    STATIC_ASSERT(kUC16Size == 2);
1811    __ ShiftLeftImm(scratch, ToRegister(index), Operand(1));
1812    __ add(scratch, string, scratch);
1813  }
1814  return FieldMemOperand(scratch, SeqString::kHeaderSize);
1815}
1816
1817
1818void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) {
1819  String::Encoding encoding = instr->hydrogen()->encoding();
1820  Register string = ToRegister(instr->string());
1821  Register result = ToRegister(instr->result());
1822
1823  if (FLAG_debug_code) {
1824    Register scratch = scratch0();
1825    __ LoadP(scratch, FieldMemOperand(string, HeapObject::kMapOffset));
1826    __ lbz(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
1827
1828    __ andi(scratch, scratch,
1829            Operand(kStringRepresentationMask | kStringEncodingMask));
1830    static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
1831    static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
1832    __ cmpi(scratch,
1833            Operand(encoding == String::ONE_BYTE_ENCODING ? one_byte_seq_type
1834                                                          : two_byte_seq_type));
1835    __ Check(eq, kUnexpectedStringType);
1836  }
1837
1838  MemOperand operand = BuildSeqStringOperand(string, instr->index(), encoding);
1839  if (encoding == String::ONE_BYTE_ENCODING) {
1840    __ lbz(result, operand);
1841  } else {
1842    __ lhz(result, operand);
1843  }
1844}
1845
1846
1847void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
1848  String::Encoding encoding = instr->hydrogen()->encoding();
1849  Register string = ToRegister(instr->string());
1850  Register value = ToRegister(instr->value());
1851
1852  if (FLAG_debug_code) {
1853    Register index = ToRegister(instr->index());
1854    static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
1855    static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
1856    int encoding_mask =
1857        instr->hydrogen()->encoding() == String::ONE_BYTE_ENCODING
1858            ? one_byte_seq_type
1859            : two_byte_seq_type;
1860    __ EmitSeqStringSetCharCheck(string, index, value, encoding_mask);
1861  }
1862
1863  MemOperand operand = BuildSeqStringOperand(string, instr->index(), encoding);
1864  if (encoding == String::ONE_BYTE_ENCODING) {
1865    __ stb(value, operand);
1866  } else {
1867    __ sth(value, operand);
1868  }
1869}
1870
1871
1872void LCodeGen::DoAddI(LAddI* instr) {
1873  LOperand* right = instr->right();
1874  Register left = ToRegister(instr->left());
1875  Register result = ToRegister(instr->result());
1876  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
1877#if V8_TARGET_ARCH_PPC64
1878  const bool isInteger = !(instr->hydrogen()->representation().IsSmi() ||
1879                           instr->hydrogen()->representation().IsExternal());
1880#else
1881  const bool isInteger = false;
1882#endif
1883
1884  if (!can_overflow || isInteger) {
1885    if (right->IsConstantOperand()) {
1886      __ Add(result, left, ToOperand(right).immediate(), r0);
1887    } else {
1888      __ add(result, left, EmitLoadRegister(right, ip));
1889    }
1890#if V8_TARGET_ARCH_PPC64
1891    if (can_overflow) {
1892      __ TestIfInt32(result, r0);
1893      DeoptimizeIf(ne, instr, DeoptimizeReason::kOverflow);
1894    }
1895#endif
1896  } else {
1897    if (right->IsConstantOperand()) {
1898      __ AddAndCheckForOverflow(result, left, ToOperand(right).immediate(),
1899                                scratch0(), r0);
1900    } else {
1901      __ AddAndCheckForOverflow(result, left, EmitLoadRegister(right, ip),
1902                                scratch0(), r0);
1903    }
1904    DeoptimizeIf(lt, instr, DeoptimizeReason::kOverflow, cr0);
1905  }
1906}
1907
1908
1909void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
1910  LOperand* left = instr->left();
1911  LOperand* right = instr->right();
1912  HMathMinMax::Operation operation = instr->hydrogen()->operation();
1913  Condition cond = (operation == HMathMinMax::kMathMin) ? le : ge;
1914  if (instr->hydrogen()->representation().IsSmiOrInteger32()) {
1915    Register left_reg = ToRegister(left);
1916    Register right_reg = EmitLoadRegister(right, ip);
1917    Register result_reg = ToRegister(instr->result());
1918    Label return_left, done;
1919#if V8_TARGET_ARCH_PPC64
1920    if (instr->hydrogen_value()->representation().IsSmi()) {
1921#endif
1922      __ cmp(left_reg, right_reg);
1923#if V8_TARGET_ARCH_PPC64
1924    } else {
1925      __ cmpw(left_reg, right_reg);
1926    }
1927#endif
1928    if (CpuFeatures::IsSupported(ISELECT)) {
1929      __ isel(cond, result_reg, left_reg, right_reg);
1930    } else {
1931      __ b(cond, &return_left);
1932      __ Move(result_reg, right_reg);
1933      __ b(&done);
1934      __ bind(&return_left);
1935      __ Move(result_reg, left_reg);
1936      __ bind(&done);
1937    }
1938  } else {
1939    DCHECK(instr->hydrogen()->representation().IsDouble());
1940    DoubleRegister left_reg = ToDoubleRegister(left);
1941    DoubleRegister right_reg = ToDoubleRegister(right);
1942    DoubleRegister result_reg = ToDoubleRegister(instr->result());
1943    Label check_nan_left, check_zero, return_left, return_right, done;
1944    __ fcmpu(left_reg, right_reg);
1945    __ bunordered(&check_nan_left);
1946    __ beq(&check_zero);
1947    __ b(cond, &return_left);
1948    __ b(&return_right);
1949
1950    __ bind(&check_zero);
1951    __ fcmpu(left_reg, kDoubleRegZero);
1952    __ bne(&return_left);  // left == right != 0.
1953
1954    // At this point, both left and right are either 0 or -0.
1955    if (operation == HMathMinMax::kMathMin) {
1956      // Min: The algorithm is: -((-L) + (-R)), which in case of L and R being
1957      // different registers is most efficiently expressed as -((-L) - R).
1958      __ fneg(left_reg, left_reg);
1959      if (left_reg.is(right_reg)) {
1960        __ fadd(result_reg, left_reg, right_reg);
1961      } else {
1962        __ fsub(result_reg, left_reg, right_reg);
1963      }
1964      __ fneg(result_reg, result_reg);
1965    } else {
1966      // Max: The following works because +0 + -0 == +0
1967      __ fadd(result_reg, left_reg, right_reg);
1968    }
1969    __ b(&done);
1970
1971    __ bind(&check_nan_left);
1972    __ fcmpu(left_reg, left_reg);
1973    __ bunordered(&return_left);  // left == NaN.
1974
1975    __ bind(&return_right);
1976    if (!right_reg.is(result_reg)) {
1977      __ fmr(result_reg, right_reg);
1978    }
1979    __ b(&done);
1980
1981    __ bind(&return_left);
1982    if (!left_reg.is(result_reg)) {
1983      __ fmr(result_reg, left_reg);
1984    }
1985    __ bind(&done);
1986  }
1987}
1988
1989
1990void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
1991  DoubleRegister left = ToDoubleRegister(instr->left());
1992  DoubleRegister right = ToDoubleRegister(instr->right());
1993  DoubleRegister result = ToDoubleRegister(instr->result());
1994  switch (instr->op()) {
1995    case Token::ADD:
1996      if (CpuFeatures::IsSupported(VSX)) {
1997        __ xsadddp(result, left, right);
1998      } else {
1999        __ fadd(result, left, right);
2000      }
2001      break;
2002    case Token::SUB:
2003      if (CpuFeatures::IsSupported(VSX)) {
2004        __ xssubdp(result, left, right);
2005      } else {
2006        __ fsub(result, left, right);
2007      }
2008      break;
2009    case Token::MUL:
2010      if (CpuFeatures::IsSupported(VSX)) {
2011        __ xsmuldp(result, left, right);
2012      } else {
2013        __ fmul(result, left, right);
2014      }
2015      break;
2016    case Token::DIV:
2017      if (CpuFeatures::IsSupported(VSX)) {
2018        __ xsdivdp(result, left, right);
2019      } else {
2020        __ fdiv(result, left, right);
2021      }
2022      break;
2023    case Token::MOD: {
2024      __ PrepareCallCFunction(0, 2, scratch0());
2025      __ MovToFloatParameters(left, right);
2026      __ CallCFunction(ExternalReference::mod_two_doubles_operation(isolate()),
2027                       0, 2);
2028      // Move the result in the double result register.
2029      __ MovFromFloatResult(result);
2030      break;
2031    }
2032    default:
2033      UNREACHABLE();
2034      break;
2035  }
2036}
2037
2038
2039void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
2040  DCHECK(ToRegister(instr->context()).is(cp));
2041  DCHECK(ToRegister(instr->left()).is(r4));
2042  DCHECK(ToRegister(instr->right()).is(r3));
2043  DCHECK(ToRegister(instr->result()).is(r3));
2044
2045  Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), instr->op()).code();
2046  CallCode(code, RelocInfo::CODE_TARGET, instr);
2047}
2048
2049
2050template <class InstrType>
2051void LCodeGen::EmitBranch(InstrType instr, Condition cond, CRegister cr) {
2052  int left_block = instr->TrueDestination(chunk_);
2053  int right_block = instr->FalseDestination(chunk_);
2054
2055  int next_block = GetNextEmittedBlock();
2056
2057  if (right_block == left_block || cond == al) {
2058    EmitGoto(left_block);
2059  } else if (left_block == next_block) {
2060    __ b(NegateCondition(cond), chunk_->GetAssemblyLabel(right_block), cr);
2061  } else if (right_block == next_block) {
2062    __ b(cond, chunk_->GetAssemblyLabel(left_block), cr);
2063  } else {
2064    __ b(cond, chunk_->GetAssemblyLabel(left_block), cr);
2065    __ b(chunk_->GetAssemblyLabel(right_block));
2066  }
2067}
2068
2069
2070template <class InstrType>
2071void LCodeGen::EmitTrueBranch(InstrType instr, Condition cond, CRegister cr) {
2072  int true_block = instr->TrueDestination(chunk_);
2073  __ b(cond, chunk_->GetAssemblyLabel(true_block), cr);
2074}
2075
2076
2077template <class InstrType>
2078void LCodeGen::EmitFalseBranch(InstrType instr, Condition cond, CRegister cr) {
2079  int false_block = instr->FalseDestination(chunk_);
2080  __ b(cond, chunk_->GetAssemblyLabel(false_block), cr);
2081}
2082
2083
2084void LCodeGen::DoDebugBreak(LDebugBreak* instr) { __ stop("LBreak"); }
2085
2086
2087void LCodeGen::DoBranch(LBranch* instr) {
2088  Representation r = instr->hydrogen()->value()->representation();
2089  DoubleRegister dbl_scratch = double_scratch0();
2090  const uint crZOrNaNBits = (1 << (31 - Assembler::encode_crbit(cr7, CR_EQ)) |
2091                             1 << (31 - Assembler::encode_crbit(cr7, CR_FU)));
2092
2093  if (r.IsInteger32()) {
2094    DCHECK(!info()->IsStub());
2095    Register reg = ToRegister(instr->value());
2096    __ cmpwi(reg, Operand::Zero());
2097    EmitBranch(instr, ne);
2098  } else if (r.IsSmi()) {
2099    DCHECK(!info()->IsStub());
2100    Register reg = ToRegister(instr->value());
2101    __ cmpi(reg, Operand::Zero());
2102    EmitBranch(instr, ne);
2103  } else if (r.IsDouble()) {
2104    DCHECK(!info()->IsStub());
2105    DoubleRegister reg = ToDoubleRegister(instr->value());
2106    // Test the double value. Zero and NaN are false.
2107    __ fcmpu(reg, kDoubleRegZero, cr7);
2108    __ mfcr(r0);
2109    __ andi(r0, r0, Operand(crZOrNaNBits));
2110    EmitBranch(instr, eq, cr0);
2111  } else {
2112    DCHECK(r.IsTagged());
2113    Register reg = ToRegister(instr->value());
2114    HType type = instr->hydrogen()->value()->type();
2115    if (type.IsBoolean()) {
2116      DCHECK(!info()->IsStub());
2117      __ CompareRoot(reg, Heap::kTrueValueRootIndex);
2118      EmitBranch(instr, eq);
2119    } else if (type.IsSmi()) {
2120      DCHECK(!info()->IsStub());
2121      __ cmpi(reg, Operand::Zero());
2122      EmitBranch(instr, ne);
2123    } else if (type.IsJSArray()) {
2124      DCHECK(!info()->IsStub());
2125      EmitBranch(instr, al);
2126    } else if (type.IsHeapNumber()) {
2127      DCHECK(!info()->IsStub());
2128      __ lfd(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
2129      // Test the double value. Zero and NaN are false.
2130      __ fcmpu(dbl_scratch, kDoubleRegZero, cr7);
2131      __ mfcr(r0);
2132      __ andi(r0, r0, Operand(crZOrNaNBits));
2133      EmitBranch(instr, eq, cr0);
2134    } else if (type.IsString()) {
2135      DCHECK(!info()->IsStub());
2136      __ LoadP(ip, FieldMemOperand(reg, String::kLengthOffset));
2137      __ cmpi(ip, Operand::Zero());
2138      EmitBranch(instr, ne);
2139    } else {
2140      ToBooleanHints expected = instr->hydrogen()->expected_input_types();
2141      // Avoid deopts in the case where we've never executed this path before.
2142      if (expected == ToBooleanHint::kNone) expected = ToBooleanHint::kAny;
2143
2144      if (expected & ToBooleanHint::kUndefined) {
2145        // undefined -> false.
2146        __ CompareRoot(reg, Heap::kUndefinedValueRootIndex);
2147        __ beq(instr->FalseLabel(chunk_));
2148      }
2149      if (expected & ToBooleanHint::kBoolean) {
2150        // Boolean -> its value.
2151        __ CompareRoot(reg, Heap::kTrueValueRootIndex);
2152        __ beq(instr->TrueLabel(chunk_));
2153        __ CompareRoot(reg, Heap::kFalseValueRootIndex);
2154        __ beq(instr->FalseLabel(chunk_));
2155      }
2156      if (expected & ToBooleanHint::kNull) {
2157        // 'null' -> false.
2158        __ CompareRoot(reg, Heap::kNullValueRootIndex);
2159        __ beq(instr->FalseLabel(chunk_));
2160      }
2161
2162      if (expected & ToBooleanHint::kSmallInteger) {
2163        // Smis: 0 -> false, all other -> true.
2164        __ cmpi(reg, Operand::Zero());
2165        __ beq(instr->FalseLabel(chunk_));
2166        __ JumpIfSmi(reg, instr->TrueLabel(chunk_));
2167      } else if (expected & ToBooleanHint::kNeedsMap) {
2168        // If we need a map later and have a Smi -> deopt.
2169        __ TestIfSmi(reg, r0);
2170        DeoptimizeIf(eq, instr, DeoptimizeReason::kSmi, cr0);
2171      }
2172
2173      const Register map = scratch0();
2174      if (expected & ToBooleanHint::kNeedsMap) {
2175        __ LoadP(map, FieldMemOperand(reg, HeapObject::kMapOffset));
2176
2177        if (expected & ToBooleanHint::kCanBeUndetectable) {
2178          // Undetectable -> false.
2179          __ lbz(ip, FieldMemOperand(map, Map::kBitFieldOffset));
2180          __ TestBit(ip, Map::kIsUndetectable, r0);
2181          __ bne(instr->FalseLabel(chunk_), cr0);
2182        }
2183      }
2184
2185      if (expected & ToBooleanHint::kReceiver) {
2186        // spec object -> true.
2187        __ CompareInstanceType(map, ip, FIRST_JS_RECEIVER_TYPE);
2188        __ bge(instr->TrueLabel(chunk_));
2189      }
2190
2191      if (expected & ToBooleanHint::kString) {
2192        // String value -> false iff empty.
2193        Label not_string;
2194        __ CompareInstanceType(map, ip, FIRST_NONSTRING_TYPE);
2195        __ bge(&not_string);
2196        __ LoadP(ip, FieldMemOperand(reg, String::kLengthOffset));
2197        __ cmpi(ip, Operand::Zero());
2198        __ bne(instr->TrueLabel(chunk_));
2199        __ b(instr->FalseLabel(chunk_));
2200        __ bind(&not_string);
2201      }
2202
2203      if (expected & ToBooleanHint::kSymbol) {
2204        // Symbol value -> true.
2205        __ CompareInstanceType(map, ip, SYMBOL_TYPE);
2206        __ beq(instr->TrueLabel(chunk_));
2207      }
2208
2209      if (expected & ToBooleanHint::kHeapNumber) {
2210        // heap number -> false iff +0, -0, or NaN.
2211        Label not_heap_number;
2212        __ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
2213        __ bne(&not_heap_number);
2214        __ lfd(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
2215        // Test the double value. Zero and NaN are false.
2216        __ fcmpu(dbl_scratch, kDoubleRegZero, cr7);
2217        __ mfcr(r0);
2218        __ andi(r0, r0, Operand(crZOrNaNBits));
2219        __ bne(instr->FalseLabel(chunk_), cr0);
2220        __ b(instr->TrueLabel(chunk_));
2221        __ bind(&not_heap_number);
2222      }
2223
2224      if (expected != ToBooleanHint::kAny) {
2225        // We've seen something for the first time -> deopt.
2226        // This can only happen if we are not generic already.
2227        DeoptimizeIf(al, instr, DeoptimizeReason::kUnexpectedObject);
2228      }
2229    }
2230  }
2231}
2232
2233
2234void LCodeGen::EmitGoto(int block) {
2235  if (!IsNextEmittedBlock(block)) {
2236    __ b(chunk_->GetAssemblyLabel(LookupDestination(block)));
2237  }
2238}
2239
2240
2241void LCodeGen::DoGoto(LGoto* instr) { EmitGoto(instr->block_id()); }
2242
2243
2244Condition LCodeGen::TokenToCondition(Token::Value op) {
2245  Condition cond = kNoCondition;
2246  switch (op) {
2247    case Token::EQ:
2248    case Token::EQ_STRICT:
2249      cond = eq;
2250      break;
2251    case Token::NE:
2252    case Token::NE_STRICT:
2253      cond = ne;
2254      break;
2255    case Token::LT:
2256      cond = lt;
2257      break;
2258    case Token::GT:
2259      cond = gt;
2260      break;
2261    case Token::LTE:
2262      cond = le;
2263      break;
2264    case Token::GTE:
2265      cond = ge;
2266      break;
2267    case Token::IN:
2268    case Token::INSTANCEOF:
2269    default:
2270      UNREACHABLE();
2271  }
2272  return cond;
2273}
2274
2275
2276void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
2277  LOperand* left = instr->left();
2278  LOperand* right = instr->right();
2279  bool is_unsigned =
2280      instr->hydrogen()->left()->CheckFlag(HInstruction::kUint32) ||
2281      instr->hydrogen()->right()->CheckFlag(HInstruction::kUint32);
2282  Condition cond = TokenToCondition(instr->op());
2283
2284  if (left->IsConstantOperand() && right->IsConstantOperand()) {
2285    // We can statically evaluate the comparison.
2286    double left_val = ToDouble(LConstantOperand::cast(left));
2287    double right_val = ToDouble(LConstantOperand::cast(right));
2288    int next_block = Token::EvalComparison(instr->op(), left_val, right_val)
2289                         ? instr->TrueDestination(chunk_)
2290                         : instr->FalseDestination(chunk_);
2291    EmitGoto(next_block);
2292  } else {
2293    if (instr->is_double()) {
2294      // Compare left and right operands as doubles and load the
2295      // resulting flags into the normal status register.
2296      __ fcmpu(ToDoubleRegister(left), ToDoubleRegister(right));
2297      // If a NaN is involved, i.e. the result is unordered,
2298      // jump to false block label.
2299      __ bunordered(instr->FalseLabel(chunk_));
2300    } else {
2301      if (right->IsConstantOperand()) {
2302        int32_t value = ToInteger32(LConstantOperand::cast(right));
2303        if (instr->hydrogen_value()->representation().IsSmi()) {
2304          if (is_unsigned) {
2305            __ CmplSmiLiteral(ToRegister(left), Smi::FromInt(value), r0);
2306          } else {
2307            __ CmpSmiLiteral(ToRegister(left), Smi::FromInt(value), r0);
2308          }
2309        } else {
2310          if (is_unsigned) {
2311            __ Cmplwi(ToRegister(left), Operand(value), r0);
2312          } else {
2313            __ Cmpwi(ToRegister(left), Operand(value), r0);
2314          }
2315        }
2316      } else if (left->IsConstantOperand()) {
2317        int32_t value = ToInteger32(LConstantOperand::cast(left));
2318        if (instr->hydrogen_value()->representation().IsSmi()) {
2319          if (is_unsigned) {
2320            __ CmplSmiLiteral(ToRegister(right), Smi::FromInt(value), r0);
2321          } else {
2322            __ CmpSmiLiteral(ToRegister(right), Smi::FromInt(value), r0);
2323          }
2324        } else {
2325          if (is_unsigned) {
2326            __ Cmplwi(ToRegister(right), Operand(value), r0);
2327          } else {
2328            __ Cmpwi(ToRegister(right), Operand(value), r0);
2329          }
2330        }
2331        // We commuted the operands, so commute the condition.
2332        cond = CommuteCondition(cond);
2333      } else if (instr->hydrogen_value()->representation().IsSmi()) {
2334        if (is_unsigned) {
2335          __ cmpl(ToRegister(left), ToRegister(right));
2336        } else {
2337          __ cmp(ToRegister(left), ToRegister(right));
2338        }
2339      } else {
2340        if (is_unsigned) {
2341          __ cmplw(ToRegister(left), ToRegister(right));
2342        } else {
2343          __ cmpw(ToRegister(left), ToRegister(right));
2344        }
2345      }
2346    }
2347    EmitBranch(instr, cond);
2348  }
2349}
2350
2351
2352void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
2353  Register left = ToRegister(instr->left());
2354  Register right = ToRegister(instr->right());
2355
2356  __ cmp(left, right);
2357  EmitBranch(instr, eq);
2358}
2359
2360
2361void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) {
2362  if (instr->hydrogen()->representation().IsTagged()) {
2363    Register input_reg = ToRegister(instr->object());
2364    __ mov(ip, Operand(factory()->the_hole_value()));
2365    __ cmp(input_reg, ip);
2366    EmitBranch(instr, eq);
2367    return;
2368  }
2369
2370  DoubleRegister input_reg = ToDoubleRegister(instr->object());
2371  __ fcmpu(input_reg, input_reg);
2372  EmitFalseBranch(instr, ordered);
2373
2374  Register scratch = scratch0();
2375  __ MovDoubleHighToInt(scratch, input_reg);
2376  __ Cmpi(scratch, Operand(kHoleNanUpper32), r0);
2377  EmitBranch(instr, eq);
2378}
2379
2380
2381Condition LCodeGen::EmitIsString(Register input, Register temp1,
2382                                 Label* is_not_string,
2383                                 SmiCheck check_needed = INLINE_SMI_CHECK) {
2384  if (check_needed == INLINE_SMI_CHECK) {
2385    __ JumpIfSmi(input, is_not_string);
2386  }
2387  __ CompareObjectType(input, temp1, temp1, FIRST_NONSTRING_TYPE);
2388
2389  return lt;
2390}
2391
2392
2393void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
2394  Register reg = ToRegister(instr->value());
2395  Register temp1 = ToRegister(instr->temp());
2396
2397  SmiCheck check_needed = instr->hydrogen()->value()->type().IsHeapObject()
2398                              ? OMIT_SMI_CHECK
2399                              : INLINE_SMI_CHECK;
2400  Condition true_cond =
2401      EmitIsString(reg, temp1, instr->FalseLabel(chunk_), check_needed);
2402
2403  EmitBranch(instr, true_cond);
2404}
2405
2406
2407void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
2408  Register input_reg = EmitLoadRegister(instr->value(), ip);
2409  __ TestIfSmi(input_reg, r0);
2410  EmitBranch(instr, eq, cr0);
2411}
2412
2413
2414void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
2415  Register input = ToRegister(instr->value());
2416  Register temp = ToRegister(instr->temp());
2417
2418  if (!instr->hydrogen()->value()->type().IsHeapObject()) {
2419    __ JumpIfSmi(input, instr->FalseLabel(chunk_));
2420  }
2421  __ LoadP(temp, FieldMemOperand(input, HeapObject::kMapOffset));
2422  __ lbz(temp, FieldMemOperand(temp, Map::kBitFieldOffset));
2423  __ TestBit(temp, Map::kIsUndetectable, r0);
2424  EmitBranch(instr, ne, cr0);
2425}
2426
2427
2428static Condition ComputeCompareCondition(Token::Value op) {
2429  switch (op) {
2430    case Token::EQ_STRICT:
2431    case Token::EQ:
2432      return eq;
2433    case Token::LT:
2434      return lt;
2435    case Token::GT:
2436      return gt;
2437    case Token::LTE:
2438      return le;
2439    case Token::GTE:
2440      return ge;
2441    default:
2442      UNREACHABLE();
2443      return kNoCondition;
2444  }
2445}
2446
2447
2448void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
2449  DCHECK(ToRegister(instr->context()).is(cp));
2450  DCHECK(ToRegister(instr->left()).is(r4));
2451  DCHECK(ToRegister(instr->right()).is(r3));
2452
2453  Handle<Code> code = CodeFactory::StringCompare(isolate(), instr->op()).code();
2454  CallCode(code, RelocInfo::CODE_TARGET, instr);
2455  __ CompareRoot(r3, Heap::kTrueValueRootIndex);
2456  EmitBranch(instr, eq);
2457}
2458
2459
2460static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
2461  InstanceType from = instr->from();
2462  InstanceType to = instr->to();
2463  if (from == FIRST_TYPE) return to;
2464  DCHECK(from == to || to == LAST_TYPE);
2465  return from;
2466}
2467
2468
2469static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
2470  InstanceType from = instr->from();
2471  InstanceType to = instr->to();
2472  if (from == to) return eq;
2473  if (to == LAST_TYPE) return ge;
2474  if (from == FIRST_TYPE) return le;
2475  UNREACHABLE();
2476  return eq;
2477}
2478
2479
2480void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
2481  Register scratch = scratch0();
2482  Register input = ToRegister(instr->value());
2483
2484  if (!instr->hydrogen()->value()->type().IsHeapObject()) {
2485    __ JumpIfSmi(input, instr->FalseLabel(chunk_));
2486  }
2487
2488  __ CompareObjectType(input, scratch, scratch, TestType(instr->hydrogen()));
2489  EmitBranch(instr, BranchCondition(instr->hydrogen()));
2490}
2491
2492// Branches to a label or falls through with the answer in flags.  Trashes
2493// the temp registers, but not the input.
2494void LCodeGen::EmitClassOfTest(Label* is_true, Label* is_false,
2495                               Handle<String> class_name, Register input,
2496                               Register temp, Register temp2) {
2497  DCHECK(!input.is(temp));
2498  DCHECK(!input.is(temp2));
2499  DCHECK(!temp.is(temp2));
2500
2501  __ JumpIfSmi(input, is_false);
2502
2503  __ CompareObjectType(input, temp, temp2, FIRST_FUNCTION_TYPE);
2504  STATIC_ASSERT(LAST_FUNCTION_TYPE == LAST_TYPE);
2505  if (String::Equals(isolate()->factory()->Function_string(), class_name)) {
2506    __ bge(is_true);
2507  } else {
2508    __ bge(is_false);
2509  }
2510
2511  // Check if the constructor in the map is a function.
2512  Register instance_type = ip;
2513  __ GetMapConstructor(temp, temp, temp2, instance_type);
2514
2515  // Objects with a non-function constructor have class 'Object'.
2516  __ cmpi(instance_type, Operand(JS_FUNCTION_TYPE));
2517  if (String::Equals(isolate()->factory()->Object_string(), class_name)) {
2518    __ bne(is_true);
2519  } else {
2520    __ bne(is_false);
2521  }
2522
2523  // temp now contains the constructor function. Grab the
2524  // instance class name from there.
2525  __ LoadP(temp, FieldMemOperand(temp, JSFunction::kSharedFunctionInfoOffset));
2526  __ LoadP(temp,
2527           FieldMemOperand(temp, SharedFunctionInfo::kInstanceClassNameOffset));
2528  // The class name we are testing against is internalized since it's a literal.
2529  // The name in the constructor is internalized because of the way the context
2530  // is booted.  This routine isn't expected to work for random API-created
2531  // classes and it doesn't have to because you can't access it with natives
2532  // syntax.  Since both sides are internalized it is sufficient to use an
2533  // identity comparison.
2534  __ Cmpi(temp, Operand(class_name), r0);
2535  // End with the answer in flags.
2536}
2537
2538
2539void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
2540  Register input = ToRegister(instr->value());
2541  Register temp = scratch0();
2542  Register temp2 = ToRegister(instr->temp());
2543  Handle<String> class_name = instr->hydrogen()->class_name();
2544
2545  EmitClassOfTest(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_),
2546                  class_name, input, temp, temp2);
2547
2548  EmitBranch(instr, eq);
2549}
2550
2551
2552void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
2553  Register reg = ToRegister(instr->value());
2554  Register temp = ToRegister(instr->temp());
2555
2556  __ LoadP(temp, FieldMemOperand(reg, HeapObject::kMapOffset));
2557  __ Cmpi(temp, Operand(instr->map()), r0);
2558  EmitBranch(instr, eq);
2559}
2560
2561
2562void LCodeGen::DoHasInPrototypeChainAndBranch(
2563    LHasInPrototypeChainAndBranch* instr) {
2564  Register const object = ToRegister(instr->object());
2565  Register const object_map = scratch0();
2566  Register const object_instance_type = ip;
2567  Register const object_prototype = object_map;
2568  Register const prototype = ToRegister(instr->prototype());
2569
2570  // The {object} must be a spec object.  It's sufficient to know that {object}
2571  // is not a smi, since all other non-spec objects have {null} prototypes and
2572  // will be ruled out below.
2573  if (instr->hydrogen()->ObjectNeedsSmiCheck()) {
2574    __ TestIfSmi(object, r0);
2575    EmitFalseBranch(instr, eq, cr0);
2576  }
2577
2578  // Loop through the {object}s prototype chain looking for the {prototype}.
2579  __ LoadP(object_map, FieldMemOperand(object, HeapObject::kMapOffset));
2580  Label loop;
2581  __ bind(&loop);
2582
2583  // Deoptimize if the object needs to be access checked.
2584  __ lbz(object_instance_type,
2585         FieldMemOperand(object_map, Map::kBitFieldOffset));
2586  __ TestBit(object_instance_type, Map::kIsAccessCheckNeeded, r0);
2587  DeoptimizeIf(ne, instr, DeoptimizeReason::kAccessCheck, cr0);
2588  // Deoptimize for proxies.
2589  __ CompareInstanceType(object_map, object_instance_type, JS_PROXY_TYPE);
2590  DeoptimizeIf(eq, instr, DeoptimizeReason::kProxy);
2591  __ LoadP(object_prototype,
2592           FieldMemOperand(object_map, Map::kPrototypeOffset));
2593  __ CompareRoot(object_prototype, Heap::kNullValueRootIndex);
2594  EmitFalseBranch(instr, eq);
2595  __ cmp(object_prototype, prototype);
2596  EmitTrueBranch(instr, eq);
2597  __ LoadP(object_map,
2598           FieldMemOperand(object_prototype, HeapObject::kMapOffset));
2599  __ b(&loop);
2600}
2601
2602
2603void LCodeGen::DoCmpT(LCmpT* instr) {
2604  DCHECK(ToRegister(instr->context()).is(cp));
2605  Token::Value op = instr->op();
2606
2607  Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
2608  CallCode(ic, RelocInfo::CODE_TARGET, instr);
2609  // This instruction also signals no smi code inlined
2610  __ cmpi(r3, Operand::Zero());
2611
2612  Condition condition = ComputeCompareCondition(op);
2613  if (CpuFeatures::IsSupported(ISELECT)) {
2614    __ LoadRoot(r4, Heap::kTrueValueRootIndex);
2615    __ LoadRoot(r5, Heap::kFalseValueRootIndex);
2616    __ isel(condition, ToRegister(instr->result()), r4, r5);
2617  } else {
2618    Label true_value, done;
2619
2620    __ b(condition, &true_value);
2621
2622    __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);
2623    __ b(&done);
2624
2625    __ bind(&true_value);
2626    __ LoadRoot(ToRegister(instr->result()), Heap::kTrueValueRootIndex);
2627
2628    __ bind(&done);
2629  }
2630}
2631
2632
2633void LCodeGen::DoReturn(LReturn* instr) {
2634  if (FLAG_trace && info()->IsOptimizing()) {
2635    // Push the return value on the stack as the parameter.
2636    // Runtime::TraceExit returns its parameter in r3.  We're leaving the code
2637    // managed by the register allocator and tearing down the frame, it's
2638    // safe to write to the context register.
2639    __ push(r3);
2640    __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
2641    __ CallRuntime(Runtime::kTraceExit);
2642  }
2643  if (info()->saves_caller_doubles()) {
2644    RestoreCallerDoubles();
2645  }
2646  if (instr->has_constant_parameter_count()) {
2647    int parameter_count = ToInteger32(instr->constant_parameter_count());
2648    int32_t sp_delta = (parameter_count + 1) * kPointerSize;
2649    if (NeedsEagerFrame()) {
2650      masm_->LeaveFrame(StackFrame::JAVA_SCRIPT, sp_delta);
2651    } else if (sp_delta != 0) {
2652      __ addi(sp, sp, Operand(sp_delta));
2653    }
2654  } else {
2655    DCHECK(info()->IsStub());  // Functions would need to drop one more value.
2656    Register reg = ToRegister(instr->parameter_count());
2657    // The argument count parameter is a smi
2658    if (NeedsEagerFrame()) {
2659      masm_->LeaveFrame(StackFrame::JAVA_SCRIPT);
2660    }
2661    __ SmiToPtrArrayOffset(r0, reg);
2662    __ add(sp, sp, r0);
2663  }
2664
2665  __ blr();
2666}
2667
2668
2669void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
2670  Register context = ToRegister(instr->context());
2671  Register result = ToRegister(instr->result());
2672  __ LoadP(result, ContextMemOperand(context, instr->slot_index()));
2673  if (instr->hydrogen()->RequiresHoleCheck()) {
2674    __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
2675    if (instr->hydrogen()->DeoptimizesOnHole()) {
2676      __ cmp(result, ip);
2677      DeoptimizeIf(eq, instr, DeoptimizeReason::kHole);
2678    } else {
2679      if (CpuFeatures::IsSupported(ISELECT)) {
2680        Register scratch = scratch0();
2681        __ mov(scratch, Operand(factory()->undefined_value()));
2682        __ cmp(result, ip);
2683        __ isel(eq, result, scratch, result);
2684      } else {
2685        Label skip;
2686        __ cmp(result, ip);
2687        __ bne(&skip);
2688        __ mov(result, Operand(factory()->undefined_value()));
2689        __ bind(&skip);
2690      }
2691    }
2692  }
2693}
2694
2695
2696void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
2697  Register context = ToRegister(instr->context());
2698  Register value = ToRegister(instr->value());
2699  Register scratch = scratch0();
2700  MemOperand target = ContextMemOperand(context, instr->slot_index());
2701
2702  Label skip_assignment;
2703
2704  if (instr->hydrogen()->RequiresHoleCheck()) {
2705    __ LoadP(scratch, target);
2706    __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
2707    __ cmp(scratch, ip);
2708    if (instr->hydrogen()->DeoptimizesOnHole()) {
2709      DeoptimizeIf(eq, instr, DeoptimizeReason::kHole);
2710    } else {
2711      __ bne(&skip_assignment);
2712    }
2713  }
2714
2715  __ StoreP(value, target, r0);
2716  if (instr->hydrogen()->NeedsWriteBarrier()) {
2717    SmiCheck check_needed = instr->hydrogen()->value()->type().IsHeapObject()
2718                                ? OMIT_SMI_CHECK
2719                                : INLINE_SMI_CHECK;
2720    __ RecordWriteContextSlot(context, target.offset(), value, scratch,
2721                              GetLinkRegisterState(), kSaveFPRegs,
2722                              EMIT_REMEMBERED_SET, check_needed);
2723  }
2724
2725  __ bind(&skip_assignment);
2726}
2727
2728
2729void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
2730  HObjectAccess access = instr->hydrogen()->access();
2731  int offset = access.offset();
2732  Register object = ToRegister(instr->object());
2733
2734  if (access.IsExternalMemory()) {
2735    Register result = ToRegister(instr->result());
2736    MemOperand operand = MemOperand(object, offset);
2737    __ LoadRepresentation(result, operand, access.representation(), r0);
2738    return;
2739  }
2740
2741  if (instr->hydrogen()->representation().IsDouble()) {
2742    DCHECK(access.IsInobject());
2743    DoubleRegister result = ToDoubleRegister(instr->result());
2744    __ lfd(result, FieldMemOperand(object, offset));
2745    return;
2746  }
2747
2748  Register result = ToRegister(instr->result());
2749  if (!access.IsInobject()) {
2750    __ LoadP(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
2751    object = result;
2752  }
2753
2754  Representation representation = access.representation();
2755
2756#if V8_TARGET_ARCH_PPC64
2757  // 64-bit Smi optimization
2758  if (representation.IsSmi() &&
2759      instr->hydrogen()->representation().IsInteger32()) {
2760    // Read int value directly from upper half of the smi.
2761    offset = SmiWordOffset(offset);
2762    representation = Representation::Integer32();
2763  }
2764#endif
2765
2766  __ LoadRepresentation(result, FieldMemOperand(object, offset), representation,
2767                        r0);
2768}
2769
2770
2771void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
2772  Register scratch = scratch0();
2773  Register function = ToRegister(instr->function());
2774  Register result = ToRegister(instr->result());
2775
2776  // Get the prototype or initial map from the function.
2777  __ LoadP(result,
2778           FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
2779
2780  // Check that the function has a prototype or an initial map.
2781  __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
2782  __ cmp(result, ip);
2783  DeoptimizeIf(eq, instr, DeoptimizeReason::kHole);
2784
2785  // If the function does not have an initial map, we're done.
2786  if (CpuFeatures::IsSupported(ISELECT)) {
2787    // Get the prototype from the initial map (optimistic).
2788    __ LoadP(ip, FieldMemOperand(result, Map::kPrototypeOffset));
2789    __ CompareObjectType(result, scratch, scratch, MAP_TYPE);
2790    __ isel(eq, result, ip, result);
2791  } else {
2792    Label done;
2793    __ CompareObjectType(result, scratch, scratch, MAP_TYPE);
2794    __ bne(&done);
2795
2796    // Get the prototype from the initial map.
2797    __ LoadP(result, FieldMemOperand(result, Map::kPrototypeOffset));
2798
2799    // All done.
2800    __ bind(&done);
2801  }
2802}
2803
2804
2805void LCodeGen::DoLoadRoot(LLoadRoot* instr) {
2806  Register result = ToRegister(instr->result());
2807  __ LoadRoot(result, instr->index());
2808}
2809
2810
2811void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
2812  Register arguments = ToRegister(instr->arguments());
2813  Register result = ToRegister(instr->result());
2814  // There are two words between the frame pointer and the last argument.
2815  // Subtracting from length accounts for one of them add one more.
2816  if (instr->length()->IsConstantOperand()) {
2817    int const_length = ToInteger32(LConstantOperand::cast(instr->length()));
2818    if (instr->index()->IsConstantOperand()) {
2819      int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
2820      int index = (const_length - const_index) + 1;
2821      __ LoadP(result, MemOperand(arguments, index * kPointerSize), r0);
2822    } else {
2823      Register index = ToRegister(instr->index());
2824      __ subfic(result, index, Operand(const_length + 1));
2825      __ ShiftLeftImm(result, result, Operand(kPointerSizeLog2));
2826      __ LoadPX(result, MemOperand(arguments, result));
2827    }
2828  } else if (instr->index()->IsConstantOperand()) {
2829    Register length = ToRegister(instr->length());
2830    int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
2831    int loc = const_index - 1;
2832    if (loc != 0) {
2833      __ subi(result, length, Operand(loc));
2834      __ ShiftLeftImm(result, result, Operand(kPointerSizeLog2));
2835      __ LoadPX(result, MemOperand(arguments, result));
2836    } else {
2837      __ ShiftLeftImm(result, length, Operand(kPointerSizeLog2));
2838      __ LoadPX(result, MemOperand(arguments, result));
2839    }
2840  } else {
2841    Register length = ToRegister(instr->length());
2842    Register index = ToRegister(instr->index());
2843    __ sub(result, length, index);
2844    __ addi(result, result, Operand(1));
2845    __ ShiftLeftImm(result, result, Operand(kPointerSizeLog2));
2846    __ LoadPX(result, MemOperand(arguments, result));
2847  }
2848}
2849
2850
2851void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
2852  Register external_pointer = ToRegister(instr->elements());
2853  Register key = no_reg;
2854  ElementsKind elements_kind = instr->elements_kind();
2855  bool key_is_constant = instr->key()->IsConstantOperand();
2856  int constant_key = 0;
2857  if (key_is_constant) {
2858    constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
2859    if (constant_key & 0xF0000000) {
2860      Abort(kArrayIndexConstantValueTooBig);
2861    }
2862  } else {
2863    key = ToRegister(instr->key());
2864  }
2865  int element_size_shift = ElementsKindToShiftSize(elements_kind);
2866  bool key_is_smi = instr->hydrogen()->key()->representation().IsSmi();
2867  int base_offset = instr->base_offset();
2868
2869  if (elements_kind == FLOAT32_ELEMENTS || elements_kind == FLOAT64_ELEMENTS) {
2870    DoubleRegister result = ToDoubleRegister(instr->result());
2871    if (key_is_constant) {
2872      __ Add(scratch0(), external_pointer, constant_key << element_size_shift,
2873             r0);
2874    } else {
2875      __ IndexToArrayOffset(r0, key, element_size_shift, key_is_smi);
2876      __ add(scratch0(), external_pointer, r0);
2877    }
2878    if (elements_kind == FLOAT32_ELEMENTS) {
2879      __ lfs(result, MemOperand(scratch0(), base_offset));
2880    } else {  // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS
2881      __ lfd(result, MemOperand(scratch0(), base_offset));
2882    }
2883  } else {
2884    Register result = ToRegister(instr->result());
2885    MemOperand mem_operand =
2886        PrepareKeyedOperand(key, external_pointer, key_is_constant, key_is_smi,
2887                            constant_key, element_size_shift, base_offset);
2888    switch (elements_kind) {
2889      case INT8_ELEMENTS:
2890        if (key_is_constant) {
2891          __ LoadByte(result, mem_operand, r0);
2892        } else {
2893          __ lbzx(result, mem_operand);
2894        }
2895        __ extsb(result, result);
2896        break;
2897      case UINT8_ELEMENTS:
2898      case UINT8_CLAMPED_ELEMENTS:
2899        if (key_is_constant) {
2900          __ LoadByte(result, mem_operand, r0);
2901        } else {
2902          __ lbzx(result, mem_operand);
2903        }
2904        break;
2905      case INT16_ELEMENTS:
2906        if (key_is_constant) {
2907          __ LoadHalfWordArith(result, mem_operand, r0);
2908        } else {
2909          __ lhax(result, mem_operand);
2910        }
2911        break;
2912      case UINT16_ELEMENTS:
2913        if (key_is_constant) {
2914          __ LoadHalfWord(result, mem_operand, r0);
2915        } else {
2916          __ lhzx(result, mem_operand);
2917        }
2918        break;
2919      case INT32_ELEMENTS:
2920        if (key_is_constant) {
2921          __ LoadWordArith(result, mem_operand, r0);
2922        } else {
2923          __ lwax(result, mem_operand);
2924        }
2925        break;
2926      case UINT32_ELEMENTS:
2927        if (key_is_constant) {
2928          __ LoadWord(result, mem_operand, r0);
2929        } else {
2930          __ lwzx(result, mem_operand);
2931        }
2932        if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
2933          __ lis(r0, Operand(SIGN_EXT_IMM16(0x8000)));
2934          __ cmplw(result, r0);
2935          DeoptimizeIf(ge, instr, DeoptimizeReason::kNegativeValue);
2936        }
2937        break;
2938      case FLOAT32_ELEMENTS:
2939      case FLOAT64_ELEMENTS:
2940      case FAST_HOLEY_DOUBLE_ELEMENTS:
2941      case FAST_HOLEY_ELEMENTS:
2942      case FAST_HOLEY_SMI_ELEMENTS:
2943      case FAST_DOUBLE_ELEMENTS:
2944      case FAST_ELEMENTS:
2945      case FAST_SMI_ELEMENTS:
2946      case DICTIONARY_ELEMENTS:
2947      case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
2948      case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
2949      case FAST_STRING_WRAPPER_ELEMENTS:
2950      case SLOW_STRING_WRAPPER_ELEMENTS:
2951      case NO_ELEMENTS:
2952        UNREACHABLE();
2953        break;
2954    }
2955  }
2956}
2957
2958
2959void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
2960  Register elements = ToRegister(instr->elements());
2961  bool key_is_constant = instr->key()->IsConstantOperand();
2962  Register key = no_reg;
2963  DoubleRegister result = ToDoubleRegister(instr->result());
2964  Register scratch = scratch0();
2965
2966  int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
2967  bool key_is_smi = instr->hydrogen()->key()->representation().IsSmi();
2968  int constant_key = 0;
2969  if (key_is_constant) {
2970    constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
2971    if (constant_key & 0xF0000000) {
2972      Abort(kArrayIndexConstantValueTooBig);
2973    }
2974  } else {
2975    key = ToRegister(instr->key());
2976  }
2977
2978  int base_offset = instr->base_offset() + constant_key * kDoubleSize;
2979  if (!key_is_constant) {
2980    __ IndexToArrayOffset(r0, key, element_size_shift, key_is_smi);
2981    __ add(scratch, elements, r0);
2982    elements = scratch;
2983  }
2984  if (!is_int16(base_offset)) {
2985    __ Add(scratch, elements, base_offset, r0);
2986    base_offset = 0;
2987    elements = scratch;
2988  }
2989  __ lfd(result, MemOperand(elements, base_offset));
2990
2991  if (instr->hydrogen()->RequiresHoleCheck()) {
2992    if (is_int16(base_offset + Register::kExponentOffset)) {
2993      __ lwz(scratch,
2994             MemOperand(elements, base_offset + Register::kExponentOffset));
2995    } else {
2996      __ addi(scratch, elements, Operand(base_offset));
2997      __ lwz(scratch, MemOperand(scratch, Register::kExponentOffset));
2998    }
2999    __ Cmpi(scratch, Operand(kHoleNanUpper32), r0);
3000    DeoptimizeIf(eq, instr, DeoptimizeReason::kHole);
3001  }
3002}
3003
3004
3005void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
3006  HLoadKeyed* hinstr = instr->hydrogen();
3007  Register elements = ToRegister(instr->elements());
3008  Register result = ToRegister(instr->result());
3009  Register scratch = scratch0();
3010  Register store_base = scratch;
3011  int offset = instr->base_offset();
3012
3013  if (instr->key()->IsConstantOperand()) {
3014    LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
3015    offset += ToInteger32(const_operand) * kPointerSize;
3016    store_base = elements;
3017  } else {
3018    Register key = ToRegister(instr->key());
3019    // Even though the HLoadKeyed instruction forces the input
3020    // representation for the key to be an integer, the input gets replaced
3021    // during bound check elimination with the index argument to the bounds
3022    // check, which can be tagged, so that case must be handled here, too.
3023    if (hinstr->key()->representation().IsSmi()) {
3024      __ SmiToPtrArrayOffset(r0, key);
3025    } else {
3026      __ ShiftLeftImm(r0, key, Operand(kPointerSizeLog2));
3027    }
3028    __ add(scratch, elements, r0);
3029  }
3030
3031  bool requires_hole_check = hinstr->RequiresHoleCheck();
3032  Representation representation = hinstr->representation();
3033
3034#if V8_TARGET_ARCH_PPC64
3035  // 64-bit Smi optimization
3036  if (representation.IsInteger32() &&
3037      hinstr->elements_kind() == FAST_SMI_ELEMENTS) {
3038    DCHECK(!requires_hole_check);
3039    // Read int value directly from upper half of the smi.
3040    offset = SmiWordOffset(offset);
3041  }
3042#endif
3043
3044  __ LoadRepresentation(result, MemOperand(store_base, offset), representation,
3045                        r0);
3046
3047  // Check for the hole value.
3048  if (requires_hole_check) {
3049    if (IsFastSmiElementsKind(hinstr->elements_kind())) {
3050      __ TestIfSmi(result, r0);
3051      DeoptimizeIf(ne, instr, DeoptimizeReason::kNotASmi, cr0);
3052    } else {
3053      __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
3054      __ cmp(result, scratch);
3055      DeoptimizeIf(eq, instr, DeoptimizeReason::kHole);
3056    }
3057  } else if (instr->hydrogen()->hole_mode() == CONVERT_HOLE_TO_UNDEFINED) {
3058    DCHECK(instr->hydrogen()->elements_kind() == FAST_HOLEY_ELEMENTS);
3059    Label done;
3060    __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
3061    __ cmp(result, scratch);
3062    __ bne(&done);
3063    if (info()->IsStub()) {
3064      // A stub can safely convert the hole to undefined only if the array
3065      // protector cell contains (Smi) Isolate::kProtectorValid. Otherwise
3066      // it needs to bail out.
3067      __ LoadRoot(result, Heap::kArrayProtectorRootIndex);
3068      __ LoadP(result, FieldMemOperand(result, PropertyCell::kValueOffset));
3069      __ CmpSmiLiteral(result, Smi::FromInt(Isolate::kProtectorValid), r0);
3070      DeoptimizeIf(ne, instr, DeoptimizeReason::kHole);
3071    }
3072    __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
3073    __ bind(&done);
3074  }
3075}
3076
3077
3078void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) {
3079  if (instr->is_fixed_typed_array()) {
3080    DoLoadKeyedExternalArray(instr);
3081  } else if (instr->hydrogen()->representation().IsDouble()) {
3082    DoLoadKeyedFixedDoubleArray(instr);
3083  } else {
3084    DoLoadKeyedFixedArray(instr);
3085  }
3086}
3087
3088
3089MemOperand LCodeGen::PrepareKeyedOperand(Register key, Register base,
3090                                         bool key_is_constant, bool key_is_smi,
3091                                         int constant_key,
3092                                         int element_size_shift,
3093                                         int base_offset) {
3094  Register scratch = scratch0();
3095
3096  if (key_is_constant) {
3097    return MemOperand(base, (constant_key << element_size_shift) + base_offset);
3098  }
3099
3100  bool needs_shift =
3101      (element_size_shift != (key_is_smi ? kSmiTagSize + kSmiShiftSize : 0));
3102
3103  if (!(base_offset || needs_shift)) {
3104    return MemOperand(base, key);
3105  }
3106
3107  if (needs_shift) {
3108    __ IndexToArrayOffset(scratch, key, element_size_shift, key_is_smi);
3109    key = scratch;
3110  }
3111
3112  if (base_offset) {
3113    __ Add(scratch, key, base_offset, r0);
3114  }
3115
3116  return MemOperand(base, scratch);
3117}
3118
3119
3120void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
3121  Register scratch = scratch0();
3122  Register result = ToRegister(instr->result());
3123
3124  if (instr->hydrogen()->from_inlined()) {
3125    __ subi(result, sp, Operand(2 * kPointerSize));
3126  } else if (instr->hydrogen()->arguments_adaptor()) {
3127    // Check if the calling frame is an arguments adaptor frame.
3128    __ LoadP(scratch, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
3129    __ LoadP(
3130        result,
3131        MemOperand(scratch, CommonFrameConstants::kContextOrFrameTypeOffset));
3132    __ cmpi(result,
3133            Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
3134
3135    // Result is the frame pointer for the frame if not adapted and for the real
3136    // frame below the adaptor frame if adapted.
3137    if (CpuFeatures::IsSupported(ISELECT)) {
3138      __ isel(eq, result, scratch, fp);
3139    } else {
3140      Label done, adapted;
3141      __ beq(&adapted);
3142      __ mr(result, fp);
3143      __ b(&done);
3144
3145      __ bind(&adapted);
3146      __ mr(result, scratch);
3147      __ bind(&done);
3148    }
3149  } else {
3150    __ mr(result, fp);
3151  }
3152}
3153
3154
3155void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
3156  Register elem = ToRegister(instr->elements());
3157  Register result = ToRegister(instr->result());
3158
3159  Label done;
3160
3161  // If no arguments adaptor frame the number of arguments is fixed.
3162  __ cmp(fp, elem);
3163  __ mov(result, Operand(scope()->num_parameters()));
3164  __ beq(&done);
3165
3166  // Arguments adaptor frame present. Get argument length from there.
3167  __ LoadP(result, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
3168  __ LoadP(result,
3169           MemOperand(result, ArgumentsAdaptorFrameConstants::kLengthOffset));
3170  __ SmiUntag(result);
3171
3172  // Argument length is in result register.
3173  __ bind(&done);
3174}
3175
3176
3177void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
3178  Register receiver = ToRegister(instr->receiver());
3179  Register function = ToRegister(instr->function());
3180  Register result = ToRegister(instr->result());
3181  Register scratch = scratch0();
3182
3183  // If the receiver is null or undefined, we have to pass the global
3184  // object as a receiver to normal functions. Values have to be
3185  // passed unchanged to builtins and strict-mode functions.
3186  Label global_object, result_in_receiver;
3187
3188  if (!instr->hydrogen()->known_function()) {
3189    // Do not transform the receiver to object for strict mode
3190    // functions or builtins.
3191    __ LoadP(scratch,
3192             FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
3193    __ lwz(scratch,
3194           FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset));
3195    __ andi(r0, scratch, Operand((1 << SharedFunctionInfo::kStrictModeBit) |
3196                                 (1 << SharedFunctionInfo::kNativeBit)));
3197    __ bne(&result_in_receiver, cr0);
3198  }
3199
3200  // Normal function. Replace undefined or null with global receiver.
3201  __ LoadRoot(scratch, Heap::kNullValueRootIndex);
3202  __ cmp(receiver, scratch);
3203  __ beq(&global_object);
3204  __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
3205  __ cmp(receiver, scratch);
3206  __ beq(&global_object);
3207
3208  // Deoptimize if the receiver is not a JS object.
3209  __ TestIfSmi(receiver, r0);
3210  DeoptimizeIf(eq, instr, DeoptimizeReason::kSmi, cr0);
3211  __ CompareObjectType(receiver, scratch, scratch, FIRST_JS_RECEIVER_TYPE);
3212  DeoptimizeIf(lt, instr, DeoptimizeReason::kNotAJavaScriptObject);
3213
3214  __ b(&result_in_receiver);
3215  __ bind(&global_object);
3216  __ LoadP(result, FieldMemOperand(function, JSFunction::kContextOffset));
3217  __ LoadP(result, ContextMemOperand(result, Context::NATIVE_CONTEXT_INDEX));
3218  __ LoadP(result, ContextMemOperand(result, Context::GLOBAL_PROXY_INDEX));
3219
3220  if (result.is(receiver)) {
3221    __ bind(&result_in_receiver);
3222  } else {
3223    Label result_ok;
3224    __ b(&result_ok);
3225    __ bind(&result_in_receiver);
3226    __ mr(result, receiver);
3227    __ bind(&result_ok);
3228  }
3229}
3230
3231
3232void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
3233  Register receiver = ToRegister(instr->receiver());
3234  Register function = ToRegister(instr->function());
3235  Register length = ToRegister(instr->length());
3236  Register elements = ToRegister(instr->elements());
3237  Register scratch = scratch0();
3238  DCHECK(receiver.is(r3));  // Used for parameter count.
3239  DCHECK(function.is(r4));  // Required by InvokeFunction.
3240  DCHECK(ToRegister(instr->result()).is(r3));
3241
3242  // Copy the arguments to this function possibly from the
3243  // adaptor frame below it.
3244  const uint32_t kArgumentsLimit = 1 * KB;
3245  __ cmpli(length, Operand(kArgumentsLimit));
3246  DeoptimizeIf(gt, instr, DeoptimizeReason::kTooManyArguments);
3247
3248  // Push the receiver and use the register to keep the original
3249  // number of arguments.
3250  __ push(receiver);
3251  __ mr(receiver, length);
3252  // The arguments are at a one pointer size offset from elements.
3253  __ addi(elements, elements, Operand(1 * kPointerSize));
3254
3255  // Loop through the arguments pushing them onto the execution
3256  // stack.
3257  Label invoke, loop;
3258  // length is a small non-negative integer, due to the test above.
3259  __ cmpi(length, Operand::Zero());
3260  __ beq(&invoke);
3261  __ mtctr(length);
3262  __ bind(&loop);
3263  __ ShiftLeftImm(r0, length, Operand(kPointerSizeLog2));
3264  __ LoadPX(scratch, MemOperand(elements, r0));
3265  __ push(scratch);
3266  __ addi(length, length, Operand(-1));
3267  __ bdnz(&loop);
3268
3269  __ bind(&invoke);
3270
3271  InvokeFlag flag = CALL_FUNCTION;
3272  if (instr->hydrogen()->tail_call_mode() == TailCallMode::kAllow) {
3273    DCHECK(!info()->saves_caller_doubles());
3274    // TODO(ishell): drop current frame before pushing arguments to the stack.
3275    flag = JUMP_FUNCTION;
3276    ParameterCount actual(r3);
3277    // It is safe to use r6, r7 and r8 as scratch registers here given that
3278    // 1) we are not going to return to caller function anyway,
3279    // 2) r6 (new.target) will be initialized below.
3280    PrepareForTailCall(actual, r6, r7, r8);
3281  }
3282
3283  DCHECK(instr->HasPointerMap());
3284  LPointerMap* pointers = instr->pointer_map();
3285  SafepointGenerator safepoint_generator(this, pointers, Safepoint::kLazyDeopt);
3286  // The number of arguments is stored in receiver which is r3, as expected
3287  // by InvokeFunction.
3288  ParameterCount actual(receiver);
3289  __ InvokeFunction(function, no_reg, actual, flag, safepoint_generator);
3290}
3291
3292
3293void LCodeGen::DoPushArgument(LPushArgument* instr) {
3294  LOperand* argument = instr->value();
3295  if (argument->IsDoubleRegister() || argument->IsDoubleStackSlot()) {
3296    Abort(kDoPushArgumentNotImplementedForDoubleType);
3297  } else {
3298    Register argument_reg = EmitLoadRegister(argument, ip);
3299    __ push(argument_reg);
3300  }
3301}
3302
3303
3304void LCodeGen::DoDrop(LDrop* instr) { __ Drop(instr->count()); }
3305
3306
3307void LCodeGen::DoThisFunction(LThisFunction* instr) {
3308  Register result = ToRegister(instr->result());
3309  __ LoadP(result, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
3310}
3311
3312
3313void LCodeGen::DoContext(LContext* instr) {
3314  // If there is a non-return use, the context must be moved to a register.
3315  Register result = ToRegister(instr->result());
3316  if (info()->IsOptimizing()) {
3317    __ LoadP(result, MemOperand(fp, StandardFrameConstants::kContextOffset));
3318  } else {
3319    // If there is no frame, the context must be in cp.
3320    DCHECK(result.is(cp));
3321  }
3322}
3323
3324
3325void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
3326  DCHECK(ToRegister(instr->context()).is(cp));
3327  __ Move(scratch0(), instr->hydrogen()->declarations());
3328  __ push(scratch0());
3329  __ LoadSmiLiteral(scratch0(), Smi::FromInt(instr->hydrogen()->flags()));
3330  __ push(scratch0());
3331  __ Move(scratch0(), instr->hydrogen()->feedback_vector());
3332  __ push(scratch0());
3333  CallRuntime(Runtime::kDeclareGlobals, instr);
3334}
3335
3336void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
3337                                 int formal_parameter_count, int arity,
3338                                 bool is_tail_call, LInstruction* instr) {
3339  bool dont_adapt_arguments =
3340      formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
3341  bool can_invoke_directly =
3342      dont_adapt_arguments || formal_parameter_count == arity;
3343
3344  Register function_reg = r4;
3345
3346  LPointerMap* pointers = instr->pointer_map();
3347
3348  if (can_invoke_directly) {
3349    // Change context.
3350    __ LoadP(cp, FieldMemOperand(function_reg, JSFunction::kContextOffset));
3351
3352    // Always initialize new target and number of actual arguments.
3353    __ LoadRoot(r6, Heap::kUndefinedValueRootIndex);
3354    __ mov(r3, Operand(arity));
3355
3356    bool is_self_call = function.is_identical_to(info()->closure());
3357
3358    // Invoke function.
3359    if (is_self_call) {
3360      Handle<Code> self(reinterpret_cast<Code**>(__ CodeObject().location()));
3361      if (is_tail_call) {
3362        __ Jump(self, RelocInfo::CODE_TARGET);
3363      } else {
3364        __ Call(self, RelocInfo::CODE_TARGET);
3365      }
3366    } else {
3367      __ LoadP(ip, FieldMemOperand(function_reg, JSFunction::kCodeEntryOffset));
3368      if (is_tail_call) {
3369        __ JumpToJSEntry(ip);
3370      } else {
3371        __ CallJSEntry(ip);
3372      }
3373    }
3374
3375    if (!is_tail_call) {
3376      // Set up deoptimization.
3377      RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
3378    }
3379  } else {
3380    SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
3381    ParameterCount actual(arity);
3382    ParameterCount expected(formal_parameter_count);
3383    InvokeFlag flag = is_tail_call ? JUMP_FUNCTION : CALL_FUNCTION;
3384    __ InvokeFunction(function_reg, expected, actual, flag, generator);
3385  }
3386}
3387
3388
3389void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
3390  DCHECK(instr->context() != NULL);
3391  DCHECK(ToRegister(instr->context()).is(cp));
3392  Register input = ToRegister(instr->value());
3393  Register result = ToRegister(instr->result());
3394  Register scratch = scratch0();
3395
3396  // Deoptimize if not a heap number.
3397  __ LoadP(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
3398  __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
3399  __ cmp(scratch, ip);
3400  DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumber);
3401
3402  Label done;
3403  Register exponent = scratch0();
3404  scratch = no_reg;
3405  __ lwz(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
3406  // Check the sign of the argument. If the argument is positive, just
3407  // return it.
3408  __ cmpwi(exponent, Operand::Zero());
3409  // Move the input to the result if necessary.
3410  __ Move(result, input);
3411  __ bge(&done);
3412
3413  // Input is negative. Reverse its sign.
3414  // Preserve the value of all registers.
3415  {
3416    PushSafepointRegistersScope scope(this);
3417
3418    // Registers were saved at the safepoint, so we can use
3419    // many scratch registers.
3420    Register tmp1 = input.is(r4) ? r3 : r4;
3421    Register tmp2 = input.is(r5) ? r3 : r5;
3422    Register tmp3 = input.is(r6) ? r3 : r6;
3423    Register tmp4 = input.is(r7) ? r3 : r7;
3424
3425    // exponent: floating point exponent value.
3426
3427    Label allocated, slow;
3428    __ LoadRoot(tmp4, Heap::kHeapNumberMapRootIndex);
3429    __ AllocateHeapNumber(tmp1, tmp2, tmp3, tmp4, &slow);
3430    __ b(&allocated);
3431
3432    // Slow case: Call the runtime system to do the number allocation.
3433    __ bind(&slow);
3434
3435    CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr,
3436                            instr->context());
3437    // Set the pointer to the new heap number in tmp.
3438    if (!tmp1.is(r3)) __ mr(tmp1, r3);
3439    // Restore input_reg after call to runtime.
3440    __ LoadFromSafepointRegisterSlot(input, input);
3441    __ lwz(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
3442
3443    __ bind(&allocated);
3444    // exponent: floating point exponent value.
3445    // tmp1: allocated heap number.
3446    STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u);
3447    __ clrlwi(exponent, exponent, Operand(1));  // clear sign bit
3448    __ stw(exponent, FieldMemOperand(tmp1, HeapNumber::kExponentOffset));
3449    __ lwz(tmp2, FieldMemOperand(input, HeapNumber::kMantissaOffset));
3450    __ stw(tmp2, FieldMemOperand(tmp1, HeapNumber::kMantissaOffset));
3451
3452    __ StoreToSafepointRegisterSlot(tmp1, result);
3453  }
3454
3455  __ bind(&done);
3456}
3457
3458
3459void LCodeGen::EmitMathAbs(LMathAbs* instr) {
3460  Register input = ToRegister(instr->value());
3461  Register result = ToRegister(instr->result());
3462  Label done;
3463  __ cmpi(input, Operand::Zero());
3464  __ Move(result, input);
3465  __ bge(&done);
3466  __ li(r0, Operand::Zero());  // clear xer
3467  __ mtxer(r0);
3468  __ neg(result, result, SetOE, SetRC);
3469  // Deoptimize on overflow.
3470  DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow, cr0);
3471  __ bind(&done);
3472}
3473
3474
3475#if V8_TARGET_ARCH_PPC64
3476void LCodeGen::EmitInteger32MathAbs(LMathAbs* instr) {
3477  Register input = ToRegister(instr->value());
3478  Register result = ToRegister(instr->result());
3479  Label done;
3480  __ cmpwi(input, Operand::Zero());
3481  __ Move(result, input);
3482  __ bge(&done);
3483
3484  // Deoptimize on overflow.
3485  __ lis(r0, Operand(SIGN_EXT_IMM16(0x8000)));
3486  __ cmpw(input, r0);
3487  DeoptimizeIf(eq, instr, DeoptimizeReason::kOverflow);
3488
3489  __ neg(result, result);
3490  __ bind(&done);
3491}
3492#endif
3493
3494
3495void LCodeGen::DoMathAbs(LMathAbs* instr) {
3496  // Class for deferred case.
3497  class DeferredMathAbsTaggedHeapNumber final : public LDeferredCode {
3498   public:
3499    DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, LMathAbs* instr)
3500        : LDeferredCode(codegen), instr_(instr) {}
3501    void Generate() override {
3502      codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
3503    }
3504    LInstruction* instr() override { return instr_; }
3505
3506   private:
3507    LMathAbs* instr_;
3508  };
3509
3510  Representation r = instr->hydrogen()->value()->representation();
3511  if (r.IsDouble()) {
3512    DoubleRegister input = ToDoubleRegister(instr->value());
3513    DoubleRegister result = ToDoubleRegister(instr->result());
3514    __ fabs(result, input);
3515#if V8_TARGET_ARCH_PPC64
3516  } else if (r.IsInteger32()) {
3517    EmitInteger32MathAbs(instr);
3518  } else if (r.IsSmi()) {
3519#else
3520  } else if (r.IsSmiOrInteger32()) {
3521#endif
3522    EmitMathAbs(instr);
3523  } else {
3524    // Representation is tagged.
3525    DeferredMathAbsTaggedHeapNumber* deferred =
3526        new (zone()) DeferredMathAbsTaggedHeapNumber(this, instr);
3527    Register input = ToRegister(instr->value());
3528    // Smi check.
3529    __ JumpIfNotSmi(input, deferred->entry());
3530    // If smi, handle it directly.
3531    EmitMathAbs(instr);
3532    __ bind(deferred->exit());
3533  }
3534}
3535
3536void LCodeGen::DoMathFloorD(LMathFloorD* instr) {
3537  DoubleRegister input_reg = ToDoubleRegister(instr->value());
3538  DoubleRegister output_reg = ToDoubleRegister(instr->result());
3539  __ frim(output_reg, input_reg);
3540}
3541
3542void LCodeGen::DoMathFloorI(LMathFloorI* instr) {
3543  DoubleRegister input = ToDoubleRegister(instr->value());
3544  Register result = ToRegister(instr->result());
3545  Register input_high = scratch0();
3546  Register scratch = ip;
3547  Label done, exact;
3548
3549  __ TryInt32Floor(result, input, input_high, scratch, double_scratch0(), &done,
3550                   &exact);
3551  DeoptimizeIf(al, instr, DeoptimizeReason::kLostPrecisionOrNaN);
3552
3553  __ bind(&exact);
3554  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3555    // Test for -0.
3556    __ cmpi(result, Operand::Zero());
3557    __ bne(&done);
3558    __ cmpwi(input_high, Operand::Zero());
3559    DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero);
3560  }
3561  __ bind(&done);
3562}
3563
3564void LCodeGen::DoMathRoundD(LMathRoundD* instr) {
3565  DoubleRegister input_reg = ToDoubleRegister(instr->value());
3566  DoubleRegister output_reg = ToDoubleRegister(instr->result());
3567  DoubleRegister dot_five = double_scratch0();
3568  Label done;
3569
3570  __ frin(output_reg, input_reg);
3571  __ fcmpu(input_reg, kDoubleRegZero);
3572  __ bge(&done);
3573  __ fcmpu(output_reg, input_reg);
3574  __ beq(&done);
3575
3576  // Negative, non-integer case
3577  __ LoadDoubleLiteral(dot_five, 0.5, r0);
3578  __ fadd(output_reg, input_reg, dot_five);
3579  __ frim(output_reg, output_reg);
3580  // The range [-0.5, -0.0[ yielded +0.0. Force the sign to negative.
3581  __ fabs(output_reg, output_reg);
3582  __ fneg(output_reg, output_reg);
3583
3584  __ bind(&done);
3585}
3586
3587void LCodeGen::DoMathRoundI(LMathRoundI* instr) {
3588  DoubleRegister input = ToDoubleRegister(instr->value());
3589  Register result = ToRegister(instr->result());
3590  DoubleRegister double_scratch1 = ToDoubleRegister(instr->temp());
3591  DoubleRegister input_plus_dot_five = double_scratch1;
3592  Register scratch1 = scratch0();
3593  Register scratch2 = ip;
3594  DoubleRegister dot_five = double_scratch0();
3595  Label convert, done;
3596
3597  __ LoadDoubleLiteral(dot_five, 0.5, r0);
3598  __ fabs(double_scratch1, input);
3599  __ fcmpu(double_scratch1, dot_five);
3600  DeoptimizeIf(unordered, instr, DeoptimizeReason::kLostPrecisionOrNaN);
3601  // If input is in [-0.5, -0], the result is -0.
3602  // If input is in [+0, +0.5[, the result is +0.
3603  // If the input is +0.5, the result is 1.
3604  __ bgt(&convert);  // Out of [-0.5, +0.5].
3605  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3606    // [-0.5, -0] (negative) yields minus zero.
3607    __ TestDoubleSign(input, scratch1);
3608    DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero);
3609  }
3610  __ fcmpu(input, dot_five);
3611  if (CpuFeatures::IsSupported(ISELECT)) {
3612    __ li(result, Operand(1));
3613    __ isel(lt, result, r0, result);
3614    __ b(&done);
3615  } else {
3616    Label return_zero;
3617    __ bne(&return_zero);
3618    __ li(result, Operand(1));  // +0.5.
3619    __ b(&done);
3620    // Remaining cases: [+0, +0.5[ or [-0.5, +0.5[, depending on
3621    // flag kBailoutOnMinusZero.
3622    __ bind(&return_zero);
3623    __ li(result, Operand::Zero());
3624    __ b(&done);
3625  }
3626
3627  __ bind(&convert);
3628  __ fadd(input_plus_dot_five, input, dot_five);
3629  // Reuse dot_five (double_scratch0) as we no longer need this value.
3630  __ TryInt32Floor(result, input_plus_dot_five, scratch1, scratch2,
3631                   double_scratch0(), &done, &done);
3632  DeoptimizeIf(al, instr, DeoptimizeReason::kLostPrecisionOrNaN);
3633  __ bind(&done);
3634}
3635
3636
3637void LCodeGen::DoMathFround(LMathFround* instr) {
3638  DoubleRegister input_reg = ToDoubleRegister(instr->value());
3639  DoubleRegister output_reg = ToDoubleRegister(instr->result());
3640  __ frsp(output_reg, input_reg);
3641}
3642
3643
3644void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
3645  DoubleRegister input = ToDoubleRegister(instr->value());
3646  DoubleRegister result = ToDoubleRegister(instr->result());
3647  __ fsqrt(result, input);
3648}
3649
3650
3651void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
3652  DoubleRegister input = ToDoubleRegister(instr->value());
3653  DoubleRegister result = ToDoubleRegister(instr->result());
3654  DoubleRegister temp = double_scratch0();
3655
3656  // Note that according to ECMA-262 15.8.2.13:
3657  // Math.pow(-Infinity, 0.5) == Infinity
3658  // Math.sqrt(-Infinity) == NaN
3659  Label skip, done;
3660
3661  __ LoadDoubleLiteral(temp, -V8_INFINITY, scratch0());
3662  __ fcmpu(input, temp);
3663  __ bne(&skip);
3664  __ fneg(result, temp);
3665  __ b(&done);
3666
3667  // Add +0 to convert -0 to +0.
3668  __ bind(&skip);
3669  __ fadd(result, input, kDoubleRegZero);
3670  __ fsqrt(result, result);
3671  __ bind(&done);
3672}
3673
3674
3675void LCodeGen::DoPower(LPower* instr) {
3676  Representation exponent_type = instr->hydrogen()->right()->representation();
3677// Having marked this as a call, we can use any registers.
3678// Just make sure that the input/output registers are the expected ones.
3679  Register tagged_exponent = MathPowTaggedDescriptor::exponent();
3680  DCHECK(!instr->right()->IsDoubleRegister() ||
3681         ToDoubleRegister(instr->right()).is(d2));
3682  DCHECK(!instr->right()->IsRegister() ||
3683         ToRegister(instr->right()).is(tagged_exponent));
3684  DCHECK(ToDoubleRegister(instr->left()).is(d1));
3685  DCHECK(ToDoubleRegister(instr->result()).is(d3));
3686
3687  if (exponent_type.IsSmi()) {
3688    MathPowStub stub(isolate(), MathPowStub::TAGGED);
3689    __ CallStub(&stub);
3690  } else if (exponent_type.IsTagged()) {
3691    Label no_deopt;
3692    __ JumpIfSmi(tagged_exponent, &no_deopt);
3693    DCHECK(!r10.is(tagged_exponent));
3694    __ LoadP(r10, FieldMemOperand(tagged_exponent, HeapObject::kMapOffset));
3695    __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
3696    __ cmp(r10, ip);
3697    DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumber);
3698    __ bind(&no_deopt);
3699    MathPowStub stub(isolate(), MathPowStub::TAGGED);
3700    __ CallStub(&stub);
3701  } else if (exponent_type.IsInteger32()) {
3702    MathPowStub stub(isolate(), MathPowStub::INTEGER);
3703    __ CallStub(&stub);
3704  } else {
3705    DCHECK(exponent_type.IsDouble());
3706    MathPowStub stub(isolate(), MathPowStub::DOUBLE);
3707    __ CallStub(&stub);
3708  }
3709}
3710
3711void LCodeGen::DoMathCos(LMathCos* instr) {
3712  __ PrepareCallCFunction(0, 1, scratch0());
3713  __ MovToFloatParameter(ToDoubleRegister(instr->value()));
3714  __ CallCFunction(ExternalReference::ieee754_cos_function(isolate()), 0, 1);
3715  __ MovFromFloatResult(ToDoubleRegister(instr->result()));
3716}
3717
3718void LCodeGen::DoMathSin(LMathSin* instr) {
3719  __ PrepareCallCFunction(0, 1, scratch0());
3720  __ MovToFloatParameter(ToDoubleRegister(instr->value()));
3721  __ CallCFunction(ExternalReference::ieee754_sin_function(isolate()), 0, 1);
3722  __ MovFromFloatResult(ToDoubleRegister(instr->result()));
3723}
3724
3725void LCodeGen::DoMathExp(LMathExp* instr) {
3726  __ PrepareCallCFunction(0, 1, scratch0());
3727  __ MovToFloatParameter(ToDoubleRegister(instr->value()));
3728  __ CallCFunction(ExternalReference::ieee754_exp_function(isolate()), 0, 1);
3729  __ MovFromFloatResult(ToDoubleRegister(instr->result()));
3730}
3731
3732void LCodeGen::DoMathLog(LMathLog* instr) {
3733  __ PrepareCallCFunction(0, 1, scratch0());
3734  __ MovToFloatParameter(ToDoubleRegister(instr->value()));
3735  __ CallCFunction(ExternalReference::ieee754_log_function(isolate()), 0, 1);
3736  __ MovFromFloatResult(ToDoubleRegister(instr->result()));
3737}
3738
3739void LCodeGen::DoMathClz32(LMathClz32* instr) {
3740  Register input = ToRegister(instr->value());
3741  Register result = ToRegister(instr->result());
3742  __ cntlzw_(result, input);
3743}
3744
3745void LCodeGen::PrepareForTailCall(const ParameterCount& actual,
3746                                  Register scratch1, Register scratch2,
3747                                  Register scratch3) {
3748#if DEBUG
3749  if (actual.is_reg()) {
3750    DCHECK(!AreAliased(actual.reg(), scratch1, scratch2, scratch3));
3751  } else {
3752    DCHECK(!AreAliased(scratch1, scratch2, scratch3));
3753  }
3754#endif
3755  if (FLAG_code_comments) {
3756    if (actual.is_reg()) {
3757      Comment(";;; PrepareForTailCall, actual: %s {",
3758              RegisterConfiguration::Crankshaft()->GetGeneralRegisterName(
3759                  actual.reg().code()));
3760    } else {
3761      Comment(";;; PrepareForTailCall, actual: %d {", actual.immediate());
3762    }
3763  }
3764
3765  // Check if next frame is an arguments adaptor frame.
3766  Register caller_args_count_reg = scratch1;
3767  Label no_arguments_adaptor, formal_parameter_count_loaded;
3768  __ LoadP(scratch2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
3769  __ LoadP(scratch3,
3770           MemOperand(scratch2, StandardFrameConstants::kContextOffset));
3771  __ cmpi(scratch3,
3772          Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
3773  __ bne(&no_arguments_adaptor);
3774
3775  // Drop current frame and load arguments count from arguments adaptor frame.
3776  __ mr(fp, scratch2);
3777  __ LoadP(caller_args_count_reg,
3778           MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
3779  __ SmiUntag(caller_args_count_reg);
3780  __ b(&formal_parameter_count_loaded);
3781
3782  __ bind(&no_arguments_adaptor);
3783  // Load caller's formal parameter count
3784  __ mov(caller_args_count_reg, Operand(info()->literal()->parameter_count()));
3785
3786  __ bind(&formal_parameter_count_loaded);
3787  __ PrepareForTailCall(actual, caller_args_count_reg, scratch2, scratch3);
3788
3789  Comment(";;; }");
3790}
3791
3792void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
3793  HInvokeFunction* hinstr = instr->hydrogen();
3794  DCHECK(ToRegister(instr->context()).is(cp));
3795  DCHECK(ToRegister(instr->function()).is(r4));
3796  DCHECK(instr->HasPointerMap());
3797
3798  bool is_tail_call = hinstr->tail_call_mode() == TailCallMode::kAllow;
3799
3800  if (is_tail_call) {
3801    DCHECK(!info()->saves_caller_doubles());
3802    ParameterCount actual(instr->arity());
3803    // It is safe to use r6, r7 and r8 as scratch registers here given that
3804    // 1) we are not going to return to caller function anyway,
3805    // 2) r6 (new.target) will be initialized below.
3806    PrepareForTailCall(actual, r6, r7, r8);
3807  }
3808
3809  Handle<JSFunction> known_function = hinstr->known_function();
3810  if (known_function.is_null()) {
3811    LPointerMap* pointers = instr->pointer_map();
3812    SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
3813    ParameterCount actual(instr->arity());
3814    InvokeFlag flag = is_tail_call ? JUMP_FUNCTION : CALL_FUNCTION;
3815    __ InvokeFunction(r4, no_reg, actual, flag, generator);
3816  } else {
3817    CallKnownFunction(known_function, hinstr->formal_parameter_count(),
3818                      instr->arity(), is_tail_call, instr);
3819  }
3820}
3821
3822
3823void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
3824  DCHECK(ToRegister(instr->result()).is(r3));
3825
3826  if (instr->hydrogen()->IsTailCall()) {
3827    if (NeedsEagerFrame()) __ LeaveFrame(StackFrame::INTERNAL);
3828
3829    if (instr->target()->IsConstantOperand()) {
3830      LConstantOperand* target = LConstantOperand::cast(instr->target());
3831      Handle<Code> code = Handle<Code>::cast(ToHandle(target));
3832      __ Jump(code, RelocInfo::CODE_TARGET);
3833    } else {
3834      DCHECK(instr->target()->IsRegister());
3835      Register target = ToRegister(instr->target());
3836      __ addi(ip, target, Operand(Code::kHeaderSize - kHeapObjectTag));
3837      __ JumpToJSEntry(ip);
3838    }
3839  } else {
3840    LPointerMap* pointers = instr->pointer_map();
3841    SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
3842
3843    if (instr->target()->IsConstantOperand()) {
3844      LConstantOperand* target = LConstantOperand::cast(instr->target());
3845      Handle<Code> code = Handle<Code>::cast(ToHandle(target));
3846      generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET));
3847      __ Call(code, RelocInfo::CODE_TARGET);
3848    } else {
3849      DCHECK(instr->target()->IsRegister());
3850      Register target = ToRegister(instr->target());
3851      generator.BeforeCall(__ CallSize(target));
3852      __ addi(ip, target, Operand(Code::kHeaderSize - kHeapObjectTag));
3853      __ CallJSEntry(ip);
3854    }
3855    generator.AfterCall();
3856  }
3857}
3858
3859
3860void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
3861  DCHECK(ToRegister(instr->context()).is(cp));
3862  DCHECK(ToRegister(instr->constructor()).is(r4));
3863  DCHECK(ToRegister(instr->result()).is(r3));
3864
3865  __ mov(r3, Operand(instr->arity()));
3866  __ Move(r5, instr->hydrogen()->site());
3867
3868  ElementsKind kind = instr->hydrogen()->elements_kind();
3869  AllocationSiteOverrideMode override_mode =
3870      (AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
3871          ? DISABLE_ALLOCATION_SITES
3872          : DONT_OVERRIDE;
3873
3874  if (instr->arity() == 0) {
3875    ArrayNoArgumentConstructorStub stub(isolate(), kind, override_mode);
3876    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
3877  } else if (instr->arity() == 1) {
3878    Label done;
3879    if (IsFastPackedElementsKind(kind)) {
3880      Label packed_case;
3881      // We might need a change here
3882      // look at the first argument
3883      __ LoadP(r8, MemOperand(sp, 0));
3884      __ cmpi(r8, Operand::Zero());
3885      __ beq(&packed_case);
3886
3887      ElementsKind holey_kind = GetHoleyElementsKind(kind);
3888      ArraySingleArgumentConstructorStub stub(isolate(), holey_kind,
3889                                              override_mode);
3890      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
3891      __ b(&done);
3892      __ bind(&packed_case);
3893    }
3894
3895    ArraySingleArgumentConstructorStub stub(isolate(), kind, override_mode);
3896    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
3897    __ bind(&done);
3898  } else {
3899    ArrayNArgumentsConstructorStub stub(isolate());
3900    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
3901  }
3902}
3903
3904
3905void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
3906  CallRuntime(instr->function(), instr->arity(), instr);
3907}
3908
3909
3910void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) {
3911  Register function = ToRegister(instr->function());
3912  Register code_object = ToRegister(instr->code_object());
3913  __ addi(code_object, code_object,
3914          Operand(Code::kHeaderSize - kHeapObjectTag));
3915  __ StoreP(code_object,
3916            FieldMemOperand(function, JSFunction::kCodeEntryOffset), r0);
3917}
3918
3919
3920void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
3921  Register result = ToRegister(instr->result());
3922  Register base = ToRegister(instr->base_object());
3923  if (instr->offset()->IsConstantOperand()) {
3924    LConstantOperand* offset = LConstantOperand::cast(instr->offset());
3925    __ Add(result, base, ToInteger32(offset), r0);
3926  } else {
3927    Register offset = ToRegister(instr->offset());
3928    __ add(result, base, offset);
3929  }
3930}
3931
3932
3933void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
3934  HStoreNamedField* hinstr = instr->hydrogen();
3935  Representation representation = instr->representation();
3936
3937  Register object = ToRegister(instr->object());
3938  Register scratch = scratch0();
3939  HObjectAccess access = hinstr->access();
3940  int offset = access.offset();
3941
3942  if (access.IsExternalMemory()) {
3943    Register value = ToRegister(instr->value());
3944    MemOperand operand = MemOperand(object, offset);
3945    __ StoreRepresentation(value, operand, representation, r0);
3946    return;
3947  }
3948
3949  __ AssertNotSmi(object);
3950
3951#if V8_TARGET_ARCH_PPC64
3952  DCHECK(!representation.IsSmi() || !instr->value()->IsConstantOperand() ||
3953         IsInteger32(LConstantOperand::cast(instr->value())));
3954#else
3955  DCHECK(!representation.IsSmi() || !instr->value()->IsConstantOperand() ||
3956         IsSmi(LConstantOperand::cast(instr->value())));
3957#endif
3958  if (!FLAG_unbox_double_fields && representation.IsDouble()) {
3959    DCHECK(access.IsInobject());
3960    DCHECK(!hinstr->has_transition());
3961    DCHECK(!hinstr->NeedsWriteBarrier());
3962    DoubleRegister value = ToDoubleRegister(instr->value());
3963    __ stfd(value, FieldMemOperand(object, offset));
3964    return;
3965  }
3966
3967  if (hinstr->has_transition()) {
3968    Handle<Map> transition = hinstr->transition_map();
3969    AddDeprecationDependency(transition);
3970    __ mov(scratch, Operand(transition));
3971    __ StoreP(scratch, FieldMemOperand(object, HeapObject::kMapOffset), r0);
3972    if (hinstr->NeedsWriteBarrierForMap()) {
3973      Register temp = ToRegister(instr->temp());
3974      // Update the write barrier for the map field.
3975      __ RecordWriteForMap(object, scratch, temp, GetLinkRegisterState(),
3976                           kSaveFPRegs);
3977    }
3978  }
3979
3980  // Do the store.
3981  Register record_dest = object;
3982  Register record_value = no_reg;
3983  Register record_scratch = scratch;
3984#if V8_TARGET_ARCH_PPC64
3985  if (FLAG_unbox_double_fields && representation.IsDouble()) {
3986    DCHECK(access.IsInobject());
3987    DoubleRegister value = ToDoubleRegister(instr->value());
3988    __ stfd(value, FieldMemOperand(object, offset));
3989    if (hinstr->NeedsWriteBarrier()) {
3990      record_value = ToRegister(instr->value());
3991    }
3992  } else {
3993    if (representation.IsSmi() &&
3994        hinstr->value()->representation().IsInteger32()) {
3995      DCHECK(hinstr->store_mode() == STORE_TO_INITIALIZED_ENTRY);
3996      // 64-bit Smi optimization
3997      // Store int value directly to upper half of the smi.
3998      offset = SmiWordOffset(offset);
3999      representation = Representation::Integer32();
4000    }
4001#endif
4002    if (access.IsInobject()) {
4003      Register value = ToRegister(instr->value());
4004      MemOperand operand = FieldMemOperand(object, offset);
4005      __ StoreRepresentation(value, operand, representation, r0);
4006      record_value = value;
4007    } else {
4008      Register value = ToRegister(instr->value());
4009      __ LoadP(scratch, FieldMemOperand(object, JSObject::kPropertiesOffset));
4010      MemOperand operand = FieldMemOperand(scratch, offset);
4011      __ StoreRepresentation(value, operand, representation, r0);
4012      record_dest = scratch;
4013      record_value = value;
4014      record_scratch = object;
4015    }
4016#if V8_TARGET_ARCH_PPC64
4017  }
4018#endif
4019
4020  if (hinstr->NeedsWriteBarrier()) {
4021    __ RecordWriteField(record_dest, offset, record_value, record_scratch,
4022                        GetLinkRegisterState(), kSaveFPRegs,
4023                        EMIT_REMEMBERED_SET, hinstr->SmiCheckForWriteBarrier(),
4024                        hinstr->PointersToHereCheckForValue());
4025  }
4026}
4027
4028
4029void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
4030  Representation representation = instr->hydrogen()->length()->representation();
4031  DCHECK(representation.Equals(instr->hydrogen()->index()->representation()));
4032  DCHECK(representation.IsSmiOrInteger32());
4033
4034  Condition cc = instr->hydrogen()->allow_equality() ? lt : le;
4035  if (instr->length()->IsConstantOperand()) {
4036    int32_t length = ToInteger32(LConstantOperand::cast(instr->length()));
4037    Register index = ToRegister(instr->index());
4038    if (representation.IsSmi()) {
4039      __ CmplSmiLiteral(index, Smi::FromInt(length), r0);
4040    } else {
4041      __ Cmplwi(index, Operand(length), r0);
4042    }
4043    cc = CommuteCondition(cc);
4044  } else if (instr->index()->IsConstantOperand()) {
4045    int32_t index = ToInteger32(LConstantOperand::cast(instr->index()));
4046    Register length = ToRegister(instr->length());
4047    if (representation.IsSmi()) {
4048      __ CmplSmiLiteral(length, Smi::FromInt(index), r0);
4049    } else {
4050      __ Cmplwi(length, Operand(index), r0);
4051    }
4052  } else {
4053    Register index = ToRegister(instr->index());
4054    Register length = ToRegister(instr->length());
4055    if (representation.IsSmi()) {
4056      __ cmpl(length, index);
4057    } else {
4058      __ cmplw(length, index);
4059    }
4060  }
4061  if (FLAG_debug_code && instr->hydrogen()->skip_check()) {
4062    Label done;
4063    __ b(NegateCondition(cc), &done);
4064    __ stop("eliminated bounds check failed");
4065    __ bind(&done);
4066  } else {
4067    DeoptimizeIf(cc, instr, DeoptimizeReason::kOutOfBounds);
4068  }
4069}
4070
4071
4072void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
4073  Register external_pointer = ToRegister(instr->elements());
4074  Register key = no_reg;
4075  ElementsKind elements_kind = instr->elements_kind();
4076  bool key_is_constant = instr->key()->IsConstantOperand();
4077  int constant_key = 0;
4078  if (key_is_constant) {
4079    constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
4080    if (constant_key & 0xF0000000) {
4081      Abort(kArrayIndexConstantValueTooBig);
4082    }
4083  } else {
4084    key = ToRegister(instr->key());
4085  }
4086  int element_size_shift = ElementsKindToShiftSize(elements_kind);
4087  bool key_is_smi = instr->hydrogen()->key()->representation().IsSmi();
4088  int base_offset = instr->base_offset();
4089
4090  if (elements_kind == FLOAT32_ELEMENTS || elements_kind == FLOAT64_ELEMENTS) {
4091    Register address = scratch0();
4092    DoubleRegister value(ToDoubleRegister(instr->value()));
4093    if (key_is_constant) {
4094      if (constant_key != 0) {
4095        __ Add(address, external_pointer, constant_key << element_size_shift,
4096               r0);
4097      } else {
4098        address = external_pointer;
4099      }
4100    } else {
4101      __ IndexToArrayOffset(r0, key, element_size_shift, key_is_smi);
4102      __ add(address, external_pointer, r0);
4103    }
4104    if (elements_kind == FLOAT32_ELEMENTS) {
4105      __ frsp(double_scratch0(), value);
4106      __ stfs(double_scratch0(), MemOperand(address, base_offset));
4107    } else {  // Storing doubles, not floats.
4108      __ stfd(value, MemOperand(address, base_offset));
4109    }
4110  } else {
4111    Register value(ToRegister(instr->value()));
4112    MemOperand mem_operand =
4113        PrepareKeyedOperand(key, external_pointer, key_is_constant, key_is_smi,
4114                            constant_key, element_size_shift, base_offset);
4115    switch (elements_kind) {
4116      case UINT8_ELEMENTS:
4117      case UINT8_CLAMPED_ELEMENTS:
4118      case INT8_ELEMENTS:
4119        if (key_is_constant) {
4120          __ StoreByte(value, mem_operand, r0);
4121        } else {
4122          __ stbx(value, mem_operand);
4123        }
4124        break;
4125      case INT16_ELEMENTS:
4126      case UINT16_ELEMENTS:
4127        if (key_is_constant) {
4128          __ StoreHalfWord(value, mem_operand, r0);
4129        } else {
4130          __ sthx(value, mem_operand);
4131        }
4132        break;
4133      case INT32_ELEMENTS:
4134      case UINT32_ELEMENTS:
4135        if (key_is_constant) {
4136          __ StoreWord(value, mem_operand, r0);
4137        } else {
4138          __ stwx(value, mem_operand);
4139        }
4140        break;
4141      case FLOAT32_ELEMENTS:
4142      case FLOAT64_ELEMENTS:
4143      case FAST_DOUBLE_ELEMENTS:
4144      case FAST_ELEMENTS:
4145      case FAST_SMI_ELEMENTS:
4146      case FAST_HOLEY_DOUBLE_ELEMENTS:
4147      case FAST_HOLEY_ELEMENTS:
4148      case FAST_HOLEY_SMI_ELEMENTS:
4149      case DICTIONARY_ELEMENTS:
4150      case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
4151      case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
4152      case FAST_STRING_WRAPPER_ELEMENTS:
4153      case SLOW_STRING_WRAPPER_ELEMENTS:
4154      case NO_ELEMENTS:
4155        UNREACHABLE();
4156        break;
4157    }
4158  }
4159}
4160
4161
4162void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
4163  DoubleRegister value = ToDoubleRegister(instr->value());
4164  Register elements = ToRegister(instr->elements());
4165  Register key = no_reg;
4166  Register scratch = scratch0();
4167  DoubleRegister double_scratch = double_scratch0();
4168  bool key_is_constant = instr->key()->IsConstantOperand();
4169  int constant_key = 0;
4170
4171  // Calculate the effective address of the slot in the array to store the
4172  // double value.
4173  if (key_is_constant) {
4174    constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
4175    if (constant_key & 0xF0000000) {
4176      Abort(kArrayIndexConstantValueTooBig);
4177    }
4178  } else {
4179    key = ToRegister(instr->key());
4180  }
4181  int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
4182  bool key_is_smi = instr->hydrogen()->key()->representation().IsSmi();
4183  int base_offset = instr->base_offset() + constant_key * kDoubleSize;
4184  if (!key_is_constant) {
4185    __ IndexToArrayOffset(scratch, key, element_size_shift, key_is_smi);
4186    __ add(scratch, elements, scratch);
4187    elements = scratch;
4188  }
4189  if (!is_int16(base_offset)) {
4190    __ Add(scratch, elements, base_offset, r0);
4191    base_offset = 0;
4192    elements = scratch;
4193  }
4194
4195  if (instr->NeedsCanonicalization()) {
4196    // Turn potential sNaN value into qNaN.
4197    __ CanonicalizeNaN(double_scratch, value);
4198    __ stfd(double_scratch, MemOperand(elements, base_offset));
4199  } else {
4200    __ stfd(value, MemOperand(elements, base_offset));
4201  }
4202}
4203
4204
4205void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
4206  HStoreKeyed* hinstr = instr->hydrogen();
4207  Register value = ToRegister(instr->value());
4208  Register elements = ToRegister(instr->elements());
4209  Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg;
4210  Register scratch = scratch0();
4211  Register store_base = scratch;
4212  int offset = instr->base_offset();
4213
4214  // Do the store.
4215  if (instr->key()->IsConstantOperand()) {
4216    DCHECK(!hinstr->NeedsWriteBarrier());
4217    LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
4218    offset += ToInteger32(const_operand) * kPointerSize;
4219    store_base = elements;
4220  } else {
4221    // Even though the HLoadKeyed instruction forces the input
4222    // representation for the key to be an integer, the input gets replaced
4223    // during bound check elimination with the index argument to the bounds
4224    // check, which can be tagged, so that case must be handled here, too.
4225    if (hinstr->key()->representation().IsSmi()) {
4226      __ SmiToPtrArrayOffset(scratch, key);
4227    } else {
4228      __ ShiftLeftImm(scratch, key, Operand(kPointerSizeLog2));
4229    }
4230    __ add(scratch, elements, scratch);
4231  }
4232
4233  Representation representation = hinstr->value()->representation();
4234
4235#if V8_TARGET_ARCH_PPC64
4236  // 64-bit Smi optimization
4237  if (representation.IsInteger32()) {
4238    DCHECK(hinstr->store_mode() == STORE_TO_INITIALIZED_ENTRY);
4239    DCHECK(hinstr->elements_kind() == FAST_SMI_ELEMENTS);
4240    // Store int value directly to upper half of the smi.
4241    offset = SmiWordOffset(offset);
4242  }
4243#endif
4244
4245  __ StoreRepresentation(value, MemOperand(store_base, offset), representation,
4246                         r0);
4247
4248  if (hinstr->NeedsWriteBarrier()) {
4249    SmiCheck check_needed = hinstr->value()->type().IsHeapObject()
4250                                ? OMIT_SMI_CHECK
4251                                : INLINE_SMI_CHECK;
4252    // Compute address of modified element and store it into key register.
4253    __ Add(key, store_base, offset, r0);
4254    __ RecordWrite(elements, key, value, GetLinkRegisterState(), kSaveFPRegs,
4255                   EMIT_REMEMBERED_SET, check_needed,
4256                   hinstr->PointersToHereCheckForValue());
4257  }
4258}
4259
4260
4261void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
4262  // By cases: external, fast double
4263  if (instr->is_fixed_typed_array()) {
4264    DoStoreKeyedExternalArray(instr);
4265  } else if (instr->hydrogen()->value()->representation().IsDouble()) {
4266    DoStoreKeyedFixedDoubleArray(instr);
4267  } else {
4268    DoStoreKeyedFixedArray(instr);
4269  }
4270}
4271
4272
4273void LCodeGen::DoMaybeGrowElements(LMaybeGrowElements* instr) {
4274  class DeferredMaybeGrowElements final : public LDeferredCode {
4275   public:
4276    DeferredMaybeGrowElements(LCodeGen* codegen, LMaybeGrowElements* instr)
4277        : LDeferredCode(codegen), instr_(instr) {}
4278    void Generate() override { codegen()->DoDeferredMaybeGrowElements(instr_); }
4279    LInstruction* instr() override { return instr_; }
4280
4281   private:
4282    LMaybeGrowElements* instr_;
4283  };
4284
4285  Register result = r3;
4286  DeferredMaybeGrowElements* deferred =
4287      new (zone()) DeferredMaybeGrowElements(this, instr);
4288  LOperand* key = instr->key();
4289  LOperand* current_capacity = instr->current_capacity();
4290
4291  DCHECK(instr->hydrogen()->key()->representation().IsInteger32());
4292  DCHECK(instr->hydrogen()->current_capacity()->representation().IsInteger32());
4293  DCHECK(key->IsConstantOperand() || key->IsRegister());
4294  DCHECK(current_capacity->IsConstantOperand() ||
4295         current_capacity->IsRegister());
4296
4297  if (key->IsConstantOperand() && current_capacity->IsConstantOperand()) {
4298    int32_t constant_key = ToInteger32(LConstantOperand::cast(key));
4299    int32_t constant_capacity =
4300        ToInteger32(LConstantOperand::cast(current_capacity));
4301    if (constant_key >= constant_capacity) {
4302      // Deferred case.
4303      __ b(deferred->entry());
4304    }
4305  } else if (key->IsConstantOperand()) {
4306    int32_t constant_key = ToInteger32(LConstantOperand::cast(key));
4307    __ Cmpwi(ToRegister(current_capacity), Operand(constant_key), r0);
4308    __ ble(deferred->entry());
4309  } else if (current_capacity->IsConstantOperand()) {
4310    int32_t constant_capacity =
4311        ToInteger32(LConstantOperand::cast(current_capacity));
4312    __ Cmpwi(ToRegister(key), Operand(constant_capacity), r0);
4313    __ bge(deferred->entry());
4314  } else {
4315    __ cmpw(ToRegister(key), ToRegister(current_capacity));
4316    __ bge(deferred->entry());
4317  }
4318
4319  if (instr->elements()->IsRegister()) {
4320    __ Move(result, ToRegister(instr->elements()));
4321  } else {
4322    __ LoadP(result, ToMemOperand(instr->elements()));
4323  }
4324
4325  __ bind(deferred->exit());
4326}
4327
4328
4329void LCodeGen::DoDeferredMaybeGrowElements(LMaybeGrowElements* instr) {
4330  // TODO(3095996): Get rid of this. For now, we need to make the
4331  // result register contain a valid pointer because it is already
4332  // contained in the register pointer map.
4333  Register result = r3;
4334  __ li(result, Operand::Zero());
4335
4336  // We have to call a stub.
4337  {
4338    PushSafepointRegistersScope scope(this);
4339    if (instr->object()->IsRegister()) {
4340      __ Move(result, ToRegister(instr->object()));
4341    } else {
4342      __ LoadP(result, ToMemOperand(instr->object()));
4343    }
4344
4345    LOperand* key = instr->key();
4346    if (key->IsConstantOperand()) {
4347      LConstantOperand* constant_key = LConstantOperand::cast(key);
4348      int32_t int_key = ToInteger32(constant_key);
4349      if (Smi::IsValid(int_key)) {
4350        __ LoadSmiLiteral(r6, Smi::FromInt(int_key));
4351      } else {
4352        Abort(kArrayIndexConstantValueTooBig);
4353      }
4354    } else {
4355      Label is_smi;
4356#if V8_TARGET_ARCH_PPC64
4357      __ SmiTag(r6, ToRegister(key));
4358#else
4359      // Deopt if the key is outside Smi range. The stub expects Smi and would
4360      // bump the elements into dictionary mode (and trigger a deopt) anyways.
4361      __ SmiTagCheckOverflow(r6, ToRegister(key), r0);
4362      __ BranchOnNoOverflow(&is_smi);
4363      __ PopSafepointRegisters();
4364      DeoptimizeIf(al, instr, DeoptimizeReason::kOverflow, cr0);
4365      __ bind(&is_smi);
4366#endif
4367    }
4368
4369    GrowArrayElementsStub stub(isolate(), instr->hydrogen()->kind());
4370    __ CallStub(&stub);
4371    RecordSafepointWithLazyDeopt(
4372        instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
4373    __ StoreToSafepointRegisterSlot(result, result);
4374  }
4375
4376  // Deopt on smi, which means the elements array changed to dictionary mode.
4377  __ TestIfSmi(result, r0);
4378  DeoptimizeIf(eq, instr, DeoptimizeReason::kSmi, cr0);
4379}
4380
4381
4382void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
4383  Register object_reg = ToRegister(instr->object());
4384  Register scratch = scratch0();
4385
4386  Handle<Map> from_map = instr->original_map();
4387  Handle<Map> to_map = instr->transitioned_map();
4388  ElementsKind from_kind = instr->from_kind();
4389  ElementsKind to_kind = instr->to_kind();
4390
4391  Label not_applicable;
4392  __ LoadP(scratch, FieldMemOperand(object_reg, HeapObject::kMapOffset));
4393  __ Cmpi(scratch, Operand(from_map), r0);
4394  __ bne(&not_applicable);
4395
4396  if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
4397    Register new_map_reg = ToRegister(instr->new_map_temp());
4398    __ mov(new_map_reg, Operand(to_map));
4399    __ StoreP(new_map_reg, FieldMemOperand(object_reg, HeapObject::kMapOffset),
4400              r0);
4401    // Write barrier.
4402    __ RecordWriteForMap(object_reg, new_map_reg, scratch,
4403                         GetLinkRegisterState(), kDontSaveFPRegs);
4404  } else {
4405    DCHECK(ToRegister(instr->context()).is(cp));
4406    DCHECK(object_reg.is(r3));
4407    PushSafepointRegistersScope scope(this);
4408    __ Move(r4, to_map);
4409    TransitionElementsKindStub stub(isolate(), from_kind, to_kind);
4410    __ CallStub(&stub);
4411    RecordSafepointWithRegisters(instr->pointer_map(), 0,
4412                                 Safepoint::kLazyDeopt);
4413  }
4414  __ bind(&not_applicable);
4415}
4416
4417
4418void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
4419  Register object = ToRegister(instr->object());
4420  Register temp1 = ToRegister(instr->temp1());
4421  Register temp2 = ToRegister(instr->temp2());
4422  Label no_memento_found;
4423  __ TestJSArrayForAllocationMemento(object, temp1, temp2, &no_memento_found);
4424  DeoptimizeIf(eq, instr, DeoptimizeReason::kMementoFound);
4425  __ bind(&no_memento_found);
4426}
4427
4428
4429void LCodeGen::DoStringAdd(LStringAdd* instr) {
4430  DCHECK(ToRegister(instr->context()).is(cp));
4431  DCHECK(ToRegister(instr->left()).is(r4));
4432  DCHECK(ToRegister(instr->right()).is(r3));
4433  StringAddStub stub(isolate(), instr->hydrogen()->flags(),
4434                     instr->hydrogen()->pretenure_flag());
4435  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
4436}
4437
4438
4439void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
4440  class DeferredStringCharCodeAt final : public LDeferredCode {
4441   public:
4442    DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
4443        : LDeferredCode(codegen), instr_(instr) {}
4444    void Generate() override { codegen()->DoDeferredStringCharCodeAt(instr_); }
4445    LInstruction* instr() override { return instr_; }
4446
4447   private:
4448    LStringCharCodeAt* instr_;
4449  };
4450
4451  DeferredStringCharCodeAt* deferred =
4452      new (zone()) DeferredStringCharCodeAt(this, instr);
4453
4454  StringCharLoadGenerator::Generate(
4455      masm(), ToRegister(instr->string()), ToRegister(instr->index()),
4456      ToRegister(instr->result()), deferred->entry());
4457  __ bind(deferred->exit());
4458}
4459
4460
4461void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
4462  Register string = ToRegister(instr->string());
4463  Register result = ToRegister(instr->result());
4464  Register scratch = scratch0();
4465
4466  // TODO(3095996): Get rid of this. For now, we need to make the
4467  // result register contain a valid pointer because it is already
4468  // contained in the register pointer map.
4469  __ li(result, Operand::Zero());
4470
4471  PushSafepointRegistersScope scope(this);
4472  __ push(string);
4473  // Push the index as a smi. This is safe because of the checks in
4474  // DoStringCharCodeAt above.
4475  if (instr->index()->IsConstantOperand()) {
4476    int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
4477    __ LoadSmiLiteral(scratch, Smi::FromInt(const_index));
4478    __ push(scratch);
4479  } else {
4480    Register index = ToRegister(instr->index());
4481    __ SmiTag(index);
4482    __ push(index);
4483  }
4484  CallRuntimeFromDeferred(Runtime::kStringCharCodeAtRT, 2, instr,
4485                          instr->context());
4486  __ AssertSmi(r3);
4487  __ SmiUntag(r3);
4488  __ StoreToSafepointRegisterSlot(r3, result);
4489}
4490
4491
4492void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
4493  class DeferredStringCharFromCode final : public LDeferredCode {
4494   public:
4495    DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
4496        : LDeferredCode(codegen), instr_(instr) {}
4497    void Generate() override {
4498      codegen()->DoDeferredStringCharFromCode(instr_);
4499    }
4500    LInstruction* instr() override { return instr_; }
4501
4502   private:
4503    LStringCharFromCode* instr_;
4504  };
4505
4506  DeferredStringCharFromCode* deferred =
4507      new (zone()) DeferredStringCharFromCode(this, instr);
4508
4509  DCHECK(instr->hydrogen()->value()->representation().IsInteger32());
4510  Register char_code = ToRegister(instr->char_code());
4511  Register result = ToRegister(instr->result());
4512  DCHECK(!char_code.is(result));
4513
4514  __ cmpli(char_code, Operand(String::kMaxOneByteCharCode));
4515  __ bgt(deferred->entry());
4516  __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
4517  __ ShiftLeftImm(r0, char_code, Operand(kPointerSizeLog2));
4518  __ add(result, result, r0);
4519  __ LoadP(result, FieldMemOperand(result, FixedArray::kHeaderSize));
4520  __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
4521  __ cmp(result, ip);
4522  __ beq(deferred->entry());
4523  __ bind(deferred->exit());
4524}
4525
4526
4527void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
4528  Register char_code = ToRegister(instr->char_code());
4529  Register result = ToRegister(instr->result());
4530
4531  // TODO(3095996): Get rid of this. For now, we need to make the
4532  // result register contain a valid pointer because it is already
4533  // contained in the register pointer map.
4534  __ li(result, Operand::Zero());
4535
4536  PushSafepointRegistersScope scope(this);
4537  __ SmiTag(char_code);
4538  __ push(char_code);
4539  CallRuntimeFromDeferred(Runtime::kStringCharFromCode, 1, instr,
4540                          instr->context());
4541  __ StoreToSafepointRegisterSlot(r3, result);
4542}
4543
4544
4545void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
4546  LOperand* input = instr->value();
4547  DCHECK(input->IsRegister() || input->IsStackSlot());
4548  LOperand* output = instr->result();
4549  DCHECK(output->IsDoubleRegister());
4550  if (input->IsStackSlot()) {
4551    Register scratch = scratch0();
4552    __ LoadP(scratch, ToMemOperand(input));
4553    __ ConvertIntToDouble(scratch, ToDoubleRegister(output));
4554  } else {
4555    __ ConvertIntToDouble(ToRegister(input), ToDoubleRegister(output));
4556  }
4557}
4558
4559
4560void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
4561  LOperand* input = instr->value();
4562  LOperand* output = instr->result();
4563  __ ConvertUnsignedIntToDouble(ToRegister(input), ToDoubleRegister(output));
4564}
4565
4566
4567void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
4568  class DeferredNumberTagI final : public LDeferredCode {
4569   public:
4570    DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
4571        : LDeferredCode(codegen), instr_(instr) {}
4572    void Generate() override {
4573      codegen()->DoDeferredNumberTagIU(instr_, instr_->value(), instr_->temp1(),
4574                                       instr_->temp2(), SIGNED_INT32);
4575    }
4576    LInstruction* instr() override { return instr_; }
4577
4578   private:
4579    LNumberTagI* instr_;
4580  };
4581
4582  Register src = ToRegister(instr->value());
4583  Register dst = ToRegister(instr->result());
4584
4585  DeferredNumberTagI* deferred = new (zone()) DeferredNumberTagI(this, instr);
4586#if V8_TARGET_ARCH_PPC64
4587  __ SmiTag(dst, src);
4588#else
4589  __ SmiTagCheckOverflow(dst, src, r0);
4590  __ BranchOnOverflow(deferred->entry());
4591#endif
4592  __ bind(deferred->exit());
4593}
4594
4595
4596void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
4597  class DeferredNumberTagU final : public LDeferredCode {
4598   public:
4599    DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
4600        : LDeferredCode(codegen), instr_(instr) {}
4601    void Generate() override {
4602      codegen()->DoDeferredNumberTagIU(instr_, instr_->value(), instr_->temp1(),
4603                                       instr_->temp2(), UNSIGNED_INT32);
4604    }
4605    LInstruction* instr() override { return instr_; }
4606
4607   private:
4608    LNumberTagU* instr_;
4609  };
4610
4611  Register input = ToRegister(instr->value());
4612  Register result = ToRegister(instr->result());
4613
4614  DeferredNumberTagU* deferred = new (zone()) DeferredNumberTagU(this, instr);
4615  __ Cmpli(input, Operand(Smi::kMaxValue), r0);
4616  __ bgt(deferred->entry());
4617  __ SmiTag(result, input);
4618  __ bind(deferred->exit());
4619}
4620
4621
4622void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr, LOperand* value,
4623                                     LOperand* temp1, LOperand* temp2,
4624                                     IntegerSignedness signedness) {
4625  Label done, slow;
4626  Register src = ToRegister(value);
4627  Register dst = ToRegister(instr->result());
4628  Register tmp1 = scratch0();
4629  Register tmp2 = ToRegister(temp1);
4630  Register tmp3 = ToRegister(temp2);
4631  DoubleRegister dbl_scratch = double_scratch0();
4632
4633  if (signedness == SIGNED_INT32) {
4634    // There was overflow, so bits 30 and 31 of the original integer
4635    // disagree. Try to allocate a heap number in new space and store
4636    // the value in there. If that fails, call the runtime system.
4637    if (dst.is(src)) {
4638      __ SmiUntag(src, dst);
4639      __ xoris(src, src, Operand(HeapNumber::kSignMask >> 16));
4640    }
4641    __ ConvertIntToDouble(src, dbl_scratch);
4642  } else {
4643    __ ConvertUnsignedIntToDouble(src, dbl_scratch);
4644  }
4645
4646  if (FLAG_inline_new) {
4647    __ LoadRoot(tmp3, Heap::kHeapNumberMapRootIndex);
4648    __ AllocateHeapNumber(dst, tmp1, tmp2, tmp3, &slow);
4649    __ b(&done);
4650  }
4651
4652  // Slow case: Call the runtime system to do the number allocation.
4653  __ bind(&slow);
4654  {
4655    // TODO(3095996): Put a valid pointer value in the stack slot where the
4656    // result register is stored, as this register is in the pointer map, but
4657    // contains an integer value.
4658    __ li(dst, Operand::Zero());
4659
4660    // Preserve the value of all registers.
4661    PushSafepointRegistersScope scope(this);
4662    // Reset the context register.
4663    if (!dst.is(cp)) {
4664      __ li(cp, Operand::Zero());
4665    }
4666    __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
4667    RecordSafepointWithRegisters(instr->pointer_map(), 0,
4668                                 Safepoint::kNoLazyDeopt);
4669    __ StoreToSafepointRegisterSlot(r3, dst);
4670  }
4671
4672  // Done. Put the value in dbl_scratch into the value of the allocated heap
4673  // number.
4674  __ bind(&done);
4675  __ stfd(dbl_scratch, FieldMemOperand(dst, HeapNumber::kValueOffset));
4676}
4677
4678
4679void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
4680  class DeferredNumberTagD final : public LDeferredCode {
4681   public:
4682    DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
4683        : LDeferredCode(codegen), instr_(instr) {}
4684    void Generate() override { codegen()->DoDeferredNumberTagD(instr_); }
4685    LInstruction* instr() override { return instr_; }
4686
4687   private:
4688    LNumberTagD* instr_;
4689  };
4690
4691  DoubleRegister input_reg = ToDoubleRegister(instr->value());
4692  Register scratch = scratch0();
4693  Register reg = ToRegister(instr->result());
4694  Register temp1 = ToRegister(instr->temp());
4695  Register temp2 = ToRegister(instr->temp2());
4696
4697  DeferredNumberTagD* deferred = new (zone()) DeferredNumberTagD(this, instr);
4698  if (FLAG_inline_new) {
4699    __ LoadRoot(scratch, Heap::kHeapNumberMapRootIndex);
4700    __ AllocateHeapNumber(reg, temp1, temp2, scratch, deferred->entry());
4701  } else {
4702    __ b(deferred->entry());
4703  }
4704  __ bind(deferred->exit());
4705  __ stfd(input_reg, FieldMemOperand(reg, HeapNumber::kValueOffset));
4706}
4707
4708
4709void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
4710  // TODO(3095996): Get rid of this. For now, we need to make the
4711  // result register contain a valid pointer because it is already
4712  // contained in the register pointer map.
4713  Register reg = ToRegister(instr->result());
4714  __ li(reg, Operand::Zero());
4715
4716  PushSafepointRegistersScope scope(this);
4717  // Reset the context register.
4718  if (!reg.is(cp)) {
4719    __ li(cp, Operand::Zero());
4720  }
4721  __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
4722  RecordSafepointWithRegisters(instr->pointer_map(), 0,
4723                               Safepoint::kNoLazyDeopt);
4724  __ StoreToSafepointRegisterSlot(r3, reg);
4725}
4726
4727
4728void LCodeGen::DoSmiTag(LSmiTag* instr) {
4729  HChange* hchange = instr->hydrogen();
4730  Register input = ToRegister(instr->value());
4731  Register output = ToRegister(instr->result());
4732  if (hchange->CheckFlag(HValue::kCanOverflow) &&
4733      hchange->value()->CheckFlag(HValue::kUint32)) {
4734    __ TestUnsignedSmiCandidate(input, r0);
4735    DeoptimizeIf(ne, instr, DeoptimizeReason::kOverflow, cr0);
4736  }
4737#if !V8_TARGET_ARCH_PPC64
4738  if (hchange->CheckFlag(HValue::kCanOverflow) &&
4739      !hchange->value()->CheckFlag(HValue::kUint32)) {
4740    __ SmiTagCheckOverflow(output, input, r0);
4741    DeoptimizeIf(lt, instr, DeoptimizeReason::kOverflow, cr0);
4742  } else {
4743#endif
4744    __ SmiTag(output, input);
4745#if !V8_TARGET_ARCH_PPC64
4746  }
4747#endif
4748}
4749
4750
4751void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
4752  Register scratch = scratch0();
4753  Register input = ToRegister(instr->value());
4754  Register result = ToRegister(instr->result());
4755  if (instr->needs_check()) {
4756    // If the input is a HeapObject, value of scratch won't be zero.
4757    __ andi(scratch, input, Operand(kHeapObjectTag));
4758    __ SmiUntag(result, input);
4759    DeoptimizeIf(ne, instr, DeoptimizeReason::kNotASmi, cr0);
4760  } else {
4761    __ SmiUntag(result, input);
4762  }
4763}
4764
4765
4766void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
4767                                DoubleRegister result_reg,
4768                                NumberUntagDMode mode) {
4769  bool can_convert_undefined_to_nan = instr->truncating();
4770  bool deoptimize_on_minus_zero = instr->hydrogen()->deoptimize_on_minus_zero();
4771
4772  Register scratch = scratch0();
4773  DCHECK(!result_reg.is(double_scratch0()));
4774
4775  Label convert, load_smi, done;
4776
4777  if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
4778    // Smi check.
4779    __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi);
4780
4781    // Heap number map check.
4782    __ LoadP(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
4783    __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
4784    __ cmp(scratch, ip);
4785    if (can_convert_undefined_to_nan) {
4786      __ bne(&convert);
4787    } else {
4788      DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumber);
4789    }
4790    // load heap number
4791    __ lfd(result_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset));
4792    if (deoptimize_on_minus_zero) {
4793      __ TestDoubleIsMinusZero(result_reg, scratch, ip);
4794      DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero);
4795    }
4796    __ b(&done);
4797    if (can_convert_undefined_to_nan) {
4798      __ bind(&convert);
4799      // Convert undefined (and hole) to NaN.
4800      __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
4801      __ cmp(input_reg, ip);
4802      DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumberUndefined);
4803      __ LoadRoot(scratch, Heap::kNanValueRootIndex);
4804      __ lfd(result_reg, FieldMemOperand(scratch, HeapNumber::kValueOffset));
4805      __ b(&done);
4806    }
4807  } else {
4808    __ SmiUntag(scratch, input_reg);
4809    DCHECK(mode == NUMBER_CANDIDATE_IS_SMI);
4810  }
4811  // Smi to double register conversion
4812  __ bind(&load_smi);
4813  // scratch: untagged value of input_reg
4814  __ ConvertIntToDouble(scratch, result_reg);
4815  __ bind(&done);
4816}
4817
4818
4819void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
4820  Register input_reg = ToRegister(instr->value());
4821  Register scratch1 = scratch0();
4822  Register scratch2 = ToRegister(instr->temp());
4823  DoubleRegister double_scratch = double_scratch0();
4824  DoubleRegister double_scratch2 = ToDoubleRegister(instr->temp2());
4825
4826  DCHECK(!scratch1.is(input_reg) && !scratch1.is(scratch2));
4827  DCHECK(!scratch2.is(input_reg) && !scratch2.is(scratch1));
4828
4829  Label done;
4830
4831  // Heap number map check.
4832  __ LoadP(scratch1, FieldMemOperand(input_reg, HeapObject::kMapOffset));
4833  __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
4834  __ cmp(scratch1, ip);
4835
4836  if (instr->truncating()) {
4837    Label truncate;
4838    __ beq(&truncate);
4839    __ CompareInstanceType(scratch1, scratch1, ODDBALL_TYPE);
4840    DeoptimizeIf(ne, instr, DeoptimizeReason::kNotANumberOrOddball);
4841    __ bind(&truncate);
4842    __ mr(scratch2, input_reg);
4843    __ TruncateHeapNumberToI(input_reg, scratch2);
4844  } else {
4845    DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumber);
4846
4847    __ lfd(double_scratch2,
4848           FieldMemOperand(input_reg, HeapNumber::kValueOffset));
4849    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
4850      // preserve heap number pointer in scratch2 for minus zero check below
4851      __ mr(scratch2, input_reg);
4852    }
4853    __ TryDoubleToInt32Exact(input_reg, double_scratch2, scratch1,
4854                             double_scratch);
4855    DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecisionOrNaN);
4856
4857    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
4858      __ cmpi(input_reg, Operand::Zero());
4859      __ bne(&done);
4860      __ TestHeapNumberSign(scratch2, scratch1);
4861      DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero);
4862    }
4863  }
4864  __ bind(&done);
4865}
4866
4867
4868void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
4869  class DeferredTaggedToI final : public LDeferredCode {
4870   public:
4871    DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
4872        : LDeferredCode(codegen), instr_(instr) {}
4873    void Generate() override { codegen()->DoDeferredTaggedToI(instr_); }
4874    LInstruction* instr() override { return instr_; }
4875
4876   private:
4877    LTaggedToI* instr_;
4878  };
4879
4880  LOperand* input = instr->value();
4881  DCHECK(input->IsRegister());
4882  DCHECK(input->Equals(instr->result()));
4883
4884  Register input_reg = ToRegister(input);
4885
4886  if (instr->hydrogen()->value()->representation().IsSmi()) {
4887    __ SmiUntag(input_reg);
4888  } else {
4889    DeferredTaggedToI* deferred = new (zone()) DeferredTaggedToI(this, instr);
4890
4891    // Branch to deferred code if the input is a HeapObject.
4892    __ JumpIfNotSmi(input_reg, deferred->entry());
4893
4894    __ SmiUntag(input_reg);
4895    __ bind(deferred->exit());
4896  }
4897}
4898
4899
4900void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
4901  LOperand* input = instr->value();
4902  DCHECK(input->IsRegister());
4903  LOperand* result = instr->result();
4904  DCHECK(result->IsDoubleRegister());
4905
4906  Register input_reg = ToRegister(input);
4907  DoubleRegister result_reg = ToDoubleRegister(result);
4908
4909  HValue* value = instr->hydrogen()->value();
4910  NumberUntagDMode mode = value->representation().IsSmi()
4911                              ? NUMBER_CANDIDATE_IS_SMI
4912                              : NUMBER_CANDIDATE_IS_ANY_TAGGED;
4913
4914  EmitNumberUntagD(instr, input_reg, result_reg, mode);
4915}
4916
4917
4918void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
4919  Register result_reg = ToRegister(instr->result());
4920  Register scratch1 = scratch0();
4921  DoubleRegister double_input = ToDoubleRegister(instr->value());
4922  DoubleRegister double_scratch = double_scratch0();
4923
4924  if (instr->truncating()) {
4925    __ TruncateDoubleToI(result_reg, double_input);
4926  } else {
4927    __ TryDoubleToInt32Exact(result_reg, double_input, scratch1,
4928                             double_scratch);
4929    // Deoptimize if the input wasn't a int32 (inside a double).
4930    DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecisionOrNaN);
4931    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
4932      Label done;
4933      __ cmpi(result_reg, Operand::Zero());
4934      __ bne(&done);
4935      __ TestDoubleSign(double_input, scratch1);
4936      DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero);
4937      __ bind(&done);
4938    }
4939  }
4940}
4941
4942
4943void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
4944  Register result_reg = ToRegister(instr->result());
4945  Register scratch1 = scratch0();
4946  DoubleRegister double_input = ToDoubleRegister(instr->value());
4947  DoubleRegister double_scratch = double_scratch0();
4948
4949  if (instr->truncating()) {
4950    __ TruncateDoubleToI(result_reg, double_input);
4951  } else {
4952    __ TryDoubleToInt32Exact(result_reg, double_input, scratch1,
4953                             double_scratch);
4954    // Deoptimize if the input wasn't a int32 (inside a double).
4955    DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecisionOrNaN);
4956    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
4957      Label done;
4958      __ cmpi(result_reg, Operand::Zero());
4959      __ bne(&done);
4960      __ TestDoubleSign(double_input, scratch1);
4961      DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero);
4962      __ bind(&done);
4963    }
4964  }
4965#if V8_TARGET_ARCH_PPC64
4966  __ SmiTag(result_reg);
4967#else
4968  __ SmiTagCheckOverflow(result_reg, r0);
4969  DeoptimizeIf(lt, instr, DeoptimizeReason::kOverflow, cr0);
4970#endif
4971}
4972
4973
4974void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
4975  LOperand* input = instr->value();
4976  __ TestIfSmi(ToRegister(input), r0);
4977  DeoptimizeIf(ne, instr, DeoptimizeReason::kNotASmi, cr0);
4978}
4979
4980
4981void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
4982  if (!instr->hydrogen()->value()->type().IsHeapObject()) {
4983    LOperand* input = instr->value();
4984    __ TestIfSmi(ToRegister(input), r0);
4985    DeoptimizeIf(eq, instr, DeoptimizeReason::kSmi, cr0);
4986  }
4987}
4988
4989
4990void LCodeGen::DoCheckArrayBufferNotNeutered(
4991    LCheckArrayBufferNotNeutered* instr) {
4992  Register view = ToRegister(instr->view());
4993  Register scratch = scratch0();
4994
4995  __ LoadP(scratch, FieldMemOperand(view, JSArrayBufferView::kBufferOffset));
4996  __ lwz(scratch, FieldMemOperand(scratch, JSArrayBuffer::kBitFieldOffset));
4997  __ andi(r0, scratch, Operand(1 << JSArrayBuffer::WasNeutered::kShift));
4998  DeoptimizeIf(ne, instr, DeoptimizeReason::kOutOfBounds, cr0);
4999}
5000
5001
5002void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
5003  Register input = ToRegister(instr->value());
5004  Register scratch = scratch0();
5005
5006  __ LoadP(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
5007  __ lbz(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
5008
5009  if (instr->hydrogen()->is_interval_check()) {
5010    InstanceType first;
5011    InstanceType last;
5012    instr->hydrogen()->GetCheckInterval(&first, &last);
5013
5014    __ cmpli(scratch, Operand(first));
5015
5016    // If there is only one type in the interval check for equality.
5017    if (first == last) {
5018      DeoptimizeIf(ne, instr, DeoptimizeReason::kWrongInstanceType);
5019    } else {
5020      DeoptimizeIf(lt, instr, DeoptimizeReason::kWrongInstanceType);
5021      // Omit check for the last type.
5022      if (last != LAST_TYPE) {
5023        __ cmpli(scratch, Operand(last));
5024        DeoptimizeIf(gt, instr, DeoptimizeReason::kWrongInstanceType);
5025      }
5026    }
5027  } else {
5028    uint8_t mask;
5029    uint8_t tag;
5030    instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);
5031
5032    if (base::bits::IsPowerOfTwo32(mask)) {
5033      DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag));
5034      __ andi(r0, scratch, Operand(mask));
5035      DeoptimizeIf(tag == 0 ? ne : eq, instr,
5036                   DeoptimizeReason::kWrongInstanceType, cr0);
5037    } else {
5038      __ andi(scratch, scratch, Operand(mask));
5039      __ cmpi(scratch, Operand(tag));
5040      DeoptimizeIf(ne, instr, DeoptimizeReason::kWrongInstanceType);
5041    }
5042  }
5043}
5044
5045
5046void LCodeGen::DoCheckValue(LCheckValue* instr) {
5047  Register reg = ToRegister(instr->value());
5048  Handle<HeapObject> object = instr->hydrogen()->object().handle();
5049  AllowDeferredHandleDereference smi_check;
5050  if (isolate()->heap()->InNewSpace(*object)) {
5051    Register reg = ToRegister(instr->value());
5052    Handle<Cell> cell = isolate()->factory()->NewCell(object);
5053    __ mov(ip, Operand(cell));
5054    __ LoadP(ip, FieldMemOperand(ip, Cell::kValueOffset));
5055    __ cmp(reg, ip);
5056  } else {
5057    __ Cmpi(reg, Operand(object), r0);
5058  }
5059  DeoptimizeIf(ne, instr, DeoptimizeReason::kValueMismatch);
5060}
5061
5062
5063void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
5064  Register temp = ToRegister(instr->temp());
5065  Label deopt, done;
5066  // If the map is not deprecated the migration attempt does not make sense.
5067  __ LoadP(temp, FieldMemOperand(object, HeapObject::kMapOffset));
5068  __ lwz(temp, FieldMemOperand(temp, Map::kBitField3Offset));
5069  __ TestBitMask(temp, Map::Deprecated::kMask, r0);
5070  __ beq(&deopt, cr0);
5071
5072  {
5073    PushSafepointRegistersScope scope(this);
5074    __ push(object);
5075    __ li(cp, Operand::Zero());
5076    __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance);
5077    RecordSafepointWithRegisters(instr->pointer_map(), 1,
5078                                 Safepoint::kNoLazyDeopt);
5079    __ StoreToSafepointRegisterSlot(r3, temp);
5080  }
5081  __ TestIfSmi(temp, r0);
5082  __ bne(&done, cr0);
5083
5084  __ bind(&deopt);
5085  // In case of "al" condition the operand is not used so just pass cr0 there.
5086  DeoptimizeIf(al, instr, DeoptimizeReason::kInstanceMigrationFailed, cr0);
5087
5088  __ bind(&done);
5089}
5090
5091
5092void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
5093  class DeferredCheckMaps final : public LDeferredCode {
5094   public:
5095    DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object)
5096        : LDeferredCode(codegen), instr_(instr), object_(object) {
5097      SetExit(check_maps());
5098    }
5099    void Generate() override {
5100      codegen()->DoDeferredInstanceMigration(instr_, object_);
5101    }
5102    Label* check_maps() { return &check_maps_; }
5103    LInstruction* instr() override { return instr_; }
5104
5105   private:
5106    LCheckMaps* instr_;
5107    Label check_maps_;
5108    Register object_;
5109  };
5110
5111  if (instr->hydrogen()->IsStabilityCheck()) {
5112    const UniqueSet<Map>* maps = instr->hydrogen()->maps();
5113    for (int i = 0; i < maps->size(); ++i) {
5114      AddStabilityDependency(maps->at(i).handle());
5115    }
5116    return;
5117  }
5118
5119  Register object = ToRegister(instr->value());
5120  Register map_reg = ToRegister(instr->temp());
5121
5122  __ LoadP(map_reg, FieldMemOperand(object, HeapObject::kMapOffset));
5123
5124  DeferredCheckMaps* deferred = NULL;
5125  if (instr->hydrogen()->HasMigrationTarget()) {
5126    deferred = new (zone()) DeferredCheckMaps(this, instr, object);
5127    __ bind(deferred->check_maps());
5128  }
5129
5130  const UniqueSet<Map>* maps = instr->hydrogen()->maps();
5131  Label success;
5132  for (int i = 0; i < maps->size() - 1; i++) {
5133    Handle<Map> map = maps->at(i).handle();
5134    __ CompareMap(map_reg, map, &success);
5135    __ beq(&success);
5136  }
5137
5138  Handle<Map> map = maps->at(maps->size() - 1).handle();
5139  __ CompareMap(map_reg, map, &success);
5140  if (instr->hydrogen()->HasMigrationTarget()) {
5141    __ bne(deferred->entry());
5142  } else {
5143    DeoptimizeIf(ne, instr, DeoptimizeReason::kWrongMap);
5144  }
5145
5146  __ bind(&success);
5147}
5148
5149
5150void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
5151  DoubleRegister value_reg = ToDoubleRegister(instr->unclamped());
5152  Register result_reg = ToRegister(instr->result());
5153  __ ClampDoubleToUint8(result_reg, value_reg, double_scratch0());
5154}
5155
5156
5157void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
5158  Register unclamped_reg = ToRegister(instr->unclamped());
5159  Register result_reg = ToRegister(instr->result());
5160  __ ClampUint8(result_reg, unclamped_reg);
5161}
5162
5163
5164void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
5165  Register scratch = scratch0();
5166  Register input_reg = ToRegister(instr->unclamped());
5167  Register result_reg = ToRegister(instr->result());
5168  DoubleRegister temp_reg = ToDoubleRegister(instr->temp());
5169  Label is_smi, done, heap_number;
5170
5171  // Both smi and heap number cases are handled.
5172  __ UntagAndJumpIfSmi(result_reg, input_reg, &is_smi);
5173
5174  // Check for heap number
5175  __ LoadP(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
5176  __ Cmpi(scratch, Operand(factory()->heap_number_map()), r0);
5177  __ beq(&heap_number);
5178
5179  // Check for undefined. Undefined is converted to zero for clamping
5180  // conversions.
5181  __ Cmpi(input_reg, Operand(factory()->undefined_value()), r0);
5182  DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumberUndefined);
5183  __ li(result_reg, Operand::Zero());
5184  __ b(&done);
5185
5186  // Heap number
5187  __ bind(&heap_number);
5188  __ lfd(temp_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset));
5189  __ ClampDoubleToUint8(result_reg, temp_reg, double_scratch0());
5190  __ b(&done);
5191
5192  // smi
5193  __ bind(&is_smi);
5194  __ ClampUint8(result_reg, result_reg);
5195
5196  __ bind(&done);
5197}
5198
5199
5200void LCodeGen::DoAllocate(LAllocate* instr) {
5201  class DeferredAllocate final : public LDeferredCode {
5202   public:
5203    DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
5204        : LDeferredCode(codegen), instr_(instr) { }
5205    void Generate() override { codegen()->DoDeferredAllocate(instr_); }
5206    LInstruction* instr() override { return instr_; }
5207
5208   private:
5209    LAllocate* instr_;
5210  };
5211
5212  DeferredAllocate* deferred =
5213      new(zone()) DeferredAllocate(this, instr);
5214
5215  Register result = ToRegister(instr->result());
5216  Register scratch = ToRegister(instr->temp1());
5217  Register scratch2 = ToRegister(instr->temp2());
5218
5219  // Allocate memory for the object.
5220  AllocationFlags flags = NO_ALLOCATION_FLAGS;
5221  if (instr->hydrogen()->MustAllocateDoubleAligned()) {
5222    flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
5223  }
5224  if (instr->hydrogen()->IsOldSpaceAllocation()) {
5225    DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
5226    flags = static_cast<AllocationFlags>(flags | PRETENURE);
5227  }
5228
5229  if (instr->hydrogen()->IsAllocationFoldingDominator()) {
5230    flags = static_cast<AllocationFlags>(flags | ALLOCATION_FOLDING_DOMINATOR);
5231  }
5232
5233  DCHECK(!instr->hydrogen()->IsAllocationFolded());
5234
5235  if (instr->size()->IsConstantOperand()) {
5236    int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
5237    CHECK(size <= kMaxRegularHeapObjectSize);
5238    __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
5239  } else {
5240    Register size = ToRegister(instr->size());
5241    __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
5242  }
5243
5244  __ bind(deferred->exit());
5245
5246  if (instr->hydrogen()->MustPrefillWithFiller()) {
5247    if (instr->size()->IsConstantOperand()) {
5248      int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
5249      __ LoadIntLiteral(scratch, size - kHeapObjectTag);
5250    } else {
5251      __ subi(scratch, ToRegister(instr->size()), Operand(kHeapObjectTag));
5252    }
5253    __ mov(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
5254    Label loop;
5255    __ bind(&loop);
5256    __ subi(scratch, scratch, Operand(kPointerSize));
5257    __ StorePX(scratch2, MemOperand(result, scratch));
5258    __ cmpi(scratch, Operand::Zero());
5259    __ bge(&loop);
5260  }
5261}
5262
5263
5264void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
5265  Register result = ToRegister(instr->result());
5266
5267  // TODO(3095996): Get rid of this. For now, we need to make the
5268  // result register contain a valid pointer because it is already
5269  // contained in the register pointer map.
5270  __ LoadSmiLiteral(result, Smi::kZero);
5271
5272  PushSafepointRegistersScope scope(this);
5273  if (instr->size()->IsRegister()) {
5274    Register size = ToRegister(instr->size());
5275    DCHECK(!size.is(result));
5276    __ SmiTag(size);
5277    __ push(size);
5278  } else {
5279    int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
5280#if !V8_TARGET_ARCH_PPC64
5281    if (size >= 0 && size <= Smi::kMaxValue) {
5282#endif
5283      __ Push(Smi::FromInt(size));
5284#if !V8_TARGET_ARCH_PPC64
5285    } else {
5286      // We should never get here at runtime => abort
5287      __ stop("invalid allocation size");
5288      return;
5289    }
5290#endif
5291  }
5292
5293  int flags = AllocateDoubleAlignFlag::encode(
5294      instr->hydrogen()->MustAllocateDoubleAligned());
5295  if (instr->hydrogen()->IsOldSpaceAllocation()) {
5296    DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
5297    flags = AllocateTargetSpace::update(flags, OLD_SPACE);
5298  } else {
5299    flags = AllocateTargetSpace::update(flags, NEW_SPACE);
5300  }
5301  __ Push(Smi::FromInt(flags));
5302
5303  CallRuntimeFromDeferred(Runtime::kAllocateInTargetSpace, 2, instr,
5304                          instr->context());
5305  __ StoreToSafepointRegisterSlot(r3, result);
5306
5307  if (instr->hydrogen()->IsAllocationFoldingDominator()) {
5308    AllocationFlags allocation_flags = NO_ALLOCATION_FLAGS;
5309    if (instr->hydrogen()->IsOldSpaceAllocation()) {
5310      DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
5311      allocation_flags = static_cast<AllocationFlags>(flags | PRETENURE);
5312    }
5313    // If the allocation folding dominator allocate triggered a GC, allocation
5314    // happend in the runtime. We have to reset the top pointer to virtually
5315    // undo the allocation.
5316    ExternalReference allocation_top =
5317        AllocationUtils::GetAllocationTopReference(isolate(), allocation_flags);
5318    Register top_address = scratch0();
5319    __ subi(r3, r3, Operand(kHeapObjectTag));
5320    __ mov(top_address, Operand(allocation_top));
5321    __ StoreP(r3, MemOperand(top_address));
5322    __ addi(r3, r3, Operand(kHeapObjectTag));
5323  }
5324}
5325
5326void LCodeGen::DoFastAllocate(LFastAllocate* instr) {
5327  DCHECK(instr->hydrogen()->IsAllocationFolded());
5328  DCHECK(!instr->hydrogen()->IsAllocationFoldingDominator());
5329  Register result = ToRegister(instr->result());
5330  Register scratch1 = ToRegister(instr->temp1());
5331  Register scratch2 = ToRegister(instr->temp2());
5332
5333  AllocationFlags flags = ALLOCATION_FOLDED;
5334  if (instr->hydrogen()->MustAllocateDoubleAligned()) {
5335    flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
5336  }
5337  if (instr->hydrogen()->IsOldSpaceAllocation()) {
5338    DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
5339    flags = static_cast<AllocationFlags>(flags | PRETENURE);
5340  }
5341  if (instr->size()->IsConstantOperand()) {
5342    int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
5343    CHECK(size <= kMaxRegularHeapObjectSize);
5344    __ FastAllocate(size, result, scratch1, scratch2, flags);
5345  } else {
5346    Register size = ToRegister(instr->size());
5347    __ FastAllocate(size, result, scratch1, scratch2, flags);
5348  }
5349}
5350
5351
5352void LCodeGen::DoTypeof(LTypeof* instr) {
5353  DCHECK(ToRegister(instr->value()).is(r6));
5354  DCHECK(ToRegister(instr->result()).is(r3));
5355  Label end, do_call;
5356  Register value_register = ToRegister(instr->value());
5357  __ JumpIfNotSmi(value_register, &do_call);
5358  __ mov(r3, Operand(isolate()->factory()->number_string()));
5359  __ b(&end);
5360  __ bind(&do_call);
5361  Callable callable = CodeFactory::Typeof(isolate());
5362  CallCode(callable.code(), RelocInfo::CODE_TARGET, instr);
5363  __ bind(&end);
5364}
5365
5366
5367void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
5368  Register input = ToRegister(instr->value());
5369
5370  Condition final_branch_condition =
5371      EmitTypeofIs(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_), input,
5372                   instr->type_literal());
5373  if (final_branch_condition != kNoCondition) {
5374    EmitBranch(instr, final_branch_condition);
5375  }
5376}
5377
5378
5379Condition LCodeGen::EmitTypeofIs(Label* true_label, Label* false_label,
5380                                 Register input, Handle<String> type_name) {
5381  Condition final_branch_condition = kNoCondition;
5382  Register scratch = scratch0();
5383  Factory* factory = isolate()->factory();
5384  if (String::Equals(type_name, factory->number_string())) {
5385    __ JumpIfSmi(input, true_label);
5386    __ LoadP(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
5387    __ CompareRoot(scratch, Heap::kHeapNumberMapRootIndex);
5388    final_branch_condition = eq;
5389
5390  } else if (String::Equals(type_name, factory->string_string())) {
5391    __ JumpIfSmi(input, false_label);
5392    __ CompareObjectType(input, scratch, no_reg, FIRST_NONSTRING_TYPE);
5393    final_branch_condition = lt;
5394
5395  } else if (String::Equals(type_name, factory->symbol_string())) {
5396    __ JumpIfSmi(input, false_label);
5397    __ CompareObjectType(input, scratch, no_reg, SYMBOL_TYPE);
5398    final_branch_condition = eq;
5399
5400  } else if (String::Equals(type_name, factory->boolean_string())) {
5401    __ CompareRoot(input, Heap::kTrueValueRootIndex);
5402    __ beq(true_label);
5403    __ CompareRoot(input, Heap::kFalseValueRootIndex);
5404    final_branch_condition = eq;
5405
5406  } else if (String::Equals(type_name, factory->undefined_string())) {
5407    __ CompareRoot(input, Heap::kNullValueRootIndex);
5408    __ beq(false_label);
5409    __ JumpIfSmi(input, false_label);
5410    // Check for undetectable objects => true.
5411    __ LoadP(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
5412    __ lbz(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
5413    __ ExtractBit(r0, scratch, Map::kIsUndetectable);
5414    __ cmpi(r0, Operand::Zero());
5415    final_branch_condition = ne;
5416
5417  } else if (String::Equals(type_name, factory->function_string())) {
5418    __ JumpIfSmi(input, false_label);
5419    __ LoadP(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
5420    __ lbz(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
5421    __ andi(scratch, scratch,
5422            Operand((1 << Map::kIsCallable) | (1 << Map::kIsUndetectable)));
5423    __ cmpi(scratch, Operand(1 << Map::kIsCallable));
5424    final_branch_condition = eq;
5425
5426  } else if (String::Equals(type_name, factory->object_string())) {
5427    __ JumpIfSmi(input, false_label);
5428    __ CompareRoot(input, Heap::kNullValueRootIndex);
5429    __ beq(true_label);
5430    STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
5431    __ CompareObjectType(input, scratch, ip, FIRST_JS_RECEIVER_TYPE);
5432    __ blt(false_label);
5433    // Check for callable or undetectable objects => false.
5434    __ lbz(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
5435    __ andi(r0, scratch,
5436            Operand((1 << Map::kIsCallable) | (1 << Map::kIsUndetectable)));
5437    __ cmpi(r0, Operand::Zero());
5438    final_branch_condition = eq;
5439
5440  } else {
5441    __ b(false_label);
5442  }
5443
5444  return final_branch_condition;
5445}
5446
5447
5448void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
5449  if (info()->ShouldEnsureSpaceForLazyDeopt()) {
5450    // Ensure that we have enough space after the previous lazy-bailout
5451    // instruction for patching the code here.
5452    int current_pc = masm()->pc_offset();
5453    if (current_pc < last_lazy_deopt_pc_ + space_needed) {
5454      int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
5455      DCHECK_EQ(0, padding_size % Assembler::kInstrSize);
5456      while (padding_size > 0) {
5457        __ nop();
5458        padding_size -= Assembler::kInstrSize;
5459      }
5460    }
5461  }
5462  last_lazy_deopt_pc_ = masm()->pc_offset();
5463}
5464
5465
5466void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
5467  last_lazy_deopt_pc_ = masm()->pc_offset();
5468  DCHECK(instr->HasEnvironment());
5469  LEnvironment* env = instr->environment();
5470  RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
5471  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
5472}
5473
5474
5475void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
5476  Deoptimizer::BailoutType type = instr->hydrogen()->type();
5477  // TODO(danno): Stubs expect all deopts to be lazy for historical reasons (the
5478  // needed return address), even though the implementation of LAZY and EAGER is
5479  // now identical. When LAZY is eventually completely folded into EAGER, remove
5480  // the special case below.
5481  if (info()->IsStub() && type == Deoptimizer::EAGER) {
5482    type = Deoptimizer::LAZY;
5483  }
5484
5485  DeoptimizeIf(al, instr, instr->hydrogen()->reason(), type);
5486}
5487
5488
5489void LCodeGen::DoDummy(LDummy* instr) {
5490  // Nothing to see here, move on!
5491}
5492
5493
5494void LCodeGen::DoDummyUse(LDummyUse* instr) {
5495  // Nothing to see here, move on!
5496}
5497
5498
5499void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
5500  PushSafepointRegistersScope scope(this);
5501  LoadContextFromDeferred(instr->context());
5502  __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
5503  RecordSafepointWithLazyDeopt(
5504      instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
5505  DCHECK(instr->HasEnvironment());
5506  LEnvironment* env = instr->environment();
5507  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
5508}
5509
5510
5511void LCodeGen::DoStackCheck(LStackCheck* instr) {
5512  class DeferredStackCheck final : public LDeferredCode {
5513   public:
5514    DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
5515        : LDeferredCode(codegen), instr_(instr) {}
5516    void Generate() override { codegen()->DoDeferredStackCheck(instr_); }
5517    LInstruction* instr() override { return instr_; }
5518
5519   private:
5520    LStackCheck* instr_;
5521  };
5522
5523  DCHECK(instr->HasEnvironment());
5524  LEnvironment* env = instr->environment();
5525  // There is no LLazyBailout instruction for stack-checks. We have to
5526  // prepare for lazy deoptimization explicitly here.
5527  if (instr->hydrogen()->is_function_entry()) {
5528    // Perform stack overflow check.
5529    Label done;
5530    __ LoadRoot(ip, Heap::kStackLimitRootIndex);
5531    __ cmpl(sp, ip);
5532    __ bge(&done);
5533    DCHECK(instr->context()->IsRegister());
5534    DCHECK(ToRegister(instr->context()).is(cp));
5535    CallCode(isolate()->builtins()->StackCheck(), RelocInfo::CODE_TARGET,
5536             instr);
5537    __ bind(&done);
5538  } else {
5539    DCHECK(instr->hydrogen()->is_backwards_branch());
5540    // Perform stack overflow check if this goto needs it before jumping.
5541    DeferredStackCheck* deferred_stack_check =
5542        new (zone()) DeferredStackCheck(this, instr);
5543    __ LoadRoot(ip, Heap::kStackLimitRootIndex);
5544    __ cmpl(sp, ip);
5545    __ blt(deferred_stack_check->entry());
5546    EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
5547    __ bind(instr->done_label());
5548    deferred_stack_check->SetExit(instr->done_label());
5549    RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
5550    // Don't record a deoptimization index for the safepoint here.
5551    // This will be done explicitly when emitting call and the safepoint in
5552    // the deferred code.
5553  }
5554}
5555
5556
5557void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
5558  // This is a pseudo-instruction that ensures that the environment here is
5559  // properly registered for deoptimization and records the assembler's PC
5560  // offset.
5561  LEnvironment* environment = instr->environment();
5562
5563  // If the environment were already registered, we would have no way of
5564  // backpatching it with the spill slot operands.
5565  DCHECK(!environment->HasBeenRegistered());
5566  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
5567
5568  GenerateOsrPrologue();
5569}
5570
5571
5572void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
5573  Label use_cache, call_runtime;
5574  __ CheckEnumCache(&call_runtime);
5575
5576  __ LoadP(r3, FieldMemOperand(r3, HeapObject::kMapOffset));
5577  __ b(&use_cache);
5578
5579  // Get the set of properties to enumerate.
5580  __ bind(&call_runtime);
5581  __ push(r3);
5582  CallRuntime(Runtime::kForInEnumerate, instr);
5583  __ bind(&use_cache);
5584}
5585
5586
5587void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
5588  Register map = ToRegister(instr->map());
5589  Register result = ToRegister(instr->result());
5590  Label load_cache, done;
5591  __ EnumLength(result, map);
5592  __ CmpSmiLiteral(result, Smi::kZero, r0);
5593  __ bne(&load_cache);
5594  __ mov(result, Operand(isolate()->factory()->empty_fixed_array()));
5595  __ b(&done);
5596
5597  __ bind(&load_cache);
5598  __ LoadInstanceDescriptors(map, result);
5599  __ LoadP(result, FieldMemOperand(result, DescriptorArray::kEnumCacheOffset));
5600  __ LoadP(result, FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
5601  __ cmpi(result, Operand::Zero());
5602  DeoptimizeIf(eq, instr, DeoptimizeReason::kNoCache);
5603
5604  __ bind(&done);
5605}
5606
5607
5608void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
5609  Register object = ToRegister(instr->value());
5610  Register map = ToRegister(instr->map());
5611  __ LoadP(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset));
5612  __ cmp(map, scratch0());
5613  DeoptimizeIf(ne, instr, DeoptimizeReason::kWrongMap);
5614}
5615
5616
5617void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
5618                                           Register result, Register object,
5619                                           Register index) {
5620  PushSafepointRegistersScope scope(this);
5621  __ Push(object, index);
5622  __ li(cp, Operand::Zero());
5623  __ CallRuntimeSaveDoubles(Runtime::kLoadMutableDouble);
5624  RecordSafepointWithRegisters(instr->pointer_map(), 2,
5625                               Safepoint::kNoLazyDeopt);
5626  __ StoreToSafepointRegisterSlot(r3, result);
5627}
5628
5629
5630void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
5631  class DeferredLoadMutableDouble final : public LDeferredCode {
5632   public:
5633    DeferredLoadMutableDouble(LCodeGen* codegen, LLoadFieldByIndex* instr,
5634                              Register result, Register object, Register index)
5635        : LDeferredCode(codegen),
5636          instr_(instr),
5637          result_(result),
5638          object_(object),
5639          index_(index) {}
5640    void Generate() override {
5641      codegen()->DoDeferredLoadMutableDouble(instr_, result_, object_, index_);
5642    }
5643    LInstruction* instr() override { return instr_; }
5644
5645   private:
5646    LLoadFieldByIndex* instr_;
5647    Register result_;
5648    Register object_;
5649    Register index_;
5650  };
5651
5652  Register object = ToRegister(instr->object());
5653  Register index = ToRegister(instr->index());
5654  Register result = ToRegister(instr->result());
5655  Register scratch = scratch0();
5656
5657  DeferredLoadMutableDouble* deferred;
5658  deferred = new (zone())
5659      DeferredLoadMutableDouble(this, instr, result, object, index);
5660
5661  Label out_of_object, done;
5662
5663  __ TestBitMask(index, reinterpret_cast<uintptr_t>(Smi::FromInt(1)), r0);
5664  __ bne(deferred->entry(), cr0);
5665  __ ShiftRightArithImm(index, index, 1);
5666
5667  __ cmpi(index, Operand::Zero());
5668  __ blt(&out_of_object);
5669
5670  __ SmiToPtrArrayOffset(r0, index);
5671  __ add(scratch, object, r0);
5672  __ LoadP(result, FieldMemOperand(scratch, JSObject::kHeaderSize));
5673
5674  __ b(&done);
5675
5676  __ bind(&out_of_object);
5677  __ LoadP(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
5678  // Index is equal to negated out of object property index plus 1.
5679  __ SmiToPtrArrayOffset(r0, index);
5680  __ sub(scratch, result, r0);
5681  __ LoadP(result,
5682           FieldMemOperand(scratch, FixedArray::kHeaderSize - kPointerSize));
5683  __ bind(deferred->exit());
5684  __ bind(&done);
5685}
5686
5687#undef __
5688
5689}  // namespace internal
5690}  // namespace v8
5691