// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include <assert.h>  // For assert
#include <limits.h>  // For LONG_MIN, LONG_MAX.

#if V8_TARGET_ARCH_PPC

#include "src/base/bits.h"
#include "src/base/division-by-constant.h"
#include "src/bootstrapper.h"
#include "src/codegen.h"
#include "src/debug/debug.h"
#include "src/register-configuration.h"
#include "src/runtime/runtime.h"

#include "src/ppc/macro-assembler-ppc.h"

namespace v8 {
namespace internal {

MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size,
                               CodeObjectRequired create_code_object)
    : Assembler(arg_isolate, buffer, size),
      generating_stub_(false),
      has_frame_(false) {
  if (create_code_object == CodeObjectRequired::kYes) {
    code_object_ =
        Handle<Object>::New(isolate()->heap()->undefined_value(), isolate());
  }
}


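// PPC branches to a register value only through the special-purpose CTR (or
// LR) register, so the Jump/Call helpers below move the target into CTR
// (mtctr) and then branch through it (bctr/bctrl).
// Illustrative use: mov(ip, Operand(target_address)); Jump(ip);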
void MacroAssembler::Jump(Register target) {
  mtctr(target);
  bctr();
}


void MacroAssembler::JumpToJSEntry(Register target) {
  Move(ip, target);
  Jump(ip);
}


void MacroAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
                          Condition cond, CRegister cr) {
  Label skip;

  if (cond != al) b(NegateCondition(cond), &skip, cr);

  DCHECK(rmode == RelocInfo::CODE_TARGET || rmode == RelocInfo::RUNTIME_ENTRY);

  mov(ip, Operand(target, rmode));
  mtctr(ip);
  bctr();

  bind(&skip);
}


void MacroAssembler::Jump(Address target, RelocInfo::Mode rmode, Condition cond,
                          CRegister cr) {
  DCHECK(!RelocInfo::IsCodeTarget(rmode));
  Jump(reinterpret_cast<intptr_t>(target), rmode, cond, cr);
}


void MacroAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
                          Condition cond) {
  DCHECK(RelocInfo::IsCodeTarget(rmode));
  // 'code' is always generated ppc code, never THUMB code
  AllowDeferredHandleDereference embedding_raw_address;
  Jump(reinterpret_cast<intptr_t>(code.location()), rmode, cond);
}


int MacroAssembler::CallSize(Register target) { return 2 * kInstrSize; }


void MacroAssembler::Call(Register target) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  Label start;
  bind(&start);

  // branch via link register and set LK bit for return point
  mtctr(target);
  bctrl();

  DCHECK_EQ(CallSize(target), SizeOfCodeGeneratedSince(&start));
}


void MacroAssembler::CallJSEntry(Register target) {
  DCHECK(target.is(ip));
  Call(target);
}


int MacroAssembler::CallSize(Address target, RelocInfo::Mode rmode,
                             Condition cond) {
  Operand mov_operand = Operand(reinterpret_cast<intptr_t>(target), rmode);
  return (2 + instructions_required_for_mov(ip, mov_operand)) * kInstrSize;
}


int MacroAssembler::CallSizeNotPredictableCodeSize(Address target,
                                                   RelocInfo::Mode rmode,
                                                   Condition cond) {
  return (2 + kMovInstructionsNoConstantPool) * kInstrSize;
}


void MacroAssembler::Call(Address target, RelocInfo::Mode rmode,
                          Condition cond) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  DCHECK(cond == al);

#ifdef DEBUG
  // Check the expected size before generating code to ensure we assume the same
  // constant pool availability (e.g., whether constant pool is full or not).
  int expected_size = CallSize(target, rmode, cond);
  Label start;
  bind(&start);
#endif
  // This can likely be optimized to make use of bc() with 24bit relative
  //
  // RecordRelocInfo(x.rmode_, x.imm_);
  // bc( BA, .... offset, LKset);
  //

  mov(ip, Operand(reinterpret_cast<intptr_t>(target), rmode));
  mtctr(ip);
  bctrl();

  DCHECK_EQ(expected_size, SizeOfCodeGeneratedSince(&start));
}


int MacroAssembler::CallSize(Handle<Code> code, RelocInfo::Mode rmode,
                             TypeFeedbackId ast_id, Condition cond) {
  AllowDeferredHandleDereference using_raw_address;
  return CallSize(reinterpret_cast<Address>(code.location()), rmode, cond);
}


void MacroAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
                          TypeFeedbackId ast_id, Condition cond) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  DCHECK(RelocInfo::IsCodeTarget(rmode));

#ifdef DEBUG
  // Check the expected size before generating code to ensure we assume the same
  // constant pool availability (e.g., whether constant pool is full or not).
  int expected_size = CallSize(code, rmode, ast_id, cond);
  Label start;
  bind(&start);
#endif

  if (rmode == RelocInfo::CODE_TARGET && !ast_id.IsNone()) {
    SetRecordedAstId(ast_id);
    rmode = RelocInfo::CODE_TARGET_WITH_ID;
  }
  AllowDeferredHandleDereference using_raw_address;
  Call(reinterpret_cast<Address>(code.location()), rmode, cond);
  DCHECK_EQ(expected_size, SizeOfCodeGeneratedSince(&start));
}


void MacroAssembler::Drop(int count) {
  if (count > 0) {
    Add(sp, sp, count * kPointerSize, r0);
  }
}

void MacroAssembler::Drop(Register count, Register scratch) {
  ShiftLeftImm(scratch, count, Operand(kPointerSizeLog2));
  add(sp, sp, scratch);
}

void MacroAssembler::Call(Label* target) { b(target, SetLK); }


void MacroAssembler::Push(Handle<Object> handle) {
  mov(r0, Operand(handle));
  push(r0);
}


void MacroAssembler::Move(Register dst, Handle<Object> value) {
  mov(dst, Operand(value));
}


void MacroAssembler::Move(Register dst, Register src, Condition cond) {
  DCHECK(cond == al);
  if (!dst.is(src)) {
    mr(dst, src);
  }
}


void MacroAssembler::Move(DoubleRegister dst, DoubleRegister src) {
  if (!dst.is(src)) {
    fmr(dst, src);
  }
}


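// MultiPush stores the registers selected in 'regs' in descending register-
// code order, so the lowest-numbered register ends up at the lowest address
// (the final value of 'location'); MultiPop below is the exact inverse.
// Illustrative: MultiPush(r3.bit() | r4.bit()) leaves r3 at [location] and
// r4 at [location + kPointerSize].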
void MacroAssembler::MultiPush(RegList regs, Register location) {
  int16_t num_to_push = NumberOfBitsSet(regs);
  int16_t stack_offset = num_to_push * kPointerSize;

  subi(location, location, Operand(stack_offset));
  for (int16_t i = Register::kNumRegisters - 1; i >= 0; i--) {
    if ((regs & (1 << i)) != 0) {
      stack_offset -= kPointerSize;
      StoreP(ToRegister(i), MemOperand(location, stack_offset));
    }
  }
}


void MacroAssembler::MultiPop(RegList regs, Register location) {
  int16_t stack_offset = 0;

  for (int16_t i = 0; i < Register::kNumRegisters; i++) {
    if ((regs & (1 << i)) != 0) {
      LoadP(ToRegister(i), MemOperand(location, stack_offset));
      stack_offset += kPointerSize;
    }
  }
  addi(location, location, Operand(stack_offset));
}


void MacroAssembler::MultiPushDoubles(RegList dregs, Register location) {
  int16_t num_to_push = NumberOfBitsSet(dregs);
  int16_t stack_offset = num_to_push * kDoubleSize;

  subi(location, location, Operand(stack_offset));
  for (int16_t i = DoubleRegister::kNumRegisters - 1; i >= 0; i--) {
    if ((dregs & (1 << i)) != 0) {
      DoubleRegister dreg = DoubleRegister::from_code(i);
      stack_offset -= kDoubleSize;
      stfd(dreg, MemOperand(location, stack_offset));
    }
  }
}


void MacroAssembler::MultiPopDoubles(RegList dregs, Register location) {
  int16_t stack_offset = 0;

  for (int16_t i = 0; i < DoubleRegister::kNumRegisters; i++) {
    if ((dregs & (1 << i)) != 0) {
      DoubleRegister dreg = DoubleRegister::from_code(i);
      lfd(dreg, MemOperand(location, stack_offset));
      stack_offset += kDoubleSize;
    }
  }
  addi(location, location, Operand(stack_offset));
}


void MacroAssembler::LoadRoot(Register destination, Heap::RootListIndex index,
                              Condition cond) {
  DCHECK(cond == al);
  LoadP(destination, MemOperand(kRootRegister, index << kPointerSizeLog2), r0);
}


void MacroAssembler::StoreRoot(Register source, Heap::RootListIndex index,
                               Condition cond) {
  DCHECK(Heap::RootCanBeWrittenAfterInitialization(index));
  DCHECK(cond == al);
  StoreP(source, MemOperand(kRootRegister, index << kPointerSizeLog2), r0);
}


void MacroAssembler::InNewSpace(Register object, Register scratch,
                                Condition cond, Label* branch) {
  DCHECK(cond == eq || cond == ne);
  CheckPageFlag(object, scratch, MemoryChunk::kIsInNewSpaceMask, cond, branch);
}


void MacroAssembler::RecordWriteField(
    Register object, int offset, Register value, Register dst,
    LinkRegisterStatus lr_status, SaveFPRegsMode save_fp,
    RememberedSetAction remembered_set_action, SmiCheck smi_check,
    PointersToHereCheck pointers_to_here_check_for_value) {
  // First, check if a write barrier is even needed. The tests below
  // catch stores of Smis.
  Label done;

  // Skip barrier if writing a smi.
  if (smi_check == INLINE_SMI_CHECK) {
    JumpIfSmi(value, &done);
  }

  // Although the object register is tagged, the offset is relative to the
  // start of the object, so the offset must be a multiple of kPointerSize.
  DCHECK(IsAligned(offset, kPointerSize));

  Add(dst, object, offset - kHeapObjectTag, r0);
  if (emit_debug_code()) {
    Label ok;
    andi(r0, dst, Operand((1 << kPointerSizeLog2) - 1));
    beq(&ok, cr0);
    stop("Unaligned cell in write barrier");
    bind(&ok);
  }

  RecordWrite(object, dst, value, lr_status, save_fp, remembered_set_action,
              OMIT_SMI_CHECK, pointers_to_here_check_for_value);

  bind(&done);

  // Clobber clobbered input registers when running with the debug-code flag
  // turned on to provoke errors.
  if (emit_debug_code()) {
    mov(value, Operand(bit_cast<intptr_t>(kZapValue + 4)));
    mov(dst, Operand(bit_cast<intptr_t>(kZapValue + 8)));
  }
}


// Will clobber 4 registers: object, map, dst, ip.  The
// register 'object' contains a heap object pointer.
void MacroAssembler::RecordWriteForMap(Register object, Register map,
                                       Register dst,
                                       LinkRegisterStatus lr_status,
                                       SaveFPRegsMode fp_mode) {
  if (emit_debug_code()) {
    LoadP(dst, FieldMemOperand(map, HeapObject::kMapOffset));
    Cmpi(dst, Operand(isolate()->factory()->meta_map()), r0);
    Check(eq, kWrongAddressOrValuePassedToRecordWrite);
  }

  if (!FLAG_incremental_marking) {
    return;
  }

  if (emit_debug_code()) {
    LoadP(ip, FieldMemOperand(object, HeapObject::kMapOffset));
    cmp(ip, map);
    Check(eq, kWrongAddressOrValuePassedToRecordWrite);
  }

  Label done;

  // A single check of the map's pages interesting flag suffices, since it is
  // only set during incremental collection, and then it's also guaranteed that
  // the from object's page's interesting flag is also set.  This optimization
  // relies on the fact that maps can never be in new space.
  CheckPageFlag(map,
                map,  // Used as scratch.
                MemoryChunk::kPointersToHereAreInterestingMask, eq, &done);

  addi(dst, object, Operand(HeapObject::kMapOffset - kHeapObjectTag));
  if (emit_debug_code()) {
    Label ok;
    andi(r0, dst, Operand((1 << kPointerSizeLog2) - 1));
    beq(&ok, cr0);
    stop("Unaligned cell in write barrier");
    bind(&ok);
  }

  // Record the actual write.
  if (lr_status == kLRHasNotBeenSaved) {
    mflr(r0);
    push(r0);
  }
  RecordWriteStub stub(isolate(), object, map, dst, OMIT_REMEMBERED_SET,
                       fp_mode);
  CallStub(&stub);
  if (lr_status == kLRHasNotBeenSaved) {
    pop(r0);
    mtlr(r0);
  }

  bind(&done);

  // Count number of write barriers in generated code.
  isolate()->counters()->write_barriers_static()->Increment();
  IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1, ip, dst);

  // Clobber clobbered registers when running with the debug-code flag
  // turned on to provoke errors.
  if (emit_debug_code()) {
    mov(dst, Operand(bit_cast<intptr_t>(kZapValue + 12)));
    mov(map, Operand(bit_cast<intptr_t>(kZapValue + 16)));
  }
}


// Will clobber 4 registers: object, address, scratch, ip.  The
// register 'object' contains a heap object pointer.  The heap object
// tag is shifted away.
void MacroAssembler::RecordWrite(
    Register object, Register address, Register value,
    LinkRegisterStatus lr_status, SaveFPRegsMode fp_mode,
    RememberedSetAction remembered_set_action, SmiCheck smi_check,
    PointersToHereCheck pointers_to_here_check_for_value) {
  DCHECK(!object.is(value));
  if (emit_debug_code()) {
    LoadP(r0, MemOperand(address));
    cmp(r0, value);
    Check(eq, kWrongAddressOrValuePassedToRecordWrite);
  }

  if (remembered_set_action == OMIT_REMEMBERED_SET &&
      !FLAG_incremental_marking) {
    return;
  }

  // First, check if a write barrier is even needed. The tests below
  // catch stores of smis and stores into the young generation.
  Label done;

  if (smi_check == INLINE_SMI_CHECK) {
    JumpIfSmi(value, &done);
  }

  if (pointers_to_here_check_for_value != kPointersToHereAreAlwaysInteresting) {
    CheckPageFlag(value,
                  value,  // Used as scratch.
                  MemoryChunk::kPointersToHereAreInterestingMask, eq, &done);
  }
  CheckPageFlag(object,
                value,  // Used as scratch.
                MemoryChunk::kPointersFromHereAreInterestingMask, eq, &done);

  // Record the actual write.
  if (lr_status == kLRHasNotBeenSaved) {
    mflr(r0);
    push(r0);
  }
  RecordWriteStub stub(isolate(), object, value, address, remembered_set_action,
                       fp_mode);
  CallStub(&stub);
  if (lr_status == kLRHasNotBeenSaved) {
    pop(r0);
    mtlr(r0);
  }

  bind(&done);

  // Count number of write barriers in generated code.
  isolate()->counters()->write_barriers_static()->Increment();
  IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1, ip,
                   value);

  // Clobber clobbered registers when running with the debug-code flag
  // turned on to provoke errors.
  if (emit_debug_code()) {
    mov(address, Operand(bit_cast<intptr_t>(kZapValue + 12)));
    mov(value, Operand(bit_cast<intptr_t>(kZapValue + 16)));
  }
}

void MacroAssembler::RecordWriteCodeEntryField(Register js_function,
                                               Register code_entry,
                                               Register scratch) {
  const int offset = JSFunction::kCodeEntryOffset;

  // Since a code entry (value) is always in old space, we don't need to update
  // remembered set. If incremental marking is off, there is nothing for us to
  // do.
  if (!FLAG_incremental_marking) return;

  DCHECK(js_function.is(r4));
  DCHECK(code_entry.is(r7));
  DCHECK(scratch.is(r8));
  AssertNotSmi(js_function);

  if (emit_debug_code()) {
    addi(scratch, js_function, Operand(offset - kHeapObjectTag));
    LoadP(ip, MemOperand(scratch));
    cmp(ip, code_entry);
    Check(eq, kWrongAddressOrValuePassedToRecordWrite);
  }

  // First, check if a write barrier is even needed. The tests below
  // catch stores of Smis and stores into young gen.
  Label done;

  CheckPageFlag(code_entry, scratch,
                MemoryChunk::kPointersToHereAreInterestingMask, eq, &done);
  CheckPageFlag(js_function, scratch,
                MemoryChunk::kPointersFromHereAreInterestingMask, eq, &done);

  const Register dst = scratch;
  addi(dst, js_function, Operand(offset - kHeapObjectTag));

  // Save caller-saved registers.  js_function and code_entry are in the
  // caller-saved register list.
  DCHECK(kJSCallerSaved & js_function.bit());
  DCHECK(kJSCallerSaved & code_entry.bit());
  mflr(r0);
  MultiPush(kJSCallerSaved | r0.bit());

  int argument_count = 3;
  PrepareCallCFunction(argument_count, code_entry);

  mr(r3, js_function);
  mr(r4, dst);
  mov(r5, Operand(ExternalReference::isolate_address(isolate())));

  {
    AllowExternalCallThatCantCauseGC scope(this);
    CallCFunction(
        ExternalReference::incremental_marking_record_write_code_entry_function(
            isolate()),
        argument_count);
  }

  // Restore caller-saved registers (including js_function and code_entry).
  MultiPop(kJSCallerSaved | r0.bit());
  mtlr(r0);

  bind(&done);
}

void MacroAssembler::RememberedSetHelper(Register object,  // For debug tests.
                                         Register address, Register scratch,
                                         SaveFPRegsMode fp_mode,
                                         RememberedSetFinalAction and_then) {
  Label done;
  if (emit_debug_code()) {
    Label ok;
    JumpIfNotInNewSpace(object, scratch, &ok);
    stop("Remembered set pointer is in new space");
    bind(&ok);
  }
  // Load store buffer top.
  ExternalReference store_buffer =
      ExternalReference::store_buffer_top(isolate());
  mov(ip, Operand(store_buffer));
  LoadP(scratch, MemOperand(ip));
  // Store pointer to buffer and increment buffer top.
  StoreP(address, MemOperand(scratch));
  addi(scratch, scratch, Operand(kPointerSize));
  // Write back new top of buffer.
  StoreP(scratch, MemOperand(ip));
  // Call stub on end of buffer.
  // Check for end of buffer.
  TestBitMask(scratch, StoreBuffer::kStoreBufferMask, r0);

  if (and_then == kFallThroughAtEnd) {
    bne(&done, cr0);
  } else {
    DCHECK(and_then == kReturnAtEnd);
    Ret(ne, cr0);
  }
  mflr(r0);
  push(r0);
  StoreBufferOverflowStub store_buffer_overflow(isolate(), fp_mode);
  CallStub(&store_buffer_overflow);
  pop(r0);
  mtlr(r0);
  bind(&done);
  if (and_then == kReturnAtEnd) {
    Ret();
  }
}

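// In the frame construction helpers below, fp_delta is the number of slots
// pushed below the saved caller fp, so addi(fp, sp, fp_delta * kPointerSize)
// leaves fp pointing at the slot that holds the caller's fp.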
void MacroAssembler::PushCommonFrame(Register marker_reg) {
  int fp_delta = 0;
  mflr(r0);
  if (FLAG_enable_embedded_constant_pool) {
    if (marker_reg.is_valid()) {
      Push(r0, fp, kConstantPoolRegister, marker_reg);
      fp_delta = 2;
    } else {
      Push(r0, fp, kConstantPoolRegister);
      fp_delta = 1;
    }
  } else {
    if (marker_reg.is_valid()) {
      Push(r0, fp, marker_reg);
      fp_delta = 1;
    } else {
      Push(r0, fp);
      fp_delta = 0;
    }
  }
  addi(fp, sp, Operand(fp_delta * kPointerSize));
}

void MacroAssembler::PopCommonFrame(Register marker_reg) {
  if (FLAG_enable_embedded_constant_pool) {
    if (marker_reg.is_valid()) {
      Pop(r0, fp, kConstantPoolRegister, marker_reg);
    } else {
      Pop(r0, fp, kConstantPoolRegister);
    }
  } else {
    if (marker_reg.is_valid()) {
      Pop(r0, fp, marker_reg);
    } else {
      Pop(r0, fp);
    }
  }
  mtlr(r0);
}

void MacroAssembler::PushStandardFrame(Register function_reg) {
  int fp_delta = 0;
  mflr(r0);
  if (FLAG_enable_embedded_constant_pool) {
    if (function_reg.is_valid()) {
      Push(r0, fp, kConstantPoolRegister, cp, function_reg);
      fp_delta = 3;
    } else {
      Push(r0, fp, kConstantPoolRegister, cp);
      fp_delta = 2;
    }
  } else {
    if (function_reg.is_valid()) {
      Push(r0, fp, cp, function_reg);
      fp_delta = 2;
    } else {
      Push(r0, fp, cp);
      fp_delta = 1;
    }
  }
  addi(fp, sp, Operand(fp_delta * kPointerSize));
}

void MacroAssembler::RestoreFrameStateForTailCall() {
  if (FLAG_enable_embedded_constant_pool) {
    LoadP(kConstantPoolRegister,
          MemOperand(fp, StandardFrameConstants::kConstantPoolOffset));
    set_constant_pool_available(false);
  }
  LoadP(r0, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
  LoadP(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
  mtlr(r0);
}

const RegList MacroAssembler::kSafepointSavedRegisters = Register::kAllocatable;
const int MacroAssembler::kNumSafepointSavedRegisters =
    Register::kNumAllocatable;

// Push and pop all registers that can hold pointers.
void MacroAssembler::PushSafepointRegisters() {
  // Safepoints expect a block of kNumSafepointRegisters values on the
  // stack, so adjust the stack for unsaved registers.
  const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
  DCHECK(num_unsaved >= 0);
  if (num_unsaved > 0) {
    subi(sp, sp, Operand(num_unsaved * kPointerSize));
  }
  MultiPush(kSafepointSavedRegisters);
}


void MacroAssembler::PopSafepointRegisters() {
  const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
  MultiPop(kSafepointSavedRegisters);
  if (num_unsaved > 0) {
    addi(sp, sp, Operand(num_unsaved * kPointerSize));
  }
}


void MacroAssembler::StoreToSafepointRegisterSlot(Register src, Register dst) {
  StoreP(src, SafepointRegisterSlot(dst));
}


void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) {
  LoadP(dst, SafepointRegisterSlot(src));
}


int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
  // The registers are pushed starting with the highest encoding,
  // which means that lowest encodings are closest to the stack pointer.
  RegList regs = kSafepointSavedRegisters;
  int index = 0;

  DCHECK(reg_code >= 0 && reg_code < kNumRegisters);

  for (int16_t i = 0; i < reg_code; i++) {
    if ((regs & (1 << i)) != 0) {
      index++;
    }
  }

  return index;
}


MemOperand MacroAssembler::SafepointRegisterSlot(Register reg) {
  return MemOperand(sp, SafepointRegisterStackIndex(reg.code()) * kPointerSize);
}


MemOperand MacroAssembler::SafepointRegistersAndDoublesSlot(Register reg) {
  // General purpose registers are pushed last on the stack.
  const RegisterConfiguration* config = RegisterConfiguration::Crankshaft();
  int doubles_size = config->num_allocatable_double_registers() * kDoubleSize;
  int register_offset = SafepointRegisterStackIndex(reg.code()) * kPointerSize;
  return MemOperand(sp, doubles_size + register_offset);
}


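// Subtracting +0.0 leaves every finite value, infinity and quiet NaN
// unchanged under the default rounding mode, while any arithmetic on a
// signalling NaN yields a quiet NaN; CanonicalizeNaN relies on this.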
void MacroAssembler::CanonicalizeNaN(const DoubleRegister dst,
                                     const DoubleRegister src) {
  // Turn potential sNaN into qNaN.
  fsub(dst, src, kDoubleRegZero);
}

void MacroAssembler::ConvertIntToDouble(Register src, DoubleRegister dst) {
  MovIntToDouble(dst, src, r0);
  fcfid(dst, dst);
}

void MacroAssembler::ConvertUnsignedIntToDouble(Register src,
                                                DoubleRegister dst) {
  MovUnsignedIntToDouble(dst, src, r0);
  fcfid(dst, dst);
}

void MacroAssembler::ConvertIntToFloat(Register src, DoubleRegister dst) {
  MovIntToDouble(dst, src, r0);
  fcfids(dst, dst);
}

void MacroAssembler::ConvertUnsignedIntToFloat(Register src,
                                               DoubleRegister dst) {
  MovUnsignedIntToDouble(dst, src, r0);
  fcfids(dst, dst);
}

#if V8_TARGET_ARCH_PPC64
void MacroAssembler::ConvertInt64ToDouble(Register src,
                                          DoubleRegister double_dst) {
  MovInt64ToDouble(double_dst, src);
  fcfid(double_dst, double_dst);
}


void MacroAssembler::ConvertUnsignedInt64ToFloat(Register src,
                                                 DoubleRegister double_dst) {
  MovInt64ToDouble(double_dst, src);
  fcfidus(double_dst, double_dst);
}


void MacroAssembler::ConvertUnsignedInt64ToDouble(Register src,
                                                  DoubleRegister double_dst) {
  MovInt64ToDouble(double_dst, src);
  fcfidu(double_dst, double_dst);
}


void MacroAssembler::ConvertInt64ToFloat(Register src,
                                         DoubleRegister double_dst) {
  MovInt64ToDouble(double_dst, src);
  fcfids(double_dst, double_dst);
}
#endif


void MacroAssembler::ConvertDoubleToInt64(const DoubleRegister double_input,
#if !V8_TARGET_ARCH_PPC64
                                          const Register dst_hi,
#endif
                                          const Register dst,
                                          const DoubleRegister double_dst,
                                          FPRoundingMode rounding_mode) {
  if (rounding_mode == kRoundToZero) {
    fctidz(double_dst, double_input);
  } else {
    SetRoundingMode(rounding_mode);
    fctid(double_dst, double_input);
    ResetRoundingMode();
  }

  MovDoubleToInt64(
#if !V8_TARGET_ARCH_PPC64
      dst_hi,
#endif
      dst, double_dst);
}

#if V8_TARGET_ARCH_PPC64
void MacroAssembler::ConvertDoubleToUnsignedInt64(
    const DoubleRegister double_input, const Register dst,
    const DoubleRegister double_dst, FPRoundingMode rounding_mode) {
  if (rounding_mode == kRoundToZero) {
    fctiduz(double_dst, double_input);
  } else {
    SetRoundingMode(rounding_mode);
    fctidu(double_dst, double_input);
    ResetRoundingMode();
  }

  MovDoubleToInt64(dst, double_dst);
}
#endif

#if !V8_TARGET_ARCH_PPC64
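// On 32-bit PPC the 64-bit shift helpers below compose two 32-bit shifts.
// For a left shift with 0 < shift < 32:
//   dst_high = (src_high << shift) | (src_low >> (32 - shift))
//   dst_low  = src_low << shift
// and for shift >= 32 the low word moves into the high word shifted by
// (shift & 31) while the low word becomes zero. The logical and arithmetic
// right-shift pairs mirror this construction.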
void MacroAssembler::ShiftLeftPair(Register dst_low, Register dst_high,
                                   Register src_low, Register src_high,
                                   Register scratch, Register shift) {
  DCHECK(!AreAliased(dst_low, src_high, shift));
  DCHECK(!AreAliased(dst_high, src_low, shift));
  Label less_than_32;
  Label done;
  cmpi(shift, Operand(32));
  blt(&less_than_32);
  // If shift >= 32
  andi(scratch, shift, Operand(0x1f));
  slw(dst_high, src_low, scratch);
  li(dst_low, Operand::Zero());
  b(&done);
  bind(&less_than_32);
  // If shift < 32
  subfic(scratch, shift, Operand(32));
  slw(dst_high, src_high, shift);
  srw(scratch, src_low, scratch);
  orx(dst_high, dst_high, scratch);
  slw(dst_low, src_low, shift);
  bind(&done);
}

void MacroAssembler::ShiftLeftPair(Register dst_low, Register dst_high,
                                   Register src_low, Register src_high,
                                   uint32_t shift) {
  DCHECK(!AreAliased(dst_low, src_high));
  DCHECK(!AreAliased(dst_high, src_low));
  if (shift == 32) {
    Move(dst_high, src_low);
    li(dst_low, Operand::Zero());
  } else if (shift > 32) {
    shift &= 0x1f;
    slwi(dst_high, src_low, Operand(shift));
    li(dst_low, Operand::Zero());
  } else if (shift == 0) {
    Move(dst_low, src_low);
    Move(dst_high, src_high);
  } else {
    slwi(dst_high, src_high, Operand(shift));
    rlwimi(dst_high, src_low, shift, 32 - shift, 31);
    slwi(dst_low, src_low, Operand(shift));
  }
}

void MacroAssembler::ShiftRightPair(Register dst_low, Register dst_high,
                                    Register src_low, Register src_high,
                                    Register scratch, Register shift) {
  DCHECK(!AreAliased(dst_low, src_high, shift));
  DCHECK(!AreAliased(dst_high, src_low, shift));
  Label less_than_32;
  Label done;
  cmpi(shift, Operand(32));
  blt(&less_than_32);
  // If shift >= 32
  andi(scratch, shift, Operand(0x1f));
  srw(dst_low, src_high, scratch);
  li(dst_high, Operand::Zero());
  b(&done);
  bind(&less_than_32);
  // If shift < 32
  subfic(scratch, shift, Operand(32));
  srw(dst_low, src_low, shift);
  slw(scratch, src_high, scratch);
  orx(dst_low, dst_low, scratch);
  srw(dst_high, src_high, shift);
  bind(&done);
}

void MacroAssembler::ShiftRightPair(Register dst_low, Register dst_high,
                                    Register src_low, Register src_high,
                                    uint32_t shift) {
  DCHECK(!AreAliased(dst_low, src_high));
  DCHECK(!AreAliased(dst_high, src_low));
  if (shift == 32) {
    Move(dst_low, src_high);
    li(dst_high, Operand::Zero());
  } else if (shift > 32) {
    shift &= 0x1f;
    srwi(dst_low, src_high, Operand(shift));
    li(dst_high, Operand::Zero());
  } else if (shift == 0) {
    Move(dst_low, src_low);
    Move(dst_high, src_high);
  } else {
    srwi(dst_low, src_low, Operand(shift));
    rlwimi(dst_low, src_high, 32 - shift, 0, shift - 1);
    srwi(dst_high, src_high, Operand(shift));
  }
}

void MacroAssembler::ShiftRightAlgPair(Register dst_low, Register dst_high,
                                       Register src_low, Register src_high,
                                       Register scratch, Register shift) {
  DCHECK(!AreAliased(dst_low, src_high, shift));
  DCHECK(!AreAliased(dst_high, src_low, shift));
  Label less_than_32;
  Label done;
  cmpi(shift, Operand(32));
  blt(&less_than_32);
  // If shift >= 32
  andi(scratch, shift, Operand(0x1f));
  sraw(dst_low, src_high, scratch);
  srawi(dst_high, src_high, 31);
  b(&done);
  bind(&less_than_32);
  // If shift < 32
  subfic(scratch, shift, Operand(32));
  srw(dst_low, src_low, shift);
  slw(scratch, src_high, scratch);
  orx(dst_low, dst_low, scratch);
  sraw(dst_high, src_high, shift);
  bind(&done);
}

void MacroAssembler::ShiftRightAlgPair(Register dst_low, Register dst_high,
                                       Register src_low, Register src_high,
                                       uint32_t shift) {
  DCHECK(!AreAliased(dst_low, src_high));
  DCHECK(!AreAliased(dst_high, src_low));
  if (shift == 32) {
    Move(dst_low, src_high);
    srawi(dst_high, src_high, 31);
  } else if (shift > 32) {
    shift &= 0x1f;
    srawi(dst_low, src_high, shift);
    srawi(dst_high, src_high, 31);
  } else if (shift == 0) {
    Move(dst_low, src_low);
    Move(dst_high, src_high);
  } else {
    srwi(dst_low, src_low, Operand(shift));
    rlwimi(dst_low, src_high, 32 - shift, 0, shift - 1);
    srawi(dst_high, src_high, shift);
  }
}
#endif

void MacroAssembler::LoadConstantPoolPointerRegisterFromCodeTargetAddress(
    Register code_target_address) {
  lwz(kConstantPoolRegister,
      MemOperand(code_target_address,
                 Code::kConstantPoolOffset - Code::kHeaderSize));
  add(kConstantPoolRegister, kConstantPoolRegister, code_target_address);
}


void MacroAssembler::LoadConstantPoolPointerRegister(Register base,
                                                     int code_start_delta) {
  add_label_offset(kConstantPoolRegister, base, ConstantPoolPosition(),
                   code_start_delta);
}


void MacroAssembler::LoadConstantPoolPointerRegister() {
  mov_label_addr(kConstantPoolRegister, ConstantPoolPosition());
}

void MacroAssembler::StubPrologue(StackFrame::Type type, Register base,
                                  int prologue_offset) {
  {
    ConstantPoolUnavailableScope constant_pool_unavailable(this);
    mov(r11, Operand(StackFrame::TypeToMarker(type)));
    PushCommonFrame(r11);
  }
  if (FLAG_enable_embedded_constant_pool) {
    if (!base.is(no_reg)) {
      // base contains prologue address
      LoadConstantPoolPointerRegister(base, -prologue_offset);
    } else {
      LoadConstantPoolPointerRegister();
    }
    set_constant_pool_available(true);
  }
}


void MacroAssembler::Prologue(bool code_pre_aging, Register base,
                              int prologue_offset) {
  DCHECK(!base.is(no_reg));
  {
    PredictableCodeSizeScope predictable_code_size_scope(
        this, kNoCodeAgeSequenceLength);
    Assembler::BlockTrampolinePoolScope block_trampoline_pool(this);
    // The following instructions must remain together and unmodified
    // for code aging to work properly.
    if (code_pre_aging) {
      // Pre-age the code.
      // This matches the code found in PatchPlatformCodeAge()
      Code* stub = Code::GetPreAgedCodeAgeStub(isolate());
      intptr_t target = reinterpret_cast<intptr_t>(stub->instruction_start());
      // Don't use Call -- we need to preserve ip and lr
      nop();  // marker to detect sequence (see IsOld)
      mov(r3, Operand(target));
      Jump(r3);
      for (int i = 0; i < kCodeAgingSequenceNops; i++) {
        nop();
      }
    } else {
      // This matches the code found in GetNoCodeAgeSequence()
      PushStandardFrame(r4);
      for (int i = 0; i < kNoCodeAgeSequenceNops; i++) {
        nop();
      }
    }
  }
  if (FLAG_enable_embedded_constant_pool) {
    // base contains prologue address
    LoadConstantPoolPointerRegister(base, -prologue_offset);
    set_constant_pool_available(true);
  }
}

void MacroAssembler::EmitLoadFeedbackVector(Register vector) {
  LoadP(vector, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
  LoadP(vector, FieldMemOperand(vector, JSFunction::kFeedbackVectorOffset));
  LoadP(vector, FieldMemOperand(vector, Cell::kValueOffset));
}


void MacroAssembler::EnterFrame(StackFrame::Type type,
                                bool load_constant_pool_pointer_reg) {
  if (FLAG_enable_embedded_constant_pool && load_constant_pool_pointer_reg) {
    // Push type explicitly so we can leverage the constant pool.
    // This path cannot rely on ip containing code entry.
    PushCommonFrame();
    LoadConstantPoolPointerRegister();
    mov(ip, Operand(StackFrame::TypeToMarker(type)));
    push(ip);
  } else {
    mov(ip, Operand(StackFrame::TypeToMarker(type)));
    PushCommonFrame(ip);
  }
  if (type == StackFrame::INTERNAL) {
    mov(r0, Operand(CodeObject()));
    push(r0);
  }
}


int MacroAssembler::LeaveFrame(StackFrame::Type type, int stack_adjustment) {
  ConstantPoolUnavailableScope constant_pool_unavailable(this);
  // r3: preserved
  // r4: preserved
  // r5: preserved

  // Drop the execution stack down to the frame pointer and restore
  // the caller's state.
  int frame_ends;
  LoadP(r0, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
  LoadP(ip, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
  if (FLAG_enable_embedded_constant_pool) {
    LoadP(kConstantPoolRegister,
          MemOperand(fp, StandardFrameConstants::kConstantPoolOffset));
  }
  mtlr(r0);
  frame_ends = pc_offset();
  Add(sp, fp, StandardFrameConstants::kCallerSPOffset + stack_adjustment, r0);
  mr(fp, ip);
  return frame_ends;
}

void MacroAssembler::EnterBuiltinFrame(Register context, Register target,
                                       Register argc) {
  int fp_delta = 0;
  mflr(r0);
  if (FLAG_enable_embedded_constant_pool) {
    if (target.is_valid()) {
      Push(r0, fp, kConstantPoolRegister, context, target);
      fp_delta = 3;
    } else {
      Push(r0, fp, kConstantPoolRegister, context);
      fp_delta = 2;
    }
  } else {
    if (target.is_valid()) {
      Push(r0, fp, context, target);
      fp_delta = 2;
    } else {
      Push(r0, fp, context);
      fp_delta = 1;
    }
  }
  addi(fp, sp, Operand(fp_delta * kPointerSize));
  Push(argc);
}

void MacroAssembler::LeaveBuiltinFrame(Register context, Register target,
                                       Register argc) {
  Pop(argc);
  if (FLAG_enable_embedded_constant_pool) {
    if (target.is_valid()) {
      Pop(r0, fp, kConstantPoolRegister, context, target);
    } else {
      Pop(r0, fp, kConstantPoolRegister, context);
    }
  } else {
    if (target.is_valid()) {
      Pop(r0, fp, context, target);
    } else {
      Pop(r0, fp, context);
    }
  }
  mtlr(r0);
}

// ExitFrame layout (probably wrongish.. needs updating)
//
//  SP -> previousSP
//        LK reserved
//        code
//        sp_on_exit (for debug?)
// oldSP->prev SP
//        LK
//        <parameters on stack>

// Prior to calling EnterExitFrame, we've got a bunch of parameters
// on the stack that we need to wrap a real frame around.. so first
// we reserve a slot for LK and push the previous SP which is captured
// in the fp register (r31)
// Then - we buy a new frame

void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
                                    StackFrame::Type frame_type) {
  DCHECK(frame_type == StackFrame::EXIT ||
         frame_type == StackFrame::BUILTIN_EXIT);
  // Set up the frame structure on the stack.
  DCHECK_EQ(2 * kPointerSize, ExitFrameConstants::kCallerSPDisplacement);
  DCHECK_EQ(1 * kPointerSize, ExitFrameConstants::kCallerPCOffset);
  DCHECK_EQ(0 * kPointerSize, ExitFrameConstants::kCallerFPOffset);
  DCHECK(stack_space > 0);

  // This is an opportunity to build a frame to wrap
  // all of the pushes that have happened inside of V8
  // since we were called from C code

  mov(ip, Operand(StackFrame::TypeToMarker(frame_type)));
  PushCommonFrame(ip);
  // Reserve room for saved entry sp and code object.
  subi(sp, fp, Operand(ExitFrameConstants::kFixedFrameSizeFromFp));

  if (emit_debug_code()) {
    li(r8, Operand::Zero());
    StoreP(r8, MemOperand(fp, ExitFrameConstants::kSPOffset));
  }
  if (FLAG_enable_embedded_constant_pool) {
    StoreP(kConstantPoolRegister,
           MemOperand(fp, ExitFrameConstants::kConstantPoolOffset));
  }
  mov(r8, Operand(CodeObject()));
  StoreP(r8, MemOperand(fp, ExitFrameConstants::kCodeOffset));

  // Save the frame pointer and the context in top.
  mov(r8, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
  StoreP(fp, MemOperand(r8));
  mov(r8, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
  StoreP(cp, MemOperand(r8));

  // Optionally save all volatile double registers.
  if (save_doubles) {
    MultiPushDoubles(kCallerSavedDoubles);
    // Note that d0 will be accessible at
    //   fp - ExitFrameConstants::kFrameSize -
    //   kNumCallerSavedDoubles * kDoubleSize,
    // since the sp slot and code slot were pushed after the fp.
  }

  addi(sp, sp, Operand(-stack_space * kPointerSize));

  // Allocate and align the frame preparing for calling the runtime
  // function.
  const int frame_alignment = ActivationFrameAlignment();
  if (frame_alignment > kPointerSize) {
    DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
    ClearRightImm(sp, sp, Operand(WhichPowerOf2(frame_alignment)));
  }
  li(r0, Operand::Zero());
  StorePU(r0, MemOperand(sp, -kNumRequiredStackFrameSlots * kPointerSize));

  // Set the exit frame sp value to point just before the return address
  // location.
  addi(r8, sp, Operand((kStackFrameExtraParamSlot + 1) * kPointerSize));
  StoreP(r8, MemOperand(fp, ExitFrameConstants::kSPOffset));
}

int MacroAssembler::ActivationFrameAlignment() {
#if !defined(USE_SIMULATOR)
  // Running on the real platform. Use the alignment as mandated by the local
  // environment.
  // Note: This will break if we ever start generating snapshots on one PPC
  // platform for another PPC platform with a different alignment.
  return base::OS::ActivationFrameAlignment();
#else  // Simulated
  // If we are using the simulator then we should always align to the expected
  // alignment. As the simulator is used to generate snapshots we do not know
  // if the target platform will need alignment, so this is controlled from a
  // flag.
  return FLAG_sim_stack_alignment;
#endif
}


void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
                                    bool restore_context,
                                    bool argument_count_is_length) {
  ConstantPoolUnavailableScope constant_pool_unavailable(this);
  // Optionally restore all double registers.
  if (save_doubles) {
    // Calculate the stack location of the saved doubles and restore them.
    const int kNumRegs = kNumCallerSavedDoubles;
    const int offset =
        (ExitFrameConstants::kFixedFrameSizeFromFp + kNumRegs * kDoubleSize);
    addi(r6, fp, Operand(-offset));
    MultiPopDoubles(kCallerSavedDoubles, r6);
  }

  // Clear top frame.
  li(r6, Operand::Zero());
  mov(ip, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
  StoreP(r6, MemOperand(ip));

  // Restore current context from top and clear it in debug mode.
  if (restore_context) {
    mov(ip, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
    LoadP(cp, MemOperand(ip));
  }
#ifdef DEBUG
  mov(ip, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
  StoreP(r6, MemOperand(ip));
#endif

  // Tear down the exit frame, pop the arguments, and return.
  LeaveFrame(StackFrame::EXIT);

  if (argument_count.is_valid()) {
    if (!argument_count_is_length) {
      ShiftLeftImm(argument_count, argument_count, Operand(kPointerSizeLog2));
    }
    add(sp, sp, argument_count);
  }
}


void MacroAssembler::MovFromFloatResult(const DoubleRegister dst) {
  Move(dst, d1);
}


void MacroAssembler::MovFromFloatParameter(const DoubleRegister dst) {
  Move(dst, d1);
}

void MacroAssembler::PrepareForTailCall(const ParameterCount& callee_args_count,
                                        Register caller_args_count_reg,
                                        Register scratch0, Register scratch1) {
#if DEBUG
  if (callee_args_count.is_reg()) {
    DCHECK(!AreAliased(callee_args_count.reg(), caller_args_count_reg, scratch0,
                       scratch1));
  } else {
    DCHECK(!AreAliased(caller_args_count_reg, scratch0, scratch1));
  }
#endif

  // Calculate the end of destination area where we will put the arguments
  // after we drop current frame. We add kPointerSize to count the receiver
  // argument which is not included into formal parameters count.
  Register dst_reg = scratch0;
  ShiftLeftImm(dst_reg, caller_args_count_reg, Operand(kPointerSizeLog2));
  add(dst_reg, fp, dst_reg);
  addi(dst_reg, dst_reg,
       Operand(StandardFrameConstants::kCallerSPOffset + kPointerSize));

  Register src_reg = caller_args_count_reg;
  // Calculate the end of source area. +kPointerSize is for the receiver.
  if (callee_args_count.is_reg()) {
    ShiftLeftImm(src_reg, callee_args_count.reg(), Operand(kPointerSizeLog2));
    add(src_reg, sp, src_reg);
    addi(src_reg, src_reg, Operand(kPointerSize));
  } else {
    Add(src_reg, sp, (callee_args_count.immediate() + 1) * kPointerSize, r0);
  }

  if (FLAG_debug_code) {
    cmpl(src_reg, dst_reg);
    Check(lt, kStackAccessBelowStackPointer);
  }

  // Restore caller's frame pointer and return address now as they will be
  // overwritten by the copying loop.
  RestoreFrameStateForTailCall();

  // Now copy callee arguments to the caller frame going backwards to avoid
  // callee arguments corruption (source and destination areas could overlap).

  // Both src_reg and dst_reg are pointing to the word after the one to copy,
  // so they must be pre-decremented in the loop.
  Register tmp_reg = scratch1;
  Label loop;
  if (callee_args_count.is_reg()) {
    addi(tmp_reg, callee_args_count.reg(), Operand(1));  // +1 for receiver
  } else {
    mov(tmp_reg, Operand(callee_args_count.immediate() + 1));
  }
  mtctr(tmp_reg);
  bind(&loop);
  LoadPU(tmp_reg, MemOperand(src_reg, -kPointerSize));
  StorePU(tmp_reg, MemOperand(dst_reg, -kPointerSize));
  bdnz(&loop);

  // Leave current frame.
  mr(sp, dst_reg);
}

void MacroAssembler::InvokePrologue(const ParameterCount& expected,
                                    const ParameterCount& actual, Label* done,
                                    bool* definitely_mismatches,
                                    InvokeFlag flag,
                                    const CallWrapper& call_wrapper) {
  bool definitely_matches = false;
  *definitely_mismatches = false;
  Label regular_invoke;

  // Check whether the expected and actual arguments count match. If not,
  // setup registers according to contract with ArgumentsAdaptorTrampoline:
  //  r3: actual arguments count
  //  r4: function (passed through to callee)
  //  r5: expected arguments count

  // The code below is made a lot easier because the calling code already sets
  // up actual and expected registers according to the contract if values are
  // passed in registers.

  // ARM has some sanity checks as per below; consider adding them for PPC.
  //  DCHECK(actual.is_immediate() || actual.reg().is(r3));
  //  DCHECK(expected.is_immediate() || expected.reg().is(r5));

  if (expected.is_immediate()) {
    DCHECK(actual.is_immediate());
    mov(r3, Operand(actual.immediate()));
    if (expected.immediate() == actual.immediate()) {
      definitely_matches = true;
    } else {
      const int sentinel = SharedFunctionInfo::kDontAdaptArgumentsSentinel;
      if (expected.immediate() == sentinel) {
        // Don't worry about adapting arguments for builtins that
        // don't want that done. Skip adaptation code by making it look
        // like we have a match between expected and actual number of
        // arguments.
        definitely_matches = true;
      } else {
        *definitely_mismatches = true;
        mov(r5, Operand(expected.immediate()));
      }
    }
  } else {
    if (actual.is_immediate()) {
      mov(r3, Operand(actual.immediate()));
      cmpi(expected.reg(), Operand(actual.immediate()));
      beq(&regular_invoke);
    } else {
      cmp(expected.reg(), actual.reg());
      beq(&regular_invoke);
    }
  }

  if (!definitely_matches) {
    Handle<Code> adaptor = isolate()->builtins()->ArgumentsAdaptorTrampoline();
    if (flag == CALL_FUNCTION) {
      call_wrapper.BeforeCall(CallSize(adaptor));
      Call(adaptor);
      call_wrapper.AfterCall();
      if (!*definitely_mismatches) {
        b(done);
      }
    } else {
      Jump(adaptor, RelocInfo::CODE_TARGET);
    }
    bind(&regular_invoke);
  }
}

void MacroAssembler::CheckDebugHook(Register fun, Register new_target,
                                    const ParameterCount& expected,
                                    const ParameterCount& actual) {
  Label skip_hook;
  ExternalReference debug_hook_active =
      ExternalReference::debug_hook_on_function_call_address(isolate());
  mov(r7, Operand(debug_hook_active));
  LoadByte(r7, MemOperand(r7), r0);
  extsb(r7, r7);
  CmpSmiLiteral(r7, Smi::kZero, r0);
  beq(&skip_hook);
  {
    FrameScope frame(this,
                     has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
    if (expected.is_reg()) {
      SmiTag(expected.reg());
      Push(expected.reg());
    }
    if (actual.is_reg()) {
      SmiTag(actual.reg());
      Push(actual.reg());
    }
    if (new_target.is_valid()) {
      Push(new_target);
    }
    Push(fun, fun);
    CallRuntime(Runtime::kDebugOnFunctionCall);
    Pop(fun);
    if (new_target.is_valid()) {
      Pop(new_target);
    }
    if (actual.is_reg()) {
      Pop(actual.reg());
      SmiUntag(actual.reg());
    }
    if (expected.is_reg()) {
      Pop(expected.reg());
      SmiUntag(expected.reg());
    }
  }
  bind(&skip_hook);
}


void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
                                        const ParameterCount& expected,
                                        const ParameterCount& actual,
                                        InvokeFlag flag,
                                        const CallWrapper& call_wrapper) {
  // You can't call a function without a valid frame.
  DCHECK(flag == JUMP_FUNCTION || has_frame());
  DCHECK(function.is(r4));
  DCHECK_IMPLIES(new_target.is_valid(), new_target.is(r6));

  if (call_wrapper.NeedsDebugHookCheck()) {
    CheckDebugHook(function, new_target, expected, actual);
  }

  // Clear the new.target register if not given.
  if (!new_target.is_valid()) {
    LoadRoot(r6, Heap::kUndefinedValueRootIndex);
  }

  Label done;
  bool definitely_mismatches = false;
  InvokePrologue(expected, actual, &done, &definitely_mismatches, flag,
                 call_wrapper);
  if (!definitely_mismatches) {
    // We call indirectly through the code field in the function to
    // allow recompilation to take effect without changing any of the
    // call sites.
    Register code = ip;
    LoadP(code, FieldMemOperand(function, JSFunction::kCodeEntryOffset));
    if (flag == CALL_FUNCTION) {
      call_wrapper.BeforeCall(CallSize(code));
      CallJSEntry(code);
      call_wrapper.AfterCall();
    } else {
      DCHECK(flag == JUMP_FUNCTION);
      JumpToJSEntry(code);
    }

    // Continue here if InvokePrologue does handle the invocation due to
    // mismatched parameter counts.
    bind(&done);
  }
}


void MacroAssembler::InvokeFunction(Register fun, Register new_target,
                                    const ParameterCount& actual,
                                    InvokeFlag flag,
                                    const CallWrapper& call_wrapper) {
  // You can't call a function without a valid frame.
  DCHECK(flag == JUMP_FUNCTION || has_frame());

  // Contract with called JS functions requires that function is passed in r4.
  DCHECK(fun.is(r4));

  Register expected_reg = r5;
  Register temp_reg = r7;

  LoadP(temp_reg, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
  LoadP(cp, FieldMemOperand(r4, JSFunction::kContextOffset));
  LoadWordArith(expected_reg,
                FieldMemOperand(
                    temp_reg, SharedFunctionInfo::kFormalParameterCountOffset));
#if !defined(V8_TARGET_ARCH_PPC64)
  SmiUntag(expected_reg);
#endif

  ParameterCount expected(expected_reg);
  InvokeFunctionCode(fun, new_target, expected, actual, flag, call_wrapper);
}


void MacroAssembler::InvokeFunction(Register function,
                                    const ParameterCount& expected,
                                    const ParameterCount& actual,
                                    InvokeFlag flag,
                                    const CallWrapper& call_wrapper) {
  // You can't call a function without a valid frame.
  DCHECK(flag == JUMP_FUNCTION || has_frame());

  // Contract with called JS functions requires that function is passed in r4.
  DCHECK(function.is(r4));

  // Get the function and setup the context.
  LoadP(cp, FieldMemOperand(r4, JSFunction::kContextOffset));

  InvokeFunctionCode(r4, no_reg, expected, actual, flag, call_wrapper);
}


void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
                                    const ParameterCount& expected,
                                    const ParameterCount& actual,
                                    InvokeFlag flag,
                                    const CallWrapper& call_wrapper) {
  Move(r4, function);
  InvokeFunction(r4, expected, actual, flag, call_wrapper);
}


void MacroAssembler::IsObjectJSStringType(Register object, Register scratch,
                                          Label* fail) {
  DCHECK(kNotStringTag != 0);

  LoadP(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
  lbz(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
  andi(r0, scratch, Operand(kIsNotStringMask));
  bne(fail, cr0);
}


void MacroAssembler::IsObjectNameType(Register object, Register scratch,
                                      Label* fail) {
  LoadP(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
  lbz(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
  cmpi(scratch, Operand(LAST_NAME_TYPE));
  bgt(fail);
}


void MacroAssembler::MaybeDropFrames() {
  // Check whether we need to drop frames to restart a function on the stack.
  ExternalReference restart_fp =
      ExternalReference::debug_restart_fp_address(isolate());
  mov(r4, Operand(restart_fp));
  LoadWordArith(r4, MemOperand(r4));
  cmpi(r4, Operand::Zero());
  Jump(isolate()->builtins()->FrameDropperTrampoline(), RelocInfo::CODE_TARGET,
       ne);
}

void MacroAssembler::PushStackHandler() {
  // Adjust this code if not the case.
  STATIC_ASSERT(StackHandlerConstants::kSize == 1 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);

  // Link the current handler as the next handler.
  // Preserve r3-r7.
  mov(r8, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
  LoadP(r0, MemOperand(r8));
  push(r0);

  // Set this new handler as the current one.
  StoreP(sp, MemOperand(r8));
}


void MacroAssembler::PopStackHandler() {
  STATIC_ASSERT(StackHandlerConstants::kSize == 1 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);

  pop(r4);
  mov(ip, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
  StoreP(r4, MemOperand(ip));
}


// Compute the hash code from the untagged key.  This must be kept in sync with
// ComputeIntegerHash in utils.h and KeyedLoadGenericStub in
// code-stub-hydrogen.cc
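// In C-like form (illustrative), the code below computes:
//   hash = key ^ seed;
//   hash = ~hash + (hash << 15);
//   hash ^= hash >> 12;
//   hash += hash << 2;
//   hash ^= hash >> 4;
//   hash *= 2057;  // emitted as hash + (hash << 3) + (hash << 11)
//   hash ^= hash >> 16;
//   return hash & 0x3fffffff;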
1596void MacroAssembler::GetNumberHash(Register t0, Register scratch) {
1597  // First of all we assign the hash seed to scratch.
1598  LoadRoot(scratch, Heap::kHashSeedRootIndex);
1599  SmiUntag(scratch);
1600
1601  // Xor original key with a seed.
1602  xor_(t0, t0, scratch);
1603
1604  // Compute the hash code from the untagged key.  This must be kept in sync
1605  // with ComputeIntegerHash in utils.h.
1606  //
1607  // hash = ~hash + (hash << 15);
1608  notx(scratch, t0);
1609  slwi(t0, t0, Operand(15));
1610  add(t0, scratch, t0);
1611  // hash = hash ^ (hash >> 12);
1612  srwi(scratch, t0, Operand(12));
1613  xor_(t0, t0, scratch);
1614  // hash = hash + (hash << 2);
1615  slwi(scratch, t0, Operand(2));
1616  add(t0, t0, scratch);
1617  // hash = hash ^ (hash >> 4);
1618  srwi(scratch, t0, Operand(4));
1619  xor_(t0, t0, scratch);
1620  // hash = hash * 2057;
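  // 2057 == 1 + (1 << 3) + (1 << 11), so the multiply is strength-reduced to
  // shifts and adds; the original hash is kept in r0 for the second shift.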
1621  mr(r0, t0);
1622  slwi(scratch, t0, Operand(3));
1623  add(t0, t0, scratch);
1624  slwi(scratch, r0, Operand(11));
1625  add(t0, t0, scratch);
1626  // hash = hash ^ (hash >> 16);
1627  srwi(scratch, t0, Operand(16));
1628  xor_(t0, t0, scratch);
1629  // hash & 0x3fffffff
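  // (ExtractBitRange keeps bits 29..0, i.e. the low 30 bits, so the result
  // always fits in a Smi.)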
1630  ExtractBitRange(t0, t0, 29, 0);
1631}
1632
1633void MacroAssembler::Allocate(int object_size, Register result,
1634                              Register scratch1, Register scratch2,
1635                              Label* gc_required, AllocationFlags flags) {
1636  DCHECK(object_size <= kMaxRegularHeapObjectSize);
1637  DCHECK((flags & ALLOCATION_FOLDED) == 0);
1638  if (!FLAG_inline_new) {
1639    if (emit_debug_code()) {
1640      // Trash the registers to simulate an allocation failure.
1641      li(result, Operand(0x7091));
1642      li(scratch1, Operand(0x7191));
1643      li(scratch2, Operand(0x7291));
1644    }
1645    b(gc_required);
1646    return;
1647  }
1648
1649  DCHECK(!AreAliased(result, scratch1, scratch2, ip));
1650
1651  // Make object size into bytes.
1652  if ((flags & SIZE_IN_WORDS) != 0) {
1653    object_size *= kPointerSize;
1654  }
1655  DCHECK_EQ(0, static_cast<int>(object_size & kObjectAlignmentMask));
1656
1657  // Check relative positions of allocation top and limit addresses.
1658  ExternalReference allocation_top =
1659      AllocationUtils::GetAllocationTopReference(isolate(), flags);
1660  ExternalReference allocation_limit =
1661      AllocationUtils::GetAllocationLimitReference(isolate(), flags);
1662
1663  intptr_t top = reinterpret_cast<intptr_t>(allocation_top.address());
1664  intptr_t limit = reinterpret_cast<intptr_t>(allocation_limit.address());
1665  DCHECK((limit - top) == kPointerSize);
1666
1667  // Set up allocation top address register.
1668  Register top_address = scratch1;
1669  // This code stores a temporary value in ip. This is OK, as the code below
1670  // does not need ip for implicit literal generation.
1671  Register alloc_limit = ip;
1672  Register result_end = scratch2;
1673  mov(top_address, Operand(allocation_top));
1674
1675  if ((flags & RESULT_CONTAINS_TOP) == 0) {
1676    // Load allocation top into result and allocation limit into ip.
1677    LoadP(result, MemOperand(top_address));
1678    LoadP(alloc_limit, MemOperand(top_address, kPointerSize));
1679  } else {
1680    if (emit_debug_code()) {
1681      // Assert that result actually contains top on entry.
1682      LoadP(alloc_limit, MemOperand(top_address));
1683      cmp(result, alloc_limit);
1684      Check(eq, kUnexpectedAllocationTop);
1685    }
1686    // Load allocation limit. Result already contains allocation top.
1687    LoadP(alloc_limit, MemOperand(top_address, limit - top));
1688  }
1689
1690  if ((flags & DOUBLE_ALIGNMENT) != 0) {
1691    // Align the next allocation. Storing the filler map without checking top is
1692    // safe in new-space because the limit of the heap is aligned there.
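    // On 32-bit targets (the #else branch below), if top is only
    // kPointerSize-aligned, a one-pointer filler map is stored at top and the
    // allocation pointer is bumped by kDoubleSize / 2 to restore double
    // alignment; pretenured allocations re-check the limit first.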
1693#if V8_TARGET_ARCH_PPC64
1694    STATIC_ASSERT(kPointerAlignment == kDoubleAlignment);
1695#else
1696    STATIC_ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
1697    andi(result_end, result, Operand(kDoubleAlignmentMask));
1698    Label aligned;
1699    beq(&aligned, cr0);
1700    if ((flags & PRETENURE) != 0) {
1701      cmpl(result, alloc_limit);
1702      bge(gc_required);
1703    }
1704    mov(result_end, Operand(isolate()->factory()->one_pointer_filler_map()));
1705    stw(result_end, MemOperand(result));
1706    addi(result, result, Operand(kDoubleSize / 2));
1707    bind(&aligned);
1708#endif
1709  }
1710
1711  // Calculate new top and bail out if new space is exhausted. Use result
1712  // to calculate the new top.
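  // r0 receives the space remaining in the allocation area (limit - top), so
  // the new top is only computed once the request is known to fit.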
1713  sub(r0, alloc_limit, result);
1714  if (is_int16(object_size)) {
1715    cmpi(r0, Operand(object_size));
1716    blt(gc_required);
1717    addi(result_end, result, Operand(object_size));
1718  } else {
1719    Cmpi(r0, Operand(object_size), result_end);
1720    blt(gc_required);
1721    add(result_end, result, result_end);
1722  }
1723
1724  if ((flags & ALLOCATION_FOLDING_DOMINATOR) == 0) {
1725    // The top pointer is not updated for allocation folding dominators.
1726    StoreP(result_end, MemOperand(top_address));
1727  }
1728
1729  // Tag object.
1730  addi(result, result, Operand(kHeapObjectTag));
1731}
1732
1733
1734void MacroAssembler::Allocate(Register object_size, Register result,
1735                              Register result_end, Register scratch,
1736                              Label* gc_required, AllocationFlags flags) {
1737  DCHECK((flags & ALLOCATION_FOLDED) == 0);
1738  if (!FLAG_inline_new) {
1739    if (emit_debug_code()) {
1740      // Trash the registers to simulate an allocation failure.
1741      li(result, Operand(0x7091));
1742      li(scratch, Operand(0x7191));
1743      li(result_end, Operand(0x7291));
1744    }
1745    b(gc_required);
1746    return;
1747  }
1748
1749  // |object_size| and |result_end| may overlap if the DOUBLE_ALIGNMENT flag
1750  // is not specified. Other registers must not overlap.
1751  DCHECK(!AreAliased(object_size, result, scratch, ip));
1752  DCHECK(!AreAliased(result_end, result, scratch, ip));
1753  DCHECK((flags & DOUBLE_ALIGNMENT) == 0 || !object_size.is(result_end));
1754
1755  // Check relative positions of allocation top and limit addresses.
1756  ExternalReference allocation_top =
1757      AllocationUtils::GetAllocationTopReference(isolate(), flags);
1758  ExternalReference allocation_limit =
1759      AllocationUtils::GetAllocationLimitReference(isolate(), flags);
1760  intptr_t top = reinterpret_cast<intptr_t>(allocation_top.address());
1761  intptr_t limit = reinterpret_cast<intptr_t>(allocation_limit.address());
1762  DCHECK((limit - top) == kPointerSize);
1763
1764  // Set up allocation top address and allocation limit registers.
1765  Register top_address = scratch;
1766  // This code stores a temporary value in ip. This is OK, as the code below
1767  // does not need ip for implicit literal generation.
1768  Register alloc_limit = ip;
1769  mov(top_address, Operand(allocation_top));
1770
1771  if ((flags & RESULT_CONTAINS_TOP) == 0) {
    // Load allocation top into result and allocation limit into alloc_limit.
1773    LoadP(result, MemOperand(top_address));
1774    LoadP(alloc_limit, MemOperand(top_address, kPointerSize));
1775  } else {
1776    if (emit_debug_code()) {
1777      // Assert that result actually contains top on entry.
1778      LoadP(alloc_limit, MemOperand(top_address));
1779      cmp(result, alloc_limit);
1780      Check(eq, kUnexpectedAllocationTop);
1781    }
1782    // Load allocation limit. Result already contains allocation top.
1783    LoadP(alloc_limit, MemOperand(top_address, limit - top));
1784  }
1785
1786  if ((flags & DOUBLE_ALIGNMENT) != 0) {
1787    // Align the next allocation. Storing the filler map without checking top is
1788    // safe in new-space because the limit of the heap is aligned there.
1789#if V8_TARGET_ARCH_PPC64
1790    STATIC_ASSERT(kPointerAlignment == kDoubleAlignment);
1791#else
1792    STATIC_ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
1793    andi(result_end, result, Operand(kDoubleAlignmentMask));
1794    Label aligned;
1795    beq(&aligned, cr0);
1796    if ((flags & PRETENURE) != 0) {
1797      cmpl(result, alloc_limit);
1798      bge(gc_required);
1799    }
1800    mov(result_end, Operand(isolate()->factory()->one_pointer_filler_map()));
1801    stw(result_end, MemOperand(result));
1802    addi(result, result, Operand(kDoubleSize / 2));
1803    bind(&aligned);
1804#endif
1805  }
1806
1807  // Calculate new top and bail out if new space is exhausted. Use result
1808  // to calculate the new top. Object size may be in words so a shift is
1809  // required to get the number of bytes.
1810  sub(r0, alloc_limit, result);
1811  if ((flags & SIZE_IN_WORDS) != 0) {
1812    ShiftLeftImm(result_end, object_size, Operand(kPointerSizeLog2));
1813    cmp(r0, result_end);
1814    blt(gc_required);
1815    add(result_end, result, result_end);
1816  } else {
1817    cmp(r0, object_size);
1818    blt(gc_required);
1819    add(result_end, result, object_size);
1820  }
1821
1822  // Update allocation top. result temporarily holds the new top.
1823  if (emit_debug_code()) {
1824    andi(r0, result_end, Operand(kObjectAlignmentMask));
1825    Check(eq, kUnalignedAllocationInNewSpace, cr0);
1826  }
1827  if ((flags & ALLOCATION_FOLDING_DOMINATOR) == 0) {
1828    // The top pointer is not updated for allocation folding dominators.
1829    StoreP(result_end, MemOperand(top_address));
1830  }
1831
1832  // Tag object.
1833  addi(result, result, Operand(kHeapObjectTag));
1834}
1835
1836void MacroAssembler::FastAllocate(Register object_size, Register result,
1837                                  Register result_end, Register scratch,
1838                                  AllocationFlags flags) {
1839  // |object_size| and |result_end| may overlap if the DOUBLE_ALIGNMENT flag
1840  // is not specified. Other registers must not overlap.
1841  DCHECK(!AreAliased(object_size, result, scratch, ip));
1842  DCHECK(!AreAliased(result_end, result, scratch, ip));
1843  DCHECK((flags & DOUBLE_ALIGNMENT) == 0 || !object_size.is(result_end));
1844
1845  ExternalReference allocation_top =
1846      AllocationUtils::GetAllocationTopReference(isolate(), flags);
1847
1848  Register top_address = scratch;
1849  mov(top_address, Operand(allocation_top));
1850  LoadP(result, MemOperand(top_address));
1851
1852  if ((flags & DOUBLE_ALIGNMENT) != 0) {
1853    // Align the next allocation. Storing the filler map without checking top is
1854    // safe in new-space because the limit of the heap is aligned there.
1855#if V8_TARGET_ARCH_PPC64
1856    STATIC_ASSERT(kPointerAlignment == kDoubleAlignment);
1857#else
1858    DCHECK(kPointerAlignment * 2 == kDoubleAlignment);
1859    andi(result_end, result, Operand(kDoubleAlignmentMask));
1860    Label aligned;
1861    beq(&aligned);
1862    mov(result_end, Operand(isolate()->factory()->one_pointer_filler_map()));
1863    stw(result_end, MemOperand(result));
1864    addi(result, result, Operand(kDoubleSize / 2));
1865    bind(&aligned);
1866#endif
1867  }
1868
1869  // Calculate new top using result. Object size may be in words so a shift is
1870  // required to get the number of bytes.
1871  if ((flags & SIZE_IN_WORDS) != 0) {
1872    ShiftLeftImm(result_end, object_size, Operand(kPointerSizeLog2));
1873    add(result_end, result, result_end);
1874  } else {
1875    add(result_end, result, object_size);
1876  }
1877
1878  // Update allocation top. result temporarily holds the new top.
1879  if (emit_debug_code()) {
1880    andi(r0, result_end, Operand(kObjectAlignmentMask));
1881    Check(eq, kUnalignedAllocationInNewSpace, cr0);
1882  }
1883  StoreP(result_end, MemOperand(top_address));
1884
1885  // Tag object.
1886  addi(result, result, Operand(kHeapObjectTag));
1887}
1888
1889void MacroAssembler::FastAllocate(int object_size, Register result,
1890                                  Register scratch1, Register scratch2,
1891                                  AllocationFlags flags) {
1892  DCHECK(object_size <= kMaxRegularHeapObjectSize);
1893  DCHECK(!AreAliased(result, scratch1, scratch2, ip));
1894
1895  // Make object size into bytes.
1896  if ((flags & SIZE_IN_WORDS) != 0) {
1897    object_size *= kPointerSize;
1898  }
1899  DCHECK_EQ(0, object_size & kObjectAlignmentMask);
1900
1901  ExternalReference allocation_top =
1902      AllocationUtils::GetAllocationTopReference(isolate(), flags);
1903
1904  // Set up allocation top address register.
1905  Register top_address = scratch1;
1906  Register result_end = scratch2;
1907  mov(top_address, Operand(allocation_top));
1908  LoadP(result, MemOperand(top_address));
1909
1910  if ((flags & DOUBLE_ALIGNMENT) != 0) {
1911    // Align the next allocation. Storing the filler map without checking top is
1912    // safe in new-space because the limit of the heap is aligned there.
1913#if V8_TARGET_ARCH_PPC64
1914    STATIC_ASSERT(kPointerAlignment == kDoubleAlignment);
1915#else
1916    DCHECK(kPointerAlignment * 2 == kDoubleAlignment);
1917    andi(result_end, result, Operand(kDoubleAlignmentMask));
1918    Label aligned;
1919    beq(&aligned);
1920    mov(result_end, Operand(isolate()->factory()->one_pointer_filler_map()));
1921    stw(result_end, MemOperand(result));
1922    addi(result, result, Operand(kDoubleSize / 2));
1923    bind(&aligned);
1924#endif
1925  }
1926
1927  // Calculate new top using result.
1928  Add(result_end, result, object_size, r0);
1929
1930  // The top pointer is not updated for allocation folding dominators.
1931  StoreP(result_end, MemOperand(top_address));
1932
1933  // Tag object.
1934  addi(result, result, Operand(kHeapObjectTag));
1935}
1936
1937void MacroAssembler::CompareObjectType(Register object, Register map,
1938                                       Register type_reg, InstanceType type) {
1939  const Register temp = type_reg.is(no_reg) ? r0 : type_reg;
1940
1941  LoadP(map, FieldMemOperand(object, HeapObject::kMapOffset));
1942  CompareInstanceType(map, temp, type);
1943}
1944
1945
1946void MacroAssembler::CompareInstanceType(Register map, Register type_reg,
1947                                         InstanceType type) {
1948  STATIC_ASSERT(Map::kInstanceTypeOffset < 4096);
1949  STATIC_ASSERT(LAST_TYPE < 256);
1950  lbz(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
1951  cmpi(type_reg, Operand(type));
1952}
1953
1954
1955void MacroAssembler::CompareRoot(Register obj, Heap::RootListIndex index) {
1956  DCHECK(!obj.is(r0));
1957  LoadRoot(r0, index);
1958  cmp(obj, r0);
1959}
1960
1961void MacroAssembler::AddAndCheckForOverflow(Register dst, Register left,
1962                                            Register right,
1963                                            Register overflow_dst,
1964                                            Register scratch) {
1965  DCHECK(!dst.is(overflow_dst));
1966  DCHECK(!dst.is(scratch));
1967  DCHECK(!overflow_dst.is(scratch));
1968  DCHECK(!overflow_dst.is(left));
1969  DCHECK(!overflow_dst.is(right));
1970
1971  bool left_is_right = left.is(right);
1972  RCBit xorRC = left_is_right ? SetRC : LeaveRC;
1973
  // C = A + B; C overflows if A and B have the same sign and C's sign
  // differs from A's.
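  // Concretely: overflow <=> the sign bit of (dst ^ left) & (dst ^ right) is
  // set. When left and right are the same register (dst = 2 * left), dst ^ left
  // alone carries the overflow bit, so the final and_ is skipped and the xor_
  // sets the condition register directly (xorRC == SetRC).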
1975  if (dst.is(left)) {
1976    mr(scratch, left);            // Preserve left.
1977    add(dst, left, right);        // Left is overwritten.
1978    xor_(overflow_dst, dst, scratch, xorRC);  // Original left.
1979    if (!left_is_right) xor_(scratch, dst, right);
1980  } else if (dst.is(right)) {
1981    mr(scratch, right);           // Preserve right.
1982    add(dst, left, right);        // Right is overwritten.
1983    xor_(overflow_dst, dst, left, xorRC);
1984    if (!left_is_right) xor_(scratch, dst, scratch);  // Original right.
1985  } else {
1986    add(dst, left, right);
1987    xor_(overflow_dst, dst, left, xorRC);
1988    if (!left_is_right) xor_(scratch, dst, right);
1989  }
1990  if (!left_is_right) and_(overflow_dst, scratch, overflow_dst, SetRC);
1991}
1992
1993
1994void MacroAssembler::AddAndCheckForOverflow(Register dst, Register left,
1995                                            intptr_t right,
1996                                            Register overflow_dst,
1997                                            Register scratch) {
1998  Register original_left = left;
1999  DCHECK(!dst.is(overflow_dst));
2000  DCHECK(!dst.is(scratch));
2001  DCHECK(!overflow_dst.is(scratch));
2002  DCHECK(!overflow_dst.is(left));
2003
  // C = A + B; C overflows if A and B have the same sign and C's sign
  // differs from A's.
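  // With an immediate operand: for right >= 0, overflow requires left >= 0 and
  // dst < 0, i.e. the sign bit of (dst ^ left) & dst; for right < 0 it requires
  // left < 0 and dst >= 0, i.e. (dst ^ left) & ~dst, hence the andc below.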
2005  if (dst.is(left)) {
2006    // Preserve left.
2007    original_left = overflow_dst;
2008    mr(original_left, left);
2009  }
2010  Add(dst, left, right, scratch);
2011  xor_(overflow_dst, dst, original_left);
2012  if (right >= 0) {
2013    and_(overflow_dst, overflow_dst, dst, SetRC);
2014  } else {
2015    andc(overflow_dst, overflow_dst, dst, SetRC);
2016  }
2017}
2018
2019
2020void MacroAssembler::SubAndCheckForOverflow(Register dst, Register left,
2021                                            Register right,
2022                                            Register overflow_dst,
2023                                            Register scratch) {
2024  DCHECK(!dst.is(overflow_dst));
2025  DCHECK(!dst.is(scratch));
2026  DCHECK(!overflow_dst.is(scratch));
2027  DCHECK(!overflow_dst.is(left));
2028  DCHECK(!overflow_dst.is(right));
2029
  // C = A - B; C overflows if A and B have different signs and C's sign
  // differs from A's.
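  // Concretely: overflow <=> the sign bit of (dst ^ left) & (left ^ right) is
  // set.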
2031  if (dst.is(left)) {
2032    mr(scratch, left);      // Preserve left.
2033    sub(dst, left, right);  // Left is overwritten.
2034    xor_(overflow_dst, dst, scratch);
2035    xor_(scratch, scratch, right);
2036    and_(overflow_dst, overflow_dst, scratch, SetRC);
2037  } else if (dst.is(right)) {
2038    mr(scratch, right);     // Preserve right.
2039    sub(dst, left, right);  // Right is overwritten.
2040    xor_(overflow_dst, dst, left);
2041    xor_(scratch, left, scratch);
2042    and_(overflow_dst, overflow_dst, scratch, SetRC);
2043  } else {
2044    sub(dst, left, right);
2045    xor_(overflow_dst, dst, left);
2046    xor_(scratch, left, right);
2047    and_(overflow_dst, scratch, overflow_dst, SetRC);
2048  }
2049}
2050
2051
2052void MacroAssembler::CompareMap(Register obj, Register scratch, Handle<Map> map,
2053                                Label* early_success) {
2054  LoadP(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
2055  CompareMap(scratch, map, early_success);
2056}
2057
2058
2059void MacroAssembler::CompareMap(Register obj_map, Handle<Map> map,
2060                                Label* early_success) {
2061  mov(r0, Operand(map));
2062  cmp(obj_map, r0);
2063}
2064
2065
2066void MacroAssembler::CheckMap(Register obj, Register scratch, Handle<Map> map,
2067                              Label* fail, SmiCheckType smi_check_type) {
2068  if (smi_check_type == DO_SMI_CHECK) {
2069    JumpIfSmi(obj, fail);
2070  }
2071
2072  Label success;
2073  CompareMap(obj, scratch, map, &success);
2074  bne(fail);
2075  bind(&success);
2076}
2077
2078
2079void MacroAssembler::CheckMap(Register obj, Register scratch,
2080                              Heap::RootListIndex index, Label* fail,
2081                              SmiCheckType smi_check_type) {
2082  if (smi_check_type == DO_SMI_CHECK) {
2083    JumpIfSmi(obj, fail);
2084  }
2085  LoadP(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
2086  LoadRoot(r0, index);
2087  cmp(scratch, r0);
2088  bne(fail);
2089}
2090
2091
2092void MacroAssembler::DispatchWeakMap(Register obj, Register scratch1,
2093                                     Register scratch2, Handle<WeakCell> cell,
2094                                     Handle<Code> success,
2095                                     SmiCheckType smi_check_type) {
2096  Label fail;
2097  if (smi_check_type == DO_SMI_CHECK) {
2098    JumpIfSmi(obj, &fail);
2099  }
2100  LoadP(scratch1, FieldMemOperand(obj, HeapObject::kMapOffset));
2101  CmpWeakValue(scratch1, cell, scratch2);
2102  Jump(success, RelocInfo::CODE_TARGET, eq);
2103  bind(&fail);
2104}
2105
2106
2107void MacroAssembler::CmpWeakValue(Register value, Handle<WeakCell> cell,
2108                                  Register scratch, CRegister cr) {
2109  mov(scratch, Operand(cell));
2110  LoadP(scratch, FieldMemOperand(scratch, WeakCell::kValueOffset));
2111  cmp(value, scratch, cr);
2112}
2113
2114
2115void MacroAssembler::GetWeakValue(Register value, Handle<WeakCell> cell) {
2116  mov(value, Operand(cell));
2117  LoadP(value, FieldMemOperand(value, WeakCell::kValueOffset));
2118}
2119
2120
2121void MacroAssembler::LoadWeakValue(Register value, Handle<WeakCell> cell,
2122                                   Label* miss) {
2123  GetWeakValue(value, cell);
2124  JumpIfSmi(value, miss);
2125}
2126
2127
2128void MacroAssembler::GetMapConstructor(Register result, Register map,
2129                                       Register temp, Register temp2) {
2130  Label done, loop;
2131  LoadP(result, FieldMemOperand(map, Map::kConstructorOrBackPointerOffset));
2132  bind(&loop);
2133  JumpIfSmi(result, &done);
2134  CompareObjectType(result, temp, temp2, MAP_TYPE);
2135  bne(&done);
2136  LoadP(result, FieldMemOperand(result, Map::kConstructorOrBackPointerOffset));
2137  b(&loop);
2138  bind(&done);
2139}
2140
2141void MacroAssembler::CallStub(CodeStub* stub, TypeFeedbackId ast_id,
2142                              Condition cond) {
2143  DCHECK(AllowThisStubCall(stub));  // Stub calls are not allowed in some stubs.
2144  Call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id, cond);
2145}
2146
2147
2148void MacroAssembler::TailCallStub(CodeStub* stub, Condition cond) {
2149  Jump(stub->GetCode(), RelocInfo::CODE_TARGET, cond);
2150}
2151
2152
2153bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
2154  return has_frame_ || !stub->SometimesSetsUpAFrame();
2155}
2156
2157void MacroAssembler::SmiToDouble(DoubleRegister value, Register smi) {
2158  SmiUntag(ip, smi);
2159  ConvertIntToDouble(ip, value);
2160}
2161
2162
2163void MacroAssembler::TestDoubleIsInt32(DoubleRegister double_input,
2164                                       Register scratch1, Register scratch2,
2165                                       DoubleRegister double_scratch) {
2166  TryDoubleToInt32Exact(scratch1, double_input, scratch2, double_scratch);
2167}
2168
2169void MacroAssembler::TestDoubleIsMinusZero(DoubleRegister input,
2170                                           Register scratch1,
2171                                           Register scratch2) {
2172#if V8_TARGET_ARCH_PPC64
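  // -0.0 has the bit pattern 0x8000000000000000; rotating left by one moves
  // the sign bit into the least significant bit, so only -0.0 compares equal
  // to 1 below.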
2173  MovDoubleToInt64(scratch1, input);
2174  rotldi(scratch1, scratch1, 1);
2175  cmpi(scratch1, Operand(1));
2176#else
2177  MovDoubleToInt64(scratch1, scratch2, input);
2178  Label done;
2179  cmpi(scratch2, Operand::Zero());
2180  bne(&done);
2181  lis(scratch2, Operand(SIGN_EXT_IMM16(0x8000)));
2182  cmp(scratch1, scratch2);
2183  bind(&done);
2184#endif
2185}
2186
2187void MacroAssembler::TestDoubleSign(DoubleRegister input, Register scratch) {
2188#if V8_TARGET_ARCH_PPC64
2189  MovDoubleToInt64(scratch, input);
2190#else
2191  MovDoubleHighToInt(scratch, input);
2192#endif
2193  cmpi(scratch, Operand::Zero());
2194}
2195
2196void MacroAssembler::TestHeapNumberSign(Register input, Register scratch) {
2197#if V8_TARGET_ARCH_PPC64
2198  LoadP(scratch, FieldMemOperand(input, HeapNumber::kValueOffset));
2199#else
2200  lwz(scratch, FieldMemOperand(input, HeapNumber::kExponentOffset));
2201#endif
2202  cmpi(scratch, Operand::Zero());
2203}
2204
2205void MacroAssembler::TryDoubleToInt32Exact(Register result,
2206                                           DoubleRegister double_input,
2207                                           Register scratch,
2208                                           DoubleRegister double_scratch) {
2209  Label done;
2210  DCHECK(!double_input.is(double_scratch));
2211
2212  ConvertDoubleToInt64(double_input,
2213#if !V8_TARGET_ARCH_PPC64
2214                       scratch,
2215#endif
2216                       result, double_scratch);
2217
2218#if V8_TARGET_ARCH_PPC64
2219  TestIfInt32(result, r0);
2220#else
2221  TestIfInt32(scratch, result, r0);
2222#endif
2223  bne(&done);
2224
  // Convert back and compare: if the round-tripped value equals the input,
  // the conversion was exact.
2226  fcfid(double_scratch, double_scratch);
2227  fcmpu(double_scratch, double_input);
2228  bind(&done);
2229}
2230
2231
2232void MacroAssembler::TryInt32Floor(Register result, DoubleRegister double_input,
2233                                   Register input_high, Register scratch,
2234                                   DoubleRegister double_scratch, Label* done,
2235                                   Label* exact) {
2236  DCHECK(!result.is(input_high));
2237  DCHECK(!double_input.is(double_scratch));
2238  Label exception;
2239
2240  MovDoubleHighToInt(input_high, double_input);
2241
2242  // Test for NaN/Inf
2243  ExtractBitMask(result, input_high, HeapNumber::kExponentMask);
2244  cmpli(result, Operand(0x7ff));
2245  beq(&exception);
2246
2247  // Convert (rounding to -Inf)
2248  ConvertDoubleToInt64(double_input,
2249#if !V8_TARGET_ARCH_PPC64
2250                       scratch,
2251#endif
2252                       result, double_scratch, kRoundToMinusInf);
2253
2254// Test for overflow
2255#if V8_TARGET_ARCH_PPC64
2256  TestIfInt32(result, r0);
2257#else
2258  TestIfInt32(scratch, result, r0);
2259#endif
2260  bne(&exception);
2261
2262  // Test for exactness
2263  fcfid(double_scratch, double_scratch);
2264  fcmpu(double_scratch, double_input);
2265  beq(exact);
2266  b(done);
2267
2268  bind(&exception);
2269}
2270
2271
2272void MacroAssembler::TryInlineTruncateDoubleToI(Register result,
2273                                                DoubleRegister double_input,
2274                                                Label* done) {
2275  DoubleRegister double_scratch = kScratchDoubleReg;
2276#if !V8_TARGET_ARCH_PPC64
2277  Register scratch = ip;
2278#endif
2279
2280  ConvertDoubleToInt64(double_input,
2281#if !V8_TARGET_ARCH_PPC64
2282                       scratch,
2283#endif
2284                       result, double_scratch);
2285
2286// Test for overflow
2287#if V8_TARGET_ARCH_PPC64
2288  TestIfInt32(result, r0);
2289#else
2290  TestIfInt32(scratch, result, r0);
2291#endif
2292  beq(done);
2293}
2294
2295
2296void MacroAssembler::TruncateDoubleToI(Register result,
2297                                       DoubleRegister double_input) {
2298  Label done;
2299
2300  TryInlineTruncateDoubleToI(result, double_input, &done);
2301
  // If we fell through, the inline version didn't succeed, so call the stub
  // instead.
2303  mflr(r0);
2304  push(r0);
2305  // Put input on stack.
2306  stfdu(double_input, MemOperand(sp, -kDoubleSize));
2307
2308  DoubleToIStub stub(isolate(), sp, result, 0, true, true);
2309  CallStub(&stub);
2310
2311  addi(sp, sp, Operand(kDoubleSize));
2312  pop(r0);
2313  mtlr(r0);
2314
2315  bind(&done);
2316}
2317
2318
2319void MacroAssembler::TruncateHeapNumberToI(Register result, Register object) {
2320  Label done;
2321  DoubleRegister double_scratch = kScratchDoubleReg;
2322  DCHECK(!result.is(object));
2323
2324  lfd(double_scratch, FieldMemOperand(object, HeapNumber::kValueOffset));
2325  TryInlineTruncateDoubleToI(result, double_scratch, &done);
2326
  // If we fell through, the inline version didn't succeed, so call the stub
  // instead.
2328  mflr(r0);
2329  push(r0);
2330  DoubleToIStub stub(isolate(), object, result,
2331                     HeapNumber::kValueOffset - kHeapObjectTag, true, true);
2332  CallStub(&stub);
2333  pop(r0);
2334  mtlr(r0);
2335
2336  bind(&done);
2337}
2338
2339
2340void MacroAssembler::TruncateNumberToI(Register object, Register result,
2341                                       Register heap_number_map,
2342                                       Register scratch1, Label* not_number) {
2343  Label done;
2344  DCHECK(!result.is(object));
2345
2346  UntagAndJumpIfSmi(result, object, &done);
2347  JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_number);
2348  TruncateHeapNumberToI(result, object);
2349
2350  bind(&done);
2351}
2352
2353
2354void MacroAssembler::GetLeastBitsFromSmi(Register dst, Register src,
2355                                         int num_least_bits) {
2356#if V8_TARGET_ARCH_PPC64
2357  rldicl(dst, src, kBitsPerPointer - kSmiShift,
2358         kBitsPerPointer - num_least_bits);
2359#else
2360  rlwinm(dst, src, kBitsPerPointer - kSmiShift,
2361         kBitsPerPointer - num_least_bits, 31);
2362#endif
2363}
2364
2365
2366void MacroAssembler::GetLeastBitsFromInt32(Register dst, Register src,
2367                                           int num_least_bits) {
2368  rlwinm(dst, src, 0, 32 - num_least_bits, 31);
2369}
2370
2371
2372void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments,
2373                                 SaveFPRegsMode save_doubles) {
2374  // All parameters are on the stack.  r3 has the return value after call.
2375
2376  // If the expected number of arguments of the runtime function is
2377  // constant, we check that the actual number of arguments match the
2378  // expectation.
2379  CHECK(f->nargs < 0 || f->nargs == num_arguments);
2380
2381  // TODO(1236192): Most runtime routines don't need the number of
2382  // arguments passed in because it is constant. At some point we
2383  // should remove this need and make the runtime routine entry code
2384  // smarter.
2385  mov(r3, Operand(num_arguments));
2386  mov(r4, Operand(ExternalReference(f, isolate())));
2387  CEntryStub stub(isolate(),
2388#if V8_TARGET_ARCH_PPC64
2389                  f->result_size,
2390#else
2391                  1,
2392#endif
2393                  save_doubles);
2394  CallStub(&stub);
2395}
2396
2397
2398void MacroAssembler::CallExternalReference(const ExternalReference& ext,
2399                                           int num_arguments) {
2400  mov(r3, Operand(num_arguments));
2401  mov(r4, Operand(ext));
2402
2403  CEntryStub stub(isolate(), 1);
2404  CallStub(&stub);
2405}
2406
2407
2408void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) {
2409  const Runtime::Function* function = Runtime::FunctionForId(fid);
2410  DCHECK_EQ(1, function->result_size);
2411  if (function->nargs >= 0) {
2412    mov(r3, Operand(function->nargs));
2413  }
2414  JumpToExternalReference(ExternalReference(fid, isolate()));
2415}
2416
2417
2418void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
2419                                             bool builtin_exit_frame) {
2420  mov(r4, Operand(builtin));
2421  CEntryStub stub(isolate(), 1, kDontSaveFPRegs, kArgvOnStack,
2422                  builtin_exit_frame);
2423  Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
2424}
2425
2426
2427void MacroAssembler::SetCounter(StatsCounter* counter, int value,
2428                                Register scratch1, Register scratch2) {
2429  if (FLAG_native_code_counters && counter->Enabled()) {
2430    mov(scratch1, Operand(value));
2431    mov(scratch2, Operand(ExternalReference(counter)));
2432    stw(scratch1, MemOperand(scratch2));
2433  }
2434}
2435
2436
2437void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
2438                                      Register scratch1, Register scratch2) {
2439  DCHECK(value > 0);
2440  if (FLAG_native_code_counters && counter->Enabled()) {
2441    mov(scratch2, Operand(ExternalReference(counter)));
2442    lwz(scratch1, MemOperand(scratch2));
2443    addi(scratch1, scratch1, Operand(value));
2444    stw(scratch1, MemOperand(scratch2));
2445  }
2446}
2447
2448
2449void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
2450                                      Register scratch1, Register scratch2) {
2451  DCHECK(value > 0);
2452  if (FLAG_native_code_counters && counter->Enabled()) {
2453    mov(scratch2, Operand(ExternalReference(counter)));
2454    lwz(scratch1, MemOperand(scratch2));
2455    subi(scratch1, scratch1, Operand(value));
2456    stw(scratch1, MemOperand(scratch2));
2457  }
2458}
2459
2460
2461void MacroAssembler::Assert(Condition cond, BailoutReason reason,
2462                            CRegister cr) {
2463  if (emit_debug_code()) Check(cond, reason, cr);
2464}
2465
2466
2467void MacroAssembler::AssertFastElements(Register elements) {
2468  if (emit_debug_code()) {
2469    DCHECK(!elements.is(r0));
2470    Label ok;
2471    push(elements);
2472    LoadP(elements, FieldMemOperand(elements, HeapObject::kMapOffset));
2473    LoadRoot(r0, Heap::kFixedArrayMapRootIndex);
2474    cmp(elements, r0);
2475    beq(&ok);
2476    LoadRoot(r0, Heap::kFixedDoubleArrayMapRootIndex);
2477    cmp(elements, r0);
2478    beq(&ok);
2479    LoadRoot(r0, Heap::kFixedCOWArrayMapRootIndex);
2480    cmp(elements, r0);
2481    beq(&ok);
2482    Abort(kJSObjectWithFastElementsMapHasSlowElements);
2483    bind(&ok);
2484    pop(elements);
2485  }
2486}
2487
2488
2489void MacroAssembler::Check(Condition cond, BailoutReason reason, CRegister cr) {
2490  Label L;
2491  b(cond, &L, cr);
2492  Abort(reason);
2493  // will not return here
2494  bind(&L);
2495}
2496
2497
2498void MacroAssembler::Abort(BailoutReason reason) {
2499  Label abort_start;
2500  bind(&abort_start);
2501#ifdef DEBUG
2502  const char* msg = GetBailoutReason(reason);
2503  if (msg != NULL) {
2504    RecordComment("Abort message: ");
2505    RecordComment(msg);
2506  }
2507
2508  if (FLAG_trap_on_abort) {
2509    stop(msg);
2510    return;
2511  }
2512#endif
2513
2514  // Check if Abort() has already been initialized.
2515  DCHECK(isolate()->builtins()->Abort()->IsHeapObject());
2516
2517  LoadSmiLiteral(r4, Smi::FromInt(static_cast<int>(reason)));
2518
2519  // Disable stub call restrictions to always allow calls to abort.
2520  if (!has_frame_) {
2521    // We don't actually want to generate a pile of code for this, so just
2522    // claim there is a stack frame, without generating one.
2523    FrameScope scope(this, StackFrame::NONE);
2524    Call(isolate()->builtins()->Abort(), RelocInfo::CODE_TARGET);
2525  } else {
2526    Call(isolate()->builtins()->Abort(), RelocInfo::CODE_TARGET);
2527  }
2528  // will not return here
2529}
2530
2531
2532void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
2533  if (context_chain_length > 0) {
2534    // Move up the chain of contexts to the context containing the slot.
2535    LoadP(dst, MemOperand(cp, Context::SlotOffset(Context::PREVIOUS_INDEX)));
2536    for (int i = 1; i < context_chain_length; i++) {
2537      LoadP(dst, MemOperand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX)));
2538    }
2539  } else {
2540    // Slot is in the current function context.  Move it into the
2541    // destination register in case we store into it (the write barrier
    // cannot be allowed to destroy the context in cp).
2543    mr(dst, cp);
2544  }
2545}
2546
2547void MacroAssembler::LoadNativeContextSlot(int index, Register dst) {
2548  LoadP(dst, NativeContextMemOperand());
2549  LoadP(dst, ContextMemOperand(dst, index));
2550}
2551
2552
2553void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
2554                                                  Register map,
2555                                                  Register scratch) {
2556  // Load the initial map. The global functions all have initial maps.
2557  LoadP(map,
2558        FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
2559  if (emit_debug_code()) {
2560    Label ok, fail;
2561    CheckMap(map, scratch, Heap::kMetaMapRootIndex, &fail, DO_SMI_CHECK);
2562    b(&ok);
2563    bind(&fail);
2564    Abort(kGlobalFunctionsMustHaveInitialMap);
2565    bind(&ok);
2566  }
2567}
2568
2569
2570void MacroAssembler::JumpIfNotPowerOfTwoOrZero(
2571    Register reg, Register scratch, Label* not_power_of_two_or_zero) {
2572  subi(scratch, reg, Operand(1));
2573  cmpi(scratch, Operand::Zero());
2574  blt(not_power_of_two_or_zero);
2575  and_(r0, scratch, reg, SetRC);
2576  bne(not_power_of_two_or_zero, cr0);
2577}
2578
2579
2580void MacroAssembler::JumpIfNotPowerOfTwoOrZeroAndNeg(Register reg,
2581                                                     Register scratch,
2582                                                     Label* zero_and_neg,
2583                                                     Label* not_power_of_two) {
2584  subi(scratch, reg, Operand(1));
2585  cmpi(scratch, Operand::Zero());
2586  blt(zero_and_neg);
2587  and_(r0, scratch, reg, SetRC);
2588  bne(not_power_of_two, cr0);
2589}
2590
2591#if !V8_TARGET_ARCH_PPC64
2592void MacroAssembler::SmiTagCheckOverflow(Register reg, Register overflow) {
2593  DCHECK(!reg.is(overflow));
2594  mr(overflow, reg);  // Save original value.
2595  SmiTag(reg);
2596  xor_(overflow, overflow, reg, SetRC);  // Overflow if (value ^ 2 * value) < 0.
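  // (On 32-bit targets SmiTag is a one-bit left shift, so the tagged value is
  // 2 * value; a sign flip means the shift lost the sign bit.)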
2597}
2598
2599
2600void MacroAssembler::SmiTagCheckOverflow(Register dst, Register src,
2601                                         Register overflow) {
2602  if (dst.is(src)) {
2603    // Fall back to slower case.
2604    SmiTagCheckOverflow(dst, overflow);
2605  } else {
2606    DCHECK(!dst.is(src));
2607    DCHECK(!dst.is(overflow));
2608    DCHECK(!src.is(overflow));
2609    SmiTag(dst, src);
2610    xor_(overflow, dst, src, SetRC);  // Overflow if (value ^ 2 * value) < 0.
2611  }
2612}
2613#endif
2614
2615void MacroAssembler::JumpIfNotBothSmi(Register reg1, Register reg2,
2616                                      Label* on_not_both_smi) {
2617  STATIC_ASSERT(kSmiTag == 0);
2618  orx(r0, reg1, reg2, LeaveRC);
2619  JumpIfNotSmi(r0, on_not_both_smi);
2620}
2621
2622
2623void MacroAssembler::UntagAndJumpIfSmi(Register dst, Register src,
2624                                       Label* smi_case) {
2625  STATIC_ASSERT(kSmiTag == 0);
2626  TestBitRange(src, kSmiTagSize - 1, 0, r0);
2627  SmiUntag(dst, src);
2628  beq(smi_case, cr0);
2629}
2630
2631void MacroAssembler::JumpIfEitherSmi(Register reg1, Register reg2,
2632                                     Label* on_either_smi) {
2633  STATIC_ASSERT(kSmiTag == 0);
2634  JumpIfSmi(reg1, on_either_smi);
2635  JumpIfSmi(reg2, on_either_smi);
2636}
2637
2638void MacroAssembler::AssertNotNumber(Register object) {
2639  if (emit_debug_code()) {
2640    STATIC_ASSERT(kSmiTag == 0);
2641    TestIfSmi(object, r0);
2642    Check(ne, kOperandIsANumber, cr0);
2643    push(object);
2644    CompareObjectType(object, object, object, HEAP_NUMBER_TYPE);
2645    pop(object);
2646    Check(ne, kOperandIsANumber);
2647  }
2648}
2649
2650void MacroAssembler::AssertNotSmi(Register object) {
2651  if (emit_debug_code()) {
2652    STATIC_ASSERT(kSmiTag == 0);
2653    TestIfSmi(object, r0);
2654    Check(ne, kOperandIsASmi, cr0);
2655  }
2656}
2657
2658
2659void MacroAssembler::AssertSmi(Register object) {
2660  if (emit_debug_code()) {
2661    STATIC_ASSERT(kSmiTag == 0);
2662    TestIfSmi(object, r0);
2663    Check(eq, kOperandIsNotSmi, cr0);
2664  }
2665}
2666
2667
2668void MacroAssembler::AssertString(Register object) {
2669  if (emit_debug_code()) {
2670    STATIC_ASSERT(kSmiTag == 0);
2671    TestIfSmi(object, r0);
2672    Check(ne, kOperandIsASmiAndNotAString, cr0);
2673    push(object);
2674    LoadP(object, FieldMemOperand(object, HeapObject::kMapOffset));
2675    CompareInstanceType(object, object, FIRST_NONSTRING_TYPE);
2676    pop(object);
2677    Check(lt, kOperandIsNotAString);
2678  }
2679}
2680
2681
2682void MacroAssembler::AssertName(Register object) {
2683  if (emit_debug_code()) {
2684    STATIC_ASSERT(kSmiTag == 0);
2685    TestIfSmi(object, r0);
2686    Check(ne, kOperandIsASmiAndNotAName, cr0);
2687    push(object);
2688    LoadP(object, FieldMemOperand(object, HeapObject::kMapOffset));
2689    CompareInstanceType(object, object, LAST_NAME_TYPE);
2690    pop(object);
2691    Check(le, kOperandIsNotAName);
2692  }
2693}
2694
2695
2696void MacroAssembler::AssertFunction(Register object) {
2697  if (emit_debug_code()) {
2698    STATIC_ASSERT(kSmiTag == 0);
2699    TestIfSmi(object, r0);
2700    Check(ne, kOperandIsASmiAndNotAFunction, cr0);
2701    push(object);
2702    CompareObjectType(object, object, object, JS_FUNCTION_TYPE);
2703    pop(object);
2704    Check(eq, kOperandIsNotAFunction);
2705  }
2706}
2707
2708
2709void MacroAssembler::AssertBoundFunction(Register object) {
2710  if (emit_debug_code()) {
2711    STATIC_ASSERT(kSmiTag == 0);
2712    TestIfSmi(object, r0);
2713    Check(ne, kOperandIsASmiAndNotABoundFunction, cr0);
2714    push(object);
2715    CompareObjectType(object, object, object, JS_BOUND_FUNCTION_TYPE);
2716    pop(object);
2717    Check(eq, kOperandIsNotABoundFunction);
2718  }
2719}
2720
2721void MacroAssembler::AssertGeneratorObject(Register object) {
2722  if (emit_debug_code()) {
2723    STATIC_ASSERT(kSmiTag == 0);
2724    TestIfSmi(object, r0);
2725    Check(ne, kOperandIsASmiAndNotAGeneratorObject, cr0);
2726    push(object);
2727    CompareObjectType(object, object, object, JS_GENERATOR_OBJECT_TYPE);
2728    pop(object);
2729    Check(eq, kOperandIsNotAGeneratorObject);
2730  }
2731}
2732
2733void MacroAssembler::AssertReceiver(Register object) {
2734  if (emit_debug_code()) {
2735    STATIC_ASSERT(kSmiTag == 0);
2736    TestIfSmi(object, r0);
2737    Check(ne, kOperandIsASmiAndNotAReceiver, cr0);
2738    push(object);
2739    STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
2740    CompareObjectType(object, object, object, FIRST_JS_RECEIVER_TYPE);
2741    pop(object);
2742    Check(ge, kOperandIsNotAReceiver);
2743  }
2744}
2745
2746void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
2747                                                     Register scratch) {
2748  if (emit_debug_code()) {
2749    Label done_checking;
2750    AssertNotSmi(object);
2751    CompareRoot(object, Heap::kUndefinedValueRootIndex);
2752    beq(&done_checking);
2753    LoadP(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
2754    CompareRoot(scratch, Heap::kAllocationSiteMapRootIndex);
2755    Assert(eq, kExpectedUndefinedOrCell);
2756    bind(&done_checking);
2757  }
2758}
2759
2760
2761void MacroAssembler::AssertIsRoot(Register reg, Heap::RootListIndex index) {
2762  if (emit_debug_code()) {
2763    CompareRoot(reg, index);
2764    Check(eq, kHeapNumberMapRegisterClobbered);
2765  }
2766}
2767
2768
2769void MacroAssembler::JumpIfNotHeapNumber(Register object,
2770                                         Register heap_number_map,
2771                                         Register scratch,
2772                                         Label* on_not_heap_number) {
2773  LoadP(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
2774  AssertIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
2775  cmp(scratch, heap_number_map);
2776  bne(on_not_heap_number);
2777}
2778
2779
2780void MacroAssembler::JumpIfNonSmisNotBothSequentialOneByteStrings(
2781    Register first, Register second, Register scratch1, Register scratch2,
2782    Label* failure) {
2783  // Test that both first and second are sequential one-byte strings.
2784  // Assume that they are non-smis.
2785  LoadP(scratch1, FieldMemOperand(first, HeapObject::kMapOffset));
2786  LoadP(scratch2, FieldMemOperand(second, HeapObject::kMapOffset));
2787  lbz(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
2788  lbz(scratch2, FieldMemOperand(scratch2, Map::kInstanceTypeOffset));
2789
2790  JumpIfBothInstanceTypesAreNotSequentialOneByte(scratch1, scratch2, scratch1,
2791                                                 scratch2, failure);
2792}
2793
2794void MacroAssembler::JumpIfNotBothSequentialOneByteStrings(Register first,
2795                                                           Register second,
2796                                                           Register scratch1,
2797                                                           Register scratch2,
2798                                                           Label* failure) {
2799  // Check that neither is a smi.
2800  and_(scratch1, first, second);
2801  JumpIfSmi(scratch1, failure);
2802  JumpIfNonSmisNotBothSequentialOneByteStrings(first, second, scratch1,
2803                                               scratch2, failure);
2804}
2805
2806
2807void MacroAssembler::JumpIfNotUniqueNameInstanceType(Register reg,
2808                                                     Label* not_unique_name) {
2809  STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
2810  Label succeed;
2811  andi(r0, reg, Operand(kIsNotStringMask | kIsNotInternalizedMask));
2812  beq(&succeed, cr0);
2813  cmpi(reg, Operand(SYMBOL_TYPE));
2814  bne(not_unique_name);
2815
2816  bind(&succeed);
2817}
2818
2819
// Allocates a heap number or jumps to the gc_required label if the young
// space is full and a scavenge is needed.
2822void MacroAssembler::AllocateHeapNumber(Register result, Register scratch1,
2823                                        Register scratch2,
2824                                        Register heap_number_map,
2825                                        Label* gc_required,
2826                                        MutableMode mode) {
2827  // Allocate an object in the heap for the heap number and tag it as a heap
2828  // object.
2829  Allocate(HeapNumber::kSize, result, scratch1, scratch2, gc_required,
2830           NO_ALLOCATION_FLAGS);
2831
2832  Heap::RootListIndex map_index = mode == MUTABLE
2833                                      ? Heap::kMutableHeapNumberMapRootIndex
2834                                      : Heap::kHeapNumberMapRootIndex;
2835  AssertIsRoot(heap_number_map, map_index);
2836
2837  // Store heap number map in the allocated object.
2838  StoreP(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset),
         r0);
2840}
2841
2842
2843void MacroAssembler::AllocateHeapNumberWithValue(
2844    Register result, DoubleRegister value, Register scratch1, Register scratch2,
2845    Register heap_number_map, Label* gc_required) {
2846  AllocateHeapNumber(result, scratch1, scratch2, heap_number_map, gc_required);
2847  stfd(value, FieldMemOperand(result, HeapNumber::kValueOffset));
2848}
2849
2850
2851void MacroAssembler::AllocateJSValue(Register result, Register constructor,
2852                                     Register value, Register scratch1,
2853                                     Register scratch2, Label* gc_required) {
2854  DCHECK(!result.is(constructor));
2855  DCHECK(!result.is(scratch1));
2856  DCHECK(!result.is(scratch2));
2857  DCHECK(!result.is(value));
2858
2859  // Allocate JSValue in new space.
2860  Allocate(JSValue::kSize, result, scratch1, scratch2, gc_required,
2861           NO_ALLOCATION_FLAGS);
2862
2863  // Initialize the JSValue.
2864  LoadGlobalFunctionInitialMap(constructor, scratch1, scratch2);
2865  StoreP(scratch1, FieldMemOperand(result, HeapObject::kMapOffset), r0);
2866  LoadRoot(scratch1, Heap::kEmptyFixedArrayRootIndex);
2867  StoreP(scratch1, FieldMemOperand(result, JSObject::kPropertiesOffset), r0);
2868  StoreP(scratch1, FieldMemOperand(result, JSObject::kElementsOffset), r0);
2869  StoreP(value, FieldMemOperand(result, JSValue::kValueOffset), r0);
2870  STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize);
2871}
2872
2873void MacroAssembler::InitializeNFieldsWithFiller(Register current_address,
2874                                                 Register count,
2875                                                 Register filler) {
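  // Note: count is assumed to be non-zero; the mtctr/bdnz loop would wrap
  // around for a zero count. InitializeFieldsWithFiller below guards the
  // zero case before calling in here.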
2876  Label loop;
2877  mtctr(count);
2878  bind(&loop);
2879  StoreP(filler, MemOperand(current_address));
2880  addi(current_address, current_address, Operand(kPointerSize));
2881  bdnz(&loop);
2882}
2883
2884void MacroAssembler::InitializeFieldsWithFiller(Register current_address,
2885                                                Register end_address,
2886                                                Register filler) {
2887  Label done;
2888  sub(r0, end_address, current_address, LeaveOE, SetRC);
2889  beq(&done, cr0);
2890  ShiftRightImm(r0, r0, Operand(kPointerSizeLog2));
2891  InitializeNFieldsWithFiller(current_address, r0, filler);
2892  bind(&done);
2893}
2894
2895
2896void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialOneByte(
2897    Register first, Register second, Register scratch1, Register scratch2,
2898    Label* failure) {
2899  const int kFlatOneByteStringMask =
2900      kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
2901  const int kFlatOneByteStringTag =
2902      kStringTag | kOneByteStringTag | kSeqStringTag;
2903  andi(scratch1, first, Operand(kFlatOneByteStringMask));
2904  andi(scratch2, second, Operand(kFlatOneByteStringMask));
2905  cmpi(scratch1, Operand(kFlatOneByteStringTag));
2906  bne(failure);
2907  cmpi(scratch2, Operand(kFlatOneByteStringTag));
2908  bne(failure);
2909}
2910
2911static const int kRegisterPassedArguments = 8;
2912
2913
2914int MacroAssembler::CalculateStackPassedWords(int num_reg_arguments,
2915                                              int num_double_arguments) {
2916  int stack_passed_words = 0;
2917  if (num_double_arguments > DoubleRegister::kNumRegisters) {
2918    stack_passed_words +=
2919        2 * (num_double_arguments - DoubleRegister::kNumRegisters);
2920  }
2921  // Up to 8 simple arguments are passed in registers r3..r10.
2922  if (num_reg_arguments > kRegisterPassedArguments) {
2923    stack_passed_words += num_reg_arguments - kRegisterPassedArguments;
2924  }
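  // Example: 10 integer arguments and no doubles -> 10 - 8 = 2 words are
  // passed on the stack.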
2925  return stack_passed_words;
2926}
2927
2928
2929void MacroAssembler::EmitSeqStringSetCharCheck(Register string, Register index,
2930                                               Register value,
2931                                               uint32_t encoding_mask) {
2932  Label is_object;
2933  TestIfSmi(string, r0);
2934  Check(ne, kNonObject, cr0);
2935
2936  LoadP(ip, FieldMemOperand(string, HeapObject::kMapOffset));
2937  lbz(ip, FieldMemOperand(ip, Map::kInstanceTypeOffset));
2938
2939  andi(ip, ip, Operand(kStringRepresentationMask | kStringEncodingMask));
2940  cmpi(ip, Operand(encoding_mask));
2941  Check(eq, kUnexpectedStringType);
2942
// The index is assumed to be untagged coming in; tag it to compare with the
// string length without using a temp register. It is restored at the end of
// this function.
2946#if !V8_TARGET_ARCH_PPC64
2947  Label index_tag_ok, index_tag_bad;
2948  JumpIfNotSmiCandidate(index, r0, &index_tag_bad);
2949#endif
2950  SmiTag(index, index);
2951#if !V8_TARGET_ARCH_PPC64
2952  b(&index_tag_ok);
2953  bind(&index_tag_bad);
2954  Abort(kIndexIsTooLarge);
2955  bind(&index_tag_ok);
2956#endif
2957
2958  LoadP(ip, FieldMemOperand(string, String::kLengthOffset));
2959  cmp(index, ip);
2960  Check(lt, kIndexIsTooLarge);
2961
2962  DCHECK(Smi::kZero == 0);
2963  cmpi(index, Operand::Zero());
2964  Check(ge, kIndexIsNegative);
2965
2966  SmiUntag(index, index);
2967}
2968
2969
2970void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
2971                                          int num_double_arguments,
2972                                          Register scratch) {
2973  int frame_alignment = ActivationFrameAlignment();
2974  int stack_passed_arguments =
2975      CalculateStackPassedWords(num_reg_arguments, num_double_arguments);
2976  int stack_space = kNumRequiredStackFrameSlots;
2977
2978  if (frame_alignment > kPointerSize) {
    // Align the stack to frame_alignment and make room for stack arguments,
    // preserving the original value of sp.
2981    mr(scratch, sp);
2982    addi(sp, sp, Operand(-(stack_passed_arguments + 1) * kPointerSize));
2983    DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
2984    ClearRightImm(sp, sp, Operand(WhichPowerOf2(frame_alignment)));
2985    StoreP(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize));
2986  } else {
2987    // Make room for stack arguments
2988    stack_space += stack_passed_arguments;
2989  }
2990
2991  // Allocate frame with required slots to make ABI work.
2992  li(r0, Operand::Zero());
2993  StorePU(r0, MemOperand(sp, -stack_space * kPointerSize));
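  // StorePU is a store-with-update: it writes r0 (zero) at
  // sp - stack_space * kPointerSize and moves sp down to that address,
  // reserving the frame in a single instruction.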
2994}
2995
2996
2997void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
2998                                          Register scratch) {
2999  PrepareCallCFunction(num_reg_arguments, 0, scratch);
3000}
3001
3002
3003void MacroAssembler::MovToFloatParameter(DoubleRegister src) { Move(d1, src); }
3004
3005
3006void MacroAssembler::MovToFloatResult(DoubleRegister src) { Move(d1, src); }
3007
3008
3009void MacroAssembler::MovToFloatParameters(DoubleRegister src1,
3010                                          DoubleRegister src2) {
3011  if (src2.is(d1)) {
3012    DCHECK(!src1.is(d2));
3013    Move(d2, src2);
3014    Move(d1, src1);
3015  } else {
3016    Move(d1, src1);
3017    Move(d2, src2);
3018  }
3019}
3020
3021
3022void MacroAssembler::CallCFunction(ExternalReference function,
3023                                   int num_reg_arguments,
3024                                   int num_double_arguments) {
3025  mov(ip, Operand(function));
3026  CallCFunctionHelper(ip, num_reg_arguments, num_double_arguments);
3027}
3028
3029
3030void MacroAssembler::CallCFunction(Register function, int num_reg_arguments,
3031                                   int num_double_arguments) {
3032  CallCFunctionHelper(function, num_reg_arguments, num_double_arguments);
3033}
3034
3035
3036void MacroAssembler::CallCFunction(ExternalReference function,
3037                                   int num_arguments) {
3038  CallCFunction(function, num_arguments, 0);
3039}
3040
3041
3042void MacroAssembler::CallCFunction(Register function, int num_arguments) {
3043  CallCFunction(function, num_arguments, 0);
3044}
3045
3046
3047void MacroAssembler::CallCFunctionHelper(Register function,
3048                                         int num_reg_arguments,
3049                                         int num_double_arguments) {
3050  DCHECK(has_frame());
3051
3052  // Just call directly. The function called cannot cause a GC, or
3053  // allow preemption, so the return address in the link register
3054  // stays correct.
3055  Register dest = function;
3056  if (ABI_USES_FUNCTION_DESCRIPTORS) {
    // AIX/PPC64BE Linux uses a function descriptor. When calling C code, be
    // aware of this descriptor and pick up values from it.
3059    LoadP(ToRegister(ABI_TOC_REGISTER), MemOperand(function, kPointerSize));
3060    LoadP(ip, MemOperand(function, 0));
3061    dest = ip;
3062  } else if (ABI_CALL_VIA_IP) {
3063    Move(ip, function);
3064    dest = ip;
3065  }
3066
3067  Call(dest);
3068
  // Remove the frame created in PrepareCallCFunction.
3070  int stack_passed_arguments =
3071      CalculateStackPassedWords(num_reg_arguments, num_double_arguments);
3072  int stack_space = kNumRequiredStackFrameSlots + stack_passed_arguments;
3073  if (ActivationFrameAlignment() > kPointerSize) {
3074    LoadP(sp, MemOperand(sp, stack_space * kPointerSize));
3075  } else {
3076    addi(sp, sp, Operand(stack_space * kPointerSize));
3077  }
3078}
3079
3080
3081void MacroAssembler::DecodeConstantPoolOffset(Register result,
3082                                              Register location) {
3083  Label overflow_access, done;
3084  DCHECK(!AreAliased(result, location, r0));
3085
  // Determine the constant pool access type.
3087  // Caller has already placed the instruction word at location in result.
3088  ExtractBitRange(r0, result, 31, 26);
3089  cmpi(r0, Operand(ADDIS >> 26));
3090  beq(&overflow_access);
3091
3092  // Regular constant pool access
3093  // extract the load offset
3094  andi(result, result, Operand(kImm16Mask));
3095  b(&done);
3096
3097  bind(&overflow_access);
3098  // Overflow constant pool access
3099  // shift addis immediate
3100  slwi(r0, result, Operand(16));
3101  // sign-extend and add the load offset
3102  lwz(result, MemOperand(location, kInstrSize));
3103  extsh(result, result);
3104  add(result, r0, result);
3105
3106  bind(&done);
3107}
3108
3109
3110void MacroAssembler::CheckPageFlag(
3111    Register object,
3112    Register scratch,  // scratch may be same register as object
3113    int mask, Condition cc, Label* condition_met) {
3114  DCHECK(cc == ne || cc == eq);
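  // Clearing the low kPageSizeBits bits of the object address yields the
  // MemoryChunk (page) header, where the flags word lives at kFlagsOffset.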
3115  ClearRightImm(scratch, object, Operand(kPageSizeBits));
3116  LoadP(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
3117
3118  And(r0, scratch, Operand(mask), SetRC);
3119
3120  if (cc == ne) {
3121    bne(condition_met, cr0);
3122  }
3123  if (cc == eq) {
3124    beq(condition_met, cr0);
3125  }
3126}
3127
3128
3129void MacroAssembler::JumpIfBlack(Register object, Register scratch0,
3130                                 Register scratch1, Label* on_black) {
3131  HasColor(object, scratch0, scratch1, on_black, 1, 1);  // kBlackBitPattern.
3132  DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
3133}
3134
3135
3136void MacroAssembler::HasColor(Register object, Register bitmap_scratch,
3137                              Register mask_scratch, Label* has_color,
3138                              int first_bit, int second_bit) {
3139  DCHECK(!AreAliased(object, bitmap_scratch, mask_scratch, no_reg));
3140
3141  GetMarkBits(object, bitmap_scratch, mask_scratch);
3142
3143  Label other_color, word_boundary;
3144  lwz(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
3145  // Test the first bit
3146  and_(r0, ip, mask_scratch, SetRC);
3147  b(first_bit == 1 ? eq : ne, &other_color, cr0);
3148  // Shift left 1
3149  // May need to load the next cell
3150  slwi(mask_scratch, mask_scratch, Operand(1), SetRC);
3151  beq(&word_boundary, cr0);
3152  // Test the second bit
3153  and_(r0, ip, mask_scratch, SetRC);
3154  b(second_bit == 1 ? ne : eq, has_color, cr0);
3155  b(&other_color);
3156
3157  bind(&word_boundary);
3158  lwz(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize + kIntSize));
3159  andi(r0, ip, Operand(1));
3160  b(second_bit == 1 ? ne : eq, has_color, cr0);
3161  bind(&other_color);
3162}
3163
3164
3165void MacroAssembler::GetMarkBits(Register addr_reg, Register bitmap_reg,
3166                                 Register mask_reg) {
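  // Computes, for the object at addr_reg, the page-relative address of its
  // marking-bitmap cell (bitmap_reg; callers add MemoryChunk::kHeaderSize)
  // and a one-bit mask for its first mark bit within that cell (mask_reg).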
3167  DCHECK(!AreAliased(addr_reg, bitmap_reg, mask_reg, no_reg));
3168  DCHECK((~Page::kPageAlignmentMask & 0xffff) == 0);
3169  lis(r0, Operand((~Page::kPageAlignmentMask >> 16)));
3170  and_(bitmap_reg, addr_reg, r0);
3171  const int kLowBits = kPointerSizeLog2 + Bitmap::kBitsPerCellLog2;
3172  ExtractBitRange(mask_reg, addr_reg, kLowBits - 1, kPointerSizeLog2);
3173  ExtractBitRange(ip, addr_reg, kPageSizeBits - 1, kLowBits);
3174  ShiftLeftImm(ip, ip, Operand(Bitmap::kBytesPerCellLog2));
3175  add(bitmap_reg, bitmap_reg, ip);
3176  li(ip, Operand(1));
3177  slw(mask_reg, ip, mask_reg);
3178}
3179
3180
3181void MacroAssembler::JumpIfWhite(Register value, Register bitmap_scratch,
3182                                 Register mask_scratch, Register load_scratch,
3183                                 Label* value_is_white) {
3184  DCHECK(!AreAliased(value, bitmap_scratch, mask_scratch, ip));
3185  GetMarkBits(value, bitmap_scratch, mask_scratch);
3186
3187  // If the value is black or grey we don't need to do anything.
3188  DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
3189  DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
3190  DCHECK(strcmp(Marking::kGreyBitPattern, "10") == 0);
3191  DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
3192
3193  // Since both black and grey have a 1 in the first position and white does
3194  // not have a 1 there, we only need to check one bit.
3195  lwz(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
3196  and_(r0, mask_scratch, load_scratch, SetRC);
3197  beq(value_is_white, cr0);
3198}
3199
3200
3201// Saturate a value into 8-bit unsigned integer
3202//   if input_value < 0, output_value is 0
3203//   if input_value > 255, output_value is 255
3204//   otherwise output_value is the input_value
3205void MacroAssembler::ClampUint8(Register output_reg, Register input_reg) {
3206  int satval = (1 << 8) - 1;
3207
3208  if (CpuFeatures::IsSupported(ISELECT)) {
3209    // set to 0 if negative
3210    cmpi(input_reg, Operand::Zero());
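    // isel interprets an RA field of 0 as the value zero, so r0 here
    // supplies the constant 0 rather than the contents of r0.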
3211    isel(lt, output_reg, r0, input_reg);
3212
3213    // set to satval if > satval
3214    li(r0, Operand(satval));
3215    cmpi(output_reg, Operand(satval));
3216    isel(lt, output_reg, output_reg, r0);
3217  } else {
3218    Label done, negative_label, overflow_label;
3219    cmpi(input_reg, Operand::Zero());
3220    blt(&negative_label);
3221
3222    cmpi(input_reg, Operand(satval));
3223    bgt(&overflow_label);
3224    if (!output_reg.is(input_reg)) {
3225      mr(output_reg, input_reg);
3226    }
3227    b(&done);
3228
3229    bind(&negative_label);
3230    li(output_reg, Operand::Zero());  // set to 0 if negative
3231    b(&done);
3232
3233    bind(&overflow_label);  // set to satval if > satval
3234    li(output_reg, Operand(satval));
3235
3236    bind(&done);
3237  }
3238}
3239
3240
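// Rounding control lives in FPSCR field 7; its low two bits are the RN
// (rounding-mode) bits.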
3241void MacroAssembler::SetRoundingMode(FPRoundingMode RN) { mtfsfi(7, RN); }
3242
3243
3244void MacroAssembler::ResetRoundingMode() {
3245  mtfsfi(7, kRoundToNearest);  // reset (default is kRoundToNearest)
3246}
3247
3248
3249void MacroAssembler::ClampDoubleToUint8(Register result_reg,
3250                                        DoubleRegister input_reg,
3251                                        DoubleRegister double_scratch) {
3252  Label above_zero;
3253  Label done;
3254  Label in_bounds;
3255
3256  LoadDoubleLiteral(double_scratch, 0.0, result_reg);
3257  fcmpu(input_reg, double_scratch);
3258  bgt(&above_zero);
3259
3260  // Double value is <= 0 or NaN; return 0.
3261  LoadIntLiteral(result_reg, 0);
3262  b(&done);
3263
3264  // Double value is > 0; return 255 if it exceeds 255.
3265  bind(&above_zero);
3266  LoadDoubleLiteral(double_scratch, 255.0, result_reg);
3267  fcmpu(input_reg, double_scratch);
3268  ble(&in_bounds);
3269  LoadIntLiteral(result_reg, 255);
3270  b(&done);
3271
3272  // In 0-255 range, round and truncate.
3273  bind(&in_bounds);
3274
3275  // round to nearest (default rounding mode)
3276  fctiw(double_scratch, input_reg);
3277  MovDoubleLowToInt(result_reg, double_scratch);
3278  bind(&done);
3279}
3280
3281
3282void MacroAssembler::LoadInstanceDescriptors(Register map,
3283                                             Register descriptors) {
3284  LoadP(descriptors, FieldMemOperand(map, Map::kDescriptorsOffset));
3285}
3286
3287
3288void MacroAssembler::NumberOfOwnDescriptors(Register dst, Register map) {
3289  lwz(dst, FieldMemOperand(map, Map::kBitField3Offset));
3290  DecodeField<Map::NumberOfOwnDescriptorsBits>(dst);
3291}
3292
3293
3294void MacroAssembler::EnumLength(Register dst, Register map) {
3295  STATIC_ASSERT(Map::EnumLengthBits::kShift == 0);
3296  lwz(dst, FieldMemOperand(map, Map::kBitField3Offset));
3297  ExtractBitMask(dst, dst, Map::EnumLengthBits::kMask);
3298  SmiTag(dst);
3299}
3300
3301
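// Loads the getter or setter from the AccessorPair stored at the given
// descriptor index in the holder's map.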
3302void MacroAssembler::LoadAccessor(Register dst, Register holder,
3303                                  int accessor_index,
3304                                  AccessorComponent accessor) {
3305  LoadP(dst, FieldMemOperand(holder, HeapObject::kMapOffset));
3306  LoadInstanceDescriptors(dst, dst);
3307  LoadP(dst,
3308        FieldMemOperand(dst, DescriptorArray::GetValueOffset(accessor_index)));
3309  const int getterOffset = AccessorPair::kGetterOffset;
3310  const int setterOffset = AccessorPair::kSetterOffset;
3311  int offset = ((accessor == ACCESSOR_GETTER) ? getterOffset : setterOffset);
3312  LoadP(dst, FieldMemOperand(dst, offset));
3313}
3314
3315
3316void MacroAssembler::CheckEnumCache(Label* call_runtime) {
3317  Register null_value = r8;
3318  Register empty_fixed_array_value = r9;
3319  LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
3320  Label next, start;
3321  mr(r5, r3);
3322
3323  // Check if the enum length field is properly initialized, indicating that
3324  // there is an enum cache.
3325  LoadP(r4, FieldMemOperand(r5, HeapObject::kMapOffset));
3326
3327  EnumLength(r6, r4);
3328  CmpSmiLiteral(r6, Smi::FromInt(kInvalidEnumCacheSentinel), r0);
3329  beq(call_runtime);
3330
3331  LoadRoot(null_value, Heap::kNullValueRootIndex);
3332  b(&start);
3333
3334  bind(&next);
3335  LoadP(r4, FieldMemOperand(r5, HeapObject::kMapOffset));
3336
3337  // For all objects but the receiver, check that the cache is empty.
3338  EnumLength(r6, r4);
3339  CmpSmiLiteral(r6, Smi::kZero, r0);
3340  bne(call_runtime);
3341
3342  bind(&start);
3343
3344  // Check that there are no elements. Register r5 contains the current JS
3345  // object we've reached through the prototype chain.
3346  Label no_elements;
3347  LoadP(r5, FieldMemOperand(r5, JSObject::kElementsOffset));
3348  cmp(r5, empty_fixed_array_value);
3349  beq(&no_elements);
3350
3351  // Second chance, the object may be using the empty slow element dictionary.
3352  CompareRoot(r5, Heap::kEmptySlowElementDictionaryRootIndex);
3353  bne(call_runtime);
3354
3355  bind(&no_elements);
3356  LoadP(r5, FieldMemOperand(r4, Map::kPrototypeOffset));
3357  cmp(r5, null_value);
3358  bne(&next);
3359}
3360
3361
3362////////////////////////////////////////////////////////////////////////////////
3363//
3364// New MacroAssembler Interfaces added for PPC
3365//
3366////////////////////////////////////////////////////////////////////////////////
3367void MacroAssembler::LoadIntLiteral(Register dst, int value) {
3368  mov(dst, Operand(value));
3369}
3370
3371
3372void MacroAssembler::LoadSmiLiteral(Register dst, Smi* smi) {
3373  mov(dst, Operand(smi));
3374}
3375
3376
3377void MacroAssembler::LoadDoubleLiteral(DoubleRegister result, double value,
3378                                       Register scratch) {
3379  if (FLAG_enable_embedded_constant_pool && is_constant_pool_available() &&
3380      !(scratch.is(r0) && ConstantPoolAccessIsInOverflow())) {
3381    ConstantPoolEntry::Access access = ConstantPoolAddEntry(value);
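    // The zero offsets used below are placeholders; they are patched with the
    // entry's real offset once the constant pool is emitted.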
3382    if (access == ConstantPoolEntry::OVERFLOWED) {
3383      addis(scratch, kConstantPoolRegister, Operand::Zero());
3384      lfd(result, MemOperand(scratch, 0));
3385    } else {
3386      lfd(result, MemOperand(kConstantPoolRegister, 0));
3387    }
3388    return;
3389  }
3390
3391  // avoid gcc strict aliasing error using union cast
3392  union {
3393    double dval;
3394#if V8_TARGET_ARCH_PPC64
3395    intptr_t ival;
3396#else
3397    intptr_t ival[2];
3398#endif
3399  } litVal;
3400
3401  litVal.dval = value;
3402
3403#if V8_TARGET_ARCH_PPC64
3404  if (CpuFeatures::IsSupported(FPR_GPR_MOV)) {
3405    mov(scratch, Operand(litVal.ival));
3406    mtfprd(result, scratch);
3407    return;
3408  }
3409#endif
3410
3411  addi(sp, sp, Operand(-kDoubleSize));
3412#if V8_TARGET_ARCH_PPC64
3413  mov(scratch, Operand(litVal.ival));
3414  std(scratch, MemOperand(sp));
3415#else
3416  LoadIntLiteral(scratch, litVal.ival[0]);
3417  stw(scratch, MemOperand(sp, 0));
3418  LoadIntLiteral(scratch, litVal.ival[1]);
3419  stw(scratch, MemOperand(sp, 4));
3420#endif
3421  nop(GROUP_ENDING_NOP);  // LHS/RAW optimization
3422  lfd(result, MemOperand(sp, 0));
3423  addi(sp, sp, Operand(kDoubleSize));
3424}
3425
3426
3427void MacroAssembler::MovIntToDouble(DoubleRegister dst, Register src,
3428                                    Register scratch) {
3429// sign-extend src to 64-bit
3430#if V8_TARGET_ARCH_PPC64
3431  if (CpuFeatures::IsSupported(FPR_GPR_MOV)) {
3432    mtfprwa(dst, src);
3433    return;
3434  }
3435#endif
3436
3437  DCHECK(!src.is(scratch));
3438  subi(sp, sp, Operand(kDoubleSize));
3439#if V8_TARGET_ARCH_PPC64
3440  extsw(scratch, src);
3441  std(scratch, MemOperand(sp, 0));
3442#else
3443  srawi(scratch, src, 31);
3444  stw(scratch, MemOperand(sp, Register::kExponentOffset));
3445  stw(src, MemOperand(sp, Register::kMantissaOffset));
3446#endif
3447  nop(GROUP_ENDING_NOP);  // LHS/RAW optimization
3448  lfd(dst, MemOperand(sp, 0));
3449  addi(sp, sp, Operand(kDoubleSize));
3450}
3451
3452
3453void MacroAssembler::MovUnsignedIntToDouble(DoubleRegister dst, Register src,
3454                                            Register scratch) {
3455// zero-extend src to 64-bit
3456#if V8_TARGET_ARCH_PPC64
3457  if (CpuFeatures::IsSupported(FPR_GPR_MOV)) {
3458    mtfprwz(dst, src);
3459    return;
3460  }
3461#endif
3462
3463  DCHECK(!src.is(scratch));
3464  subi(sp, sp, Operand(kDoubleSize));
3465#if V8_TARGET_ARCH_PPC64
3466  clrldi(scratch, src, Operand(32));
3467  std(scratch, MemOperand(sp, 0));
3468#else
3469  li(scratch, Operand::Zero());
3470  stw(scratch, MemOperand(sp, Register::kExponentOffset));
3471  stw(src, MemOperand(sp, Register::kMantissaOffset));
3472#endif
3473  nop(GROUP_ENDING_NOP);  // LHS/RAW optimization
3474  lfd(dst, MemOperand(sp, 0));
3475  addi(sp, sp, Operand(kDoubleSize));
3476}
3477
3478
3479void MacroAssembler::MovInt64ToDouble(DoubleRegister dst,
3480#if !V8_TARGET_ARCH_PPC64
3481                                      Register src_hi,
3482#endif
3483                                      Register src) {
3484#if V8_TARGET_ARCH_PPC64
3485  if (CpuFeatures::IsSupported(FPR_GPR_MOV)) {
3486    mtfprd(dst, src);
3487    return;
3488  }
3489#endif
3490
3491  subi(sp, sp, Operand(kDoubleSize));
3492#if V8_TARGET_ARCH_PPC64
3493  std(src, MemOperand(sp, 0));
3494#else
3495  stw(src_hi, MemOperand(sp, Register::kExponentOffset));
3496  stw(src, MemOperand(sp, Register::kMantissaOffset));
3497#endif
3498  nop(GROUP_ENDING_NOP);  // LHS/RAW optimization
3499  lfd(dst, MemOperand(sp, 0));
3500  addi(sp, sp, Operand(kDoubleSize));
3501}
3502
3503
3504#if V8_TARGET_ARCH_PPC64
3505void MacroAssembler::MovInt64ComponentsToDouble(DoubleRegister dst,
3506                                                Register src_hi,
3507                                                Register src_lo,
3508                                                Register scratch) {
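  // Assemble the 64-bit value from its 32-bit halves and move it into dst,
  // directly via a GPR when possible, otherwise through memory.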
3509  if (CpuFeatures::IsSupported(FPR_GPR_MOV)) {
3510    sldi(scratch, src_hi, Operand(32));
3511    rldimi(scratch, src_lo, 0, 32);
3512    mtfprd(dst, scratch);
3513    return;
3514  }
3515
3516  subi(sp, sp, Operand(kDoubleSize));
3517  stw(src_hi, MemOperand(sp, Register::kExponentOffset));
3518  stw(src_lo, MemOperand(sp, Register::kMantissaOffset));
3519  nop(GROUP_ENDING_NOP);  // LHS/RAW optimization
3520  lfd(dst, MemOperand(sp));
3521  addi(sp, sp, Operand(kDoubleSize));
3522}
3523#endif
3524
3525
3526void MacroAssembler::InsertDoubleLow(DoubleRegister dst, Register src,
3527                                     Register scratch) {
3528#if V8_TARGET_ARCH_PPC64
3529  if (CpuFeatures::IsSupported(FPR_GPR_MOV)) {
3530    mffprd(scratch, dst);
3531    rldimi(scratch, src, 0, 32);
3532    mtfprd(dst, scratch);
3533    return;
3534  }
3535#endif
3536
3537  subi(sp, sp, Operand(kDoubleSize));
3538  stfd(dst, MemOperand(sp));
3539  stw(src, MemOperand(sp, Register::kMantissaOffset));
3540  nop(GROUP_ENDING_NOP);  // LHS/RAW optimization
3541  lfd(dst, MemOperand(sp));
3542  addi(sp, sp, Operand(kDoubleSize));
3543}
3544
3545
3546void MacroAssembler::InsertDoubleHigh(DoubleRegister dst, Register src,
3547                                      Register scratch) {
3548#if V8_TARGET_ARCH_PPC64
3549  if (CpuFeatures::IsSupported(FPR_GPR_MOV)) {
3550    mffprd(scratch, dst);
3551    rldimi(scratch, src, 32, 0);
3552    mtfprd(dst, scratch);
3553    return;
3554  }
3555#endif
3556
3557  subi(sp, sp, Operand(kDoubleSize));
3558  stfd(dst, MemOperand(sp));
3559  stw(src, MemOperand(sp, Register::kExponentOffset));
3560  nop(GROUP_ENDING_NOP);  // LHS/RAW optimization
3561  lfd(dst, MemOperand(sp));
3562  addi(sp, sp, Operand(kDoubleSize));
3563}
3564
3565
3566void MacroAssembler::MovDoubleLowToInt(Register dst, DoubleRegister src) {
3567#if V8_TARGET_ARCH_PPC64
3568  if (CpuFeatures::IsSupported(FPR_GPR_MOV)) {
3569    mffprwz(dst, src);
3570    return;
3571  }
3572#endif
3573
3574  subi(sp, sp, Operand(kDoubleSize));
3575  stfd(src, MemOperand(sp));
3576  nop(GROUP_ENDING_NOP);  // LHS/RAW optimization
3577  lwz(dst, MemOperand(sp, Register::kMantissaOffset));
3578  addi(sp, sp, Operand(kDoubleSize));
3579}
3580
3581
3582void MacroAssembler::MovDoubleHighToInt(Register dst, DoubleRegister src) {
3583#if V8_TARGET_ARCH_PPC64
3584  if (CpuFeatures::IsSupported(FPR_GPR_MOV)) {
3585    mffprd(dst, src);
3586    srdi(dst, dst, Operand(32));
3587    return;
3588  }
3589#endif
3590
3591  subi(sp, sp, Operand(kDoubleSize));
3592  stfd(src, MemOperand(sp));
3593  nop(GROUP_ENDING_NOP);  // LHS/RAW optimization
3594  lwz(dst, MemOperand(sp, Register::kExponentOffset));
3595  addi(sp, sp, Operand(kDoubleSize));
3596}
3597
3598
3599void MacroAssembler::MovDoubleToInt64(
3600#if !V8_TARGET_ARCH_PPC64
3601    Register dst_hi,
3602#endif
3603    Register dst, DoubleRegister src) {
3604#if V8_TARGET_ARCH_PPC64
3605  if (CpuFeatures::IsSupported(FPR_GPR_MOV)) {
3606    mffprd(dst, src);
3607    return;
3608  }
3609#endif
3610
3611  subi(sp, sp, Operand(kDoubleSize));
3612  stfd(src, MemOperand(sp));
3613  nop(GROUP_ENDING_NOP);  // LHS/RAW optimization
3614#if V8_TARGET_ARCH_PPC64
3615  ld(dst, MemOperand(sp, 0));
3616#else
3617  lwz(dst_hi, MemOperand(sp, Register::kExponentOffset));
3618  lwz(dst, MemOperand(sp, Register::kMantissaOffset));
3619#endif
3620  addi(sp, sp, Operand(kDoubleSize));
3621}
3622
3623
3624void MacroAssembler::MovIntToFloat(DoubleRegister dst, Register src) {
3625  subi(sp, sp, Operand(kFloatSize));
3626  stw(src, MemOperand(sp, 0));
3627  nop(GROUP_ENDING_NOP);  // LHS/RAW optimization
3628  lfs(dst, MemOperand(sp, 0));
3629  addi(sp, sp, Operand(kFloatSize));
3630}
3631
3632
3633void MacroAssembler::MovFloatToInt(Register dst, DoubleRegister src) {
3634  subi(sp, sp, Operand(kFloatSize));
3635  stfs(src, MemOperand(sp, 0));
3636  nop(GROUP_ENDING_NOP);  // LHS/RAW optimization
3637  lwz(dst, MemOperand(sp, 0));
3638  addi(sp, sp, Operand(kFloatSize));
3639}
3640
3641
3642void MacroAssembler::Add(Register dst, Register src, intptr_t value,
3643                         Register scratch) {
3644  if (is_int16(value)) {
3645    addi(dst, src, Operand(value));
3646  } else {
3647    mov(scratch, Operand(value));
3648    add(dst, src, scratch);
3649  }
3650}
3651
3652
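// Compare helpers: use the immediate form when the value fits in 16 bits,
// otherwise materialize it in scratch and compare register-to-register.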
3653void MacroAssembler::Cmpi(Register src1, const Operand& src2, Register scratch,
3654                          CRegister cr) {
3655  intptr_t value = src2.immediate();
3656  if (is_int16(value)) {
3657    cmpi(src1, src2, cr);
3658  } else {
3659    mov(scratch, src2);
3660    cmp(src1, scratch, cr);
3661  }
3662}
3663
3664
3665void MacroAssembler::Cmpli(Register src1, const Operand& src2, Register scratch,
3666                           CRegister cr) {
3667  intptr_t value = src2.immediate();
3668  if (is_uint16(value)) {
3669    cmpli(src1, src2, cr);
3670  } else {
3671    mov(scratch, src2);
3672    cmpl(src1, scratch, cr);
3673  }
3674}
3675
3676
3677void MacroAssembler::Cmpwi(Register src1, const Operand& src2, Register scratch,
3678                           CRegister cr) {
3679  intptr_t value = src2.immediate();
3680  if (is_int16(value)) {
3681    cmpwi(src1, src2, cr);
3682  } else {
3683    mov(scratch, src2);
3684    cmpw(src1, scratch, cr);
3685  }
3686}
3687
3688
3689void MacroAssembler::Cmplwi(Register src1, const Operand& src2,
3690                            Register scratch, CRegister cr) {
3691  intptr_t value = src2.immediate();
3692  if (is_uint16(value)) {
3693    cmplwi(src1, src2, cr);
3694  } else {
3695    mov(scratch, src2);
3696    cmplw(src1, scratch, cr);
3697  }
3698}
3699
3700
3701void MacroAssembler::And(Register ra, Register rs, const Operand& rb,
3702                         RCBit rc) {
3703  if (rb.is_reg()) {
3704    and_(ra, rs, rb.rm(), rc);
3705  } else {
3706    if (is_uint16(rb.imm_) && RelocInfo::IsNone(rb.rmode_) && rc == SetRC) {
3707      andi(ra, rs, rb);
3708    } else {
3709      // mov handles the relocation.
3710      DCHECK(!rs.is(r0));
3711      mov(r0, rb);
3712      and_(ra, rs, r0, rc);
3713    }
3714  }
3715}
3716
3717
3718void MacroAssembler::Or(Register ra, Register rs, const Operand& rb, RCBit rc) {
3719  if (rb.is_reg()) {
3720    orx(ra, rs, rb.rm(), rc);
3721  } else {
3722    if (is_uint16(rb.imm_) && RelocInfo::IsNone(rb.rmode_) && rc == LeaveRC) {
3723      ori(ra, rs, rb);
3724    } else {
3725      // mov handles the relocation.
3726      DCHECK(!rs.is(r0));
3727      mov(r0, rb);
3728      orx(ra, rs, r0, rc);
3729    }
3730  }
3731}
3732
3733
3734void MacroAssembler::Xor(Register ra, Register rs, const Operand& rb,
3735                         RCBit rc) {
3736  if (rb.is_reg()) {
3737    xor_(ra, rs, rb.rm(), rc);
3738  } else {
3739    if (is_uint16(rb.imm_) && RelocInfo::IsNone(rb.rmode_) && rc == LeaveRC) {
3740      xori(ra, rs, rb);
3741    } else {
3742      // mov handles the relocation.
3743      DCHECK(!rs.is(r0));
3744      mov(r0, rb);
3745      xor_(ra, rs, r0, rc);
3746    }
3747  }
3748}
3749
3750
3751void MacroAssembler::CmpSmiLiteral(Register src1, Smi* smi, Register scratch,
3752                                   CRegister cr) {
3753#if V8_TARGET_ARCH_PPC64
3754  LoadSmiLiteral(scratch, smi);
3755  cmp(src1, scratch, cr);
3756#else
3757  Cmpi(src1, Operand(smi), scratch, cr);
3758#endif
3759}
3760
3761
3762void MacroAssembler::CmplSmiLiteral(Register src1, Smi* smi, Register scratch,
3763                                    CRegister cr) {
3764#if V8_TARGET_ARCH_PPC64
3765  LoadSmiLiteral(scratch, smi);
3766  cmpl(src1, scratch, cr);
3767#else
3768  Cmpli(src1, Operand(smi), scratch, cr);
3769#endif
3770}
3771
3772
3773void MacroAssembler::AddSmiLiteral(Register dst, Register src, Smi* smi,
3774                                   Register scratch) {
3775#if V8_TARGET_ARCH_PPC64
3776  LoadSmiLiteral(scratch, smi);
3777  add(dst, src, scratch);
3778#else
3779  Add(dst, src, reinterpret_cast<intptr_t>(smi), scratch);
3780#endif
3781}
3782
3783
3784void MacroAssembler::SubSmiLiteral(Register dst, Register src, Smi* smi,
3785                                   Register scratch) {
3786#if V8_TARGET_ARCH_PPC64
3787  LoadSmiLiteral(scratch, smi);
3788  sub(dst, src, scratch);
3789#else
3790  Add(dst, src, -(reinterpret_cast<intptr_t>(smi)), scratch);
3791#endif
3792}
3793
3794
3795void MacroAssembler::AndSmiLiteral(Register dst, Register src, Smi* smi,
3796                                   Register scratch, RCBit rc) {
3797#if V8_TARGET_ARCH_PPC64
3798  LoadSmiLiteral(scratch, smi);
3799  and_(dst, src, scratch, rc);
3800#else
3801  And(dst, src, Operand(smi), rc);
3802#endif
3803}
3804
3805
3806// Load a "pointer" sized value from the memory location
3807void MacroAssembler::LoadP(Register dst, const MemOperand& mem,
3808                           Register scratch) {
3809  int offset = mem.offset();
3810
3811  if (!is_int16(offset)) {
3812    /* cannot use d-form */
3813    DCHECK(!scratch.is(no_reg));
3814    mov(scratch, Operand(offset));
3815    LoadPX(dst, MemOperand(mem.ra(), scratch));
3816  } else {
3817#if V8_TARGET_ARCH_PPC64
3818    int misaligned = (offset & 3);
3819    if (misaligned) {
3820      // ld requires a DS-form displacement (a multiple of 4); adjust the base
3821      // Todo: enhance to use scratch if dst is unsuitable
3822      DCHECK(!dst.is(r0));
3823      addi(dst, mem.ra(), Operand((offset & 3) - 4));
3824      ld(dst, MemOperand(dst, (offset & ~3) + 4));
3825    } else {
3826      ld(dst, mem);
3827    }
3828#else
3829    lwz(dst, mem);
3830#endif
3831  }
3832}
3833
3834void MacroAssembler::LoadPU(Register dst, const MemOperand& mem,
3835                            Register scratch) {
3836  int offset = mem.offset();
3837
3838  if (!is_int16(offset)) {
3839    /* cannot use d-form */
3840    DCHECK(!scratch.is(no_reg));
3841    mov(scratch, Operand(offset));
3842    LoadPUX(dst, MemOperand(mem.ra(), scratch));
3843  } else {
3844#if V8_TARGET_ARCH_PPC64
3845    ldu(dst, mem);
3846#else
3847    lwzu(dst, mem);
3848#endif
3849  }
3850}
3851
3852// Store a "pointer" sized value to the memory location
3853void MacroAssembler::StoreP(Register src, const MemOperand& mem,
3854                            Register scratch) {
3855  int offset = mem.offset();
3856
3857  if (!is_int16(offset)) {
3858    /* cannot use d-form */
3859    DCHECK(!scratch.is(no_reg));
3860    mov(scratch, Operand(offset));
3861    StorePX(src, MemOperand(mem.ra(), scratch));
3862  } else {
3863#if V8_TARGET_ARCH_PPC64
3864    int misaligned = (offset & 3);
3865    if (misaligned) {
3866      // std requires a DS-form displacement (a multiple of 4); adjust the base
3867      // a suitable scratch is required here
3868      DCHECK(!scratch.is(no_reg));
3869      if (scratch.is(r0)) {
3870        LoadIntLiteral(scratch, offset);
3871        stdx(src, MemOperand(mem.ra(), scratch));
3872      } else {
3873        addi(scratch, mem.ra(), Operand((offset & 3) - 4));
3874        std(src, MemOperand(scratch, (offset & ~3) + 4));
3875      }
3876    } else {
3877      std(src, mem);
3878    }
3879#else
3880    stw(src, mem);
3881#endif
3882  }
3883}
3884
3885void MacroAssembler::StorePU(Register src, const MemOperand& mem,
3886                             Register scratch) {
3887  int offset = mem.offset();
3888
3889  if (!is_int16(offset)) {
3890    /* cannot use d-form */
3891    DCHECK(!scratch.is(no_reg));
3892    mov(scratch, Operand(offset));
3893    StorePUX(src, MemOperand(mem.ra(), scratch));
3894  } else {
3895#if V8_TARGET_ARCH_PPC64
3896    stdu(src, mem);
3897#else
3898    stwu(src, mem);
3899#endif
3900  }
3901}
3902
3903void MacroAssembler::LoadWordArith(Register dst, const MemOperand& mem,
3904                                   Register scratch) {
3905  int offset = mem.offset();
3906
3907  if (!is_int16(offset)) {
3908    DCHECK(!scratch.is(no_reg));
3909    mov(scratch, Operand(offset));
3910    lwax(dst, MemOperand(mem.ra(), scratch));
3911  } else {
3912#if V8_TARGET_ARCH_PPC64
3913    int misaligned = (offset & 3);
3914    if (misaligned) {
3915      // lwa requires a DS-form displacement (a multiple of 4); adjust the base
3916      // Todo: enhance to use scratch if dst is unsuitable
3917      DCHECK(!dst.is(r0));
3918      addi(dst, mem.ra(), Operand((offset & 3) - 4));
3919      lwa(dst, MemOperand(dst, (offset & ~3) + 4));
3920    } else {
3921      lwa(dst, mem);
3922    }
3923#else
3924    lwz(dst, mem);
3925#endif
3926  }
3927}
3928
3929
3930// Variable length depending on whether offset fits into immediate field
3931// MemOperand currently only supports d-form
3932void MacroAssembler::LoadWord(Register dst, const MemOperand& mem,
3933                              Register scratch) {
3934  Register base = mem.ra();
3935  int offset = mem.offset();
3936
3937  if (!is_int16(offset)) {
3938    LoadIntLiteral(scratch, offset);
3939    lwzx(dst, MemOperand(base, scratch));
3940  } else {
3941    lwz(dst, mem);
3942  }
3943}
3944
3945
3946// Variable length depending on whether offset fits into immediate field
3947// MemOperand currently only supports d-form
3948void MacroAssembler::StoreWord(Register src, const MemOperand& mem,
3949                               Register scratch) {
3950  Register base = mem.ra();
3951  int offset = mem.offset();
3952
3953  if (!is_int16(offset)) {
3954    LoadIntLiteral(scratch, offset);
3955    stwx(src, MemOperand(base, scratch));
3956  } else {
3957    stw(src, mem);
3958  }
3959}
3960
3961
3962void MacroAssembler::LoadHalfWordArith(Register dst, const MemOperand& mem,
3963                                       Register scratch) {
3964  int offset = mem.offset();
3965
3966  if (!is_int16(offset)) {
3967    DCHECK(!scratch.is(no_reg));
3968    mov(scratch, Operand(offset));
3969    lhax(dst, MemOperand(mem.ra(), scratch));
3970  } else {
3971    lha(dst, mem);
3972  }
3973}
3974
3975
3976// Variable length depending on whether offset fits into immediate field
3977// MemOperand currently only supports d-form
3978void MacroAssembler::LoadHalfWord(Register dst, const MemOperand& mem,
3979                                  Register scratch) {
3980  Register base = mem.ra();
3981  int offset = mem.offset();
3982
3983  if (!is_int16(offset)) {
3984    LoadIntLiteral(scratch, offset);
3985    lhzx(dst, MemOperand(base, scratch));
3986  } else {
3987    lhz(dst, mem);
3988  }
3989}
3990
3991
3992// Variable length depending on whether offset fits into immediate field
3993// MemOperand currently only supports d-form
3994void MacroAssembler::StoreHalfWord(Register src, const MemOperand& mem,
3995                                   Register scratch) {
3996  Register base = mem.ra();
3997  int offset = mem.offset();
3998
3999  if (!is_int16(offset)) {
4000    LoadIntLiteral(scratch, offset);
4001    sthx(src, MemOperand(base, scratch));
4002  } else {
4003    sth(src, mem);
4004  }
4005}
4006
4007
4008// Variable length depending on whether offset fits into immediate field
4009// MemOperand currently only supports d-form
4010void MacroAssembler::LoadByte(Register dst, const MemOperand& mem,
4011                              Register scratch) {
4012  Register base = mem.ra();
4013  int offset = mem.offset();
4014
4015  if (!is_int16(offset)) {
4016    LoadIntLiteral(scratch, offset);
4017    lbzx(dst, MemOperand(base, scratch));
4018  } else {
4019    lbz(dst, mem);
4020  }
4021}
4022
4023
4024// Variable length depending on whether offset fits into immediate field
4025// MemOperand currently only supports d-form
4026void MacroAssembler::StoreByte(Register src, const MemOperand& mem,
4027                               Register scratch) {
4028  Register base = mem.ra();
4029  int offset = mem.offset();
4030
4031  if (!is_int16(offset)) {
4032    LoadIntLiteral(scratch, offset);
4033    stbx(src, MemOperand(base, scratch));
4034  } else {
4035    stb(src, mem);
4036  }
4037}
4038
4039
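// Load a value of the given representation, sign- or zero-extending
// sub-word values as required.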
4040void MacroAssembler::LoadRepresentation(Register dst, const MemOperand& mem,
4041                                        Representation r, Register scratch) {
4042  DCHECK(!r.IsDouble());
4043  if (r.IsInteger8()) {
4044    LoadByte(dst, mem, scratch);
4045    extsb(dst, dst);
4046  } else if (r.IsUInteger8()) {
4047    LoadByte(dst, mem, scratch);
4048  } else if (r.IsInteger16()) {
4049    LoadHalfWordArith(dst, mem, scratch);
4050  } else if (r.IsUInteger16()) {
4051    LoadHalfWord(dst, mem, scratch);
4052#if V8_TARGET_ARCH_PPC64
4053  } else if (r.IsInteger32()) {
4054    LoadWordArith(dst, mem, scratch);
4055#endif
4056  } else {
4057    LoadP(dst, mem, scratch);
4058  }
4059}
4060
4061
4062void MacroAssembler::StoreRepresentation(Register src, const MemOperand& mem,
4063                                         Representation r, Register scratch) {
4064  DCHECK(!r.IsDouble());
4065  if (r.IsInteger8() || r.IsUInteger8()) {
4066    StoreByte(src, mem, scratch);
4067  } else if (r.IsInteger16() || r.IsUInteger16()) {
4068    StoreHalfWord(src, mem, scratch);
4069#if V8_TARGET_ARCH_PPC64
4070  } else if (r.IsInteger32()) {
4071    StoreWord(src, mem, scratch);
4072#endif
4073  } else {
4074    if (r.IsHeapObject()) {
4075      AssertNotSmi(src);
4076    } else if (r.IsSmi()) {
4077      AssertSmi(src);
4078    }
4079    StoreP(src, mem, scratch);
4080  }
4081}
4082
4083
4084void MacroAssembler::LoadDouble(DoubleRegister dst, const MemOperand& mem,
4085                                Register scratch) {
4086  Register base = mem.ra();
4087  int offset = mem.offset();
4088
4089  if (!is_int16(offset)) {
4090    mov(scratch, Operand(offset));
4091    lfdx(dst, MemOperand(base, scratch));
4092  } else {
4093    lfd(dst, mem);
4094  }
4095}
4096
4097void MacroAssembler::LoadDoubleU(DoubleRegister dst, const MemOperand& mem,
4098                                 Register scratch) {
4099  Register base = mem.ra();
4100  int offset = mem.offset();
4101
4102  if (!is_int16(offset)) {
4103    mov(scratch, Operand(offset));
4104    lfdux(dst, MemOperand(base, scratch));
4105  } else {
4106    lfdu(dst, mem);
4107  }
4108}
4109
4110void MacroAssembler::LoadSingle(DoubleRegister dst, const MemOperand& mem,
4111                                Register scratch) {
4112  Register base = mem.ra();
4113  int offset = mem.offset();
4114
4115  if (!is_int16(offset)) {
4116    mov(scratch, Operand(offset));
4117    lfsx(dst, MemOperand(base, scratch));
4118  } else {
4119    lfs(dst, mem);
4120  }
4121}
4122
4123void MacroAssembler::LoadSingleU(DoubleRegister dst, const MemOperand& mem,
4124                                 Register scratch) {
4125  Register base = mem.ra();
4126  int offset = mem.offset();
4127
4128  if (!is_int16(offset)) {
4129    mov(scratch, Operand(offset));
4130    lfsux(dst, MemOperand(base, scratch));
4131  } else {
4132    lfsu(dst, mem);
4133  }
4134}
4135
4136void MacroAssembler::StoreDouble(DoubleRegister src, const MemOperand& mem,
4137                                 Register scratch) {
4138  Register base = mem.ra();
4139  int offset = mem.offset();
4140
4141  if (!is_int16(offset)) {
4142    mov(scratch, Operand(offset));
4143    stfdx(src, MemOperand(base, scratch));
4144  } else {
4145    stfd(src, mem);
4146  }
4147}
4148
4149void MacroAssembler::StoreDoubleU(DoubleRegister src, const MemOperand& mem,
4150                                  Register scratch) {
4151  Register base = mem.ra();
4152  int offset = mem.offset();
4153
4154  if (!is_int16(offset)) {
4155    mov(scratch, Operand(offset));
4156    stfdux(src, MemOperand(base, scratch));
4157  } else {
4158    stfdu(src, mem);
4159  }
4160}
4161
4162void MacroAssembler::StoreSingle(DoubleRegister src, const MemOperand& mem,
4163                                 Register scratch) {
4164  Register base = mem.ra();
4165  int offset = mem.offset();
4166
4167  if (!is_int16(offset)) {
4168    mov(scratch, Operand(offset));
4169    stfsx(src, MemOperand(base, scratch));
4170  } else {
4171    stfs(src, mem);
4172  }
4173}
4174
4175void MacroAssembler::StoreSingleU(DoubleRegister src, const MemOperand& mem,
4176                                  Register scratch) {
4177  Register base = mem.ra();
4178  int offset = mem.offset();
4179
4180  if (!is_int16(offset)) {
4181    mov(scratch, Operand(offset));
4182    stfsux(src, MemOperand(base, scratch));
4183  } else {
4184    stfsu(src, mem);
4185  }
4186}
4187
4188void MacroAssembler::TestJSArrayForAllocationMemento(Register receiver_reg,
4189                                                     Register scratch_reg,
4190                                                     Register scratch2_reg,
4191                                                     Label* no_memento_found) {
4192  Label map_check;
4193  Label top_check;
4194  ExternalReference new_space_allocation_top_adr =
4195      ExternalReference::new_space_allocation_top_address(isolate());
4196  const int kMementoMapOffset = JSArray::kSize - kHeapObjectTag;
4197  const int kMementoLastWordOffset =
4198      kMementoMapOffset + AllocationMemento::kSize - kPointerSize;
4199  Register mask = scratch2_reg;
4200
4201  DCHECK(!AreAliased(receiver_reg, scratch_reg, mask));
4202
4203  // Bail out if the object is not in new space.
4204  JumpIfNotInNewSpace(receiver_reg, scratch_reg, no_memento_found);
4205
4206  DCHECK((~Page::kPageAlignmentMask & 0xffff) == 0);
4207  lis(mask, Operand((~Page::kPageAlignmentMask >> 16)));
4208  addi(scratch_reg, receiver_reg, Operand(kMementoLastWordOffset));
4209
4210  // If the object is in new space, we need to check whether it is on the same
4211  // page as the current top.
4212  mov(ip, Operand(new_space_allocation_top_adr));
4213  LoadP(ip, MemOperand(ip));
4214  Xor(r0, scratch_reg, Operand(ip));
4215  and_(r0, r0, mask, SetRC);
4216  beq(&top_check, cr0);
4217  // The object is on a different page than allocation top. Bail out if the
4218  // object sits on the page boundary as no memento can follow and we cannot
4219  // touch the memory following it.
4220  xor_(r0, scratch_reg, receiver_reg);
4221  and_(r0, r0, mask, SetRC);
4222  bne(no_memento_found, cr0);
4223  // Continue with the actual map check.
4224  b(&map_check);
4225  // If top is on the same page as the current object, we need to check whether
4226  // we are below top.
4227  bind(&top_check);
4228  cmp(scratch_reg, ip);
4229  bge(no_memento_found);
4230  // Memento map check.
4231  bind(&map_check);
4232  LoadP(scratch_reg, MemOperand(receiver_reg, kMementoMapOffset));
4233  Cmpi(scratch_reg, Operand(isolate()->factory()->allocation_memento_map()),
4234       r0);
4235}
4236
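// Returns an allocatable general register that is not one of the given
// (valid) registers.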
4237Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2, Register reg3,
4238                                   Register reg4, Register reg5,
4239                                   Register reg6) {
4240  RegList regs = 0;
4241  if (reg1.is_valid()) regs |= reg1.bit();
4242  if (reg2.is_valid()) regs |= reg2.bit();
4243  if (reg3.is_valid()) regs |= reg3.bit();
4244  if (reg4.is_valid()) regs |= reg4.bit();
4245  if (reg5.is_valid()) regs |= reg5.bit();
4246  if (reg6.is_valid()) regs |= reg6.bit();
4247
4248  const RegisterConfiguration* config = RegisterConfiguration::Crankshaft();
4249  for (int i = 0; i < config->num_allocatable_general_registers(); ++i) {
4250    int code = config->GetAllocatableGeneralCode(i);
4251    Register candidate = Register::from_code(code);
4252    if (regs & candidate.bit()) continue;
4253    return candidate;
4254  }
4255  UNREACHABLE();
4256  return no_reg;
4257}
4258
4259#ifdef DEBUG
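// Returns true if any two of the given (valid) registers are the same.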
4260bool AreAliased(Register reg1, Register reg2, Register reg3, Register reg4,
4261                Register reg5, Register reg6, Register reg7, Register reg8,
4262                Register reg9, Register reg10) {
4263  int n_of_valid_regs = reg1.is_valid() + reg2.is_valid() + reg3.is_valid() +
4264                        reg4.is_valid() + reg5.is_valid() + reg6.is_valid() +
4265                        reg7.is_valid() + reg8.is_valid() + reg9.is_valid() +
4266                        reg10.is_valid();
4267
4268  RegList regs = 0;
4269  if (reg1.is_valid()) regs |= reg1.bit();
4270  if (reg2.is_valid()) regs |= reg2.bit();
4271  if (reg3.is_valid()) regs |= reg3.bit();
4272  if (reg4.is_valid()) regs |= reg4.bit();
4273  if (reg5.is_valid()) regs |= reg5.bit();
4274  if (reg6.is_valid()) regs |= reg6.bit();
4275  if (reg7.is_valid()) regs |= reg7.bit();
4276  if (reg8.is_valid()) regs |= reg8.bit();
4277  if (reg9.is_valid()) regs |= reg9.bit();
4278  if (reg10.is_valid()) regs |= reg10.bit();
4279  int n_of_non_aliasing_regs = NumRegs(regs);
4280
4281  return n_of_valid_regs != n_of_non_aliasing_regs;
4282}
4283#endif
4284
4285
4286CodePatcher::CodePatcher(Isolate* isolate, byte* address, int instructions,
4287                         FlushICache flush_cache)
4288    : address_(address),
4289      size_(instructions * Assembler::kInstrSize),
4290      masm_(isolate, address, size_ + Assembler::kGap, CodeObjectRequired::kNo),
4291      flush_cache_(flush_cache) {
4292  // Create a new macro assembler pointing to the address of the code to patch.
4293  // The size is adjusted with kGap in order for the assembler to generate size
4294  // bytes of instructions without failing with buffer size constraints.
4295  DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
4296}
4297
4298
4299CodePatcher::~CodePatcher() {
4300  // Indicate that code has changed.
4301  if (flush_cache_ == FLUSH) {
4302    Assembler::FlushICache(masm_.isolate(), address_, size_);
4303  }
4304
4305  // Check that the code was patched as expected.
4306  DCHECK(masm_.pc_ == address_ + size_);
4307  DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
4308}
4309
4310
4311void CodePatcher::Emit(Instr instr) { masm()->emit(instr); }
4312
4313
4314void CodePatcher::EmitCondition(Condition cond) {
4315  Instr instr = Assembler::instr_at(masm_.pc_);
4316  switch (cond) {
4317    case eq:
4318      instr = (instr & ~kCondMask) | BT;
4319      break;
4320    case ne:
4321      instr = (instr & ~kCondMask) | BF;
4322      break;
4323    default:
4324      UNIMPLEMENTED();
4325  }
4326  masm_.emit(instr);
4327}
4328
4329
4330void MacroAssembler::TruncatingDiv(Register result, Register dividend,
4331                                   int32_t divisor) {
4332  DCHECK(!dividend.is(result));
4333  DCHECK(!dividend.is(r0));
4334  DCHECK(!result.is(r0));
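  // Compute dividend / divisor by multiplying with a precomputed magic
  // number (base::SignedDivisionByConstant), then correcting the arithmetic
  // shift with the dividend's sign bit so the quotient truncates toward zero.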
4335  base::MagicNumbersForDivision<uint32_t> mag =
4336      base::SignedDivisionByConstant(static_cast<uint32_t>(divisor));
4337  mov(r0, Operand(mag.multiplier));
4338  mulhw(result, dividend, r0);
4339  bool neg = (mag.multiplier & (static_cast<uint32_t>(1) << 31)) != 0;
4340  if (divisor > 0 && neg) {
4341    add(result, result, dividend);
4342  }
4343  if (divisor < 0 && !neg && mag.multiplier > 0) {
4344    sub(result, result, dividend);
4345  }
4346  if (mag.shift > 0) srawi(result, result, mag.shift);
4347  ExtractBit(r0, dividend, 31);
4348  add(result, result, r0);
4349}
4350
4351}  // namespace internal
4352}  // namespace v8
4353
4354#endif  // V8_TARGET_ARCH_PPC
4355