macro-assembler-a64.cc revision 96d8f8356b6e4d9f9e9faa8a37013a316c6b7dd1
// Copyright 2013, ARM Limited
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//   * Redistributions of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//   * Redistributions in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//   * Neither the name of ARM Limited nor the names of its contributors may be
//     used to endorse or promote products derived from this software without
//     specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "a64/macro-assembler-a64.h"

namespace vixl {

void MacroAssembler::And(const Register& rd,
                         const Register& rn,
                         const Operand& operand) {
  VIXL_ASSERT(allow_macro_instructions_);
  LogicalMacro(rd, rn, operand, AND);
}


void MacroAssembler::Ands(const Register& rd,
                          const Register& rn,
                          const Operand& operand) {
  VIXL_ASSERT(allow_macro_instructions_);
  LogicalMacro(rd, rn, operand, ANDS);
}


void MacroAssembler::Tst(const Register& rn,
                         const Operand& operand) {
  VIXL_ASSERT(allow_macro_instructions_);
  Ands(AppropriateZeroRegFor(rn), rn, operand);
}


void MacroAssembler::Bic(const Register& rd,
                         const Register& rn,
                         const Operand& operand) {
  VIXL_ASSERT(allow_macro_instructions_);
  LogicalMacro(rd, rn, operand, BIC);
}


void MacroAssembler::Bics(const Register& rd,
                          const Register& rn,
                          const Operand& operand) {
  VIXL_ASSERT(allow_macro_instructions_);
  LogicalMacro(rd, rn, operand, BICS);
}


void MacroAssembler::Orr(const Register& rd,
                         const Register& rn,
                         const Operand& operand) {
  VIXL_ASSERT(allow_macro_instructions_);
  LogicalMacro(rd, rn, operand, ORR);
}


void MacroAssembler::Orn(const Register& rd,
                         const Register& rn,
                         const Operand& operand) {
  VIXL_ASSERT(allow_macro_instructions_);
  LogicalMacro(rd, rn, operand, ORN);
}


void MacroAssembler::Eor(const Register& rd,
                         const Register& rn,
                         const Operand& operand) {
  VIXL_ASSERT(allow_macro_instructions_);
  LogicalMacro(rd, rn, operand, EOR);
}


void MacroAssembler::Eon(const Register& rd,
                         const Register& rn,
                         const Operand& operand) {
  VIXL_ASSERT(allow_macro_instructions_);
  LogicalMacro(rd, rn, operand, EON);
}


void MacroAssembler::LogicalMacro(const Register& rd,
                                  const Register& rn,
                                  const Operand& operand,
                                  LogicalOp op) {
  if (operand.IsImmediate()) {
    int64_t immediate = operand.immediate();
    unsigned reg_size = rd.size();
    VIXL_ASSERT(rd.Is64Bits() || is_uint32(immediate));

    // If the operation is NOT, invert the operation and immediate.
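    // For example (illustrative), Bic(x0, x1, 0xf0) reaches this point encoded
    // as AND | NOT, and is rewritten below as an AND of x1 with ~0xf0.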
    if ((op & NOT) == NOT) {
      op = static_cast<LogicalOp>(op & ~NOT);
      immediate = ~immediate;
      if (rd.Is32Bits()) {
        immediate &= kWRegMask;
      }
    }

    // Special cases for all set or all clear immediates.
    if (immediate == 0) {
      switch (op) {
        case AND:
          Mov(rd, 0);
          return;
        case ORR:  // Fall through.
        case EOR:
          Mov(rd, rn);
          return;
        case ANDS:  // Fall through.
        case BICS:
          break;
        default:
          VIXL_UNREACHABLE();
      }
    } else if ((rd.Is64Bits() && (immediate == -INT64_C(1))) ||
               (rd.Is32Bits() && (immediate == INT64_C(0xffffffff)))) {
      switch (op) {
        case AND:
          Mov(rd, rn);
          return;
        case ORR:
          Mov(rd, immediate);
          return;
        case EOR:
          Mvn(rd, rn);
          return;
        case ANDS:  // Fall through.
        case BICS:
          break;
        default:
          VIXL_UNREACHABLE();
      }
    }

    unsigned n, imm_s, imm_r;
    if (IsImmLogical(immediate, reg_size, &n, &imm_s, &imm_r)) {
      // Immediate can be encoded in the instruction.
      LogicalImmediate(rd, rn, n, imm_s, imm_r, op);
    } else {
      // Immediate can't be encoded: synthesize using move immediate.
      Register temp = AppropriateTempFor(rn);
      Mov(temp, immediate);
      if (rd.Is(sp)) {
        // If rd is the stack pointer we cannot use it as the destination
        // register so we use the temp register as an intermediate again.
        Logical(temp, rn, Operand(temp), op);
        Mov(sp, temp);
      } else {
        Logical(rd, rn, Operand(temp), op);
      }
    }
  } else if (operand.IsExtendedRegister()) {
    VIXL_ASSERT(operand.reg().size() <= rd.size());
    // Add/sub extended supports shift <= 4. We want to support exactly the
    // same modes here.
    VIXL_ASSERT(operand.shift_amount() <= 4);
    VIXL_ASSERT(operand.reg().Is64Bits() ||
           ((operand.extend() != UXTX) && (operand.extend() != SXTX)));
    Register temp = AppropriateTempFor(rn, operand.reg());
    EmitExtendShift(temp, operand.reg(), operand.extend(),
                    operand.shift_amount());
    Logical(rd, rn, Operand(temp), op);
  } else {
    // The operand can be encoded in the instruction.
    VIXL_ASSERT(operand.IsShiftedRegister());
    Logical(rd, rn, operand, op);
  }
}


void MacroAssembler::Mov(const Register& rd,
                         const Operand& operand,
                         DiscardMoveMode discard_mode) {
  VIXL_ASSERT(allow_macro_instructions_);
  if (operand.IsImmediate()) {
    // Call the macro assembler for generic immediates.
    Mov(rd, operand.immediate());
  } else if (operand.IsShiftedRegister() && (operand.shift_amount() != 0)) {
    // Emit a shift instruction if moving a shifted register. This operation
    // could also be achieved using an orr instruction (like orn used by Mvn),
    // but using a shift instruction makes the disassembly clearer.
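    // For example (illustrative), Mov(x0, Operand(x1, LSL, 4)) emits
    // lsl x0, x1, #4.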
    EmitShift(rd, operand.reg(), operand.shift(), operand.shift_amount());
  } else if (operand.IsExtendedRegister()) {
    // Emit an extend instruction if moving an extended register. This handles
    // extend with post-shift operations, too.
    EmitExtendShift(rd, operand.reg(), operand.extend(),
                    operand.shift_amount());
  } else {
    // Otherwise, emit a register move only if the registers are distinct, or
    // if they are not X registers.
    //
    // Note that mov(w0, w0) is not a no-op because it clears the top word of
    // x0. A flag is provided (kDiscardForSameWReg) if a move between the same W
    // registers is not required to clear the top word of the X register. In
    // this case, the instruction is discarded.
    //
    // If sp is an operand, add #0 is emitted; otherwise, orr #0 is used.
    if (!rd.Is(operand.reg()) || (rd.Is32Bits() &&
                                  (discard_mode == kDontDiscardForSameWReg))) {
      mov(rd, operand.reg());
    }
  }
}


void MacroAssembler::Mvn(const Register& rd, const Operand& operand) {
  VIXL_ASSERT(allow_macro_instructions_);
  if (operand.IsImmediate()) {
    // Call the macro assembler for generic immediates.
    Mvn(rd, operand.immediate());
  } else if (operand.IsExtendedRegister()) {
    // Emit two instructions for the extend case. This differs from Mov, as
    // the extend and invert can't be achieved in one instruction.
    Register temp = AppropriateTempFor(rd, operand.reg());
    EmitExtendShift(temp, operand.reg(), operand.extend(),
                    operand.shift_amount());
    mvn(rd, Operand(temp));
  } else {
    // Otherwise, register and shifted register cases can be handled by the
    // assembler directly, using orn.
    mvn(rd, operand);
  }
}


void MacroAssembler::Mov(const Register& rd, uint64_t imm) {
  VIXL_ASSERT(allow_macro_instructions_);
  VIXL_ASSERT(is_uint32(imm) || is_int32(imm) || rd.Is64Bits());

  // Immediates on AArch64 can be produced using an initial value, and zero to
  // three move-keep operations.
  //
  // Initial values can be generated with:
  //  1. 64-bit move zero (movz).
  //  2. 32-bit move inverted (movn).
  //  3. 64-bit move inverted.
  //  4. 32-bit orr immediate.
  //  5. 64-bit orr immediate.
  // Move-keep may then be used to modify each of the 16-bit halfwords.
  //
  // The code below supports all five initial value generators, and
  // applying move-keep operations to move-zero and move-inverted initial
  // values.

  unsigned reg_size = rd.size();
  unsigned n, imm_s, imm_r;
  if (IsImmMovz(imm, reg_size) && !rd.IsSP()) {
    // Immediate can be represented in a move zero instruction. Movz can't
    // write to the stack pointer.
    movz(rd, imm);
  } else if (IsImmMovn(imm, reg_size) && !rd.IsSP()) {
    // Immediate can be represented in a move negative instruction. Movn can't
    // write to the stack pointer.
    movn(rd, rd.Is64Bits() ? ~imm : (~imm & kWRegMask));
  } else if (IsImmLogical(imm, reg_size, &n, &imm_s, &imm_r)) {
    // Immediate can be represented in a logical orr instruction.
    VIXL_ASSERT(!rd.IsZero());
    LogicalImmediate(rd, AppropriateZeroRegFor(rd), n, imm_s, imm_r, ORR);
  } else {
    // Generic immediate case. Imm will be represented by
    //   [imm3, imm2, imm1, imm0], where each imm is 16 bits.
    // A move-zero or move-inverted is generated for the first non-zero or
    // non-0xffff immX, and a move-keep for subsequent non-zero immX.

    uint64_t ignored_halfword = 0;
    bool invert_move = false;
    // If the number of 0xffff halfwords is greater than the number of 0x0000
    // halfwords, it's more efficient to use move-inverted.
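    // For example (illustrative), Mov(x0, 0xffffcafeffffffff) becomes a single
    // movn x0, #0x3501, lsl #32, because every other halfword is 0xffff.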
    if (CountClearHalfWords(~imm, reg_size) >
        CountClearHalfWords(imm, reg_size)) {
      ignored_halfword = INT64_C(0xffff);
      invert_move = true;
    }

    // Mov instructions can't move values into the stack pointer, so set up a
    // temporary register, if needed.
    Register temp = rd.IsSP() ? AppropriateTempFor(rd) : rd;

    // Iterate through the halfwords. Use movn/movz for the first non-ignored
    // halfword, and movk for subsequent halfwords.
    VIXL_ASSERT((reg_size % 16) == 0);
    bool first_mov_done = false;
    for (unsigned i = 0; i < (temp.size() / 16); i++) {
      uint64_t imm16 = (imm >> (16 * i)) & INT64_C(0xffff);
      if (imm16 != ignored_halfword) {
        if (!first_mov_done) {
          if (invert_move) {
            movn(temp, (~imm16) & INT64_C(0xffff), 16 * i);
          } else {
            movz(temp, imm16, 16 * i);
          }
          first_mov_done = true;
        } else {
          // Construct a wider constant.
          movk(temp, imm16, 16 * i);
        }
      }
    }

    VIXL_ASSERT(first_mov_done);

    // Move the temporary if the original destination register was the stack
    // pointer.
    if (rd.IsSP()) {
      mov(rd, temp);
    }
  }
}


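// Counts the number of clear 16-bit halfwords in imm; for example
// (illustrative), 0x00ff000000000000 contains three clear halfwords.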
unsigned MacroAssembler::CountClearHalfWords(uint64_t imm, unsigned reg_size) {
  VIXL_ASSERT((reg_size % 8) == 0);
  int count = 0;
  for (unsigned i = 0; i < (reg_size / 16); i++) {
    if ((imm & 0xffff) == 0) {
      count++;
    }
    imm >>= 16;
  }
  return count;
}


// The movz instruction can generate immediates containing an arbitrary 16-bit
// value, with remaining bits clear, eg. 0x00001234, 0x0000123400000000.
bool MacroAssembler::IsImmMovz(uint64_t imm, unsigned reg_size) {
  VIXL_ASSERT((reg_size == kXRegSize) || (reg_size == kWRegSize));
  return CountClearHalfWords(imm, reg_size) >= ((reg_size / 16) - 1);
}


// The movn instruction can generate immediates containing an arbitrary 16-bit
// value, with remaining bits set, eg. 0xffff1234, 0xffff1234ffffffff.
bool MacroAssembler::IsImmMovn(uint64_t imm, unsigned reg_size) {
  return IsImmMovz(~imm, reg_size);
}


void MacroAssembler::Ccmp(const Register& rn,
                          const Operand& operand,
                          StatusFlags nzcv,
                          Condition cond) {
  VIXL_ASSERT(allow_macro_instructions_);
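  // A negative immediate is negated and handled with ccmn; for example
  // (illustrative), Ccmp(x0, -42, NoFlag, eq) behaves as Ccmn(x0, 42, NoFlag, eq).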
  if (operand.IsImmediate() && (operand.immediate() < 0)) {
    ConditionalCompareMacro(rn, -operand.immediate(), nzcv, cond, CCMN);
  } else {
    ConditionalCompareMacro(rn, operand, nzcv, cond, CCMP);
  }
}


void MacroAssembler::Ccmn(const Register& rn,
                          const Operand& operand,
                          StatusFlags nzcv,
                          Condition cond) {
  VIXL_ASSERT(allow_macro_instructions_);
  if (operand.IsImmediate() && (operand.immediate() < 0)) {
    ConditionalCompareMacro(rn, -operand.immediate(), nzcv, cond, CCMP);
  } else {
    ConditionalCompareMacro(rn, operand, nzcv, cond, CCMN);
  }
}


void MacroAssembler::ConditionalCompareMacro(const Register& rn,
                                             const Operand& operand,
                                             StatusFlags nzcv,
                                             Condition cond,
                                             ConditionalCompareOp op) {
  VIXL_ASSERT((cond != al) && (cond != nv));
  if ((operand.IsShiftedRegister() && (operand.shift_amount() == 0)) ||
      (operand.IsImmediate() && IsImmConditionalCompare(operand.immediate()))) {
    // The immediate can be encoded in the instruction, or the operand is an
    // unshifted register: call the assembler.
    ConditionalCompare(rn, operand, nzcv, cond, op);
  } else {
    // The operand isn't directly supported by the instruction: perform the
    // operation on a temporary register.
    Register temp = AppropriateTempFor(rn);
    Mov(temp, operand);
    ConditionalCompare(rn, temp, nzcv, cond, op);
  }
}


void MacroAssembler::Csel(const Register& rd,
                          const Register& rn,
                          const Operand& operand,
                          Condition cond) {
  VIXL_ASSERT(allow_macro_instructions_);
  VIXL_ASSERT(!rd.IsZero());
  VIXL_ASSERT(!rn.IsZero());
  VIXL_ASSERT((cond != al) && (cond != nv));
  if (operand.IsImmediate()) {
    // Immediate argument. Handle special cases of 0, 1 and -1 using zero
    // register.
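    // For example (illustrative), Csel(x0, x1, 1, eq) is emitted as
    // csinc(x0, x1, xzr, eq): xzr + 1 produces the immediate 1 when 'eq' fails.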
    int64_t imm = operand.immediate();
    Register zr = AppropriateZeroRegFor(rn);
    if (imm == 0) {
      csel(rd, rn, zr, cond);
    } else if (imm == 1) {
      csinc(rd, rn, zr, cond);
    } else if (imm == -1) {
      csinv(rd, rn, zr, cond);
    } else {
      Register temp = AppropriateTempFor(rn);
      Mov(temp, operand.immediate());
      csel(rd, rn, temp, cond);
    }
  } else if (operand.IsShiftedRegister() && (operand.shift_amount() == 0)) {
    // Unshifted register argument.
    csel(rd, rn, operand.reg(), cond);
  } else {
    // All other arguments.
    Register temp = AppropriateTempFor(rn);
    Mov(temp, operand);
    csel(rd, rn, temp, cond);
  }
}


void MacroAssembler::Add(const Register& rd,
                         const Register& rn,
                         const Operand& operand) {
  VIXL_ASSERT(allow_macro_instructions_);
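  // A negative immediate is negated and handled by sub; for example
  // (illustrative), Add(x0, x1, -8) is emitted as sub x0, x1, #8.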
  if (operand.IsImmediate() && (operand.immediate() < 0)) {
    AddSubMacro(rd, rn, -operand.immediate(), LeaveFlags, SUB);
  } else {
    AddSubMacro(rd, rn, operand, LeaveFlags, ADD);
  }
}


void MacroAssembler::Adds(const Register& rd,
                          const Register& rn,
                          const Operand& operand) {
  VIXL_ASSERT(allow_macro_instructions_);
  if (operand.IsImmediate() && (operand.immediate() < 0)) {
    AddSubMacro(rd, rn, -operand.immediate(), SetFlags, SUB);
  } else {
    AddSubMacro(rd, rn, operand, SetFlags, ADD);
  }
}


void MacroAssembler::Sub(const Register& rd,
                         const Register& rn,
                         const Operand& operand) {
  VIXL_ASSERT(allow_macro_instructions_);
  if (operand.IsImmediate() && (operand.immediate() < 0)) {
    AddSubMacro(rd, rn, -operand.immediate(), LeaveFlags, ADD);
  } else {
    AddSubMacro(rd, rn, operand, LeaveFlags, SUB);
  }
}


void MacroAssembler::Subs(const Register& rd,
                          const Register& rn,
                          const Operand& operand) {
  VIXL_ASSERT(allow_macro_instructions_);
  if (operand.IsImmediate() && (operand.immediate() < 0)) {
    AddSubMacro(rd, rn, -operand.immediate(), SetFlags, ADD);
  } else {
    AddSubMacro(rd, rn, operand, SetFlags, SUB);
  }
}


void MacroAssembler::Cmn(const Register& rn, const Operand& operand) {
  VIXL_ASSERT(allow_macro_instructions_);
  Adds(AppropriateZeroRegFor(rn), rn, operand);
}


void MacroAssembler::Cmp(const Register& rn, const Operand& operand) {
  VIXL_ASSERT(allow_macro_instructions_);
  Subs(AppropriateZeroRegFor(rn), rn, operand);
}


void MacroAssembler::Neg(const Register& rd,
                         const Operand& operand) {
  VIXL_ASSERT(allow_macro_instructions_);
  if (operand.IsImmediate()) {
    Mov(rd, -operand.immediate());
  } else {
    Sub(rd, AppropriateZeroRegFor(rd), operand);
  }
}


void MacroAssembler::Negs(const Register& rd,
                          const Operand& operand) {
  VIXL_ASSERT(allow_macro_instructions_);
  Subs(rd, AppropriateZeroRegFor(rd), operand);
}


void MacroAssembler::AddSubMacro(const Register& rd,
                                 const Register& rn,
                                 const Operand& operand,
                                 FlagsUpdate S,
                                 AddSubOp op) {
  if (operand.IsZero() && rd.Is(rn) && rd.Is64Bits() && rn.Is64Bits() &&
      (S == LeaveFlags)) {
    // The instruction would be a nop. Avoid generating useless code.
    return;
  }

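  // For example (illustrative), Add(x0, x1, 0x123456789) cannot encode its
  // immediate, so the value is materialised in a temp register first.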
  if ((operand.IsImmediate() && !IsImmAddSub(operand.immediate())) ||
      (rn.IsZero() && !operand.IsShiftedRegister())                ||
      (operand.IsShiftedRegister() && (operand.shift() == ROR))) {
    Register temp = AppropriateTempFor(rn);
    Mov(temp, operand);
    AddSub(rd, rn, temp, S, op);
  } else {
    AddSub(rd, rn, operand, S, op);
  }
}


void MacroAssembler::Adc(const Register& rd,
                         const Register& rn,
                         const Operand& operand) {
  VIXL_ASSERT(allow_macro_instructions_);
  AddSubWithCarryMacro(rd, rn, operand, LeaveFlags, ADC);
}


void MacroAssembler::Adcs(const Register& rd,
                          const Register& rn,
                          const Operand& operand) {
  VIXL_ASSERT(allow_macro_instructions_);
  AddSubWithCarryMacro(rd, rn, operand, SetFlags, ADC);
}


void MacroAssembler::Sbc(const Register& rd,
                         const Register& rn,
                         const Operand& operand) {
  VIXL_ASSERT(allow_macro_instructions_);
  AddSubWithCarryMacro(rd, rn, operand, LeaveFlags, SBC);
}


void MacroAssembler::Sbcs(const Register& rd,
                          const Register& rn,
                          const Operand& operand) {
  VIXL_ASSERT(allow_macro_instructions_);
  AddSubWithCarryMacro(rd, rn, operand, SetFlags, SBC);
}


void MacroAssembler::Ngc(const Register& rd,
                         const Operand& operand) {
  VIXL_ASSERT(allow_macro_instructions_);
  Register zr = AppropriateZeroRegFor(rd);
  Sbc(rd, zr, operand);
}


void MacroAssembler::Ngcs(const Register& rd,
                          const Operand& operand) {
  VIXL_ASSERT(allow_macro_instructions_);
  Register zr = AppropriateZeroRegFor(rd);
  Sbcs(rd, zr, operand);
}


void MacroAssembler::AddSubWithCarryMacro(const Register& rd,
                                          const Register& rn,
                                          const Operand& operand,
                                          FlagsUpdate S,
                                          AddSubWithCarryOp op) {
  VIXL_ASSERT(rd.size() == rn.size());

  if (operand.IsImmediate() ||
      (operand.IsShiftedRegister() && (operand.shift() == ROR))) {
    // Add/sub with carry (immediate or ROR shifted register).
    Register temp = AppropriateTempFor(rn);
    Mov(temp, operand);
    AddSubWithCarry(rd, rn, Operand(temp), S, op);
  } else if (operand.IsShiftedRegister() && (operand.shift_amount() != 0)) {
    // Add/sub with carry (shifted register).
    VIXL_ASSERT(operand.reg().size() == rd.size());
    VIXL_ASSERT(operand.shift() != ROR);
    VIXL_ASSERT(is_uintn(rd.size() == kXRegSize ? kXRegSizeLog2 : kWRegSizeLog2,
                    operand.shift_amount()));
    Register temp = AppropriateTempFor(rn, operand.reg());
    EmitShift(temp, operand.reg(), operand.shift(), operand.shift_amount());
    AddSubWithCarry(rd, rn, Operand(temp), S, op);
  } else if (operand.IsExtendedRegister()) {
    // Add/sub with carry (extended register).
    VIXL_ASSERT(operand.reg().size() <= rd.size());
    // Add/sub extended supports a shift <= 4. We want to support exactly the
    // same modes.
    VIXL_ASSERT(operand.shift_amount() <= 4);
    VIXL_ASSERT(operand.reg().Is64Bits() ||
           ((operand.extend() != UXTX) && (operand.extend() != SXTX)));
    Register temp = AppropriateTempFor(rn, operand.reg());
    EmitExtendShift(temp, operand.reg(), operand.extend(),
                    operand.shift_amount());
    AddSubWithCarry(rd, rn, Operand(temp), S, op);
  } else {
    // The addressing mode is directly supported by the instruction.
    AddSubWithCarry(rd, rn, operand, S, op);
  }
}


#define DEFINE_FUNCTION(FN, REGTYPE, REG, OP)                         \
void MacroAssembler::FN(const REGTYPE REG, const MemOperand& addr) {  \
  LoadStoreMacro(REG, addr, OP);                                      \
}
LS_MACRO_LIST(DEFINE_FUNCTION)
#undef DEFINE_FUNCTION

void MacroAssembler::LoadStoreMacro(const CPURegister& rt,
                                    const MemOperand& addr,
                                    LoadStoreOp op) {
  int64_t offset = addr.offset();
  LSDataSize size = CalcLSDataSize(op);

  // Check if an immediate offset fits in the immediate field of the
  // appropriate instruction. If not, emit two instructions to perform
  // the operation.
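  // For example (illustrative), Ldr(x0, MemOperand(x1, 1 << 24)) fits neither
  // the scaled nor the unscaled immediate form, so the offset is moved into a
  // temp register and a register-offset load is emitted.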
  if (addr.IsImmediateOffset() && !IsImmLSScaled(offset, size) &&
      !IsImmLSUnscaled(offset)) {
    // Immediate offset that can't be encoded using unsigned or unscaled
    // addressing modes.
    Register temp = AppropriateTempFor(addr.base());
    Mov(temp, addr.offset());
    LoadStore(rt, MemOperand(addr.base(), temp), op);
  } else if (addr.IsPostIndex() && !IsImmLSUnscaled(offset)) {
    // Post-index beyond unscaled addressing range.
    LoadStore(rt, MemOperand(addr.base()), op);
    Add(addr.base(), addr.base(), Operand(offset));
  } else if (addr.IsPreIndex() && !IsImmLSUnscaled(offset)) {
    // Pre-index beyond unscaled addressing range.
    Add(addr.base(), addr.base(), Operand(offset));
    LoadStore(rt, MemOperand(addr.base()), op);
  } else {
    // Encodable in one load/store instruction.
    LoadStore(rt, addr, op);
  }
}


void MacroAssembler::Push(const CPURegister& src0, const CPURegister& src1,
                          const CPURegister& src2, const CPURegister& src3) {
  VIXL_ASSERT(allow_macro_instructions_);
  VIXL_ASSERT(AreSameSizeAndType(src0, src1, src2, src3));
  VIXL_ASSERT(src0.IsValid());

  int count = 1 + src1.IsValid() + src2.IsValid() + src3.IsValid();
  int size = src0.SizeInBytes();

  PrepareForPush(count, size);
  PushHelper(count, size, src0, src1, src2, src3);
}


void MacroAssembler::Pop(const CPURegister& dst0, const CPURegister& dst1,
                         const CPURegister& dst2, const CPURegister& dst3) {
  // It is not valid to pop into the same register more than once in one
  // instruction, not even into the zero register.
  VIXL_ASSERT(allow_macro_instructions_);
  VIXL_ASSERT(!AreAliased(dst0, dst1, dst2, dst3));
  VIXL_ASSERT(AreSameSizeAndType(dst0, dst1, dst2, dst3));
  VIXL_ASSERT(dst0.IsValid());

  int count = 1 + dst1.IsValid() + dst2.IsValid() + dst3.IsValid();
  int size = dst0.SizeInBytes();

  PrepareForPop(count, size);
  PopHelper(count, size, dst0, dst1, dst2, dst3);
}


void MacroAssembler::PushCPURegList(CPURegList registers) {
  int size = registers.RegisterSizeInBytes();

  PrepareForPush(registers.Count(), size);
  // Push up to four registers at a time because if the current stack pointer is
  // sp and reg_size is 32, registers must be pushed in blocks of four in order
  // to maintain the 16-byte alignment for sp.
  VIXL_ASSERT(allow_macro_instructions_);
  while (!registers.IsEmpty()) {
    int count_before = registers.Count();
    const CPURegister& src0 = registers.PopHighestIndex();
    const CPURegister& src1 = registers.PopHighestIndex();
    const CPURegister& src2 = registers.PopHighestIndex();
    const CPURegister& src3 = registers.PopHighestIndex();
    int count = count_before - registers.Count();
    PushHelper(count, size, src0, src1, src2, src3);
  }
}


void MacroAssembler::PopCPURegList(CPURegList registers) {
  int size = registers.RegisterSizeInBytes();

  PrepareForPop(registers.Count(), size);
  // Pop up to four registers at a time because if the current stack pointer is
  // sp and reg_size is 32, registers must be popped in blocks of four in order
  // to maintain the 16-byte alignment for sp.
  VIXL_ASSERT(allow_macro_instructions_);
  while (!registers.IsEmpty()) {
    int count_before = registers.Count();
    const CPURegister& dst0 = registers.PopLowestIndex();
    const CPURegister& dst1 = registers.PopLowestIndex();
    const CPURegister& dst2 = registers.PopLowestIndex();
    const CPURegister& dst3 = registers.PopLowestIndex();
    int count = count_before - registers.Count();
    PopHelper(count, size, dst0, dst1, dst2, dst3);
  }
}


void MacroAssembler::PushMultipleTimes(int count, Register src) {
  VIXL_ASSERT(allow_macro_instructions_);
  int size = src.SizeInBytes();

  PrepareForPush(count, size);
  // Push up to four registers at a time if possible because if the current
  // stack pointer is sp and the register size is 32, registers must be pushed
  // in blocks of four in order to maintain the 16-byte alignment for sp.
  while (count >= 4) {
    PushHelper(4, size, src, src, src, src);
    count -= 4;
  }
  if (count >= 2) {
    PushHelper(2, size, src, src, NoReg, NoReg);
    count -= 2;
  }
  if (count == 1) {
    PushHelper(1, size, src, NoReg, NoReg, NoReg);
    count -= 1;
  }
  VIXL_ASSERT(count == 0);
}


void MacroAssembler::PushHelper(int count, int size,
                                const CPURegister& src0,
                                const CPURegister& src1,
                                const CPURegister& src2,
                                const CPURegister& src3) {
  // Ensure that we don't unintentionally modify scratch or debug registers.
  InstructionAccurateScope scope(this);

  VIXL_ASSERT(AreSameSizeAndType(src0, src1, src2, src3));
  VIXL_ASSERT(size == src0.SizeInBytes());

  // When pushing multiple registers, the store order is chosen such that
  // Push(a, b) is equivalent to Push(a) followed by Push(b).
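  // For example (illustrative), pushing two X registers with Push(x0, x1)
  // emits stp x1, x0, [sp, #-16]!, leaving x1 at the lower address as if it
  // had been pushed last.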
  switch (count) {
    case 1:
      VIXL_ASSERT(src1.IsNone() && src2.IsNone() && src3.IsNone());
      str(src0, MemOperand(StackPointer(), -1 * size, PreIndex));
      break;
    case 2:
      VIXL_ASSERT(src2.IsNone() && src3.IsNone());
      stp(src1, src0, MemOperand(StackPointer(), -2 * size, PreIndex));
      break;
    case 3:
      VIXL_ASSERT(src3.IsNone());
      stp(src2, src1, MemOperand(StackPointer(), -3 * size, PreIndex));
      str(src0, MemOperand(StackPointer(), 2 * size));
      break;
    case 4:
      // Skip over 4 * size, then fill in the gap. This allows four W registers
      // to be pushed using sp, whilst maintaining 16-byte alignment for sp at
      // all times.
      stp(src3, src2, MemOperand(StackPointer(), -4 * size, PreIndex));
      stp(src1, src0, MemOperand(StackPointer(), 2 * size));
      break;
    default:
      VIXL_UNREACHABLE();
  }
}


void MacroAssembler::PopHelper(int count, int size,
                               const CPURegister& dst0,
                               const CPURegister& dst1,
                               const CPURegister& dst2,
                               const CPURegister& dst3) {
  // Ensure that we don't unintentionally modify scratch or debug registers.
  InstructionAccurateScope scope(this);

  VIXL_ASSERT(AreSameSizeAndType(dst0, dst1, dst2, dst3));
  VIXL_ASSERT(size == dst0.SizeInBytes());

  // When popping multiple registers, the load order is chosen such that
  // Pop(a, b) is equivalent to Pop(a) followed by Pop(b).
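  // For example (illustrative), popping two X registers with Pop(x0, x1)
  // emits ldp x0, x1, [sp], #16, so x0 is loaded from the lower address as if
  // it had been popped first.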
  switch (count) {
    case 1:
      VIXL_ASSERT(dst1.IsNone() && dst2.IsNone() && dst3.IsNone());
      ldr(dst0, MemOperand(StackPointer(), 1 * size, PostIndex));
      break;
    case 2:
      VIXL_ASSERT(dst2.IsNone() && dst3.IsNone());
      ldp(dst0, dst1, MemOperand(StackPointer(), 2 * size, PostIndex));
      break;
    case 3:
      VIXL_ASSERT(dst3.IsNone());
      ldr(dst2, MemOperand(StackPointer(), 2 * size));
      ldp(dst0, dst1, MemOperand(StackPointer(), 3 * size, PostIndex));
      break;
    case 4:
      // Load the higher addresses first, then load the lower addresses and skip
      // the whole block in the second instruction. This allows four W registers
      // to be popped using sp, whilst maintaining 16-byte alignment for sp at
      // all times.
      ldp(dst2, dst3, MemOperand(StackPointer(), 2 * size));
      ldp(dst0, dst1, MemOperand(StackPointer(), 4 * size, PostIndex));
      break;
    default:
      VIXL_UNREACHABLE();
  }
}


void MacroAssembler::PrepareForPush(int count, int size) {
  if (sp.Is(StackPointer())) {
    // If the current stack pointer is sp, then it must be aligned to 16 bytes
    // on entry and the total size of the specified registers must also be a
    // multiple of 16 bytes.
    VIXL_ASSERT((count * size) % 16 == 0);
  } else {
    // Even if the current stack pointer is not the system stack pointer (sp),
    // the system stack pointer will still be modified in order to comply with
    // ABI rules about accessing memory below the system stack pointer.
    BumpSystemStackPointer(count * size);
  }
}


void MacroAssembler::PrepareForPop(int count, int size) {
  USE(count);
  USE(size);
  if (sp.Is(StackPointer())) {
    // If the current stack pointer is sp, then it must be aligned to 16 bytes
    // on entry and the total size of the specified registers must also be a
    // multiple of 16 bytes.
    VIXL_ASSERT((count * size) % 16 == 0);
  }
}


void MacroAssembler::Poke(const Register& src, const Operand& offset) {
  VIXL_ASSERT(allow_macro_instructions_);
  if (offset.IsImmediate()) {
    VIXL_ASSERT(offset.immediate() >= 0);
  }

  Str(src, MemOperand(StackPointer(), offset));
}


void MacroAssembler::Peek(const Register& dst, const Operand& offset) {
  VIXL_ASSERT(allow_macro_instructions_);
  if (offset.IsImmediate()) {
    VIXL_ASSERT(offset.immediate() >= 0);
  }

  Ldr(dst, MemOperand(StackPointer(), offset));
}


void MacroAssembler::Claim(const Operand& size) {
  VIXL_ASSERT(allow_macro_instructions_);

  if (size.IsZero()) {
    return;
  }

  if (size.IsImmediate()) {
    VIXL_ASSERT(size.immediate() > 0);
    if (sp.Is(StackPointer())) {
      VIXL_ASSERT((size.immediate() % 16) == 0);
    }
  }

  if (!sp.Is(StackPointer())) {
    BumpSystemStackPointer(size);
  }

  Sub(StackPointer(), StackPointer(), size);
}


void MacroAssembler::Drop(const Operand& size) {
  VIXL_ASSERT(allow_macro_instructions_);

  if (size.IsZero()) {
    return;
  }

  if (size.IsImmediate()) {
    VIXL_ASSERT(size.immediate() > 0);
    if (sp.Is(StackPointer())) {
      VIXL_ASSERT((size.immediate() % 16) == 0);
    }
  }

  Add(StackPointer(), StackPointer(), size);
}


void MacroAssembler::PushCalleeSavedRegisters() {
  // Ensure that the macro-assembler doesn't use any scratch registers.
  InstructionAccurateScope scope(this);

  // This method must not be called unless the current stack pointer is sp.
  VIXL_ASSERT(sp.Is(StackPointer()));

  MemOperand tos(sp, -2 * kXRegSizeInBytes, PreIndex);

  stp(d14, d15, tos);
  stp(d12, d13, tos);
  stp(d10, d11, tos);
  stp(d8, d9, tos);

  stp(x29, x30, tos);
  stp(x27, x28, tos);
  stp(x25, x26, tos);
  stp(x23, x24, tos);
  stp(x21, x22, tos);
  stp(x19, x20, tos);
}


void MacroAssembler::PopCalleeSavedRegisters() {
  // Ensure that the macro-assembler doesn't use any scratch registers.
  InstructionAccurateScope scope(this);

  // This method must not be called unless the current stack pointer is sp.
  VIXL_ASSERT(sp.Is(StackPointer()));

  MemOperand tos(sp, 2 * kXRegSizeInBytes, PostIndex);

  ldp(x19, x20, tos);
  ldp(x21, x22, tos);
  ldp(x23, x24, tos);
  ldp(x25, x26, tos);
  ldp(x27, x28, tos);
  ldp(x29, x30, tos);

  ldp(d8, d9, tos);
  ldp(d10, d11, tos);
  ldp(d12, d13, tos);
  ldp(d14, d15, tos);
}


void MacroAssembler::BumpSystemStackPointer(const Operand& space) {
  VIXL_ASSERT(!sp.Is(StackPointer()));
  // TODO: Several callers rely on this not using scratch registers, so we use
  // the assembler directly here. However, this means that large immediate
  // values of 'space' cannot be handled.
  InstructionAccurateScope scope(this);
  sub(sp, StackPointer(), space);
}


// This is the main Printf implementation. All callee-saved registers are
// preserved, but NZCV and the caller-saved registers may be clobbered.
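// Example use (illustrative):
//   PrintfNoPreserve("x0: 0x%016" PRIx64 "\n", x0);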
void MacroAssembler::PrintfNoPreserve(const char * format,
                                      const CPURegister& arg0,
                                      const CPURegister& arg1,
                                      const CPURegister& arg2,
                                      const CPURegister& arg3) {
  // We cannot handle a caller-saved stack pointer. It doesn't make much sense
  // in most cases anyway, so this restriction shouldn't be too serious.
  VIXL_ASSERT(!kCallerSaved.IncludesAliasOf(StackPointer()));

  // We cannot print Tmp0() or Tmp1() as they're used internally by the macro
  // assembler. We cannot print the stack pointer because it is typically used
  // to preserve caller-saved registers (using other Printf variants which
  // depend on this helper).
  VIXL_ASSERT(!AreAliased(Tmp0(), Tmp1(), StackPointer(), arg0));
  VIXL_ASSERT(!AreAliased(Tmp0(), Tmp1(), StackPointer(), arg1));
  VIXL_ASSERT(!AreAliased(Tmp0(), Tmp1(), StackPointer(), arg2));
  VIXL_ASSERT(!AreAliased(Tmp0(), Tmp1(), StackPointer(), arg3));

  static const int kMaxArgCount = 4;
  // Assume that we have the maximum number of arguments until we know
  // otherwise.
  int arg_count = kMaxArgCount;

  // The provided arguments.
  CPURegister args[kMaxArgCount] = {arg0, arg1, arg2, arg3};

  // The PCS registers where the arguments need to end up.
  CPURegister pcs[kMaxArgCount];

  // Promote FP arguments to doubles, and integer arguments to X registers.
  // Note that FP and integer arguments cannot be mixed, but we'll check
  // AreSameSizeAndType once we've processed these promotions.
  for (int i = 0; i < kMaxArgCount; i++) {
    if (args[i].IsRegister()) {
      // Note that we use x1 onwards, because x0 will hold the format string.
      pcs[i] = Register::XRegFromCode(i + 1);
      // For simplicity, we handle all integer arguments as X registers. An X
      // register argument takes the same space as a W register argument in the
      // PCS anyway. The only limitation is that we must explicitly clear the
      // top word for W register arguments as the callee will expect it to be
      // clear.
      if (!args[i].Is64Bits()) {
        const Register& as_x = args[i].X();
        And(as_x, as_x, 0x00000000ffffffff);
        args[i] = as_x;
      }
    } else if (args[i].IsFPRegister()) {
      pcs[i] = FPRegister::DRegFromCode(i);
      // C and C++ varargs functions (such as printf) implicitly promote float
      // arguments to doubles.
      if (!args[i].Is64Bits()) {
        FPRegister s(args[i]);
        const FPRegister& as_d = args[i].D();
        Fcvt(as_d, s);
        args[i] = as_d;
      }
    } else {
      // This is the first empty (NoCPUReg) argument, so use it to set the
      // argument count and bail out.
      arg_count = i;
      break;
    }
  }
  VIXL_ASSERT((arg_count >= 0) && (arg_count <= kMaxArgCount));
  // Check that every remaining argument is NoCPUReg.
  for (int i = arg_count; i < kMaxArgCount; i++) {
    VIXL_ASSERT(args[i].IsNone());
  }
  VIXL_ASSERT((arg_count == 0) || AreSameSizeAndType(args[0], args[1],
                                                     args[2], args[3],
                                                     pcs[0], pcs[1],
                                                     pcs[2], pcs[3]));

  // Move the arguments into the appropriate PCS registers.
  //
  // Arranging an arbitrary list of registers into x1-x4 (or d0-d3) is
  // surprisingly complicated.
  //
  //  * For even numbers of registers, we push the arguments and then pop them
  //    into their final registers. This maintains 16-byte stack alignment in
  //    case sp is the stack pointer, since we're only handling X or D registers
  //    at this point.
  //
  //  * For odd numbers of registers, we push and pop all but one register in
  //    the same way, but the left-over register is moved directly, since we
  //    can always safely move one register without clobbering any source.
  if (arg_count >= 4) {
    Push(args[3], args[2], args[1], args[0]);
  } else if (arg_count >= 2) {
    Push(args[1], args[0]);
  }

  if ((arg_count % 2) != 0) {
    // Move the left-over register directly.
    const CPURegister& leftover_arg = args[arg_count - 1];
    const CPURegister& leftover_pcs = pcs[arg_count - 1];
    if (leftover_arg.IsRegister()) {
      Mov(Register(leftover_pcs), Register(leftover_arg));
    } else {
      Fmov(FPRegister(leftover_pcs), FPRegister(leftover_arg));
    }
  }

  if (arg_count >= 4) {
    Pop(pcs[0], pcs[1], pcs[2], pcs[3]);
  } else if (arg_count >= 2) {
    Pop(pcs[0], pcs[1]);
  }

  // Load the format string into x0, as per the procedure-call standard.
  //
  // To make the code as portable as possible, the format string is encoded
  // directly in the instruction stream. It might be cleaner to encode it in a
  // literal pool, but since Printf is usually used for debugging, it is
  // beneficial for it to be minimally dependent on other features.
  Label format_address;
  Adr(x0, &format_address);

  // Emit the format string directly in the instruction stream.
  { BlockLiteralPoolScope scope(this);
    Label after_data;
    B(&after_data);
    Bind(&format_address);
    EmitStringData(format);
    Unreachable();
    Bind(&after_data);
  }

  // We don't pass any arguments on the stack, but we still need to align the C
  // stack pointer to a 16-byte boundary for PCS compliance.
  if (!sp.Is(StackPointer())) {
    Bic(sp, StackPointer(), 0xf);
  }

  // Actually call printf. This part needs special handling for the simulator,
  // since the system printf function will use a different instruction set and
  // the procedure-call standard will not be compatible.
#ifdef USE_SIMULATOR
  { InstructionAccurateScope scope(this, kPrintfLength / kInstructionSize);
    hlt(kPrintfOpcode);
    dc32(pcs[0].type());
  }
#else
  Mov(Tmp0(), reinterpret_cast<uintptr_t>(printf));
  Blr(Tmp0());
#endif
}


void MacroAssembler::Printf(const char * format,
                            const CPURegister& arg0,
                            const CPURegister& arg1,
                            const CPURegister& arg2,
                            const CPURegister& arg3) {
  // Preserve all caller-saved registers as well as NZCV.
  // If sp is the stack pointer, PushCPURegList asserts that the size of each
  // list is a multiple of 16 bytes.
  PushCPURegList(kCallerSaved);
  PushCPURegList(kCallerSavedFP);
  // Use Tmp0() as a scratch register. It is not accepted by Printf so it will
  // never overlap an argument register.
  Mrs(Tmp0(), NZCV);
  Push(Tmp0(), xzr);

  PrintfNoPreserve(format, arg0, arg1, arg2, arg3);

  Pop(xzr, Tmp0());
  Msr(NZCV, Tmp0());
  PopCPURegList(kCallerSavedFP);
  PopCPURegList(kCallerSaved);
}


void MacroAssembler::Trace(TraceParameters parameters, TraceCommand command) {
  VIXL_ASSERT(allow_macro_instructions_);

#ifdef USE_SIMULATOR
  // The arguments to the trace pseudo instruction need to be contiguous in
  // memory, so make sure we don't try to emit a literal pool.
  InstructionAccurateScope scope(this, kTraceLength / kInstructionSize);

  Label start;
  bind(&start);

  // Refer to instructions-a64.h for a description of the marker and its
  // arguments.
  hlt(kTraceOpcode);

  VIXL_ASSERT(SizeOfCodeGeneratedSince(&start) == kTraceParamsOffset);
  dc32(parameters);

  VIXL_ASSERT(SizeOfCodeGeneratedSince(&start) == kTraceCommandOffset);
  dc32(command);
#else
  // Emit nothing on real hardware.
  USE(parameters);
  USE(command);
#endif
}


void MacroAssembler::Log(TraceParameters parameters) {
  VIXL_ASSERT(allow_macro_instructions_);

#ifdef USE_SIMULATOR
  // The arguments to the log pseudo instruction need to be contiguous in
  // memory, so make sure we don't try to emit a literal pool.
  InstructionAccurateScope scope(this, kLogLength / kInstructionSize);

  Label start;
  bind(&start);

  // Refer to instructions-a64.h for a description of the marker and its
  // arguments.
  hlt(kLogOpcode);

  VIXL_ASSERT(SizeOfCodeGeneratedSince(&start) == kLogParamsOffset);
  dc32(parameters);
#else
  // Emit nothing on real hardware.
  USE(parameters);
#endif
}


void MacroAssembler::EnableInstrumentation() {
  VIXL_ASSERT(!isprint(InstrumentStateEnable));
  InstructionAccurateScope scope(this, 1);
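  // Writing to xzr makes this architecturally a no-op; the instrumentation
  // tooling recognises these movn encodings as control markers.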
  movn(xzr, InstrumentStateEnable);
}


void MacroAssembler::DisableInstrumentation() {
  VIXL_ASSERT(!isprint(InstrumentStateDisable));
  InstructionAccurateScope scope(this, 1);
  movn(xzr, InstrumentStateDisable);
}


void MacroAssembler::AnnotateInstrumentation(const char* marker_name) {
  VIXL_ASSERT(strlen(marker_name) == 2);

  // We allow only printable characters in the marker names. Unprintable
  // characters are reserved for controlling features of the instrumentation.
  VIXL_ASSERT(isprint(marker_name[0]) && isprint(marker_name[1]));

  InstructionAccurateScope scope(this, 1);
  movn(xzr, (marker_name[1] << 8) | marker_name[0]);
}

}  // namespace vixl