// Copyright 2015, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//   * Redistributions of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//   * Redistributions in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//   * Neither the name of ARM Limited nor the names of its contributors may be
//     used to endorse or promote products derived from this software without
//     specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.

#include <cstdio>
#include <string>
#include <iostream>

#include "test-runner.h"
#include "test-utils.h"
#include "aarch32/test-utils-aarch32.h"

#include "aarch32/macro-assembler-aarch32.h"
#include "aarch32/disasm-aarch32.h"

namespace vixl {
namespace aarch32 {

#define STRINGIFY(x) #x

// Tests declared with this macro will be run twice: once targeting A32 and
// once targeting T32.
#define TEST(Name)                                                \
void Test##Name##Impl(InstructionSet isa);                        \
void Test##Name() {                                               \
  Test##Name##Impl(A32);                                          \
  printf(" > A32 done\n");                                        \
  Test##Name##Impl(T32);                                          \
  printf(" > T32 done\n");                                        \
}                                                                 \
Test test_##Name(STRINGIFY(AARCH32_ASM_##Name), &Test##Name);     \
void Test##Name##Impl(InstructionSet isa __attribute__((unused)))

// Tests declared with this macro will only target A32.
#define TEST_A32(Name)                                            \
void Test##Name##Impl(InstructionSet isa);                        \
void Test##Name() {                                               \
  Test##Name##Impl(A32);                                          \
}                                                                 \
Test test_##Name(STRINGIFY(AARCH32_A32_##Name), &Test##Name);     \
void Test##Name##Impl(InstructionSet isa __attribute__((unused)))

// Tests declared with this macro will only target T32.
#define TEST_T32(Name)                                            \
void Test##Name##Impl(InstructionSet isa);                        \
void Test##Name() {                                               \
  Test##Name##Impl(T32);                                          \
}                                                                 \
Test test_##Name(STRINGIFY(AARCH32_T32_##Name), &Test##Name);     \
void Test##Name##Impl(InstructionSet isa __attribute__((unused)))

// Tests declared with this macro are not expected to use any provided test
// helpers such as SETUP, RUN, etc.
#define TEST_NOASM(Name)                                   \
void Test##Name();                                         \
Test test_##Name(STRINGIFY(AARCH32_##Name), &Test##Name);  \
void Test##Name()

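// As a quick orientation (an illustrative sketch only, not one of the tests
// below), a test built from these macros and the SETUP/START/END/RUN helpers
// defined next typically has this shape:
//
//   TEST(example_mov) {
//     SETUP();
//     START();
//     __ Mov(r0, 42);
//     END();
//     RUN();
//     ASSERT_EQUAL_32(42, r0);
//     TEARDOWN();
//   }
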
#define __ masm.
#define BUF_SIZE (4096)

#define ASSERT_LITERAL_POOL_SIZE(size) \
    do { VIXL_CHECK(__ GetLiteralPoolSize() == size); } while (false)

#ifdef VIXL_INCLUDE_SIMULATOR_AARCH32
// No simulator yet.

#define SETUP()                                             \
  MacroAssembler masm(BUF_SIZE, isa);

#define START() \
  masm.GetBuffer()->Reset();

#define END()                                                                  \
  __ Hlt(0);                                                                   \
  __ FinalizeCode();

#define RUN()                                                                  \
  DISASSEMBLE();

#define TEARDOWN()

#else  // ifdef VIXL_INCLUDE_SIMULATOR_AARCH32.

#define SETUP()                                                                \
  RegisterDump core;                                                           \
  MacroAssembler masm(BUF_SIZE, isa);

#define START()                                                                \
  masm.GetBuffer()->Reset();                                                   \
  __ Push(r4);                                                                 \
  __ Push(r5);                                                                 \
  __ Push(r6);                                                                 \
  __ Push(r7);                                                                 \
  __ Push(r8);                                                                 \
  __ Push(r9);                                                                 \
  __ Push(r10);                                                                \
  __ Push(r11);                                                                \
  __ Push(r12);                                                                \
  __ Mov(r0, 0);                                                               \
  __ Msr(APSR_nzcvq, r0);

#define END()                                                                  \
  core.Dump(&masm);                                                            \
  __ Pop(r12);                                                                 \
  __ Pop(r11);                                                                 \
  __ Pop(r10);                                                                 \
  __ Pop(r9);                                                                  \
  __ Pop(r8);                                                                  \
  __ Pop(r7);                                                                  \
  __ Pop(r6);                                                                  \
  __ Pop(r5);                                                                  \
  __ Pop(r4);                                                                  \
  __ Bx(lr);                                                                   \
  __ FinalizeCode();

// Execute the generated code from the MacroAssembler's automatic code buffer.
// Note the offset passed to ExecuteMemory: when branching to T32 code, the
// target address must have its least significant bit set to select the T32
// instruction set.
#define RUN()                                                                  \
  DISASSEMBLE();                                                               \
  {                                                                            \
    int pcs_offset = masm.IsUsingT32() ? 1 : 0;                                \
    masm.GetBuffer()->SetExecutable();                                         \
    ExecuteMemory(masm.GetBuffer()->GetStartAddress<byte*>(),                  \
                  masm.GetSizeOfCodeGenerated(),                               \
                  pcs_offset);                                                 \
    masm.GetBuffer()->SetWritable();                                           \
  }

#define TEARDOWN()

#endif  // ifdef VIXL_INCLUDE_SIMULATOR_AARCH32

#ifdef VIXL_INCLUDE_SIMULATOR_AARCH32
// No simulator yet. We can't test the results.

#define ASSERT_EQUAL_32(expected, result)

#define ASSERT_EQUAL_64(expected, result)

#define ASSERT_EQUAL_128(expected_h, expected_l, result)

#define ASSERT_EQUAL_FP32(expected, result)

#define ASSERT_EQUAL_FP64(expected, result)

#define ASSERT_EQUAL_NZCV(expected)

#else

#define ASSERT_EQUAL_32(expected, result)                                      \
  VIXL_CHECK(Equal32(expected, &core, result))

#define ASSERT_EQUAL_64(expected, result)                                      \
  VIXL_CHECK(Equal64(expected, &core, result))

#define ASSERT_EQUAL_128(expected_h, expected_l, result)                       \
  VIXL_CHECK(Equal128(expected_h, expected_l, &core, result))

#define ASSERT_EQUAL_FP32(expected, result)                                    \
  VIXL_CHECK(EqualFP32(expected, &core, result))

#define ASSERT_EQUAL_FP64(expected, result)                                    \
  VIXL_CHECK(EqualFP64(expected, &core, result))

#define ASSERT_EQUAL_NZCV(expected)                                            \
  VIXL_CHECK(EqualNzcv(expected, core.flags_nzcv()))

#endif

#define DISASSEMBLE() \
  if (Test::disassemble()) {                                                   \
    PrintDisassembler dis(std::cout, 0);                                       \
    if (masm.IsUsingT32()) {                                                   \
      dis.DisassembleT32Buffer(masm.GetBuffer()->GetStartAddress<uint16_t*>(), \
                               masm.GetCursorOffset());                        \
    } else {                                                                   \
      dis.DisassembleA32Buffer(masm.GetBuffer()->GetStartAddress<uint32_t*>(), \
                               masm.GetCursorOffset());                        \
    }                                                                          \
  }

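// Note: as the macro above shows, disassembly of the generated code is only
// printed when it has been enabled in the test runner (i.e. when
// Test::disassemble() returns true); by default the tests run silently.
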
// TODO: Add SBC to the ADC tests.


TEST(adc_shift) {
  SETUP();

  START();
  // Initialize registers.
  __ Mov(r0, 0);
  __ Mov(r1, 1);
  __ Mov(r2, 0x01234567);
  __ Mov(r3, 0xfedcba98);

  // Clear the C flag.
  __ Adds(r0, r0, 0);

  __ Adc(r4, r2, r3);
  __ Adc(r5, r0, Operand(r1, LSL, 30));
  __ Adc(r6, r0, Operand(r2, LSR, 16));
  __ Adc(r7, r2, Operand(r3, ASR, 4));
  __ Adc(r8, r2, Operand(r3, ROR, 8));
  __ Adc(r9, r2, Operand(r3, RRX));
  END();

  RUN();

  ASSERT_EQUAL_32(0xffffffff, r4);
  ASSERT_EQUAL_32(INT32_C(1) << 30, r5);
  ASSERT_EQUAL_32(0x00000123, r6);
  ASSERT_EQUAL_32(0x01111110, r7);
  ASSERT_EQUAL_32(0x9a222221, r8);
  ASSERT_EQUAL_32(0x8091a2b3, r9);

  START();
  // Initialize registers.
  __ Mov(r0, 0);
  __ Mov(r1, 1);
  __ Mov(r2, 0x01234567);
  __ Mov(r3, 0xfedcba98);
  __ Mov(r4, 0xffffffff);

  // Set the C flag.
  __ Adds(r0, r4, r1);

  __ Adc(r5, r2, r3);
  __ Adc(r6, r0, Operand(r1, LSL, 30));
  __ Adc(r7, r0, Operand(r2, LSR, 16));
  __ Adc(r8, r2, Operand(r3, ASR, 4));
  __ Adc(r9, r2, Operand(r3, ROR, 8));
  __ Adc(r10, r2, Operand(r3, RRX));
  END();

  RUN();

  ASSERT_EQUAL_32(0xffffffff + 1, r5);
  ASSERT_EQUAL_32((INT32_C(1) << 30) + 1, r6);
  ASSERT_EQUAL_32(0x00000123 + 1, r7);
  ASSERT_EQUAL_32(0x01111110 + 1, r8);
  ASSERT_EQUAL_32(0x9a222221 + 1, r9);
  ASSERT_EQUAL_32(0x0091a2b3 + 1, r10);

  // Check that adc correctly sets the condition flags.
  START();
  __ Mov(r0, 0);
  __ Mov(r1, 0xffffffff);
  __ Mov(r2, 1);

  // Clear the C flag.
  __ Adds(r0, r0, 0);
  __ Adcs(r3, r2, r1);
  END();

  RUN();

  ASSERT_EQUAL_NZCV(ZCFlag);
  ASSERT_EQUAL_32(0, r3);

  START();
  __ Mov(r0, 0);
  __ Mov(r1, 0x80000000);
  __ Mov(r2, 1);

  // Clear the C flag.
  __ Adds(r0, r0, 0);
  __ Adcs(r3, r2, Operand(r1, ASR, 31));
  END();

  RUN();

  ASSERT_EQUAL_NZCV(ZCFlag);
  ASSERT_EQUAL_32(0, r3);

  START();
  __ Mov(r0, 0);
  __ Mov(r1, 0x80000000);
  __ Mov(r2, 0xffffffff);

  // Clear the C flag.
  __ Adds(r0, r0, 0);
  __ Adcs(r3, r2, Operand(r1, LSR, 31));
  END();

  RUN();

  ASSERT_EQUAL_NZCV(ZCFlag);
  ASSERT_EQUAL_32(0, r3);

  START();
  __ Mov(r0, 0);
  __ Mov(r1, 0x07ffffff);
  __ Mov(r2, 0x10);

  // Clear the C flag.
  __ Adds(r0, r0, 0);
  __ Adcs(r3, r2, Operand(r1, LSL, 4));
  END();

  RUN();

  ASSERT_EQUAL_NZCV(NVFlag);
  ASSERT_EQUAL_32(0x80000000, r3);

  START();
  __ Mov(r0, 0);
  __ Mov(r1, 0xffffff00);
  __ Mov(r2, 0xff000001);

  // Clear the C flag.
  __ Adds(r0, r0, 0);
  __ Adcs(r3, r2, Operand(r1, ROR, 8));
  END();

  RUN();

  ASSERT_EQUAL_NZCV(ZCFlag);
  ASSERT_EQUAL_32(0, r3);

  START();
  __ Mov(r0, 0);
  __ Mov(r1, 0xffffffff);
  __ Mov(r2, 0x1);

  // Clear the C flag, forcing RRX to insert 0 in r1's most significant bit.
  __ Adds(r0, r0, 0);
  __ Adcs(r3, r2, Operand(r1, RRX));
  END();

  RUN();

  ASSERT_EQUAL_NZCV(NVFlag);
  ASSERT_EQUAL_32(0x80000000, r3);

  START();
  __ Mov(r0, 0);
  __ Mov(r1, 0xffffffff);
  __ Mov(r2, 0x1);

  // Set the C flag, forcing RRX to insert 1 in r1's most significant bit.
  __ Adds(r0, r1, r2);
  __ Adcs(r3, r2, Operand(r1, RRX));
  END();

  RUN();

  ASSERT_EQUAL_NZCV(CFlag);
  ASSERT_EQUAL_32(1, r3);

  TEARDOWN();
}


TEST(adc_wide_imm) {
  SETUP();

  START();
  __ Mov(r0, 0);

  // Clear the C flag.
  __ Adds(r0, r0, 0);

  __ Adc(r1, r0, 0x12345678);
  __ Adc(r2, r0, 0xffffffff);

  // Set the C flag.
  __ Cmp(r0, r0);

  __ Adc(r3, r0, 0x12345678);
  __ Adc(r4, r0, 0xffffffff);
  END();

  RUN();

  ASSERT_EQUAL_32(0x12345678, r1);
  ASSERT_EQUAL_32(0xffffffff, r2);
  ASSERT_EQUAL_32(0x12345678 + 1, r3);
  ASSERT_EQUAL_32(0, r4);

  TEARDOWN();
}


// TODO: Add SUB tests to the ADD tests.


TEST(add_imm) {
  SETUP();

  START();
  __ Mov(r0, 0);
  __ Mov(r1, 0x1111);
  __ Mov(r2, 0xffffffff);
  __ Mov(r3, 0x80000000);

  __ Add(r4, r0, 0x12);
  __ Add(r5, r1, 0x120000);
  __ Add(r6, r0, 0xab << 12);
  __ Add(r7, r2, 1);

  END();

  RUN();

  ASSERT_EQUAL_32(0x12, r4);
  ASSERT_EQUAL_32(0x121111, r5);
  ASSERT_EQUAL_32(0xab000, r6);
  ASSERT_EQUAL_32(0x0, r7);

  TEARDOWN();
}


TEST(add_wide_imm) {
  SETUP();

  START();
  __ Mov(r0, 0);
  __ Mov(r1, 1);

  __ Add(r2, r0, 0x12345678);
  __ Add(r3, r1, 0xffff);
  END();

  RUN();

  ASSERT_EQUAL_32(0x12345678, r2);
  ASSERT_EQUAL_32(0x00010000, r3);

  TEARDOWN();
}


TEST(add_shifted) {
  SETUP();

  START();
  __ Mov(r0, 0);
  __ Mov(r1, 0x01234567);
  __ Mov(r2, 0x76543210);
  __ Mov(r3, 0xffffffff);

  __ Add(r4, r1, r2);
  __ Add(r5, r0, Operand(r1, LSL, 8));
  __ Add(r6, r0, Operand(r1, LSR, 8));
  __ Add(r7, r0, Operand(r1, ASR, 8));
  __ Add(r8, r3, Operand(r1, ROR, 8));

  // Set the C flag.
  __ Adds(r0, r3, 1);
  __ Add(r9, r3, Operand(r1, RRX));

  // Clear the C flag.
  __ Adds(r0, r0, 0);
  __ Add(r10, r3, Operand(r1, RRX));

  END();

  RUN();

  ASSERT_EQUAL_32(0x77777777, r4);
  ASSERT_EQUAL_32(0x23456700, r5);
  ASSERT_EQUAL_32(0x00012345, r6);
  ASSERT_EQUAL_32(0x00012345, r7);
  ASSERT_EQUAL_32(0x67012344, r8);
  ASSERT_EQUAL_32(0x8091a2b2, r9);
  ASSERT_EQUAL_32(0x0091a2b2, r10);

  TEARDOWN();
}


TEST(and_) {
  SETUP();

  START();
  __ Mov(r0, 0x0000fff0);
  __ Mov(r1, 0xf00000ff);
  __ Mov(r2, 0xffffffff);

  __ And(r3, r0, r1);
  __ And(r4, r0, Operand(r1, LSL, 4));
  __ And(r5, r0, Operand(r1, LSR, 1));
  __ And(r6, r0, Operand(r1, ASR, 20));
  __ And(r7, r0, Operand(r1, ROR, 28));
  __ And(r8, r0, 0xff);

  // Set the C flag.
  __ Adds(r9, r2, 1);
  __ And(r9, r1, Operand(r1, RRX));

  // Clear the C flag.
  __ Adds(r10, r0, 0);
  __ And(r10, r1, Operand(r1, RRX));
  END();

  RUN();

  ASSERT_EQUAL_32(0x000000f0, r3);
  ASSERT_EQUAL_32(0x00000ff0, r4);
  ASSERT_EQUAL_32(0x00000070, r5);
  ASSERT_EQUAL_32(0x0000ff00, r6);
  ASSERT_EQUAL_32(0x00000ff0, r7);
  ASSERT_EQUAL_32(0x000000f0, r8);
  ASSERT_EQUAL_32(0xf000007f, r9);
  ASSERT_EQUAL_32(0x7000007f, r10);

  TEARDOWN();
}


TEST(ands) {
  SETUP();

  START();
  __ Mov(r0, 0);
  __ Mov(r1, 0xf00000ff);

  __ Ands(r0, r1, r1);
  END();

  RUN();

  ASSERT_EQUAL_NZCV(NFlag);
  ASSERT_EQUAL_32(0xf00000ff, r0);

  START();
  __ Mov(r0, 0x00fff000);
  __ Mov(r1, 0xf00000ff);

  __ Ands(r0, r0, Operand(r1, LSL, 4));
  END();

  RUN();

  ASSERT_EQUAL_NZCV(ZCFlag);
  ASSERT_EQUAL_32(0x00000000, r0);

  START();
  __ Mov(r0, 0x0000fff0);
  __ Mov(r1, 0xf00000ff);

  __ Ands(r0, r0, Operand(r1, LSR, 4));
  END();

  RUN();

  ASSERT_EQUAL_NZCV(ZCFlag);
  ASSERT_EQUAL_32(0x00000000, r0);

  START();
  __ Mov(r0, 0xf000fff0);
  __ Mov(r1, 0xf00000ff);

  __ Ands(r0, r0, Operand(r1, ASR, 4));
  END();

  RUN();

  ASSERT_EQUAL_NZCV(NCFlag);
  ASSERT_EQUAL_32(0xf0000000, r0);

  START();
  __ Mov(r0, 0x80000000);
  __ Mov(r1, 0x00000001);

  __ Ands(r0, r0, Operand(r1, ROR, 1));
  END();

  RUN();

  ASSERT_EQUAL_NZCV(NCFlag);
  ASSERT_EQUAL_32(0x80000000, r0);

  START();
  __ Mov(r0, 0x80000000);
  __ Mov(r1, 0x80000001);

  // Clear the C flag, forcing RRX to insert 0 in r1's most significant bit.
  __ Adds(r2, r0, 0);
  __ Ands(r2, r0, Operand(r1, RRX));
  END();

  RUN();

  ASSERT_EQUAL_NZCV(ZCFlag);
  ASSERT_EQUAL_32(0, r2);

  START();
  __ Mov(r0, 0x80000000);
  __ Mov(r1, 0x80000001);
  __ Mov(r2, 0xffffffff);

  // Set the C flag, forcing RRX to insert 1 in r1's most significant bit.
  __ Adds(r2, r2, 1);
  __ Ands(r2, r0, Operand(r1, RRX));
  END();

  RUN();

  ASSERT_EQUAL_NZCV(NCFlag);
  ASSERT_EQUAL_32(0x80000000, r2);

  START();
  __ Mov(r0, 0xfff0);

  __ Ands(r0, r0, 0xf);
  END();

  RUN();

  ASSERT_EQUAL_NZCV(ZFlag);
  ASSERT_EQUAL_32(0x00000000, r0);

  START();
  __ Mov(r0, 0xff000000);

  __ Ands(r0, r0, 0x80000000);
  END();

  RUN();

  ASSERT_EQUAL_NZCV(NCFlag);
  ASSERT_EQUAL_32(0x80000000, r0);

  TEARDOWN();
}


// TODO: fix this test in T32.
TEST_A32(adr) {
  SETUP();

  Label label_1, label_2, label_3, label_4;

  START();
  __ Mov(r0, 0x0);
  __ Adr(r1, &label_3);   // Will be cleared to zero at label_3 to indicate
                          // success.

  __ Adr(r2, &label_1);   // Multiple forward references to the same label.
  __ Adr(r3, &label_1);
  __ Adr(r4, &label_1);

  __ Bind(&label_2);
  __ Eor(r5, r2, r3);  // Ensure that r2, r3 and r4 are identical.
  __ Eor(r6, r2, r4);
  __ Mov(r0, r5);
  __ Mov(r0, r6);
  __ Bx(r2);  // label_1, label_3

  __ Bind(&label_3);
  __ Adr(r2, &label_3);   // Self-reference (offset 0).
  __ Eor(r1, r1, r2);
  __ Adr(r2, &label_4);   // Simple forward reference.
  __ Bx(r2);  // label_4

  __ Bind(&label_1);
  __ Adr(r2, &label_3);   // Multiple reverse references to the same label.
  __ Adr(r3, &label_3);
  __ Adr(r4, &label_3);
  __ Adr(r5, &label_2);   // Simple reverse reference.
  __ Bx(r5);  // label_2

  __ Bind(&label_4);
  END();

  RUN();

  ASSERT_EQUAL_32(0x0, r0);
  ASSERT_EQUAL_32(0x0, r1);

  TEARDOWN();
}


TEST(shift_imm) {
  SETUP();

  START();
  __ Mov(r0, 0);
  __ Mov(r1, 0xfedcba98);
  __ Mov(r2, 0xffffffff);

  __ Lsl(r3, r1, 4);
  __ Lsr(r4, r1, 8);
  __ Asr(r5, r1, 16);
  __ Ror(r6, r1, 20);
  END();

  RUN();

  ASSERT_EQUAL_32(0xedcba980, r3);
  ASSERT_EQUAL_32(0x00fedcba, r4);
  ASSERT_EQUAL_32(0xfffffedc, r5);
  ASSERT_EQUAL_32(0xcba98fed, r6);

  TEARDOWN();
}


TEST(shift_reg) {
  SETUP();

  START();
  __ Mov(r0, 0);
  __ Mov(r1, 0xfedcba98);
  __ Mov(r2, 0xffffffff);

  __ Add(r9, r0, 4);
  __ Lsl(r3, r1, r9);

  __ Add(r9, r0, 8);
  __ Lsr(r4, r1, r9);

  __ Add(r9, r0, 16);
  __ Asr(r5, r1, r9);

  __ Add(r9, r0, 20);
  __ Ror(r6, r1, r9);

  // Set the C flag.
  __ Adds(r7, r2, 1);
  __ Rrx(r7, r1);

  // Clear the C flag.
  __ Adds(r8, r0, 0);
  __ Rrx(r8, r1);
  END();

  RUN();

  ASSERT_EQUAL_32(0xedcba980, r3);
  ASSERT_EQUAL_32(0x00fedcba, r4);
  ASSERT_EQUAL_32(0xfffffedc, r5);
  ASSERT_EQUAL_32(0xcba98fed, r6);
  ASSERT_EQUAL_32(0xff6e5d4c, r7);
  ASSERT_EQUAL_32(0x7f6e5d4c, r8);

  TEARDOWN();
}


TEST(branch_cond) {
  SETUP();

  Label done, wrong;

  START();
  __ Mov(r0, 0x0);
  __ Mov(r1, 0x1);
  __ Mov(r2, 0x80000000);
  // TODO: Use r0 instead of r3 when r0 becomes available.
  __ Mov(r3, 0x1);

  // For each 'cmp' instruction below, condition codes other than the ones
  // following it would branch.

  __ Cmp(r1, 0);
  __ B(eq, &wrong);
  __ B(lo, &wrong);
  __ B(mi, &wrong);
  __ B(vs, &wrong);
  __ B(ls, &wrong);
  __ B(lt, &wrong);
  __ B(le, &wrong);
  Label ok_1;
  __ B(ne, &ok_1);
  // TODO: Use __ Mov(r0, 0x0) instead.
  __ Add(r3, r0, 0x0);
  __ Bind(&ok_1);

  __ Cmp(r1, 1);
  __ B(ne, &wrong);
  __ B(lo, &wrong);
  __ B(mi, &wrong);
  __ B(vs, &wrong);
  __ B(hi, &wrong);
  __ B(lt, &wrong);
  __ B(gt, &wrong);
  Label ok_2;
  __ B(pl, &ok_2);
  // TODO: Use __ Mov(r0, 0x0) instead.
  __ Add(r3, r0, 0x0);
  __ Bind(&ok_2);

  __ Cmp(r1, 2);
  __ B(eq, &wrong);
  __ B(hs, &wrong);
  __ B(pl, &wrong);
  __ B(vs, &wrong);
  __ B(hi, &wrong);
  __ B(ge, &wrong);
  __ B(gt, &wrong);
  Label ok_3;
  __ B(vc, &ok_3);
  // TODO: Use __ Mov(r0, 0x0) instead.
  __ Add(r3, r0, 0x0);
  __ Bind(&ok_3);

  __ Cmp(r2, 1);
  __ B(eq, &wrong);
  __ B(lo, &wrong);
  __ B(mi, &wrong);
  __ B(vc, &wrong);
  __ B(ls, &wrong);
  __ B(ge, &wrong);
  __ B(gt, &wrong);
  Label ok_4;
  __ B(le, &ok_4);
  // TODO: Use __ Mov(r0, 0x0) instead.
  __ Add(r3, r0, 0x0);
  __ Bind(&ok_4);

  Label ok_5;
  __ B(&ok_5);
  // TODO: Use __ Mov(r0, 0x0) instead.
  __ Add(r3, r0, 0x0);
  __ Bind(&ok_5);

  __ B(&done);

  __ Bind(&wrong);
  // TODO: Use __ Mov(r0, 0x0) instead.
  __ Add(r3, r0, 0x0);

  __ Bind(&done);
  END();

  RUN();

  // TODO: Use r0.
  ASSERT_EQUAL_32(0x1, r3);

  TEARDOWN();
}


TEST(bfc_bfi) {
  SETUP();

  START();
  __ Mov(r0, 0xffffffff);
  __ Mov(r1, 0x01234567);
  __ Mov(r2, 0x0);

  __ Bfc(r0, 0, 3);
  __ Bfc(r0, 16, 5);

  __ Bfi(r2, r1, 0, 8);
  __ Bfi(r2, r1, 16, 16);
  END();

  RUN();

  ASSERT_EQUAL_32(0xffe0fff8, r0);
  ASSERT_EQUAL_32(0x45670067, r2);

  TEARDOWN();
}


TEST(bic) {
  SETUP();

  START();
  __ Mov(r0, 0xfff0);
  __ Mov(r1, 0xf00000ff);
  __ Mov(r2, 0xffffffff);

  __ Bic(r3, r0, r1);
  __ Bic(r4, r0, Operand(r1, LSL, 4));
  __ Bic(r5, r0, Operand(r1, LSR, 1));
  __ Bic(r6, r0, Operand(r1, ASR, 20));
  __ Bic(r7, r0, Operand(r1, ROR, 28));
  __ Bic(r8, r0, 0x1f);

  // Set the C flag.
  __ Adds(r9, r2, 1);
  __ Bic(r9, r1, Operand(r1, RRX));

  // Clear the C flag.
  __ Adds(r10, r0, 0);
  __ Bic(r10, r1, Operand(r1, RRX));
  END();

  RUN();

  ASSERT_EQUAL_32(0x0000ff00, r3);
  ASSERT_EQUAL_32(0x0000f000, r4);
  ASSERT_EQUAL_32(0x0000ff80, r5);
  ASSERT_EQUAL_32(0x000000f0, r6);
  ASSERT_EQUAL_32(0x0000f000, r7);
  ASSERT_EQUAL_32(0x0000ffe0, r8);
  ASSERT_EQUAL_32(0x00000080, r9);
  ASSERT_EQUAL_32(0x80000080, r10);

  TEARDOWN();
}


TEST(bics) {
  SETUP();

  START();
  __ Mov(r0, 0);
  __ Mov(r1, 0xf00000ff);

  __ Bics(r0, r1, r1);
  END();

  RUN();

  ASSERT_EQUAL_NZCV(ZFlag);
  ASSERT_EQUAL_32(0, r0);

  START();
  __ Mov(r0, 0x00fff000);
  __ Mov(r1, 0x0fffff00);

  __ Bics(r0, r0, Operand(r1, LSL, 4));
  END();

  RUN();

  ASSERT_EQUAL_NZCV(ZFlag);
  ASSERT_EQUAL_32(0x00000000, r0);

  START();
  __ Mov(r0, 0x0000fff0);
  __ Mov(r1, 0x0fffff00);

  __ Bics(r0, r0, Operand(r1, LSR, 4));
  END();

  RUN();

  ASSERT_EQUAL_NZCV(ZFlag);
  ASSERT_EQUAL_32(0x00000000, r0);

  START();
  __ Mov(r0, 0xf000fff0);
  __ Mov(r1, 0x0fffff00);

  __ Bics(r0, r0, Operand(r1, ASR, 4));
  END();

  RUN();

  ASSERT_EQUAL_NZCV(NFlag);
  ASSERT_EQUAL_32(0xf0000000, r0);

  START();
  __ Mov(r0, 0x80000000);
  __ Mov(r1, 0xfffffffe);

  __ Bics(r0, r0, Operand(r1, ROR, 1));
  END();

  RUN();

  ASSERT_EQUAL_NZCV(NFlag);
  ASSERT_EQUAL_32(0x80000000, r0);

  START();
  __ Mov(r0, 0x80000000);
  __ Mov(r1, 0x80000001);

  // Clear the C flag, forcing RRX to insert 0 in r1's most significant bit.
  __ Adds(r2, r0, 0);
  __ Bics(r2, r0, Operand(r1, RRX));
  END();

  RUN();

  ASSERT_EQUAL_NZCV(NCFlag);
  ASSERT_EQUAL_32(0x80000000, r2);

  START();
  __ Mov(r0, 0x80000000);
  __ Mov(r1, 0x80000001);
  __ Mov(r2, 0xffffffff);

  // Set the C flag, forcing RRX to insert 1 in r1's most significant bit.
  __ Adds(r2, r2, 1);
  __ Bics(r2, r0, Operand(r1, RRX));
  END();

  RUN();

  ASSERT_EQUAL_NZCV(ZCFlag);
  ASSERT_EQUAL_32(0, r2);

  START();
  __ Mov(r0, 0xf000);

  __ Bics(r0, r0, 0xf000);
  END();

  RUN();

  ASSERT_EQUAL_NZCV(ZFlag);
  ASSERT_EQUAL_32(0x00000000, r0);

  START();
  __ Mov(r0, 0xff000000);

  __ Bics(r0, r0, 0x7fffffff);
  END();

  RUN();

  ASSERT_EQUAL_NZCV(NFlag);
  ASSERT_EQUAL_32(0x80000000, r0);

  TEARDOWN();
}


TEST_T32(veneer_pool_in_delegate) {
  SETUP();

  START();

  Label end;

  VIXL_CHECK(masm.VeneerPoolIsEmpty());
  VIXL_CHECK(masm.LiteralPoolIsEmpty());

  __ Mov(r0, 1);
  __ Cbz(r0, &end);

  VIXL_CHECK(!masm.VeneerPoolIsEmpty());
  VIXL_CHECK(masm.LiteralPoolIsEmpty());

  // Generate enough code to have, after the loop, a margin of only one 16-bit
  // instruction that can be generated before we need to generate the veneer
  // pool.
  // Use `CodeBufferCheckScope` and the assembler to generate the code.
  int32_t space =
      masm.GetMarginBeforeVeneerEmission() - k16BitT32InstructionSizeInBytes;
  {
    AssemblerAccurateScope scope(&masm,
                                 space,
                                 CodeBufferCheckScope::kExactSize);
    while (space > 0) {
      __ nop();
      space -= k16BitT32InstructionSizeInBytes;
    }
  }

  // We should not have emitted the veneer pool at this point.
  VIXL_CHECK(!masm.VeneerPoolIsEmpty());
  VIXL_CHECK(masm.LiteralPoolIsEmpty());
  VIXL_CHECK(
      masm.GetMarginBeforeVeneerEmission() == k16BitT32InstructionSizeInBytes);

  // Now generate `Mov(r1, 0x12345678)`. It needs two 32-bit assembler
  // instructions, so it has to go through the `MacroAssembler` delegate. Since
  // there is only margin for one instruction to be generated, the pool will
  // have to be generated from within the `MacroAssembler` delegate. That
  // should not trigger an assertion.
  Label check;
  __ Bind(&check);
  __ Mov(r1, 0x12345678);
  VIXL_CHECK(masm.GetSizeOfCodeGeneratedSince(&check) >
             2 * kMaxInstructionSizeInBytes);
  __ Bind(&end);

  END();

  RUN();

  ASSERT_EQUAL_32(0x12345678, r1);

  TEARDOWN();
}


TEST_T32(literal_pool_in_delegate) {
  SETUP();

  START();

  PrintDisassembler disasm(std::cout);

  VIXL_CHECK(masm.VeneerPoolIsEmpty());
  VIXL_CHECK(masm.LiteralPoolIsEmpty());

  __ Ldrd(r0, r1, 0x1234567890abcdef);

  VIXL_CHECK(masm.VeneerPoolIsEmpty());
  VIXL_CHECK(!masm.LiteralPoolIsEmpty());

  // Generate enough code to have, after the loop, a margin of only one 16-bit
  // instruction that can be generated before we need to generate the literal
  // pool.
  // Use `CodeBufferCheckScope` and the assembler to generate the code.
  int32_t space = masm.GetMarginBeforeLiteralEmission() -
      2 * k16BitT32InstructionSizeInBytes;
  {
    AssemblerAccurateScope scope(&masm,
                                 space,
                                 CodeBufferCheckScope::kExactSize);
    while (space > 0) {
      __ nop();
      space -= k16BitT32InstructionSizeInBytes;
    }
  }

  // We should not have emitted the literal pool at this point.
  VIXL_CHECK(masm.VeneerPoolIsEmpty());
  VIXL_CHECK(!masm.LiteralPoolIsEmpty());
  VIXL_CHECK(masm.GetMarginBeforeLiteralEmission() ==
             2 * k16BitT32InstructionSizeInBytes);

  // Now generate `Mov(r1, 0x12345678)`. It needs two 32-bit assembler
  // instructions, so it has to go through the `MacroAssembler` delegate. Since
  // there is only margin for one instruction to be generated, the pool will
  // have to be generated from within the `MacroAssembler` delegate. That
  // should not trigger an assertion.
  Label check;
  __ Bind(&check);
  __ Mov(r1, 0x12345678);
  VIXL_CHECK(masm.GetSizeOfCodeGeneratedSince(&check) >
             2 * kMaxInstructionSizeInBytes);

  VIXL_CHECK(masm.VeneerPoolIsEmpty());
  VIXL_CHECK(masm.LiteralPoolIsEmpty());

  END();

  disasm.DisassembleT32Buffer(masm.GetBuffer()->GetStartAddress<uint16_t*>(),
                              masm.GetCursorOffset());

  RUN();

  ASSERT_EQUAL_32(0x12345678, r1);

  TEARDOWN();
}


TEST(emit_single_literal) {
  SETUP();

  START();
  // Make sure the pool is empty.
  masm.EmitLiteralPool(MacroAssembler::kBranchRequired);
  ASSERT_LITERAL_POOL_SIZE(0);

  // Create one literal pool entry.
  __ Ldrd(r0, r1, 0x1234567890abcdef);
  ASSERT_LITERAL_POOL_SIZE(8);
  __ Vldr(s0, 1.0);
  __ Vldr(d1, 2.0);
  __ Vmov(d2, 4.1);
  __ Vmov(s8, 8.2);
  ASSERT_LITERAL_POOL_SIZE(20);
  END();

  RUN();

  // Check that the literals loaded correctly.
  ASSERT_EQUAL_32(0x90abcdef, r0);
  ASSERT_EQUAL_32(0x12345678, r1);
  ASSERT_EQUAL_FP32(1.0f, s0);
  ASSERT_EQUAL_FP64(2.0, d1);
  ASSERT_EQUAL_FP64(4.1, d2);
  ASSERT_EQUAL_FP32(8.2f, s8);

  TEARDOWN();
}


#undef __
#define __ masm->

void EmitLdrdLiteralTest(MacroAssembler* masm) {
  const int ldrd_range = masm->IsUsingA32() ? 255 : 1020;
  // We want to emit code up to the maximum literal load range and ensure the
  // pool has not been emitted. Compute the limit (end).
  ptrdiff_t end =
      AlignDown(
          // Align down the PC to 4 bytes as the instruction does when it's
          // executed.
          // The PC will be the cursor offset plus the architecture state PC
          // offset.
          AlignDown(masm->GetBuffer()->GetCursorOffset() +
                    masm->GetArchitectureStatePCOffset(), 4) +
          // Maximum range allowed to access the constant.
          ldrd_range -
          // A branch will be generated before the pool.
          kMaxInstructionSizeInBytes,
          // Align down to 4 bytes as the literals will be 4-byte aligned.
          4);

  // Create one literal pool entry.
  __ Ldrd(r0, r1, 0x1234567890abcdef);
  ASSERT_LITERAL_POOL_SIZE(8);

  int32_t margin = masm->GetMarginBeforeLiteralEmission();
  {
    AssemblerAccurateScope scope(masm,
                                 margin,
                                 CodeBufferCheckScope::kExactSize);
    // Opening the scope should not have triggered the emission of the literal
    // pool.
    VIXL_CHECK(!masm->LiteralPoolIsEmpty());
    while (masm->GetCursorOffset() < end) {
      __ nop();
    }
    VIXL_CHECK(masm->GetCursorOffset() == end);
  }

  // Check that the pool has not been emitted along the way.
  ASSERT_LITERAL_POOL_SIZE(8);
  // This extra instruction should trigger an emit of the pool.
  __ Nop();
  // The pool should have been emitted.
  ASSERT_LITERAL_POOL_SIZE(0);
}


#undef __
#define __ masm.


TEST(emit_literal) {
  SETUP();

  START();

  // Make sure the pool is empty.
  masm.EmitLiteralPool(MacroAssembler::kBranchRequired);
  ASSERT_LITERAL_POOL_SIZE(0);

  EmitLdrdLiteralTest(&masm);

  const int ldrd_range = masm.IsUsingA32() ? 255 : 1020;
  const int string_size = AlignUp(ldrd_range + kMaxInstructionSizeInBytes, 4);
  std::string test_string(string_size, 'x');
  StringLiteral big_literal(test_string.c_str());
  __ Adr(r4, &big_literal);
  // Adding this Ldrd's literal will overflow the literal pool and force a
  // rewind: the string will be emitted first, then the Ldrd, so the ldrd's
  // value will be alone in the pool.
  __ Ldrd(r2, r3, 0xcafebeefdeadbaba);
  ASSERT_LITERAL_POOL_SIZE(8);

  masm.EmitLiteralPool(MacroAssembler::kBranchRequired);
  ASSERT_LITERAL_POOL_SIZE(0);
  __ Ldr(r4, MemOperand(r4));  // Load the first 4 characters in r4.
  END();

  RUN();

  // Check that the literals loaded correctly.
  ASSERT_EQUAL_32(0x90abcdef, r0);
  ASSERT_EQUAL_32(0x12345678, r1);
  ASSERT_EQUAL_32(0xdeadbaba, r2);
  ASSERT_EQUAL_32(0xcafebeef, r3);
  ASSERT_EQUAL_32(0x78787878, r4);

  TEARDOWN();
}

TEST_T32(emit_literal_unaligned) {
  SETUP();

  START();

  // Make sure the pool is empty.
  masm.EmitLiteralPool(MacroAssembler::kBranchRequired);
  ASSERT_LITERAL_POOL_SIZE(0);

  // Generate a nop to break the 4-byte alignment.
  __ Nop();

  EmitLdrdLiteralTest(&masm);

  END();

  RUN();

  // Check that the literals loaded correctly.
  ASSERT_EQUAL_32(0x90abcdef, r0);
  ASSERT_EQUAL_32(0x12345678, r1);

  TEARDOWN();
}


TEST(literal_multiple_uses) {
  SETUP();

  START();
  Literal<int32_t> lit(42);
  __ Ldr(r0, &lit);
  ASSERT_LITERAL_POOL_SIZE(4);

  // Multiple uses of the same literal object should not make the
  // pool grow.
  __ Ldrb(r1, &lit);
  __ Ldrsb(r2, &lit);
  __ Ldrh(r3, &lit);
  __ Ldrsh(r4, &lit);
  ASSERT_LITERAL_POOL_SIZE(4);

  END();

  RUN();

  ASSERT_EQUAL_32(42, r0);
  ASSERT_EQUAL_32(42, r1);
  ASSERT_EQUAL_32(42, r2);
  ASSERT_EQUAL_32(42, r3);
  ASSERT_EQUAL_32(42, r4);

  TEARDOWN();
}


// A test with two literal loads which go out of range at the same time.
TEST_A32(ldr_literal_range_same_time) {
  SETUP();

  START();
  const int ldrd_range = 255;
  // We need to take into account the jump over the pool.
  const int ldrd_padding = ldrd_range - kA32InstructionSizeInBytes;
  const int ldr_range = 4095;
  // We need to take into account the ldrd padding and the ldrd instruction.
  const int ldr_padding = ldr_range - ldrd_padding - kA32InstructionSizeInBytes;

  __ Ldr(r1, 0x12121212);
  ASSERT_LITERAL_POOL_SIZE(4);

  for (unsigned int i = 0; i < ldr_padding / kA32InstructionSizeInBytes; ++i) {
    __ Mov(r0, 0);
  }

  __ Ldrd(r2, r3, 0x1234567890abcdef);
  ASSERT_LITERAL_POOL_SIZE(12);

  for (unsigned int i = 0; i < ldrd_padding / kA32InstructionSizeInBytes; ++i) {
    __ Mov(r0, 0);
  }
  ASSERT_LITERAL_POOL_SIZE(12);

  // This mov will put both literal loads out of range and will force
  // the literal pool emission.
  __ Mov(r0, 0);
  ASSERT_LITERAL_POOL_SIZE(0);
  END();

  RUN();

  ASSERT_EQUAL_32(0x12121212, r1);
  ASSERT_EQUAL_32(0x90abcdef, r2);
  ASSERT_EQUAL_32(0x12345678, r3);

  TEARDOWN();
}


TEST(ldr_literal_mix_types) {
  SETUP();

  START();
  Literal<uint64_t> l0(0x1234567890abcdef);
  Literal<int32_t> l1(0x12345678);
  Literal<uint16_t> l2(1234);
  Literal<int16_t> l3(-678);
  Literal<uint8_t> l4(42);
  Literal<int8_t> l5(-12);

  __ Ldrd(r0, r1, &l0);
  __ Ldr(r2, &l1);
  __ Ldrh(r3, &l2);
  __ Ldrsh(r4, &l3);
  __ Ldrb(r5, &l4);
  __ Ldrsb(r6, &l5);
  ASSERT_LITERAL_POOL_SIZE(28);

  END();

  RUN();

  ASSERT_EQUAL_32(0x90abcdef, r0);
  ASSERT_EQUAL_32(0x12345678, r1);
  ASSERT_EQUAL_32(0x12345678, r2);
  ASSERT_EQUAL_32(1234, r3);
  ASSERT_EQUAL_32(-678, r4);
  ASSERT_EQUAL_32(42, r5);
  ASSERT_EQUAL_32(-12, r6);

  TEARDOWN();
}


struct LdrLiteralRangeTest {
  void (MacroAssembler::*instruction)(Register, RawLiteral*);
  Register result_reg;
  int a32_range;
  int t32_range;
  uint32_t literal_value;
  uint32_t test_value;
};

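// Each entry of kLdrLiteralRangeTestData below reads as:
//   {instruction, result_reg, a32_range, t32_range, literal_value, test_value}
// where `a32_range` and `t32_range` are the maximum PC-relative offsets (in
// bytes) available to the given load-literal instruction in each instruction
// set, and `test_value` is the value expected in `result_reg` after loading
// `literal_value` (reflecting any narrowing or sign extension the load
// performs).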

const LdrLiteralRangeTest kLdrLiteralRangeTestData[] = {
  {&MacroAssembler::Ldr, r1, 4095, 4095, 0x12345678, 0x12345678 },
  {&MacroAssembler::Ldrh, r2, 255, 4095, 0xabcdefff, 0x0000efff },
  {&MacroAssembler::Ldrsh, r3, 255, 4095, 0x00008765, 0xffff8765 },
  {&MacroAssembler::Ldrb, r4, 4095, 4095, 0x12345678, 0x00000078 },
  {&MacroAssembler::Ldrsb, r5, 255, 4095, 0x00000087, 0xffffff87 }
};


void GenerateLdrLiteralTriggerPoolEmission(InstructionSet isa,
                                           bool unaligned_ldr) {
  SETUP();

  for (size_t i = 0; i < ARRAY_SIZE(kLdrLiteralRangeTestData); ++i) {
    const LdrLiteralRangeTest& test = kLdrLiteralRangeTestData[i];

    START();

    if (unaligned_ldr) {
      // Generate a nop to break the 4-byte alignment.
      __ Nop();
      VIXL_ASSERT((masm.GetBuffer()->GetCursorOffset() % 4) == 2);
    }

    __ Ldr(r6, 0x12345678);
    ASSERT_LITERAL_POOL_SIZE(4);

    // In A32 mode we can fit one more instruction before being forced to emit
    // the pool. However the newly added literal will be too far for the ldr
    // instruction, forcing the pool to be emitted earlier. So we need to stop
    // one instruction before the margin on A32 for this test to work as
    // expected.
    int32_t margin_offset = masm.IsUsingA32() ? kA32InstructionSizeInBytes : 0;

    size_t expected_pool_size = 4;
    while ((masm.GetMarginBeforeLiteralEmission() - margin_offset) >=
           static_cast<int32_t>(kMaxInstructionSizeInBytes)) {
      __ Ldr(r7, 0x90abcdef);
      // Each ldr instruction will force a new literal value to be added
      // to the pool. Check that the literal pool grows accordingly.
      expected_pool_size += 4;
      ASSERT_LITERAL_POOL_SIZE(expected_pool_size);
    }

    // This ldr will force the literal pool to be emitted before emitting
    // the load and will create a new pool for the new literal used by this ldr.
    Literal<uint32_t> literal(test.literal_value);
    (masm.*test.instruction)(test.result_reg, &literal);
    ASSERT_LITERAL_POOL_SIZE(4);

    END();

    RUN();

    ASSERT_EQUAL_32(0x12345678, r6);
    ASSERT_EQUAL_32(0x90abcdef, r7);
    ASSERT_EQUAL_32(test.test_value, test.result_reg);
  }

  TEARDOWN();
}


TEST(ldr_literal_trigger_pool_emission) {
  GenerateLdrLiteralTriggerPoolEmission(isa, false);
}


TEST_T32(ldr_literal_trigger_pool_emission_unaligned) {
  GenerateLdrLiteralTriggerPoolEmission(isa, true);
}


void GenerateLdrLiteralRangeTest(InstructionSet isa, bool unaligned_ldr) {
  SETUP();

  for (size_t i = 0; i < ARRAY_SIZE(kLdrLiteralRangeTestData); ++i) {
    const LdrLiteralRangeTest& test = kLdrLiteralRangeTestData[i];

    START();

    // Make sure the pool is empty.
    masm.EmitLiteralPool(MacroAssembler::kBranchRequired);
    ASSERT_LITERAL_POOL_SIZE(0);

    if (unaligned_ldr) {
      // Generate a nop to break the 4-byte alignment.
      __ Nop();
      VIXL_ASSERT((masm.GetBuffer()->GetCursorOffset() % 4) == 2);
    }

    Literal<uint32_t> literal(test.literal_value);
    (masm.*test.instruction)(test.result_reg, &literal);
    ASSERT_LITERAL_POOL_SIZE(4);

    // Generate enough instructions so that the literal load we just emitted
    // goes out of range.
    ptrdiff_t end =
        masm.GetBuffer()->GetCursorOffset() +
        ((masm.IsUsingA32()) ? test.a32_range : test.t32_range);
    while (masm.GetBuffer()->GetCursorOffset() < end) {
      __ Mov(r0, 0);
    }

    // The literal pool should have been emitted now.
    VIXL_CHECK(literal.IsBound());
    ASSERT_LITERAL_POOL_SIZE(0);

    END();

    RUN();

    ASSERT_EQUAL_32(test.test_value, test.result_reg);
  }

  TEARDOWN();
}


TEST(ldr_literal_range) {
  GenerateLdrLiteralRangeTest(isa, false);
}


TEST_T32(ldr_literal_range_unaligned) {
  GenerateLdrLiteralRangeTest(isa, true);
}


TEST(string_literal) {
  SETUP();

  START();
  // Make sure the pool is empty.
  masm.EmitLiteralPool(MacroAssembler::kBranchRequired);
  ASSERT_LITERAL_POOL_SIZE(0);

  StringLiteral hello_string("hello");

  __ Ldrb(r1, &hello_string);

  __ Adr(r0, &hello_string);
  __ Ldrb(r2, MemOperand(r0));
  END();

  RUN();

  ASSERT_EQUAL_32('h', r1);
  ASSERT_EQUAL_32('h', r2);

  TEARDOWN();
}


TEST(custom_literal_in_pool) {
  SETUP();

  START();
  // Make sure the pool is empty.
  masm.EmitLiteralPool(MacroAssembler::kBranchRequired);
  ASSERT_LITERAL_POOL_SIZE(0);

  Literal<uint32_t> l0(static_cast<uint32_t>(0x12345678));
  __ Ldr(r0, &l0);
  masm.EmitLiteralPool(MacroAssembler::kBranchRequired);
  __ Ldr(r1, &l0);
  ASSERT_LITERAL_POOL_SIZE(0);

  Literal<uint64_t> cafebeefdeadbaba(0xcafebeefdeadbaba);
  __ Ldrd(r8, r9, &cafebeefdeadbaba);
  masm.EmitLiteralPool(MacroAssembler::kBranchRequired);
  __ Ldrd(r2, r3, &cafebeefdeadbaba);
  ASSERT_LITERAL_POOL_SIZE(0);

  Literal<uint32_t> l1(0x09abcdef);
  __ Adr(r4, &l1);
  __ Ldr(r4, MemOperand(r4));
  masm.EmitLiteralPool();
  __ Adr(r5, &l1);
  __ Ldr(r5, MemOperand(r5));
  ASSERT_LITERAL_POOL_SIZE(0);

  END();

  RUN();

  // Check that the literals loaded correctly.
  ASSERT_EQUAL_32(0x12345678, r0);
  ASSERT_EQUAL_32(0x12345678, r1);
  ASSERT_EQUAL_32(0xdeadbaba, r2);
  ASSERT_EQUAL_32(0xcafebeef, r3);
  ASSERT_EQUAL_32(0xdeadbaba, r8);
  ASSERT_EQUAL_32(0xcafebeef, r9);
  ASSERT_EQUAL_32(0x09abcdef, r4);
  ASSERT_EQUAL_32(0x09abcdef, r5);
}


TEST(custom_literal_place) {
  SETUP();

  START();
  // Make sure the pool is empty.
  masm.EmitLiteralPool(MacroAssembler::kBranchRequired);
  ASSERT_LITERAL_POOL_SIZE(0);

  Label past_literal0;
  Literal<uint32_t> literal0(static_cast<uint32_t>(0x12345678),
                             RawLiteral::kManuallyPlaced);
  __ Ldr(r0, &literal0);
  __ B(&past_literal0);
  __ Place(&literal0);
  __ Bind(&past_literal0);
  __ Ldr(r1, &literal0);

  ASSERT_LITERAL_POOL_SIZE(0);

  Label past_literal1;
  Literal<uint64_t> cafebeefdeadbaba(0xcafebeefdeadbaba,
                                     RawLiteral::kManuallyPlaced);
  __ B(&past_literal1);
  __ Place(&cafebeefdeadbaba);
  __ Bind(&past_literal1);
  __ Ldrd(r8, r9, &cafebeefdeadbaba);
  __ Ldrd(r2, r3, &cafebeefdeadbaba);
  ASSERT_LITERAL_POOL_SIZE(0);
  END();

  RUN();

  // Check that the literals loaded correctly.
  ASSERT_EQUAL_32(0x12345678, r0);
  ASSERT_EQUAL_32(0x12345678, r1);
  ASSERT_EQUAL_32(0xdeadbaba, r2);
  ASSERT_EQUAL_32(0xcafebeef, r3);
  ASSERT_EQUAL_32(0xdeadbaba, r8);
  ASSERT_EQUAL_32(0xcafebeef, r9);
}


TEST(emit_big_pool) {
  SETUP();

  START();
  // Make sure the pool is empty.
  ASSERT_LITERAL_POOL_SIZE(0);

  Label start;
  __ Bind(&start);
  for (int i = 1000; i > 0; --i) {
    __ Ldr(r0, i);
  }

  VIXL_ASSERT(masm.GetSizeOfCodeGeneratedSince(&start) == 4000);

  ASSERT_LITERAL_POOL_SIZE(4000);
  END();

  RUN();

  // Check that the literals loaded correctly.
  ASSERT_EQUAL_32(1, r0);

  TEARDOWN();
}


TEST_T32(too_far_cbz) {
  SETUP();

  START();
  Label start;
  Label end;
  Label exit;
  __ Mov(r0, 0);
  __ B(&start);
  __ Bind(&end);
  __ Mov(r0, 1);
  __ B(&exit);
  __ Bind(&start);
  // Cbz is only defined for forward jumps. Check that a backward jump still
  // works (the MacroAssembler substitutes it with Cbnz/B).
  __ Cbz(r0, &end);
  __ Bind(&exit);
  END();

  RUN();

  ASSERT_EQUAL_32(1, r0);
}


TEST_T32(close_cbz) {
  SETUP();

  START();
  Label first;
  Label second;
  __ Mov(r0, 0);
  __ Mov(r1, 0);
  __ Mov(r2, 0);
  __ Cbz(r0, &first);
  __ Bind(&first);
  __ Mov(r1, 1);
  __ Cbnz(r0, &second);
  __ Bind(&second);
  __ Mov(r2, 2);
  END();

  RUN();

  ASSERT_EQUAL_32(0, r0);
  ASSERT_EQUAL_32(1, r1);
  ASSERT_EQUAL_32(2, r2);
}


TEST_T32(close_cbz2) {
  SETUP();

  START();
  Label first;
  Label second;
  __ Mov(r0, 0);
  __ Mov(r1, 0);
  __ Mov(r2, 0);
  __ Cmp(r0, 0);
  __ B(ne, &first);
  __ B(gt, &second);
  __ Cbz(r0, &first);
  __ Bind(&first);
  __ Mov(r1, 1);
  __ Cbnz(r0, &second);
  __ Bind(&second);
  __ Mov(r2, 2);
  END();

  RUN();

  ASSERT_EQUAL_32(0, r0);
  ASSERT_EQUAL_32(1, r1);
  ASSERT_EQUAL_32(2, r2);
}


TEST_T32(not_close_cbz) {
  SETUP();

  START();
  Label first;
  Label second;
  __ Cbz(r0, &first);
  __ B(ne, &first);
  __ Bind(&first);
  __ Cbnz(r0, &second);
  __ B(gt, &second);
  __ Bind(&second);
  END();

  RUN();
}


TEST_T32(veneers) {
  SETUP();

  START();
  Label zero;
  Label exit;
  __ Mov(r0, 0);
  // Create one literal pool entry.
  __ Ldr(r1, 0x12345678);
  ASSERT_LITERAL_POOL_SIZE(4);
  __ Cbz(r0, &zero);
  __ Mov(r0, 1);
  __ B(&exit);
  for (int i = 32; i > 0; i--) {
    __ Mov(r1, 0);
  }
  // Assert that the literal pool has been generated with the veneers.
  ASSERT_LITERAL_POOL_SIZE(0);
  __ Bind(&zero);
  __ Mov(r0, 2);
  __ Bind(&exit);
  END();

  RUN();

  ASSERT_EQUAL_32(2, r0);
  ASSERT_EQUAL_32(0x12345678, r1);
}


// This test checks that veneers are sorted. If they were not, the test would
// fail: the veneer for "exit" would be emitted before the veneer for "zero",
// and the "zero" veneer would be out of range for Cbz.
TEST_T32(veneers_labels_sort) {
  SETUP();

  START();
  Label start;
  Label zero;
  Label exit;
  __ Movs(r0, 0);
  __ B(ne, &exit);
  __ B(&start);
  for (int i = 1048400; i > 0; i -= 4) {
    __ Mov(r1, 0);
  }
  __ Bind(&start);
  __ Cbz(r0, &zero);
  __ Mov(r0, 1);
  __ B(&exit);
  for (int i = 32; i > 0; i--) {
    __ Mov(r1, 0);
  }
  __ Bind(&zero);
  __ Mov(r0, 2);
  __ Bind(&exit);
  END();

  RUN();

  ASSERT_EQUAL_32(2, r0);
}

// Check that a label bound within the assembler is effectively removed from
// the veneer pool.
TEST_T32(veneer_bind) {
  SETUP();
  Label target;
  __ Cbz(r0, &target);
  __ Nop();

  {
    // Bind the target label using the `Assembler`.
    AssemblerAccurateScope aas(&masm,
                               kMaxInstructionSizeInBytes,
                               CodeBufferCheckScope::kMaximumSize);
    __ bind(&target);
    __ nop();
  }

  VIXL_CHECK(target.IsBound());
  VIXL_CHECK(masm.VeneerPoolIsEmpty());

  END();
}

// This test checks that we can update a Literal after usage.
TEST(literal_update) {
  SETUP();

  START();
  Label exit;
  Literal<uint32_t>* a32 =
      new Literal<uint32_t>(0xabcdef01, RawLiteral::kDeletedOnPoolDestruction);
  Literal<uint64_t>* a64 =
      new Literal<uint64_t>(
          UINT64_C(0xabcdef01abcdef01), RawLiteral::kDeletedOnPoolDestruction);
  __ Ldr(r0, a32);
  __ Ldrd(r2, r3, a64);
  __ EmitLiteralPool();
  Literal<uint32_t>* b32 =
      new Literal<uint32_t>(0x10fedcba, RawLiteral::kDeletedOnPoolDestruction);
  Literal<uint64_t>* b64 =
      new Literal<uint64_t>(
          UINT64_C(0x10fedcba10fedcba), RawLiteral::kDeletedOnPoolDestruction);
  __ Ldr(r1, b32);
  __ Ldrd(r4, r5, b64);
  // Update the literals' values. "a32" and "a64" have already been emitted;
  // "b32" and "b64" will only be emitted when END() is called.
  a32->UpdateValue(0x12345678, masm.GetBuffer());
  a64->UpdateValue(UINT64_C(0x13579bdf02468ace), masm.GetBuffer());
  b32->UpdateValue(0x87654321, masm.GetBuffer());
  b64->UpdateValue(UINT64_C(0x1032547698badcfe), masm.GetBuffer());
  END();

  RUN();

  ASSERT_EQUAL_32(0x12345678, r0);
  ASSERT_EQUAL_32(0x87654321, r1);
  ASSERT_EQUAL_32(0x02468ace, r2);
  ASSERT_EQUAL_32(0x13579bdf, r3);
  ASSERT_EQUAL_32(0x98badcfe, r4);
  ASSERT_EQUAL_32(0x10325476, r5);
}


void SwitchCase(JumpTableBase* switch_, uint32_t case_index,
                InstructionSet isa) {
  SETUP();

  START();

  __ Mov(r1, case_index);
  __ Switch(r1, switch_);

  __ Case(switch_, 0);
  __ Mov(r0, 1);
  __ Break(switch_);

  __ Case(switch_, 1);
  __ Mov(r0, 2);
  __ Break(switch_);

  __ Case(switch_, 2);
  __ Mov(r0, 4);
  __ Break(switch_);

  __ Case(switch_, 3);
  __ Mov(r0, 8);
  __ Break(switch_);

  __ Default(switch_);
  __ Mov(r0, -1);

  __ EndSwitch(switch_);

  END();

  RUN();

  if (case_index < 4) {
    ASSERT_EQUAL_32(1 << case_index, r0);
  } else {
    ASSERT_EQUAL_32(-1, r0);
  }
}


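// The three tests below exercise the SwitchCase() helper above with the
// different jump table flavours. The variants only differ in the width
// (8, 16 or 32 bits) of the offsets stored in the table; the
// Switch/Case/Break/Default/EndSwitch usage is identical for all three.
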
TEST(switch_case_8) {
  for (int i = 0; i < 5; i++) {
    JumpTable8bitOffset switch_(5);
    SwitchCase(&switch_, i, isa);
  }
}


TEST(switch_case_16) {
  for (int i = 0; i < 5; i++) {
    JumpTable16bitOffset switch_(5);
    SwitchCase(&switch_, i, isa);
  }
}


TEST(switch_case_32) {
  for (int i = 0; i < 5; i++) {
    JumpTable32bitOffset switch_(5);
    SwitchCase(&switch_, i, isa);
  }
}


TEST(claim_peek_poke) {
  SETUP();

  START();

  Label start;
  __ Bind(&start);
  __ Claim(0);
  __ Drop(0);
  VIXL_CHECK((masm.GetCursorOffset() - start.GetLocation()) == 0);

  __ Claim(32);
  __ Ldr(r0, 0xcafe0000);
  __ Ldr(r1, 0xcafe0001);
  __ Ldr(r2, 0xcafe0002);
  __ Poke(r0, 0);
  __ Poke(r1, 4);
  __ Poke(r2, 8);
  __ Peek(r2, 0);
  __ Peek(r0, 4);
  __ Peek(r1, 8);
  __ Drop(32);

  END();

  RUN();

  ASSERT_EQUAL_32(0xcafe0001, r0);
  ASSERT_EQUAL_32(0xcafe0002, r1);
  ASSERT_EQUAL_32(0xcafe0000, r2);

  TEARDOWN();
}


TEST(msr_i) {
  SETUP();

  START();
  __ Mov(r0, 0xdead);
  __ Mov(r1, 0xdead);
  __ Mov(r2, 0xdead);
  __ Mov(r3, 0xb);
  __ Msr(APSR_nzcvqg, 0);
  __ Mrs(r0, APSR);
  __ Msr(APSR_nzcvqg, 0xffffffff);
  __ Mrs(r1, APSR);
  // Only modify nzcvq => keep previous g.
  __ Lsl(r4, r3, 28);
  __ Msr(APSR_nzcvq, r4);
  __ Mrs(r2, APSR);
  END();

  RUN();

  ASSERT_EQUAL_32(0x10, r0);
  ASSERT_EQUAL_32(0xf80f0010, r1);
  ASSERT_EQUAL_32(0xb00f0010, r2);

  TEARDOWN();
}


TEST(printf) {
  SETUP();

  START();
  __ Mov(r0, 0xb00e0000);
  __ Msr(APSR_nzcvqg, r0);
  __ Mov(r0, sp);
  __ Printf("sp=%x\n", r0);
//  __ Printf("Hello world!\n");
  __ Mov(r0, 0x1234);
  __ Mov(r1, 0x5678);
  StringLiteral literal("extra string");
  __ Adr(r2, &literal);
  __ Mov(r3, 5);
  __ Mov(r4, 0xdead4444);
  __ Mov(r5, 0xdead5555);
  __ Mov(r6, 0xdead6666);
  __ Mov(r7, 0xdead7777);
  __ Mov(r8, 0xdead8888);
  __ Mov(r9, 0xdead9999);
  __ Mov(r10, 0xdeadaaaa);
  __ Mov(r11, 0xdeadbbbb);
  __ Vldr(d0, 1.2345);
  __ Vldr(d1, 2.9876);
  __ Vldr(s4, 1.3333);
  __ Vldr(s5, 3.21);
  __ Vldr(d3, 3.333);
  __ Vldr(d4, 4.444);
  __ Vldr(d5, 5.555);
  __ Vldr(d6, 6.666);
  __ Vldr(d7, 7.777);
  __ Vldr(d8, 8.888);
  __ Vldr(d9, 9.999);
  __ Vldr(d10, 10.000);
  __ Vldr(d11, 11.111);
  __ Vldr(d12, 12.222);
  __ Vldr(d13, 13.333);
  __ Vldr(d14, 14.444);
  __ Vldr(d15, 15.555);
  __ Vldr(d16, 16.666);
  __ Vldr(d17, 17.777);
  __ Vldr(d18, 18.888);
  __ Vldr(d19, 19.999);
  __ Vldr(d20, 20.000);
  __ Vldr(d21, 21.111);
  __ Vldr(d22, 22.222);
  __ Vldr(d23, 23.333);
  __ Vldr(d24, 24.444);
  __ Vldr(d25, 25.555);
  __ Vldr(d26, 26.666);
  __ Vldr(d27, 27.777);
  __ Vldr(d28, 28.888);
  __ Vldr(d29, 29.999);
  __ Vldr(d30, 30.000);
  __ Vldr(d31, 31.111);
  {
    UseScratchRegisterScope temps(&masm);
    // For effective use as an inspection tool, Printf must work without any
    // scratch registers.
    VIXL_CHECK(r12.Is(temps.Acquire()));
    __ Mov(r12, 0xdeadcccc);
    VIXL_CHECK(masm.GetScratchRegisterList()->IsEmpty());

    __ Printf("%% r0=%x r1=%x str=<%.*s>\n", r0, r1, r3, r2);
    __ Printf("r0=%d r1=%d str=<%s>\n", r0, r1, r2);
    __ Printf("d0=%g\n", d0);
    __ Printf("s4=%g\n", s4);
    __ Printf("d0=%g d1=%g s4=%g s5=%g\n", d0, d1, s4, s5);
    __ Printf("d0=%g r0=%x s4=%g r1=%x\n", d0, r0, s4, r1);
    __ Printf("r0=%x d0=%g r1=%x s4=%g\n", r0, d0, r1, s4);
    __ Mov(r0, sp);
    __ Printf("sp=%x\n", r0);
    __ Mrs(r0, APSR);
    // Only keep R/W fields.
    __ Mov(r2, 0xf80f0200);
    __ And(r0, r0, r2);
  }
  END();

  RUN();

  ASSERT_EQUAL_32(0xb00e0000, r0);
  ASSERT_EQUAL_32(0x5678, r1);
  ASSERT_EQUAL_32(5, r3);
  ASSERT_EQUAL_32(0xdead4444, r4);
  ASSERT_EQUAL_32(0xdead5555, r5);
  ASSERT_EQUAL_32(0xdead6666, r6);
  ASSERT_EQUAL_32(0xdead7777, r7);
  ASSERT_EQUAL_32(0xdead8888, r8);
  ASSERT_EQUAL_32(0xdead9999, r9);
  ASSERT_EQUAL_32(0xdeadaaaa, r10);
  ASSERT_EQUAL_32(0xdeadbbbb, r11);
  ASSERT_EQUAL_32(0xdeadcccc, r12);
  ASSERT_EQUAL_FP64(1.2345, d0);
  ASSERT_EQUAL_FP64(2.9876, d1);
  ASSERT_EQUAL_FP32(1.3333, s4);
  ASSERT_EQUAL_FP32(3.21, s5);
  ASSERT_EQUAL_FP64(3.333, d3);
  ASSERT_EQUAL_FP64(4.444, d4);
  ASSERT_EQUAL_FP64(5.555, d5);
  ASSERT_EQUAL_FP64(6.666, d6);
  ASSERT_EQUAL_FP64(7.777, d7);
  ASSERT_EQUAL_FP64(8.888, d8);
  ASSERT_EQUAL_FP64(9.999, d9);
  ASSERT_EQUAL_FP64(10.000, d10);
  ASSERT_EQUAL_FP64(11.111, d11);
  ASSERT_EQUAL_FP64(12.222, d12);
  ASSERT_EQUAL_FP64(13.333, d13);
  ASSERT_EQUAL_FP64(14.444, d14);
  ASSERT_EQUAL_FP64(15.555, d15);
  ASSERT_EQUAL_FP64(16.666, d16);
  ASSERT_EQUAL_FP64(17.777, d17);
  ASSERT_EQUAL_FP64(18.888, d18);
  ASSERT_EQUAL_FP64(19.999, d19);
  ASSERT_EQUAL_FP64(20.000, d20);
  ASSERT_EQUAL_FP64(21.111, d21);
  ASSERT_EQUAL_FP64(22.222, d22);
  ASSERT_EQUAL_FP64(23.333, d23);
  ASSERT_EQUAL_FP64(24.444, d24);
  ASSERT_EQUAL_FP64(25.555, d25);
  ASSERT_EQUAL_FP64(26.666, d26);
  ASSERT_EQUAL_FP64(27.777, d27);
  ASSERT_EQUAL_FP64(28.888, d28);
  ASSERT_EQUAL_FP64(29.999, d29);
  ASSERT_EQUAL_FP64(30.000, d30);
  ASSERT_EQUAL_FP64(31.111, d31);

  TEARDOWN();
}

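
// A minimal Printf smoke test mixing D and S register arguments; an S
// register passed for a %g specifier should be printed as a double, as with
// C vararg promotion.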
TEST(printf2) {
  SETUP();

  START();
  __ Mov(r0, 0x1234);
  __ Mov(r1, 0x5678);
  __ Vldr(d0, 1.2345);
  __ Vldr(s2, 2.9876);
  __ Printf("d0=%g s2=%g r0=%x r1=%x\n", d0, s2, r0, r1);
  END();

  RUN();

  TEARDOWN();
}


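// Q, D and S registers alias each other: qN overlaps d(2N) and d(2N+1), and
// d0-d15 overlap s0-s31 (dN is the pair s(2N), s(2N+1)). Acquiring a register
// must also mark all of its aliases as unavailable.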
TEST(use_scratch_register_scope_v_registers) {
  SETUP();
  {
    UseScratchRegisterScope temps(&masm);
    temps.Include(VRegisterList(q0, q1, q2, q3));

    // This test assumes that low-numbered registers are allocated first. The
    // implementation is allowed to use a different strategy; if it does, the
    // test will need to be updated.
    // TODO: Write more flexible (and thorough) tests.

    VIXL_CHECK(q0.Is(temps.AcquireQ()));
    VIXL_CHECK(!temps.IsAvailable(q0));
    VIXL_CHECK(!temps.IsAvailable(d0));
    VIXL_CHECK(!temps.IsAvailable(d1));
    VIXL_CHECK(!temps.IsAvailable(s0));
    VIXL_CHECK(!temps.IsAvailable(s1));
    VIXL_CHECK(!temps.IsAvailable(s2));
    VIXL_CHECK(!temps.IsAvailable(s3));

    VIXL_CHECK(d2.Is(temps.AcquireV(64)));
    VIXL_CHECK(!temps.IsAvailable(q1));
    VIXL_CHECK(!temps.IsAvailable(d2));
    VIXL_CHECK(temps.IsAvailable(d3));
    VIXL_CHECK(!temps.IsAvailable(s4));
    VIXL_CHECK(!temps.IsAvailable(s5));
    VIXL_CHECK(temps.IsAvailable(s6));
    VIXL_CHECK(temps.IsAvailable(s7));

    VIXL_CHECK(s6.Is(temps.AcquireS()));
    VIXL_CHECK(!temps.IsAvailable(d3));
    VIXL_CHECK(!temps.IsAvailable(s6));
    VIXL_CHECK(temps.IsAvailable(s7));

    VIXL_CHECK(q2.Is(temps.AcquireV(128)));
    VIXL_CHECK(!temps.IsAvailable(q2));
    VIXL_CHECK(!temps.IsAvailable(d4));
    VIXL_CHECK(!temps.IsAvailable(d5));
    VIXL_CHECK(!temps.IsAvailable(s8));
    VIXL_CHECK(!temps.IsAvailable(s9));
    VIXL_CHECK(!temps.IsAvailable(s10));
    VIXL_CHECK(!temps.IsAvailable(s11));
    VIXL_CHECK(temps.IsAvailable(s7));

    VIXL_CHECK(d6.Is(temps.AcquireD()));
    VIXL_CHECK(!temps.IsAvailable(q3));
    VIXL_CHECK(!temps.IsAvailable(d6));
    VIXL_CHECK(temps.IsAvailable(d7));
    VIXL_CHECK(!temps.IsAvailable(s12));
    VIXL_CHECK(!temps.IsAvailable(s13));
    VIXL_CHECK(temps.IsAvailable(s14));
    VIXL_CHECK(temps.IsAvailable(s15));
    VIXL_CHECK(temps.IsAvailable(s7));

    VIXL_CHECK(s7.Is(temps.AcquireS()));
  }
  TEARDOWN();
}


template<typename T>
void CheckInstructionSetA32(const T& assm) {
  VIXL_CHECK(assm.IsUsingA32());
  VIXL_CHECK(!assm.IsUsingT32());
  VIXL_CHECK(assm.GetInstructionSetInUse() == A32);
}


template<typename T>
void CheckInstructionSetT32(const T& assm) {
  VIXL_CHECK(assm.IsUsingT32());
  VIXL_CHECK(!assm.IsUsingA32());
  VIXL_CHECK(assm.GetInstructionSetInUse() == T32);
}


TEST_NOASM(set_isa_constructors) {
  byte buffer[1024];

  // A32 by default.
  CheckInstructionSetA32(Assembler());
  CheckInstructionSetA32(Assembler(1024));
  CheckInstructionSetA32(Assembler(buffer, sizeof(buffer)));
  // Explicit A32.
  CheckInstructionSetA32(Assembler(A32));
  CheckInstructionSetA32(Assembler(1024, A32));
  CheckInstructionSetA32(Assembler(buffer, sizeof(buffer), A32));
  // Explicit T32.
  CheckInstructionSetT32(Assembler(T32));
  CheckInstructionSetT32(Assembler(1024, T32));
  CheckInstructionSetT32(Assembler(buffer, sizeof(buffer), T32));

  // A32 by default.
  CheckInstructionSetA32(MacroAssembler());
  CheckInstructionSetA32(MacroAssembler(1024));
  CheckInstructionSetA32(MacroAssembler(buffer, sizeof(buffer)));
  // Explicit A32.
  CheckInstructionSetA32(MacroAssembler(A32));
  CheckInstructionSetA32(MacroAssembler(1024, A32));
  CheckInstructionSetA32(MacroAssembler(buffer, sizeof(buffer), A32));
  // Explicit T32.
  CheckInstructionSetT32(MacroAssembler(T32));
  CheckInstructionSetT32(MacroAssembler(1024, T32));
  CheckInstructionSetT32(MacroAssembler(buffer, sizeof(buffer), T32));
}


TEST_NOASM(set_isa_empty) {
  // It is possible to change the instruction set if no instructions have yet
  // been generated.
  Assembler assm;
  CheckInstructionSetA32(assm);
  assm.UseT32();
  CheckInstructionSetT32(assm);
  assm.UseA32();
  CheckInstructionSetA32(assm);
  assm.UseInstructionSet(T32);
  CheckInstructionSetT32(assm);
  assm.UseInstructionSet(A32);
  CheckInstructionSetA32(assm);

  MacroAssembler masm;
  CheckInstructionSetA32(masm);
  masm.UseT32();
  CheckInstructionSetT32(masm);
  masm.UseA32();
  CheckInstructionSetA32(masm);
  masm.UseInstructionSet(T32);
  CheckInstructionSetT32(masm);
  masm.UseInstructionSet(A32);
  CheckInstructionSetA32(masm);
}


TEST_NOASM(set_isa_noop) {
  // Calling UseA32, UseT32 or UseInstructionSet is allowed even after
  // instructions have been generated, as long as the call is a no-op (the
  // instruction set does not actually change).
  {
    Assembler assm(A32);
    CheckInstructionSetA32(assm);
    assm.bx(lr);
    VIXL_ASSERT(assm.GetCursorOffset() > 0);
    CheckInstructionSetA32(assm);
    assm.UseA32();
    CheckInstructionSetA32(assm);
    assm.UseInstructionSet(A32);
    CheckInstructionSetA32(assm);
    assm.FinalizeCode();
  }
  {
    Assembler assm(T32);
    CheckInstructionSetT32(assm);
    assm.bx(lr);
    VIXL_ASSERT(assm.GetCursorOffset() > 0);
    CheckInstructionSetT32(assm);
    assm.UseT32();
    CheckInstructionSetT32(assm);
    assm.UseInstructionSet(T32);
    CheckInstructionSetT32(assm);
    assm.FinalizeCode();
  }
  {
    MacroAssembler masm(A32);
    CheckInstructionSetA32(masm);
    masm.Bx(lr);
    VIXL_ASSERT(masm.GetCursorOffset() > 0);
    CheckInstructionSetA32(masm);
    masm.UseA32();
    CheckInstructionSetA32(masm);
    masm.UseInstructionSet(A32);
    CheckInstructionSetA32(masm);
    masm.FinalizeCode();
  }
  {
    MacroAssembler masm(T32);
    CheckInstructionSetT32(masm);
    masm.Bx(lr);
    VIXL_ASSERT(masm.GetCursorOffset() > 0);
    CheckInstructionSetT32(masm);
    masm.UseT32();
    CheckInstructionSetT32(masm);
    masm.UseInstructionSet(T32);
    CheckInstructionSetT32(masm);
    masm.FinalizeCode();
  }
}


TEST(logical_arithmetic_identities) {
  SETUP();

  START();

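  // Each of these operations leaves its destination unchanged and sets no
  // flags, so the MacroAssembler is expected to elide it entirely.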
  Label blob_1;
  __ Bind(&blob_1);
  __ Add(r0, r0, 0);
  __ And(r0, r0, 0xffffffff);
  __ Bic(r0, r0, 0);
  __ Eor(r0, r0, 0);
  __ Orn(r0, r0, 0xffffffff);
  __ Orr(r0, r0, 0);
  __ Sub(r0, r0, 0);
  VIXL_ASSERT(masm.GetSizeOfCodeGeneratedSince(&blob_1) == 0);

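  // The flag-setting variants update the APSR even when the value does not
  // change, so code must be generated for them.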
  Label blob_2;
  __ Bind(&blob_2);
  __ Adds(r0, r0, 0);
  VIXL_ASSERT(masm.GetSizeOfCodeGeneratedSince(&blob_2) != 0);

  Label blob_3;
  __ Bind(&blob_3);
  __ Ands(r0, r0, 0);
  VIXL_ASSERT(masm.GetSizeOfCodeGeneratedSince(&blob_3) != 0);

  Label blob_4;
  __ Bind(&blob_4);
  __ Bics(r0, r0, 0);
  VIXL_ASSERT(masm.GetSizeOfCodeGeneratedSince(&blob_4) != 0);

  Label blob_5;
  __ Bind(&blob_5);
  __ Eors(r0, r0, 0);
  VIXL_ASSERT(masm.GetSizeOfCodeGeneratedSince(&blob_5) != 0);

  Label blob_6;
  __ Bind(&blob_6);
  __ Orns(r0, r0, 0);
  VIXL_ASSERT(masm.GetSizeOfCodeGeneratedSince(&blob_6) != 0);

  Label blob_7;
  __ Bind(&blob_7);
  __ Orrs(r0, r0, 0);
  VIXL_ASSERT(masm.GetSizeOfCodeGeneratedSince(&blob_7) != 0);

  Label blob_8;
  __ Bind(&blob_8);
  __ Subs(r0, r0, 0);
  VIXL_ASSERT(masm.GetSizeOfCodeGeneratedSince(&blob_8) != 0);

  __ Mov(r0, 0xbad);
  __ And(r1, r0, 0);
  __ Bic(r2, r0, 0xffffffff);
  __ Eor(r3, r0, 0xffffffff);
  __ Orn(r4, r0, 0);
  __ Orr(r5, r0, 0xffffffff);

  END();

  RUN();

  ASSERT_EQUAL_32(0xbad, r0);
  ASSERT_EQUAL_32(0, r1);
  ASSERT_EQUAL_32(0, r2);
  ASSERT_EQUAL_32(~0xbad, r3);
  ASSERT_EQUAL_32(0xffffffff, r4);
  ASSERT_EQUAL_32(0xffffffff, r5);

  TEARDOWN();
}


TEST(scratch_register_checks) {
  // It is unsafe for users to use registers that the MacroAssembler is also
  // using as scratch registers. This test checks the MacroAssembler's checking
  // mechanism itself.
  SETUP();
  {
    UseScratchRegisterScope temps(&masm);
    // 'ip' is a scratch register by default.
    VIXL_CHECK(
        masm.GetScratchRegisterList()->GetList() == (1u << ip.GetCode()));
    VIXL_CHECK(temps.IsAvailable(ip));

    // Integer registers have no complicated aliasing so
    // masm.AliasesAvailableScratchRegister(reg) == temps.IsAvailable(reg).
    for (unsigned i = 0; i < kNumberOfRegisters; i++) {
      Register reg(i);
      VIXL_CHECK(masm.AliasesAvailableScratchRegister(reg) ==
                 temps.IsAvailable(reg));
    }
  }

  TEARDOWN();
}


TEST(scratch_register_checks_v) {
  // It is unsafe for users to use registers that the MacroAssembler is also
  // using as scratch registers. This test checks the MacroAssembler's checking
  // mechanism itself.
  SETUP();
  {
    UseScratchRegisterScope temps(&masm);
    // There is no default floating-point scratch register. Add temps of various
    // sizes to check handling of aliased registers.
    VIXL_CHECK(masm.GetScratchVRegisterList()->GetList() == 0);
    temps.Include(q15);
    temps.Include(d15);
    temps.Include(s15);
    temps.Include(d4);
    temps.Include(d5);
    temps.Include(s24);
    temps.Include(s25);
    temps.Include(s26);
    temps.Include(s27);
    temps.Include(q0);
    // See VRegisterList for details of the list encoding.
    VIXL_CHECK(masm.GetScratchVRegisterList()->GetList() ==
               UINT64_C(0xf0000000cf008f0f));
    //                    |       ||  || |
    //                   q15    d15|  || q0
    //                        s24-s27 |d4-d5
    //                               s15

    // Simple checks: Included registers are available.
    VIXL_CHECK(temps.IsAvailable(q15));
    VIXL_CHECK(temps.IsAvailable(d15));
    VIXL_CHECK(temps.IsAvailable(s15));
    VIXL_CHECK(temps.IsAvailable(d4));
    VIXL_CHECK(temps.IsAvailable(d5));
    VIXL_CHECK(temps.IsAvailable(s24));
    VIXL_CHECK(temps.IsAvailable(s25));
    VIXL_CHECK(temps.IsAvailable(s26));
    VIXL_CHECK(temps.IsAvailable(s27));
    VIXL_CHECK(temps.IsAvailable(q0));

    // Each available S register should mark the corresponding D and Q registers
    // as aliasing an available scratch register.
    for (unsigned s = 0; s < kNumberOfSRegisters; s++) {
      if (temps.IsAvailable(SRegister(s))) {
        VIXL_CHECK(masm.AliasesAvailableScratchRegister(SRegister(s)));
        VIXL_CHECK(masm.AliasesAvailableScratchRegister(DRegister(s / 2)));
        VIXL_CHECK(masm.AliasesAvailableScratchRegister(QRegister(s / 4)));
      } else {
        // AliasesAvailableScratchRegister == IsAvailable for S registers.
        VIXL_CHECK(!masm.AliasesAvailableScratchRegister(SRegister(s)));
      }
    }

    // Similar checks for high D registers.
    unsigned first_high_d_register = kNumberOfSRegisters / 2;
    for (unsigned d = first_high_d_register; d < kMaxNumberOfDRegisters; d++) {
      if (temps.IsAvailable(DRegister(d))) {
        VIXL_CHECK(masm.AliasesAvailableScratchRegister(DRegister(d)));
        VIXL_CHECK(masm.AliasesAvailableScratchRegister(QRegister(d / 2)));
      } else {
        // AliasesAvailableScratchRegister == IsAvailable for high D registers.
        VIXL_CHECK(!masm.AliasesAvailableScratchRegister(DRegister(d)));
      }
    }
  }
  TEARDOWN();
}


TEST(nop) {
  SETUP();

  Label start;
  __ Bind(&start);
  __ Nop();
  size_t nop_size = (isa == T32) ? k16BitT32InstructionSizeInBytes
                                 : kA32InstructionSizeInBytes;
  // `MacroAssembler::Nop` must generate at least one nop.
  VIXL_CHECK(masm.GetSizeOfCodeGeneratedSince(&start) >= nop_size);

  masm.FinalizeCode();

  TEARDOWN();
}


// Check that `GetMarginBeforeLiteralEmission()` is precise.
TEST(literal_pool_margin) {
  SETUP();

  START();

  VIXL_CHECK(masm.VeneerPoolIsEmpty());
  VIXL_CHECK(masm.LiteralPoolIsEmpty());

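  // Ldrd with a 64-bit immediate allocates a double-word entry in the literal
  // pool; the low word is loaded into the first register and the high word
  // into the second.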
  // Create a single literal.
  __ Ldrd(r0, r1, 0x1234567890abcdef);

  VIXL_CHECK(!masm.LiteralPoolIsEmpty());

  // Generate code to fill all the margin we have before generating the literal
  // pool.
  int32_t margin = masm.GetMarginBeforeLiteralEmission();
  int32_t end = masm.GetCursorOffset() + margin;
  {
    AssemblerAccurateScope scope(&masm,
                                 margin,
                                 CodeBufferCheckScope::kExactSize);
    // Opening the scope should not have triggered the emission of the literal
    // pool.
    VIXL_CHECK(!masm.LiteralPoolIsEmpty());
    while (masm.GetCursorOffset() < end) {
      __ nop();
    }
    VIXL_CHECK(masm.GetCursorOffset() == end);
  }

  // There should be no margin left to emit the literal pool.
  VIXL_CHECK(!masm.LiteralPoolIsEmpty());
  VIXL_CHECK(masm.GetMarginBeforeLiteralEmission() == 0);

  // So emitting a single instruction should force emission of the pool.
  __ Nop();
  VIXL_CHECK(masm.LiteralPoolIsEmpty());
  END();

  RUN();

  // Check that the literals loaded correctly.
  ASSERT_EQUAL_32(0x90abcdef, r0);
  ASSERT_EQUAL_32(0x12345678, r1);

  TEARDOWN();
}


// Check that `GetMarginBeforeVeneerEmission()` is precise.
TEST(veneer_pool_margin) {
  SETUP();

  START();

  VIXL_CHECK(masm.VeneerPoolIsEmpty());
  VIXL_CHECK(masm.LiteralPoolIsEmpty());

  // Create a single veneer.
  Label target;
  __ B(eq, &target);

  VIXL_CHECK(!masm.VeneerPoolIsEmpty());

  // Generate code to fill all the margin we have before generating the veneer
  // pool.
  int32_t margin = masm.GetMarginBeforeVeneerEmission();
  int32_t end = masm.GetCursorOffset() + margin;
  {
    AssemblerAccurateScope scope(&masm,
                                 margin,
                                 CodeBufferCheckScope::kExactSize);
    // Opening the scope should not have triggered the emission of the veneer
    // pool.
    VIXL_CHECK(!masm.VeneerPoolIsEmpty());
    while (masm.GetCursorOffset() < end) {
      __ nop();
    }
    VIXL_CHECK(masm.GetCursorOffset() == end);
  }
  // There should be no margin left to emit the veneer pool.
  VIXL_CHECK(masm.GetMarginBeforeVeneerEmission() == 0);

  // So emitting a single instruction should force emission of the pool.
  // We cannot simply check that the veneer pool is empty, because the veneer
  // emitted for the conditional branch above is itself tracked by the veneer
  // mechanisms. Instead, check that some 'unexpected' code is generated.
  Label check;
  __ Bind(&check);
  {
    AssemblerAccurateScope scope(&masm,
                                 2,
                                 AssemblerAccurateScope::kMaximumSize);
    // Do not actually generate any code.
  }
  VIXL_CHECK(masm.GetSizeOfCodeGeneratedSince(&check) > 0);
  __ Bind(&target);
  VIXL_CHECK(masm.VeneerPoolIsEmpty());

  END();

  RUN();

  TEARDOWN();
}


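// The code buffer must grow lazily: filling it to exactly its capacity must
// not trigger a reallocation; only the next emitted instruction may.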
TEST(code_buffer_precise_growth) {
  static const int kBaseBufferSize = 16;
  MacroAssembler masm(kBaseBufferSize, T32);

  VIXL_CHECK(masm.GetBuffer()->GetCapacity() == kBaseBufferSize);

  {
    // Fill the buffer with nops.
    AssemblerAccurateScope scope(&masm,
                                 kBaseBufferSize,
                                 CodeBufferCheckScope::kExactSize);
    for (int i = 0; i < kBaseBufferSize; i += k16BitT32InstructionSizeInBytes) {
      __ nop();
    }
  }

  // The buffer should not have grown yet.
  VIXL_CHECK(masm.GetBuffer()->GetCapacity() == kBaseBufferSize);

  // Generating a single instruction should force the buffer to grow.
  __ Nop();

  VIXL_CHECK(masm.GetBuffer()->GetCapacity() > kBaseBufferSize);

  masm.FinalizeCode();
}


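// Corner case: a veneer is pending while the code buffer is completely full.
// EnsureEmitFor() must both grow the buffer and emit the veneer pool.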
TEST_T32(out_of_space_immediately_before_PerformEnsureEmit) {
  static const int kBaseBufferSize = 16;
  MacroAssembler masm(kBaseBufferSize, T32);

  VIXL_CHECK(masm.GetBuffer()->GetCapacity() == kBaseBufferSize);

  VIXL_CHECK(masm.VeneerPoolIsEmpty());
  VIXL_CHECK(masm.LiteralPoolIsEmpty());

  // Create a veneer.
  Label target;
  __ Cbz(r0, &target);

  VIXL_CHECK(!masm.VeneerPoolIsEmpty());

  VIXL_CHECK(IsUint32(masm.GetBuffer()->GetRemainingBytes()));
  uint32_t space = static_cast<uint32_t>(masm.GetBuffer()->GetRemainingBytes());
  {
    // Fill the buffer with nops.
    AssemblerAccurateScope scope(&masm,
                                 space,
                                 CodeBufferCheckScope::kExactSize);
    for (uint32_t i = 0; i < space; i += k16BitT32InstructionSizeInBytes) {
      __ nop();
    }
  }

  VIXL_CHECK(!masm.VeneerPoolIsEmpty());

  // The buffer should not have grown yet, and there should be no space left.
  VIXL_CHECK(masm.GetBuffer()->GetCapacity() == kBaseBufferSize);
  VIXL_CHECK(masm.GetBuffer()->GetRemainingBytes() == 0);

  // Force emission of the veneer, at a point where there is no space available
  // in the buffer.
  int32_t past_cbz_range = masm.GetMarginBeforeVeneerEmission() + 1;
  masm.EnsureEmitFor(past_cbz_range);

  __ Bind(&target);

  VIXL_CHECK(masm.VeneerPoolIsEmpty());

  masm.FinalizeCode();
}


}  // namespace aarch32
}  // namespace vixl