// Copyright 2013, ARM Limited
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//   * Redistributions of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//   * Redistributions in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//   * Neither the name of ARM Limited nor the names of its contributors may be
//     used to endorse or promote products derived from this software without
//     specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <float.h>

#include "cctest.h"
#include "test-utils-a64.h"
#include "a64/macro-assembler-a64.h"
#include "a64/simulator-a64.h"
#include "a64/debugger-a64.h"
#include "a64/disasm-a64.h"
#include "a64/cpu-a64.h"

namespace vixl {

// Test infrastructure.
//
// Tests are functions which accept no parameters and have no return values.
// The testing code should not perform an explicit return once completed. For
// example, to test the mov immediate instruction, a very simple test would be:
//
//   TEST(mov_x0_one) {
//     SETUP();
//
//     START();
//     __ mov(x0, Operand(1));
//     END();
//
//     RUN();
//
//     ASSERT_EQUAL_64(1, x0);
//
//     TEARDOWN();
//   }
//
// Within a START ... END block all registers but sp can be modified. sp has to
// be explicitly saved/restored. The END() macro replaces the function return
// so it may appear multiple times in a test if the test has multiple exit
// points.
//
// Once the test has been run, all integer and floating point registers as well
// as flags are accessible through a RegisterDump instance; see
// test-utils-a64.cc for more info on RegisterDump.
//
// We provide some helper asserts to handle common cases:
//
//   ASSERT_EQUAL_32(int32_t, int32_t)
//   ASSERT_EQUAL_FP32(float, float)
//   ASSERT_EQUAL_32(int32_t, W register)
//   ASSERT_EQUAL_FP32(float, S register)
//   ASSERT_EQUAL_64(int64_t, int64_t)
//   ASSERT_EQUAL_FP64(double, double)
//   ASSERT_EQUAL_64(int64_t, X register)
//   ASSERT_EQUAL_64(X register, X register)
//   ASSERT_EQUAL_FP64(double, D register)
//
// e.g. ASSERT_EQUAL_FP64(0.5, d30);
//
// If more advanced computation is required before the assert then access the
// RegisterDump named core directly:
//
//   ASSERT_EQUAL_64(0x1234, core.xreg(0) & 0xffff);


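// The '__' shorthand makes the generated code read like assembly; for
// example, '__ Mov(x0, 1);' expands to 'masm.Mov(x0, 1);'.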
#define __ masm.
#define TEST(name)  TEST_(ASM_##name)

#define BUF_SIZE (4096)

#define SETUP() SETUP_SIZE(BUF_SIZE)

#ifdef USE_SIMULATOR

// Run tests with the simulator.
#define SETUP_SIZE(buf_size)                                                   \
  byte* buf = new byte[buf_size];                                              \
  MacroAssembler masm(buf, buf_size);                                          \
  Decoder decoder;                                                             \
  Simulator* simulator = NULL;                                                 \
  if (Cctest::run_debugger()) {                                                \
    simulator = new Debugger(&decoder);                                        \
  } else {                                                                     \
    simulator = new Simulator(&decoder);                                       \
    simulator->set_disasm_trace(Cctest::trace_sim());                          \
  }                                                                            \
  simulator->set_coloured_trace(Cctest::coloured_trace());                     \
  simulator->set_instruction_stats(Cctest::instruction_stats());               \
  RegisterDump core

#define START()                                                                \
  masm.Reset();                                                                \
  simulator->ResetState();                                                     \
  __ PushCalleeSavedRegisters();                                               \
  if (Cctest::run_debugger()) {                                                \
    if (Cctest::trace_reg()) {                                                 \
      __ Trace(LOG_STATE, TRACE_ENABLE);                                       \
    }                                                                          \
    if (Cctest::trace_sim()) {                                                 \
      __ Trace(LOG_DISASM, TRACE_ENABLE);                                      \
    }                                                                          \
  }                                                                            \
  if (Cctest::instruction_stats()) {                                           \
    __ EnableInstrumentation();                                                \
  }

#define END()                                                                  \
  if (Cctest::instruction_stats()) {                                           \
    __ DisableInstrumentation();                                               \
  }                                                                            \
  if (Cctest::run_debugger()) {                                                \
    __ Trace(LOG_ALL, TRACE_DISABLE);                                          \
  }                                                                            \
  core.Dump(&masm);                                                            \
  __ PopCalleeSavedRegisters();                                                \
  __ Ret();                                                                    \
  masm.FinalizeCode()

#define RUN()                                                                  \
  simulator->RunFrom(reinterpret_cast<Instruction*>(buf))

#define TEARDOWN()                                                             \
  delete simulator;                                                            \
  delete[] buf;

#else  // ifdef USE_SIMULATOR.
// Run the test on real hardware or models.
#define SETUP_SIZE(size)                                                       \
  size_t buf_size = size;                                                      \
  byte* buf = new byte[buf_size];                                              \
  MacroAssembler masm(buf, buf_size);                                          \
  RegisterDump core;                                                           \
  CPU::SetUp()

#define START()                                                                \
  masm.Reset();                                                                \
  __ PushCalleeSavedRegisters()

#define END()                                                                  \
  core.Dump(&masm);                                                            \
  __ PopCalleeSavedRegisters();                                                \
  __ Ret();                                                                    \
  masm.FinalizeCode()

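// The buffer is converted to a function pointer with memcpy because C++ does
// not define a direct cast from a data pointer to a function pointer.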
#define RUN()                                                                  \
  CPU::EnsureIAndDCacheCoherency(buf, buf_size);                               \
  {                                                                            \
    void (*test_function)(void);                                               \
    VIXL_ASSERT(sizeof(buf) == sizeof(test_function));                         \
    memcpy(&test_function, &buf, sizeof(buf));                                 \
    test_function();                                                           \
  }

#define TEARDOWN()                                                             \
  delete[] buf;

#endif  // ifdef USE_SIMULATOR.

#define ASSERT_EQUAL_NZCV(expected)                                            \
  assert(EqualNzcv(expected, core.flags_nzcv()))

#define ASSERT_EQUAL_REGISTERS(expected)                                       \
  assert(EqualRegisters(&expected, &core))

#define ASSERT_EQUAL_32(expected, result)                                      \
  assert(Equal32(static_cast<uint32_t>(expected), &core, result))

#define ASSERT_EQUAL_FP32(expected, result)                                    \
  assert(EqualFP32(expected, &core, result))

#define ASSERT_EQUAL_64(expected, result)                                      \
  assert(Equal64(expected, &core, result))

#define ASSERT_EQUAL_FP64(expected, result)                                    \
  assert(EqualFP64(expected, &core, result))

#define ASSERT_LITERAL_POOL_SIZE(expected)                                     \
  assert((expected) == (__ LiteralPoolSize()))


TEST(stack_ops) {
  SETUP();

  START();
  // Save sp.
  __ Mov(x29, sp);

  // Set the sp to a known value.
  __ Mov(sp, 0x1004);
  __ Mov(x0, sp);

  // Add immediate to the sp, and move the result to a normal register.
  __ Add(sp, sp, 0x50);
  __ Mov(x1, sp);

  // Add extended to the sp, and move the result to a normal register.
  __ Mov(x17, 0xfff);
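  // Only the low byte of x17 (0xff) is used; SXTB sign-extends it to -1.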
  __ Add(sp, sp, Operand(x17, SXTB));
  __ Mov(x2, sp);

  // Set sp using a logical instruction, and move the result to a normal
  // register.
  __ Orr(sp, xzr, 0x1fff);
  __ Mov(x3, sp);

  // Write wsp using a logical instruction.
  __ Orr(wsp, wzr, 0xfffffff8);
  __ Mov(x4, sp);

  // Write sp, and read back wsp.
  __ Orr(sp, xzr, 0xfffffff8);
  __ Mov(w5, wsp);

  // Restore sp.
  __ Mov(sp, x29);
  END();

  RUN();

  ASSERT_EQUAL_64(0x1004, x0);
  ASSERT_EQUAL_64(0x1054, x1);
  ASSERT_EQUAL_64(0x1053, x2);
  ASSERT_EQUAL_64(0x1fff, x3);
  ASSERT_EQUAL_64(0xfffffff8, x4);
  ASSERT_EQUAL_64(0xfffffff8, x5);

  TEARDOWN();
}


TEST(mvn) {
  SETUP();

  START();
  __ Mvn(w0, 0xfff);
  __ Mvn(x1, 0xfff);
  __ Mvn(w2, Operand(w0, LSL, 1));
  __ Mvn(x3, Operand(x1, LSL, 2));
  __ Mvn(w4, Operand(w0, LSR, 3));
  __ Mvn(x5, Operand(x1, LSR, 4));
  __ Mvn(w6, Operand(w0, ASR, 11));
  __ Mvn(x7, Operand(x1, ASR, 12));
  __ Mvn(w8, Operand(w0, ROR, 13));
  __ Mvn(x9, Operand(x1, ROR, 14));
  __ Mvn(w10, Operand(w2, UXTB));
  __ Mvn(x11, Operand(x2, SXTB, 1));
  __ Mvn(w12, Operand(w2, UXTH, 2));
  __ Mvn(x13, Operand(x2, SXTH, 3));
  __ Mvn(x14, Operand(w2, UXTW, 4));
  __ Mvn(x15, Operand(w2, SXTW, 4));
  END();

  RUN();

  ASSERT_EQUAL_64(0xfffff000, x0);
  ASSERT_EQUAL_64(0xfffffffffffff000, x1);
  ASSERT_EQUAL_64(0x00001fff, x2);
  ASSERT_EQUAL_64(0x0000000000003fff, x3);
  ASSERT_EQUAL_64(0xe00001ff, x4);
  ASSERT_EQUAL_64(0xf0000000000000ff, x5);
  ASSERT_EQUAL_64(0x00000001, x6);
  ASSERT_EQUAL_64(0x0000000000000000, x7);
  ASSERT_EQUAL_64(0x7ff80000, x8);
  ASSERT_EQUAL_64(0x3ffc000000000000, x9);
  ASSERT_EQUAL_64(0xffffff00, x10);
  ASSERT_EQUAL_64(0x0000000000000001, x11);
  ASSERT_EQUAL_64(0xffff8003, x12);
  ASSERT_EQUAL_64(0xffffffffffff0007, x13);
  ASSERT_EQUAL_64(0xfffffffffffe000f, x14);
  ASSERT_EQUAL_64(0xfffffffffffe000f, x15);

  TEARDOWN();
}


TEST(mov_imm_w) {
  SETUP();

  START();
  __ Mov(w0, 0xffffffff);
  __ Mov(w1, 0xffff1234);
  __ Mov(w2, 0x1234ffff);
  __ Mov(w3, 0x00000000);
  __ Mov(w4, 0x00001234);
  __ Mov(w5, 0x12340000);
  __ Mov(w6, 0x12345678);
  END();

  RUN();

  ASSERT_EQUAL_64(0xffffffff, x0);
  ASSERT_EQUAL_64(0xffff1234, x1);
  ASSERT_EQUAL_64(0x1234ffff, x2);
  ASSERT_EQUAL_64(0x00000000, x3);
  ASSERT_EQUAL_64(0x00001234, x4);
  ASSERT_EQUAL_64(0x12340000, x5);
  ASSERT_EQUAL_64(0x12345678, x6);

  TEARDOWN();
}


TEST(mov_imm_x) {
  SETUP();

  START();
  __ Mov(x0, 0xffffffffffffffff);
  __ Mov(x1, 0xffffffffffff1234);
  __ Mov(x2, 0xffffffff12345678);
  __ Mov(x3, 0xffff1234ffff5678);
  __ Mov(x4, 0x1234ffffffff5678);
  __ Mov(x5, 0x1234ffff5678ffff);
  __ Mov(x6, 0x12345678ffffffff);
  __ Mov(x7, 0x1234ffffffffffff);
  __ Mov(x8, 0x123456789abcffff);
  __ Mov(x9, 0x12345678ffff9abc);
  __ Mov(x10, 0x1234ffff56789abc);
  __ Mov(x11, 0xffff123456789abc);
  __ Mov(x12, 0x0000000000000000);
  __ Mov(x13, 0x0000000000001234);
  __ Mov(x14, 0x0000000012345678);
  __ Mov(x15, 0x0000123400005678);
  __ Mov(x18, 0x1234000000005678);
  __ Mov(x19, 0x1234000056780000);
  __ Mov(x20, 0x1234567800000000);
  __ Mov(x21, 0x1234000000000000);
  __ Mov(x22, 0x123456789abc0000);
  __ Mov(x23, 0x1234567800009abc);
  __ Mov(x24, 0x1234000056789abc);
  __ Mov(x25, 0x0000123456789abc);
  __ Mov(x26, 0x123456789abcdef0);
  __ Mov(x27, 0xffff000000000001);
  __ Mov(x28, 0x8000ffff00000000);
  END();

  RUN();

  ASSERT_EQUAL_64(0xffffffffffff1234, x1);
  ASSERT_EQUAL_64(0xffffffff12345678, x2);
  ASSERT_EQUAL_64(0xffff1234ffff5678, x3);
  ASSERT_EQUAL_64(0x1234ffffffff5678, x4);
  ASSERT_EQUAL_64(0x1234ffff5678ffff, x5);
  ASSERT_EQUAL_64(0x12345678ffffffff, x6);
  ASSERT_EQUAL_64(0x1234ffffffffffff, x7);
  ASSERT_EQUAL_64(0x123456789abcffff, x8);
  ASSERT_EQUAL_64(0x12345678ffff9abc, x9);
  ASSERT_EQUAL_64(0x1234ffff56789abc, x10);
  ASSERT_EQUAL_64(0xffff123456789abc, x11);
  ASSERT_EQUAL_64(0x0000000000000000, x12);
  ASSERT_EQUAL_64(0x0000000000001234, x13);
  ASSERT_EQUAL_64(0x0000000012345678, x14);
  ASSERT_EQUAL_64(0x0000123400005678, x15);
  ASSERT_EQUAL_64(0x1234000000005678, x18);
  ASSERT_EQUAL_64(0x1234000056780000, x19);
  ASSERT_EQUAL_64(0x1234567800000000, x20);
  ASSERT_EQUAL_64(0x1234000000000000, x21);
  ASSERT_EQUAL_64(0x123456789abc0000, x22);
  ASSERT_EQUAL_64(0x1234567800009abc, x23);
  ASSERT_EQUAL_64(0x1234000056789abc, x24);
  ASSERT_EQUAL_64(0x0000123456789abc, x25);
  ASSERT_EQUAL_64(0x123456789abcdef0, x26);
  ASSERT_EQUAL_64(0xffff000000000001, x27);
  ASSERT_EQUAL_64(0x8000ffff00000000, x28);

  TEARDOWN();
}


TEST(mov) {
  SETUP();

  START();
  __ Mov(x0, 0xffffffffffffffff);
  __ Mov(x1, 0xffffffffffffffff);
  __ Mov(x2, 0xffffffffffffffff);
  __ Mov(x3, 0xffffffffffffffff);

  __ Mov(x0, 0x0123456789abcdef);

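  // The lower-case movz, movk and movn are raw assembler instructions; the
  // capitalised Mov is the MacroAssembler helper that selects between them.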
  __ movz(x1, UINT64_C(0xabcd) << 16);
  __ movk(x2, UINT64_C(0xabcd) << 32);
  __ movn(x3, UINT64_C(0xabcd) << 48);

  __ Mov(x4, 0x0123456789abcdef);
  __ Mov(x5, x4);

  __ Mov(w6, -1);

  // Test that moves back to the same register have the desired effect. This
  // is a no-op for X registers, and a truncation for W registers.
  __ Mov(x7, 0x0123456789abcdef);
  __ Mov(x7, x7);
  __ Mov(x8, 0x0123456789abcdef);
  __ Mov(w8, w8);
  __ Mov(x9, 0x0123456789abcdef);
  __ Mov(x9, Operand(x9));
  __ Mov(x10, 0x0123456789abcdef);
  __ Mov(w10, Operand(w10));

  __ Mov(w11, 0xfff);
  __ Mov(x12, 0xfff);
  __ Mov(w13, Operand(w11, LSL, 1));
  __ Mov(x14, Operand(x12, LSL, 2));
  __ Mov(w15, Operand(w11, LSR, 3));
  __ Mov(x18, Operand(x12, LSR, 4));
  __ Mov(w19, Operand(w11, ASR, 11));
  __ Mov(x20, Operand(x12, ASR, 12));
  __ Mov(w21, Operand(w11, ROR, 13));
  __ Mov(x22, Operand(x12, ROR, 14));
  __ Mov(w23, Operand(w13, UXTB));
  __ Mov(x24, Operand(x13, SXTB, 1));
  __ Mov(w25, Operand(w13, UXTH, 2));
  __ Mov(x26, Operand(x13, SXTH, 3));
  __ Mov(x27, Operand(w13, UXTW, 4));

  __ Mov(x28, 0x0123456789abcdef);
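  // With kDiscardForSameWReg, the MacroAssembler may elide this mov entirely,
  // so x28 keeps its upper 32 bits.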
  __ Mov(w28, w28, kDiscardForSameWReg);
  END();

  RUN();

  ASSERT_EQUAL_64(0x0123456789abcdef, x0);
  ASSERT_EQUAL_64(0x00000000abcd0000, x1);
  ASSERT_EQUAL_64(0xffffabcdffffffff, x2);
  ASSERT_EQUAL_64(0x5432ffffffffffff, x3);
  ASSERT_EQUAL_64(x4, x5);
  ASSERT_EQUAL_32(-1, w6);
  ASSERT_EQUAL_64(0x0123456789abcdef, x7);
  ASSERT_EQUAL_32(0x89abcdef, w8);
  ASSERT_EQUAL_64(0x0123456789abcdef, x9);
  ASSERT_EQUAL_32(0x89abcdef, w10);
  ASSERT_EQUAL_64(0x00000fff, x11);
  ASSERT_EQUAL_64(0x0000000000000fff, x12);
  ASSERT_EQUAL_64(0x00001ffe, x13);
  ASSERT_EQUAL_64(0x0000000000003ffc, x14);
  ASSERT_EQUAL_64(0x000001ff, x15);
  ASSERT_EQUAL_64(0x00000000000000ff, x18);
  ASSERT_EQUAL_64(0x00000001, x19);
  ASSERT_EQUAL_64(0x0000000000000000, x20);
  ASSERT_EQUAL_64(0x7ff80000, x21);
  ASSERT_EQUAL_64(0x3ffc000000000000, x22);
  ASSERT_EQUAL_64(0x000000fe, x23);
  ASSERT_EQUAL_64(0xfffffffffffffffc, x24);
  ASSERT_EQUAL_64(0x00007ff8, x25);
  ASSERT_EQUAL_64(0x000000000000fff0, x26);
  ASSERT_EQUAL_64(0x000000000001ffe0, x27);
  ASSERT_EQUAL_64(0x0123456789abcdef, x28);

  TEARDOWN();
}


TEST(orr) {
  SETUP();

  START();
  __ Mov(x0, 0xf0f0);
  __ Mov(x1, 0xf00000ff);

  __ Orr(x2, x0, Operand(x1));
  __ Orr(w3, w0, Operand(w1, LSL, 28));
  __ Orr(x4, x0, Operand(x1, LSL, 32));
  __ Orr(x5, x0, Operand(x1, LSR, 4));
  __ Orr(w6, w0, Operand(w1, ASR, 4));
  __ Orr(x7, x0, Operand(x1, ASR, 4));
  __ Orr(w8, w0, Operand(w1, ROR, 12));
  __ Orr(x9, x0, Operand(x1, ROR, 12));
  __ Orr(w10, w0, 0xf);
  __ Orr(x11, x0, 0xf0000000f0000000);
  END();

  RUN();

  ASSERT_EQUAL_64(0x00000000f000f0ff, x2);
  ASSERT_EQUAL_64(0xf000f0f0, x3);
  ASSERT_EQUAL_64(0xf00000ff0000f0f0, x4);
  ASSERT_EQUAL_64(0x000000000f00f0ff, x5);
  ASSERT_EQUAL_64(0xff00f0ff, x6);
  ASSERT_EQUAL_64(0x000000000f00f0ff, x7);
  ASSERT_EQUAL_64(0x0ffff0f0, x8);
  ASSERT_EQUAL_64(0x0ff00000000ff0f0, x9);
  ASSERT_EQUAL_64(0x0000f0ff, x10);
  ASSERT_EQUAL_64(0xf0000000f000f0f0, x11);

  TEARDOWN();
}


TEST(orr_extend) {
  SETUP();

  START();
  __ Mov(x0, 1);
  __ Mov(x1, 0x8000000080008080);
  __ Orr(w6, w0, Operand(w1, UXTB));
  __ Orr(x7, x0, Operand(x1, UXTH, 1));
  __ Orr(w8, w0, Operand(w1, UXTW, 2));
  __ Orr(x9, x0, Operand(x1, UXTX, 3));
  __ Orr(w10, w0, Operand(w1, SXTB));
  __ Orr(x11, x0, Operand(x1, SXTH, 1));
  __ Orr(x12, x0, Operand(x1, SXTW, 2));
  __ Orr(x13, x0, Operand(x1, SXTX, 3));
  END();

  RUN();

  ASSERT_EQUAL_64(0x00000081, x6);
  ASSERT_EQUAL_64(0x0000000000010101, x7);
  ASSERT_EQUAL_64(0x00020201, x8);
  ASSERT_EQUAL_64(0x0000000400040401, x9);
  ASSERT_EQUAL_64(0xffffff81, x10);
  ASSERT_EQUAL_64(0xffffffffffff0101, x11);
  ASSERT_EQUAL_64(0xfffffffe00020201, x12);
  ASSERT_EQUAL_64(0x0000000400040401, x13);

  TEARDOWN();
}


TEST(bitwise_wide_imm) {
  SETUP();

  START();
  __ Mov(x0, 0);
  __ Mov(x1, 0xf0f0f0f0f0f0f0f0);

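  // Neither immediate below can be encoded in a logical instruction's
  // immediate field, so the MacroAssembler has to materialise it in a
  // temporary register first.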
  __ Orr(x10, x0, 0x1234567890abcdef);
  __ Orr(w11, w1, 0x90abcdef);
  END();

  RUN();

  ASSERT_EQUAL_64(0, x0);
  ASSERT_EQUAL_64(0xf0f0f0f0f0f0f0f0, x1);
  ASSERT_EQUAL_64(0x1234567890abcdef, x10);
  ASSERT_EQUAL_64(0x00000000f0fbfdff, x11);

  TEARDOWN();
}


TEST(orn) {
  SETUP();

  START();
  __ Mov(x0, 0xf0f0);
  __ Mov(x1, 0xf00000ff);

  __ Orn(x2, x0, Operand(x1));
  __ Orn(w3, w0, Operand(w1, LSL, 4));
  __ Orn(x4, x0, Operand(x1, LSL, 4));
  __ Orn(x5, x0, Operand(x1, LSR, 1));
  __ Orn(w6, w0, Operand(w1, ASR, 1));
  __ Orn(x7, x0, Operand(x1, ASR, 1));
  __ Orn(w8, w0, Operand(w1, ROR, 16));
  __ Orn(x9, x0, Operand(x1, ROR, 16));
  __ Orn(w10, w0, 0x0000ffff);
  __ Orn(x11, x0, 0x0000ffff0000ffff);
  END();

  RUN();

  ASSERT_EQUAL_64(0xffffffff0ffffff0, x2);
  ASSERT_EQUAL_64(0xfffff0ff, x3);
  ASSERT_EQUAL_64(0xfffffff0fffff0ff, x4);
  ASSERT_EQUAL_64(0xffffffff87fffff0, x5);
  ASSERT_EQUAL_64(0x07fffff0, x6);
  ASSERT_EQUAL_64(0xffffffff87fffff0, x7);
  ASSERT_EQUAL_64(0xff00ffff, x8);
  ASSERT_EQUAL_64(0xff00ffffffffffff, x9);
  ASSERT_EQUAL_64(0xfffff0f0, x10);
  ASSERT_EQUAL_64(0xffff0000fffff0f0, x11);

  TEARDOWN();
}


TEST(orn_extend) {
  SETUP();

  START();
  __ Mov(x0, 1);
  __ Mov(x1, 0x8000000080008081);
  __ Orn(w6, w0, Operand(w1, UXTB));
  __ Orn(x7, x0, Operand(x1, UXTH, 1));
  __ Orn(w8, w0, Operand(w1, UXTW, 2));
  __ Orn(x9, x0, Operand(x1, UXTX, 3));
  __ Orn(w10, w0, Operand(w1, SXTB));
  __ Orn(x11, x0, Operand(x1, SXTH, 1));
  __ Orn(x12, x0, Operand(x1, SXTW, 2));
  __ Orn(x13, x0, Operand(x1, SXTX, 3));
  END();

  RUN();

  ASSERT_EQUAL_64(0xffffff7f, x6);
  ASSERT_EQUAL_64(0xfffffffffffefefd, x7);
  ASSERT_EQUAL_64(0xfffdfdfb, x8);
  ASSERT_EQUAL_64(0xfffffffbfffbfbf7, x9);
  ASSERT_EQUAL_64(0x0000007f, x10);
  ASSERT_EQUAL_64(0x000000000000fefd, x11);
  ASSERT_EQUAL_64(0x00000001fffdfdfb, x12);
  ASSERT_EQUAL_64(0xfffffffbfffbfbf7, x13);

  TEARDOWN();
}


TEST(and_) {
  SETUP();

  START();
  __ Mov(x0, 0xfff0);
  __ Mov(x1, 0xf00000ff);

  __ And(x2, x0, Operand(x1));
  __ And(w3, w0, Operand(w1, LSL, 4));
  __ And(x4, x0, Operand(x1, LSL, 4));
  __ And(x5, x0, Operand(x1, LSR, 1));
  __ And(w6, w0, Operand(w1, ASR, 20));
  __ And(x7, x0, Operand(x1, ASR, 20));
  __ And(w8, w0, Operand(w1, ROR, 28));
  __ And(x9, x0, Operand(x1, ROR, 28));
  __ And(w10, w0, Operand(0xff00));
  __ And(x11, x0, Operand(0xff));
  END();

  RUN();

  ASSERT_EQUAL_64(0x000000f0, x2);
  ASSERT_EQUAL_64(0x00000ff0, x3);
  ASSERT_EQUAL_64(0x00000ff0, x4);
  ASSERT_EQUAL_64(0x00000070, x5);
  ASSERT_EQUAL_64(0x0000ff00, x6);
  ASSERT_EQUAL_64(0x00000f00, x7);
  ASSERT_EQUAL_64(0x00000ff0, x8);
  ASSERT_EQUAL_64(0x00000000, x9);
  ASSERT_EQUAL_64(0x0000ff00, x10);
  ASSERT_EQUAL_64(0x000000f0, x11);

  TEARDOWN();
}


TEST(and_extend) {
  SETUP();

  START();
  __ Mov(x0, 0xffffffffffffffff);
  __ Mov(x1, 0x8000000080008081);
  __ And(w6, w0, Operand(w1, UXTB));
  __ And(x7, x0, Operand(x1, UXTH, 1));
  __ And(w8, w0, Operand(w1, UXTW, 2));
  __ And(x9, x0, Operand(x1, UXTX, 3));
  __ And(w10, w0, Operand(w1, SXTB));
  __ And(x11, x0, Operand(x1, SXTH, 1));
  __ And(x12, x0, Operand(x1, SXTW, 2));
  __ And(x13, x0, Operand(x1, SXTX, 3));
  END();

  RUN();

  ASSERT_EQUAL_64(0x00000081, x6);
  ASSERT_EQUAL_64(0x0000000000010102, x7);
  ASSERT_EQUAL_64(0x00020204, x8);
  ASSERT_EQUAL_64(0x0000000400040408, x9);
  ASSERT_EQUAL_64(0xffffff81, x10);
  ASSERT_EQUAL_64(0xffffffffffff0102, x11);
  ASSERT_EQUAL_64(0xfffffffe00020204, x12);
  ASSERT_EQUAL_64(0x0000000400040408, x13);

  TEARDOWN();
}


TEST(ands) {
  SETUP();

  START();
  __ Mov(x1, 0xf00000ff);
  __ Ands(w0, w1, Operand(w1));
  END();

  RUN();

  ASSERT_EQUAL_NZCV(NFlag);
  ASSERT_EQUAL_64(0xf00000ff, x0);

  START();
  __ Mov(x0, 0xfff0);
  __ Mov(x1, 0xf00000ff);
  __ Ands(w0, w0, Operand(w1, LSR, 4));
  END();

  RUN();

  ASSERT_EQUAL_NZCV(ZFlag);
  ASSERT_EQUAL_64(0x00000000, x0);

  START();
  __ Mov(x0, 0x8000000000000000);
  __ Mov(x1, 0x00000001);
  __ Ands(x0, x0, Operand(x1, ROR, 1));
  END();

  RUN();

  ASSERT_EQUAL_NZCV(NFlag);
  ASSERT_EQUAL_64(0x8000000000000000, x0);

  START();
  __ Mov(x0, 0xfff0);
  __ Ands(w0, w0, Operand(0xf));
  END();

  RUN();

  ASSERT_EQUAL_NZCV(ZFlag);
  ASSERT_EQUAL_64(0x00000000, x0);

  START();
  __ Mov(x0, 0xff000000);
  __ Ands(w0, w0, Operand(0x80000000));
  END();

  RUN();

  ASSERT_EQUAL_NZCV(NFlag);
  ASSERT_EQUAL_64(0x80000000, x0);

  TEARDOWN();
}


TEST(bic) {
  SETUP();

  START();
  __ Mov(x0, 0xfff0);
  __ Mov(x1, 0xf00000ff);

  __ Bic(x2, x0, Operand(x1));
  __ Bic(w3, w0, Operand(w1, LSL, 4));
  __ Bic(x4, x0, Operand(x1, LSL, 4));
  __ Bic(x5, x0, Operand(x1, LSR, 1));
  __ Bic(w6, w0, Operand(w1, ASR, 20));
  __ Bic(x7, x0, Operand(x1, ASR, 20));
  __ Bic(w8, w0, Operand(w1, ROR, 28));
  __ Bic(x9, x0, Operand(x1, ROR, 24));
  __ Bic(x10, x0, Operand(0x1f));
  __ Bic(x11, x0, Operand(0x100));

  // Test bic into sp when the constant cannot be encoded in the immediate
  // field.
  // Use x20 to preserve sp. We check the result via x21 because the test
  // infrastructure requires that sp be restored to its original value.
  __ Mov(x20, sp);
  __ Mov(x0, 0xffffff);
  __ Bic(sp, x0, Operand(0xabcdef));
  __ Mov(x21, sp);
  __ Mov(sp, x20);
  END();

  RUN();

  ASSERT_EQUAL_64(0x0000ff00, x2);
  ASSERT_EQUAL_64(0x0000f000, x3);
  ASSERT_EQUAL_64(0x0000f000, x4);
  ASSERT_EQUAL_64(0x0000ff80, x5);
  ASSERT_EQUAL_64(0x000000f0, x6);
  ASSERT_EQUAL_64(0x0000f0f0, x7);
  ASSERT_EQUAL_64(0x0000f000, x8);
  ASSERT_EQUAL_64(0x0000ff00, x9);
  ASSERT_EQUAL_64(0x0000ffe0, x10);
  ASSERT_EQUAL_64(0x0000fef0, x11);

  ASSERT_EQUAL_64(0x543210, x21);

  TEARDOWN();
}


TEST(bic_extend) {
  SETUP();

  START();
  __ Mov(x0, 0xffffffffffffffff);
  __ Mov(x1, 0x8000000080008081);
  __ Bic(w6, w0, Operand(w1, UXTB));
  __ Bic(x7, x0, Operand(x1, UXTH, 1));
  __ Bic(w8, w0, Operand(w1, UXTW, 2));
  __ Bic(x9, x0, Operand(x1, UXTX, 3));
  __ Bic(w10, w0, Operand(w1, SXTB));
  __ Bic(x11, x0, Operand(x1, SXTH, 1));
  __ Bic(x12, x0, Operand(x1, SXTW, 2));
  __ Bic(x13, x0, Operand(x1, SXTX, 3));
  END();

  RUN();

  ASSERT_EQUAL_64(0xffffff7e, x6);
  ASSERT_EQUAL_64(0xfffffffffffefefd, x7);
  ASSERT_EQUAL_64(0xfffdfdfb, x8);
  ASSERT_EQUAL_64(0xfffffffbfffbfbf7, x9);
  ASSERT_EQUAL_64(0x0000007e, x10);
  ASSERT_EQUAL_64(0x000000000000fefd, x11);
  ASSERT_EQUAL_64(0x00000001fffdfdfb, x12);
  ASSERT_EQUAL_64(0xfffffffbfffbfbf7, x13);

  TEARDOWN();
}


TEST(bics) {
  SETUP();

  START();
  __ Mov(x1, 0xffff);
  __ Bics(w0, w1, Operand(w1));
  END();

  RUN();

  ASSERT_EQUAL_NZCV(ZFlag);
  ASSERT_EQUAL_64(0x00000000, x0);

  START();
  __ Mov(x0, 0xffffffff);
  __ Bics(w0, w0, Operand(w0, LSR, 1));
  END();

  RUN();

  ASSERT_EQUAL_NZCV(NFlag);
  ASSERT_EQUAL_64(0x80000000, x0);

  START();
  __ Mov(x0, 0x8000000000000000);
  __ Mov(x1, 0x00000001);
  __ Bics(x0, x0, Operand(x1, ROR, 1));
  END();

  RUN();

  ASSERT_EQUAL_NZCV(ZFlag);
  ASSERT_EQUAL_64(0x00000000, x0);

  START();
  __ Mov(x0, 0xffffffffffffffff);
  __ Bics(x0, x0, 0x7fffffffffffffff);
  END();

  RUN();

  ASSERT_EQUAL_NZCV(NFlag);
  ASSERT_EQUAL_64(0x8000000000000000, x0);

  START();
  __ Mov(w0, 0xffff0000);
  __ Bics(w0, w0, 0xfffffff0);
  END();

  RUN();

  ASSERT_EQUAL_NZCV(ZFlag);
  ASSERT_EQUAL_64(0x00000000, x0);

  TEARDOWN();
}


TEST(eor) {
  SETUP();

  START();
  __ Mov(x0, 0xfff0);
  __ Mov(x1, 0xf00000ff);

  __ Eor(x2, x0, Operand(x1));
  __ Eor(w3, w0, Operand(w1, LSL, 4));
  __ Eor(x4, x0, Operand(x1, LSL, 4));
  __ Eor(x5, x0, Operand(x1, LSR, 1));
  __ Eor(w6, w0, Operand(w1, ASR, 20));
  __ Eor(x7, x0, Operand(x1, ASR, 20));
  __ Eor(w8, w0, Operand(w1, ROR, 28));
  __ Eor(x9, x0, Operand(x1, ROR, 28));
  __ Eor(w10, w0, 0xff00ff00);
  __ Eor(x11, x0, 0xff00ff00ff00ff00);
  END();

  RUN();

  ASSERT_EQUAL_64(0x00000000f000ff0f, x2);
  ASSERT_EQUAL_64(0x0000f000, x3);
  ASSERT_EQUAL_64(0x0000000f0000f000, x4);
  ASSERT_EQUAL_64(0x000000007800ff8f, x5);
  ASSERT_EQUAL_64(0xffff00f0, x6);
  ASSERT_EQUAL_64(0x000000000000f0f0, x7);
  ASSERT_EQUAL_64(0x0000f00f, x8);
  ASSERT_EQUAL_64(0x00000ff00000ffff, x9);
  ASSERT_EQUAL_64(0xff0000f0, x10);
  ASSERT_EQUAL_64(0xff00ff00ff0000f0, x11);

  TEARDOWN();
}


TEST(eor_extend) {
  SETUP();

  START();
  __ Mov(x0, 0x1111111111111111);
  __ Mov(x1, 0x8000000080008081);
  __ Eor(w6, w0, Operand(w1, UXTB));
  __ Eor(x7, x0, Operand(x1, UXTH, 1));
  __ Eor(w8, w0, Operand(w1, UXTW, 2));
  __ Eor(x9, x0, Operand(x1, UXTX, 3));
  __ Eor(w10, w0, Operand(w1, SXTB));
  __ Eor(x11, x0, Operand(x1, SXTH, 1));
  __ Eor(x12, x0, Operand(x1, SXTW, 2));
  __ Eor(x13, x0, Operand(x1, SXTX, 3));
  END();

  RUN();

  ASSERT_EQUAL_64(0x11111190, x6);
  ASSERT_EQUAL_64(0x1111111111101013, x7);
  ASSERT_EQUAL_64(0x11131315, x8);
  ASSERT_EQUAL_64(0x1111111511151519, x9);
  ASSERT_EQUAL_64(0xeeeeee90, x10);
  ASSERT_EQUAL_64(0xeeeeeeeeeeee1013, x11);
  ASSERT_EQUAL_64(0xeeeeeeef11131315, x12);
  ASSERT_EQUAL_64(0x1111111511151519, x13);

  TEARDOWN();
}


TEST(eon) {
  SETUP();

  START();
  __ Mov(x0, 0xfff0);
  __ Mov(x1, 0xf00000ff);

  __ Eon(x2, x0, Operand(x1));
  __ Eon(w3, w0, Operand(w1, LSL, 4));
  __ Eon(x4, x0, Operand(x1, LSL, 4));
  __ Eon(x5, x0, Operand(x1, LSR, 1));
  __ Eon(w6, w0, Operand(w1, ASR, 20));
  __ Eon(x7, x0, Operand(x1, ASR, 20));
  __ Eon(w8, w0, Operand(w1, ROR, 28));
  __ Eon(x9, x0, Operand(x1, ROR, 28));
  __ Eon(w10, w0, 0x03c003c0);
  __ Eon(x11, x0, 0x0000100000001000);
  END();

  RUN();

  ASSERT_EQUAL_64(0xffffffff0fff00f0, x2);
  ASSERT_EQUAL_64(0xffff0fff, x3);
  ASSERT_EQUAL_64(0xfffffff0ffff0fff, x4);
  ASSERT_EQUAL_64(0xffffffff87ff0070, x5);
  ASSERT_EQUAL_64(0x0000ff0f, x6);
  ASSERT_EQUAL_64(0xffffffffffff0f0f, x7);
  ASSERT_EQUAL_64(0xffff0ff0, x8);
  ASSERT_EQUAL_64(0xfffff00fffff0000, x9);
  ASSERT_EQUAL_64(0xfc3f03cf, x10);
  ASSERT_EQUAL_64(0xffffefffffff100f, x11);

  TEARDOWN();
}


TEST(eon_extend) {
  SETUP();

  START();
  __ Mov(x0, 0x1111111111111111);
  __ Mov(x1, 0x8000000080008081);
  __ Eon(w6, w0, Operand(w1, UXTB));
  __ Eon(x7, x0, Operand(x1, UXTH, 1));
  __ Eon(w8, w0, Operand(w1, UXTW, 2));
  __ Eon(x9, x0, Operand(x1, UXTX, 3));
  __ Eon(w10, w0, Operand(w1, SXTB));
  __ Eon(x11, x0, Operand(x1, SXTH, 1));
  __ Eon(x12, x0, Operand(x1, SXTW, 2));
  __ Eon(x13, x0, Operand(x1, SXTX, 3));
  END();

  RUN();

  ASSERT_EQUAL_64(0xeeeeee6f, x6);
  ASSERT_EQUAL_64(0xeeeeeeeeeeefefec, x7);
  ASSERT_EQUAL_64(0xeeececea, x8);
  ASSERT_EQUAL_64(0xeeeeeeeaeeeaeae6, x9);
  ASSERT_EQUAL_64(0x1111116f, x10);
  ASSERT_EQUAL_64(0x111111111111efec, x11);
  ASSERT_EQUAL_64(0x11111110eeececea, x12);
  ASSERT_EQUAL_64(0xeeeeeeeaeeeaeae6, x13);

  TEARDOWN();
}


TEST(mul) {
  SETUP();

  START();
  __ Mov(x16, 0);
  __ Mov(x17, 1);
  __ Mov(x18, 0xffffffff);
  __ Mov(x19, 0xffffffffffffffff);

  __ Mul(w0, w16, w16);
  __ Mul(w1, w16, w17);
  __ Mul(w2, w17, w18);
  __ Mul(w3, w18, w19);
  __ Mul(x4, x16, x16);
  __ Mul(x5, x17, x18);
  __ Mul(x6, x18, x19);
  __ Mul(x7, x19, x19);
  __ Smull(x8, w17, w18);
  __ Smull(x9, w18, w18);
  __ Smull(x10, w19, w19);
  __ Mneg(w11, w16, w16);
  __ Mneg(w12, w16, w17);
  __ Mneg(w13, w17, w18);
  __ Mneg(w14, w18, w19);
  __ Mneg(x20, x16, x16);
  __ Mneg(x21, x17, x18);
  __ Mneg(x22, x18, x19);
  __ Mneg(x23, x19, x19);
  END();

  RUN();

  ASSERT_EQUAL_64(0, x0);
  ASSERT_EQUAL_64(0, x1);
  ASSERT_EQUAL_64(0xffffffff, x2);
  ASSERT_EQUAL_64(1, x3);
  ASSERT_EQUAL_64(0, x4);
  ASSERT_EQUAL_64(0xffffffff, x5);
  ASSERT_EQUAL_64(0xffffffff00000001, x6);
  ASSERT_EQUAL_64(1, x7);
  ASSERT_EQUAL_64(0xffffffffffffffff, x8);
  ASSERT_EQUAL_64(1, x9);
  ASSERT_EQUAL_64(1, x10);
  ASSERT_EQUAL_64(0, x11);
  ASSERT_EQUAL_64(0, x12);
  ASSERT_EQUAL_64(1, x13);
  ASSERT_EQUAL_64(0xffffffff, x14);
  ASSERT_EQUAL_64(0, x20);
  ASSERT_EQUAL_64(0xffffffff00000001, x21);
  ASSERT_EQUAL_64(0xffffffff, x22);
  ASSERT_EQUAL_64(0xffffffffffffffff, x23);

  TEARDOWN();
}


static void SmullHelper(int64_t expected, int64_t a, int64_t b) {
  SETUP();
  START();
  __ Mov(w0, a);
  __ Mov(w1, b);
  __ Smull(x2, w0, w1);
  END();
  RUN();
  ASSERT_EQUAL_64(expected, x2);
  TEARDOWN();
}


TEST(smull) {
  SmullHelper(0, 0, 0);
  SmullHelper(1, 1, 1);
  SmullHelper(-1, -1, 1);
  SmullHelper(1, -1, -1);
  SmullHelper(0xffffffff80000000, 0x80000000, 1);
  SmullHelper(0x0000000080000000, 0x00010000, 0x00008000);
}


TEST(madd) {
  SETUP();

  START();
  __ Mov(x16, 0);
  __ Mov(x17, 1);
  __ Mov(x18, 0xffffffff);
  __ Mov(x19, 0xffffffffffffffff);

  __ Madd(w0, w16, w16, w16);
  __ Madd(w1, w16, w16, w17);
  __ Madd(w2, w16, w16, w18);
  __ Madd(w3, w16, w16, w19);
  __ Madd(w4, w16, w17, w17);
  __ Madd(w5, w17, w17, w18);
  __ Madd(w6, w17, w17, w19);
  __ Madd(w7, w17, w18, w16);
  __ Madd(w8, w17, w18, w18);
  __ Madd(w9, w18, w18, w17);
  __ Madd(w10, w18, w19, w18);
  __ Madd(w11, w19, w19, w19);

  __ Madd(x12, x16, x16, x16);
  __ Madd(x13, x16, x16, x17);
  __ Madd(x14, x16, x16, x18);
  __ Madd(x15, x16, x16, x19);
  __ Madd(x20, x16, x17, x17);
  __ Madd(x21, x17, x17, x18);
  __ Madd(x22, x17, x17, x19);
  __ Madd(x23, x17, x18, x16);
  __ Madd(x24, x17, x18, x18);
  __ Madd(x25, x18, x18, x17);
  __ Madd(x26, x18, x19, x18);
  __ Madd(x27, x19, x19, x19);

  END();

  RUN();

  ASSERT_EQUAL_64(0, x0);
  ASSERT_EQUAL_64(1, x1);
  ASSERT_EQUAL_64(0xffffffff, x2);
  ASSERT_EQUAL_64(0xffffffff, x3);
  ASSERT_EQUAL_64(1, x4);
  ASSERT_EQUAL_64(0, x5);
  ASSERT_EQUAL_64(0, x6);
  ASSERT_EQUAL_64(0xffffffff, x7);
  ASSERT_EQUAL_64(0xfffffffe, x8);
  ASSERT_EQUAL_64(2, x9);
  ASSERT_EQUAL_64(0, x10);
  ASSERT_EQUAL_64(0, x11);

  ASSERT_EQUAL_64(0, x12);
  ASSERT_EQUAL_64(1, x13);
  ASSERT_EQUAL_64(0x00000000ffffffff, x14);
  ASSERT_EQUAL_64(0xffffffffffffffff, x15);
  ASSERT_EQUAL_64(1, x20);
  ASSERT_EQUAL_64(0x0000000100000000, x21);
  ASSERT_EQUAL_64(0, x22);
  ASSERT_EQUAL_64(0x00000000ffffffff, x23);
  ASSERT_EQUAL_64(0x00000001fffffffe, x24);
  ASSERT_EQUAL_64(0xfffffffe00000002, x25);
  ASSERT_EQUAL_64(0, x26);
  ASSERT_EQUAL_64(0, x27);

  TEARDOWN();
}


TEST(msub) {
  SETUP();

  START();
  __ Mov(x16, 0);
  __ Mov(x17, 1);
  __ Mov(x18, 0xffffffff);
  __ Mov(x19, 0xffffffffffffffff);

  __ Msub(w0, w16, w16, w16);
  __ Msub(w1, w16, w16, w17);
  __ Msub(w2, w16, w16, w18);
  __ Msub(w3, w16, w16, w19);
  __ Msub(w4, w16, w17, w17);
  __ Msub(w5, w17, w17, w18);
  __ Msub(w6, w17, w17, w19);
  __ Msub(w7, w17, w18, w16);
  __ Msub(w8, w17, w18, w18);
  __ Msub(w9, w18, w18, w17);
  __ Msub(w10, w18, w19, w18);
  __ Msub(w11, w19, w19, w19);

  __ Msub(x12, x16, x16, x16);
  __ Msub(x13, x16, x16, x17);
  __ Msub(x14, x16, x16, x18);
  __ Msub(x15, x16, x16, x19);
  __ Msub(x20, x16, x17, x17);
  __ Msub(x21, x17, x17, x18);
  __ Msub(x22, x17, x17, x19);
  __ Msub(x23, x17, x18, x16);
  __ Msub(x24, x17, x18, x18);
  __ Msub(x25, x18, x18, x17);
  __ Msub(x26, x18, x19, x18);
  __ Msub(x27, x19, x19, x19);

  END();

  RUN();

  ASSERT_EQUAL_64(0, x0);
  ASSERT_EQUAL_64(1, x1);
  ASSERT_EQUAL_64(0xffffffff, x2);
  ASSERT_EQUAL_64(0xffffffff, x3);
  ASSERT_EQUAL_64(1, x4);
  ASSERT_EQUAL_64(0xfffffffe, x5);
  ASSERT_EQUAL_64(0xfffffffe, x6);
  ASSERT_EQUAL_64(1, x7);
  ASSERT_EQUAL_64(0, x8);
  ASSERT_EQUAL_64(0, x9);
  ASSERT_EQUAL_64(0xfffffffe, x10);
  ASSERT_EQUAL_64(0xfffffffe, x11);

  ASSERT_EQUAL_64(0, x12);
  ASSERT_EQUAL_64(1, x13);
  ASSERT_EQUAL_64(0x00000000ffffffff, x14);
  ASSERT_EQUAL_64(0xffffffffffffffff, x15);
  ASSERT_EQUAL_64(1, x20);
  ASSERT_EQUAL_64(0x00000000fffffffe, x21);
  ASSERT_EQUAL_64(0xfffffffffffffffe, x22);
  ASSERT_EQUAL_64(0xffffffff00000001, x23);
  ASSERT_EQUAL_64(0, x24);
  ASSERT_EQUAL_64(0x0000000200000000, x25);
  ASSERT_EQUAL_64(0x00000001fffffffe, x26);
  ASSERT_EQUAL_64(0xfffffffffffffffe, x27);

  TEARDOWN();
}


TEST(smulh) {
  SETUP();

  START();
  __ Mov(x20, 0);
  __ Mov(x21, 1);
  __ Mov(x22, 0x0000000100000000);
  __ Mov(x23, 0x0000000012345678);
  __ Mov(x24, 0x0123456789abcdef);
  __ Mov(x25, 0x0000000200000000);
  __ Mov(x26, 0x8000000000000000);
  __ Mov(x27, 0xffffffffffffffff);
  __ Mov(x28, 0x5555555555555555);
  __ Mov(x29, 0xaaaaaaaaaaaaaaaa);

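  // Smulh returns the top 64 bits of the 128-bit signed product.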
  __ Smulh(x0, x20, x24);
  __ Smulh(x1, x21, x24);
  __ Smulh(x2, x22, x23);
  __ Smulh(x3, x22, x24);
  __ Smulh(x4, x24, x25);
  __ Smulh(x5, x23, x27);
  __ Smulh(x6, x26, x26);
  __ Smulh(x7, x26, x27);
  __ Smulh(x8, x27, x27);
  __ Smulh(x9, x28, x28);
  __ Smulh(x10, x28, x29);
  __ Smulh(x11, x29, x29);
  END();

  RUN();

  ASSERT_EQUAL_64(0, x0);
  ASSERT_EQUAL_64(0, x1);
  ASSERT_EQUAL_64(0, x2);
  ASSERT_EQUAL_64(0x0000000001234567, x3);
  ASSERT_EQUAL_64(0x0000000002468acf, x4);
  ASSERT_EQUAL_64(0xffffffffffffffff, x5);
  ASSERT_EQUAL_64(0x4000000000000000, x6);
  ASSERT_EQUAL_64(0, x7);
  ASSERT_EQUAL_64(0, x8);
  ASSERT_EQUAL_64(0x1c71c71c71c71c71, x9);
  ASSERT_EQUAL_64(0xe38e38e38e38e38e, x10);
  ASSERT_EQUAL_64(0x1c71c71c71c71c72, x11);

  TEARDOWN();
}


TEST(smaddl_umaddl) {
  SETUP();

  START();
  __ Mov(x17, 1);
  __ Mov(x18, 0x00000000ffffffff);
  __ Mov(x19, 0xffffffffffffffff);
  __ Mov(x20, 4);
  __ Mov(x21, 0x0000000200000000);

  __ Smaddl(x9, w17, w18, x20);
  __ Smaddl(x10, w18, w18, x20);
  __ Smaddl(x11, w19, w19, x20);
  __ Smaddl(x12, w19, w19, x21);
  __ Umaddl(x13, w17, w18, x20);
  __ Umaddl(x14, w18, w18, x20);
  __ Umaddl(x15, w19, w19, x20);
  __ Umaddl(x22, w19, w19, x21);
  END();

  RUN();

  ASSERT_EQUAL_64(3, x9);
  ASSERT_EQUAL_64(5, x10);
  ASSERT_EQUAL_64(5, x11);
  ASSERT_EQUAL_64(0x0000000200000001, x12);
  ASSERT_EQUAL_64(0x0000000100000003, x13);
  ASSERT_EQUAL_64(0xfffffffe00000005, x14);
  ASSERT_EQUAL_64(0xfffffffe00000005, x15);
  ASSERT_EQUAL_64(1, x22);

  TEARDOWN();
}


TEST(smsubl_umsubl) {
  SETUP();

  START();
  __ Mov(x17, 1);
  __ Mov(x18, 0x00000000ffffffff);
  __ Mov(x19, 0xffffffffffffffff);
  __ Mov(x20, 4);
  __ Mov(x21, 0x0000000200000000);

  __ Smsubl(x9, w17, w18, x20);
  __ Smsubl(x10, w18, w18, x20);
  __ Smsubl(x11, w19, w19, x20);
  __ Smsubl(x12, w19, w19, x21);
  __ Umsubl(x13, w17, w18, x20);
  __ Umsubl(x14, w18, w18, x20);
  __ Umsubl(x15, w19, w19, x20);
  __ Umsubl(x22, w19, w19, x21);
  END();

  RUN();

  ASSERT_EQUAL_64(5, x9);
  ASSERT_EQUAL_64(3, x10);
  ASSERT_EQUAL_64(3, x11);
  ASSERT_EQUAL_64(0x00000001ffffffff, x12);
  ASSERT_EQUAL_64(0xffffffff00000005, x13);
  ASSERT_EQUAL_64(0x0000000200000003, x14);
  ASSERT_EQUAL_64(0x0000000200000003, x15);
  ASSERT_EQUAL_64(0x00000003ffffffff, x22);

  TEARDOWN();
}


TEST(div) {
  SETUP();

  START();
  __ Mov(x16, 1);
  __ Mov(x17, 0xffffffff);
  __ Mov(x18, 0xffffffffffffffff);
  __ Mov(x19, 0x80000000);
  __ Mov(x20, 0x8000000000000000);
  __ Mov(x21, 2);

  __ Udiv(w0, w16, w16);
  __ Udiv(w1, w17, w16);
  __ Sdiv(w2, w16, w16);
  __ Sdiv(w3, w16, w17);
  __ Sdiv(w4, w17, w18);

  __ Udiv(x5, x16, x16);
  __ Udiv(x6, x17, x18);
  __ Sdiv(x7, x16, x16);
  __ Sdiv(x8, x16, x17);
  __ Sdiv(x9, x17, x18);

  __ Udiv(w10, w19, w21);
  __ Sdiv(w11, w19, w21);
  __ Udiv(x12, x19, x21);
  __ Sdiv(x13, x19, x21);
  __ Udiv(x14, x20, x21);
  __ Sdiv(x15, x20, x21);

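  // Divide the most negative value by -1. AArch64 defines the sdiv result to
  // wrap back to the dividend rather than trap.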
  __ Udiv(w22, w19, w17);
  __ Sdiv(w23, w19, w17);
  __ Udiv(x24, x20, x18);
  __ Sdiv(x25, x20, x18);

  __ Udiv(x26, x16, x21);
  __ Sdiv(x27, x16, x21);
  __ Udiv(x28, x18, x21);
  __ Sdiv(x29, x18, x21);

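  // Division by zero does not trap on AArch64; both udiv and sdiv return
  // zero.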
  __ Mov(x17, 0);
  __ Udiv(w18, w16, w17);
  __ Sdiv(w19, w16, w17);
  __ Udiv(x20, x16, x17);
  __ Sdiv(x21, x16, x17);
  END();

  RUN();

  ASSERT_EQUAL_64(1, x0);
  ASSERT_EQUAL_64(0xffffffff, x1);
  ASSERT_EQUAL_64(1, x2);
  ASSERT_EQUAL_64(0xffffffff, x3);
  ASSERT_EQUAL_64(1, x4);
  ASSERT_EQUAL_64(1, x5);
  ASSERT_EQUAL_64(0, x6);
  ASSERT_EQUAL_64(1, x7);
  ASSERT_EQUAL_64(0, x8);
  ASSERT_EQUAL_64(0xffffffff00000001, x9);
  ASSERT_EQUAL_64(0x40000000, x10);
  ASSERT_EQUAL_64(0xc0000000, x11);
  ASSERT_EQUAL_64(0x0000000040000000, x12);
  ASSERT_EQUAL_64(0x0000000040000000, x13);
  ASSERT_EQUAL_64(0x4000000000000000, x14);
  ASSERT_EQUAL_64(0xc000000000000000, x15);
  ASSERT_EQUAL_64(0, x22);
  ASSERT_EQUAL_64(0x80000000, x23);
  ASSERT_EQUAL_64(0, x24);
  ASSERT_EQUAL_64(0x8000000000000000, x25);
  ASSERT_EQUAL_64(0, x26);
  ASSERT_EQUAL_64(0, x27);
  ASSERT_EQUAL_64(0x7fffffffffffffff, x28);
  ASSERT_EQUAL_64(0, x29);
  ASSERT_EQUAL_64(0, x18);
  ASSERT_EQUAL_64(0, x19);
  ASSERT_EQUAL_64(0, x20);
  ASSERT_EQUAL_64(0, x21);

  TEARDOWN();
}


TEST(rbit_rev) {
  SETUP();

  START();
  __ Mov(x24, 0xfedcba9876543210);
  __ Rbit(w0, w24);
  __ Rbit(x1, x24);
  __ Rev16(w2, w24);
  __ Rev16(x3, x24);
  __ Rev(w4, w24);
  __ Rev32(x5, x24);
  __ Rev(x6, x24);
  END();

  RUN();

  ASSERT_EQUAL_64(0x084c2a6e, x0);
  ASSERT_EQUAL_64(0x084c2a6e195d3b7f, x1);
  ASSERT_EQUAL_64(0x54761032, x2);
  ASSERT_EQUAL_64(0xdcfe98ba54761032, x3);
  ASSERT_EQUAL_64(0x10325476, x4);
  ASSERT_EQUAL_64(0x98badcfe10325476, x5);
  ASSERT_EQUAL_64(0x1032547698badcfe, x6);

  TEARDOWN();
}


TEST(clz_cls) {
  SETUP();

  START();
  __ Mov(x24, 0x0008000000800000);
  __ Mov(x25, 0xff800000fff80000);
  __ Mov(x26, 0);
  __ Clz(w0, w24);
  __ Clz(x1, x24);
  __ Clz(w2, w25);
  __ Clz(x3, x25);
  __ Clz(w4, w26);
  __ Clz(x5, x26);
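  // Cls counts the leading bits that match the sign bit, excluding the sign
  // bit itself.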
  __ Cls(w6, w24);
  __ Cls(x7, x24);
  __ Cls(w8, w25);
  __ Cls(x9, x25);
  __ Cls(w10, w26);
  __ Cls(x11, x26);
  END();

  RUN();

  ASSERT_EQUAL_64(8, x0);
  ASSERT_EQUAL_64(12, x1);
  ASSERT_EQUAL_64(0, x2);
  ASSERT_EQUAL_64(0, x3);
  ASSERT_EQUAL_64(32, x4);
  ASSERT_EQUAL_64(64, x5);
  ASSERT_EQUAL_64(7, x6);
  ASSERT_EQUAL_64(11, x7);
  ASSERT_EQUAL_64(12, x8);
  ASSERT_EQUAL_64(8, x9);
  ASSERT_EQUAL_64(31, x10);
  ASSERT_EQUAL_64(63, x11);

  TEARDOWN();
}


TEST(label) {
  SETUP();

  Label label_1, label_2, label_3, label_4;

  START();
  __ Mov(x0, 0x1);
  __ Mov(x1, 0x0);
  __ Mov(x22, lr);    // Save lr.

  __ B(&label_1);
  __ B(&label_1);
  __ B(&label_1);     // Multiple branches to the same label.
  __ Mov(x0, 0x0);
  __ Bind(&label_2);
  __ B(&label_3);     // Forward branch.
  __ Mov(x0, 0x0);
  __ Bind(&label_1);
  __ B(&label_2);     // Backward branch.
  __ Mov(x0, 0x0);
  __ Bind(&label_3);
  __ Bl(&label_4);
  END();

  __ Bind(&label_4);
  __ Mov(x1, 0x1);
  __ Mov(lr, x22);
  END();

  RUN();

  ASSERT_EQUAL_64(0x1, x0);
  ASSERT_EQUAL_64(0x1, x1);

  TEARDOWN();
}


TEST(adr) {
  SETUP();

  Label label_1, label_2, label_3, label_4;

  START();
  __ Mov(x0, 0x0);        // Set to non-zero to indicate failure.
  __ Adr(x1, &label_3);   // Set to zero to indicate success.

  __ Adr(x2, &label_1);   // Multiple forward references to the same label.
  __ Adr(x3, &label_1);
  __ Adr(x4, &label_1);

  __ Bind(&label_2);
  __ Eor(x5, x2, Operand(x3));  // Ensure that x2, x3 and x4 are identical.
  __ Eor(x6, x2, Operand(x4));
  __ Orr(x0, x0, Operand(x5));
  __ Orr(x0, x0, Operand(x6));
  __ Br(x2);  // label_1, label_3

  __ Bind(&label_3);
  __ Adr(x2, &label_3);   // Self-reference (offset 0).
  __ Eor(x1, x1, Operand(x2));
  __ Adr(x2, &label_4);   // Simple forward reference.
  __ Br(x2);  // label_4

  __ Bind(&label_1);
  __ Adr(x2, &label_3);   // Multiple reverse references to the same label.
  __ Adr(x3, &label_3);
  __ Adr(x4, &label_3);
  __ Adr(x5, &label_2);   // Simple reverse reference.
  __ Br(x5);  // label_2

  __ Bind(&label_4);
  END();

  RUN();

  ASSERT_EQUAL_64(0x0, x0);
  ASSERT_EQUAL_64(0x0, x1);

  TEARDOWN();
}


TEST(branch_cond) {
  SETUP();

  Label wrong;

  START();
  __ Mov(x0, 0x1);
  __ Mov(x1, 0x1);
  __ Mov(x2, 0x8000000000000000);

  // For each 'cmp' instruction below, condition codes other than the ones
  // following it would branch.

  __ Cmp(x1, 0);
  __ B(&wrong, eq);
  __ B(&wrong, lo);
  __ B(&wrong, mi);
  __ B(&wrong, vs);
  __ B(&wrong, ls);
  __ B(&wrong, lt);
  __ B(&wrong, le);
  Label ok_1;
  __ B(&ok_1, ne);
  __ Mov(x0, 0x0);
  __ Bind(&ok_1);

  __ Cmp(x1, 1);
  __ B(&wrong, ne);
  __ B(&wrong, lo);
  __ B(&wrong, mi);
  __ B(&wrong, vs);
  __ B(&wrong, hi);
  __ B(&wrong, lt);
  __ B(&wrong, gt);
  Label ok_2;
  __ B(&ok_2, pl);
  __ Mov(x0, 0x0);
  __ Bind(&ok_2);

  __ Cmp(x1, 2);
  __ B(&wrong, eq);
  __ B(&wrong, hs);
  __ B(&wrong, pl);
  __ B(&wrong, vs);
  __ B(&wrong, hi);
  __ B(&wrong, ge);
  __ B(&wrong, gt);
  Label ok_3;
  __ B(&ok_3, vc);
  __ Mov(x0, 0x0);
  __ Bind(&ok_3);

  __ Cmp(x2, 1);
  __ B(&wrong, eq);
  __ B(&wrong, lo);
  __ B(&wrong, mi);
  __ B(&wrong, vc);
  __ B(&wrong, ls);
  __ B(&wrong, ge);
  __ B(&wrong, gt);
  Label ok_4;
  __ B(&ok_4, le);
  __ Mov(x0, 0x0);
  __ Bind(&ok_4);

  Label ok_5;
  __ b(&ok_5, al);
  __ Mov(x0, 0x0);
  __ Bind(&ok_5);

  Label ok_6;
  __ b(&ok_6, nv);
  __ Mov(x0, 0x0);
  __ Bind(&ok_6);

  END();

  __ Bind(&wrong);
  __ Mov(x0, 0x0);
  END();

  RUN();

  ASSERT_EQUAL_64(0x1, x0);

  TEARDOWN();
}


TEST(branch_to_reg) {
  SETUP();

  // Test br.
  Label fn1, after_fn1;

  START();
  __ Mov(x29, lr);

  __ Mov(x1, 0);
  __ B(&after_fn1);

  __ Bind(&fn1);
  __ Mov(x0, lr);
  __ Mov(x1, 42);
  __ Br(x0);

  __ Bind(&after_fn1);
  __ Bl(&fn1);

  // Test blr.
  Label fn2, after_fn2;

  __ Mov(x2, 0);
  __ B(&after_fn2);

  __ Bind(&fn2);
  __ Mov(x0, lr);
  __ Mov(x2, 84);
  __ Blr(x0);

  __ Bind(&after_fn2);
  __ Bl(&fn2);
  __ Mov(x3, lr);

  __ Mov(lr, x29);
  END();

  RUN();

  ASSERT_EQUAL_64(core.xreg(3) + kInstructionSize, x0);
  ASSERT_EQUAL_64(42, x1);
  ASSERT_EQUAL_64(84, x2);

  TEARDOWN();
}


TEST(compare_branch) {
  SETUP();

  START();
  __ Mov(x0, 0);
  __ Mov(x1, 0);
  __ Mov(x2, 0);
  __ Mov(x3, 0);
  __ Mov(x4, 0);
  __ Mov(x5, 0);
  __ Mov(x16, 0);
  __ Mov(x17, 42);

  Label zt, zt_end;
  __ Cbz(w16, &zt);
  __ B(&zt_end);
  __ Bind(&zt);
  __ Mov(x0, 1);
  __ Bind(&zt_end);

  Label zf, zf_end;
  __ Cbz(x17, &zf);
  __ B(&zf_end);
  __ Bind(&zf);
  __ Mov(x1, 1);
  __ Bind(&zf_end);

  Label nzt, nzt_end;
  __ Cbnz(w17, &nzt);
  __ B(&nzt_end);
  __ Bind(&nzt);
  __ Mov(x2, 1);
  __ Bind(&nzt_end);

  Label nzf, nzf_end;
  __ Cbnz(x16, &nzf);
  __ B(&nzf_end);
  __ Bind(&nzf);
  __ Mov(x3, 1);
  __ Bind(&nzf_end);

  __ Mov(x18, 0xffffffff00000000);

  Label a, a_end;
  __ Cbz(w18, &a);
  __ B(&a_end);
  __ Bind(&a);
  __ Mov(x4, 1);
  __ Bind(&a_end);

  Label b, b_end;
  __ Cbnz(w18, &b);
  __ B(&b_end);
  __ Bind(&b);
  __ Mov(x5, 1);
  __ Bind(&b_end);

  END();

  RUN();

  ASSERT_EQUAL_64(1, x0);
  ASSERT_EQUAL_64(0, x1);
  ASSERT_EQUAL_64(1, x2);
  ASSERT_EQUAL_64(0, x3);
  ASSERT_EQUAL_64(1, x4);
  ASSERT_EQUAL_64(0, x5);

  TEARDOWN();
}


TEST(test_branch) {
  SETUP();

  START();
  __ Mov(x0, 0);
  __ Mov(x1, 0);
  __ Mov(x2, 0);
  __ Mov(x3, 0);
  __ Mov(x16, 0xaaaaaaaaaaaaaaaa);
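  // All odd-numbered bits of x16 are set; all even-numbered bits are clear.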

  Label bz, bz_end;
  __ Tbz(w16, 0, &bz);
  __ B(&bz_end);
  __ Bind(&bz);
  __ Mov(x0, 1);
  __ Bind(&bz_end);

  Label bo, bo_end;
  __ Tbz(x16, 63, &bo);
  __ B(&bo_end);
  __ Bind(&bo);
  __ Mov(x1, 1);
  __ Bind(&bo_end);

  Label nbz, nbz_end;
  __ Tbnz(x16, 61, &nbz);
  __ B(&nbz_end);
  __ Bind(&nbz);
  __ Mov(x2, 1);
  __ Bind(&nbz_end);

  Label nbo, nbo_end;
  __ Tbnz(w16, 2, &nbo);
  __ B(&nbo_end);
  __ Bind(&nbo);
  __ Mov(x3, 1);
  __ Bind(&nbo_end);
  END();

  RUN();

  ASSERT_EQUAL_64(1, x0);
  ASSERT_EQUAL_64(0, x1);
  ASSERT_EQUAL_64(1, x2);
  ASSERT_EQUAL_64(0, x3);

  TEARDOWN();
}


TEST(branch_type) {
  SETUP();

  Label fail, done;

  START();
  __ Mov(x0, 0x0);
  __ Mov(x10, 0x7);
  __ Mov(x11, 0x0);

  // Test non-taken branches.
  __ Cmp(x10, 0x7);
  __ B(&fail, ne);
  __ B(&fail, never);
  __ B(&fail, reg_zero, x10);
  __ B(&fail, reg_not_zero, x11);
  __ B(&fail, reg_bit_clear, x10, 0);
  __ B(&fail, reg_bit_set, x10, 3);

  // Test taken branches.
  Label l1, l2, l3, l4, l5;
  __ Cmp(x10, 0x7);
  __ B(&l1, eq);
  __ B(&fail);
  __ Bind(&l1);
  __ B(&l2, always);
  __ B(&fail);
  __ Bind(&l2);
  __ B(&l3, reg_not_zero, x10);
  __ B(&fail);
  __ Bind(&l3);
  __ B(&l4, reg_bit_clear, x10, 15);
  __ B(&fail);
  __ Bind(&l4);
  __ B(&l5, reg_bit_set, x10, 1);
  __ B(&fail);
  __ Bind(&l5);

  __ B(&done);

  __ Bind(&fail);
  __ Mov(x0, 0x1);

  __ Bind(&done);

  END();

  RUN();

  ASSERT_EQUAL_64(0x0, x0);

  TEARDOWN();
}


TEST(ldr_str_offset) {
  SETUP();

  uint64_t src[2] = {0xfedcba9876543210, 0x0123456789abcdef};
  uint64_t dst[5] = {0, 0, 0, 0, 0};
  uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
  uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);

  START();
  __ Mov(x17, src_base);
  __ Mov(x18, dst_base);
  __ Ldr(w0, MemOperand(x17));
  __ Str(w0, MemOperand(x18));
  __ Ldr(w1, MemOperand(x17, 4));
  __ Str(w1, MemOperand(x18, 12));
  __ Ldr(x2, MemOperand(x17, 8));
  __ Str(x2, MemOperand(x18, 16));
  __ Ldrb(w3, MemOperand(x17, 1));
  __ Strb(w3, MemOperand(x18, 25));
  __ Ldrh(w4, MemOperand(x17, 2));
  __ Strh(w4, MemOperand(x18, 33));
  END();

  RUN();

  ASSERT_EQUAL_64(0x76543210, x0);
  ASSERT_EQUAL_64(0x76543210, dst[0]);
  ASSERT_EQUAL_64(0xfedcba98, x1);
  ASSERT_EQUAL_64(0xfedcba9800000000, dst[1]);
  ASSERT_EQUAL_64(0x0123456789abcdef, x2);
  ASSERT_EQUAL_64(0x0123456789abcdef, dst[2]);
  ASSERT_EQUAL_64(0x32, x3);
  ASSERT_EQUAL_64(0x3200, dst[3]);
  ASSERT_EQUAL_64(0x7654, x4);
  ASSERT_EQUAL_64(0x765400, dst[4]);
  ASSERT_EQUAL_64(src_base, x17);
  ASSERT_EQUAL_64(dst_base, x18);

  TEARDOWN();
}


TEST(ldr_str_wide) {
  SETUP();

  uint32_t src[8192];
  uint32_t dst[8192];
  uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
  uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);
  memset(src, 0xaa, 8192 * sizeof(src[0]));
  memset(dst, 0xaa, 8192 * sizeof(dst[0]));
  src[0] = 0;
  src[6144] = 6144;
  src[8191] = 8191;

  START();
  __ Mov(x22, src_base);
  __ Mov(x23, dst_base);
  __ Mov(x24, src_base);
  __ Mov(x25, dst_base);
  __ Mov(x26, src_base);
  __ Mov(x27, dst_base);

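  // These offsets exceed the ranges of the load/store immediate fields, so
  // the MacroAssembler must compute the addresses with a scratch register.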
1976  __ Ldr(w0, MemOperand(x22, 8191 * sizeof(src[0])));
1977  __ Str(w0, MemOperand(x23, 8191 * sizeof(dst[0])));
1978  __ Ldr(w1, MemOperand(x24, 4096 * sizeof(src[0]), PostIndex));
1979  __ Str(w1, MemOperand(x25, 4096 * sizeof(dst[0]), PostIndex));
1980  __ Ldr(w2, MemOperand(x26, 6144 * sizeof(src[0]), PreIndex));
1981  __ Str(w2, MemOperand(x27, 6144 * sizeof(dst[0]), PreIndex));
1982  END();
1983
1984  RUN();
1985
1986  ASSERT_EQUAL_32(8191, w0);
1987  ASSERT_EQUAL_32(8191, dst[8191]);
1988  ASSERT_EQUAL_64(src_base, x22);
1989  ASSERT_EQUAL_64(dst_base, x23);
1990  ASSERT_EQUAL_32(0, w1);
1991  ASSERT_EQUAL_32(0, dst[0]);
1992  ASSERT_EQUAL_64(src_base + 4096 * sizeof(src[0]), x24);
1993  ASSERT_EQUAL_64(dst_base + 4096 * sizeof(dst[0]), x25);
1994  ASSERT_EQUAL_32(6144, w2);
1995  ASSERT_EQUAL_32(6144, dst[6144]);
1996  ASSERT_EQUAL_64(src_base + 6144 * sizeof(src[0]), x26);
1997  ASSERT_EQUAL_64(dst_base + 6144 * sizeof(dst[0]), x27);
1998
1999  TEARDOWN();
2000}
2001
2002
TEST(ldr_str_preindex) {
  SETUP();

  uint64_t src[2] = {0xfedcba9876543210, 0x0123456789abcdef};
  uint64_t dst[6] = {0, 0, 0, 0, 0, 0};
  uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
  uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);

  START();
  __ Mov(x17, src_base);
  __ Mov(x18, dst_base);
  __ Mov(x19, src_base);
  __ Mov(x20, dst_base);
  __ Mov(x21, src_base + 16);
  __ Mov(x22, dst_base + 40);
  __ Mov(x23, src_base);
  __ Mov(x24, dst_base);
  __ Mov(x25, src_base);
  __ Mov(x26, dst_base);
  __ Ldr(w0, MemOperand(x17, 4, PreIndex));
  __ Str(w0, MemOperand(x18, 12, PreIndex));
  __ Ldr(x1, MemOperand(x19, 8, PreIndex));
  __ Str(x1, MemOperand(x20, 16, PreIndex));
  __ Ldr(w2, MemOperand(x21, -4, PreIndex));
  __ Str(w2, MemOperand(x22, -4, PreIndex));
  __ Ldrb(w3, MemOperand(x23, 1, PreIndex));
  __ Strb(w3, MemOperand(x24, 25, PreIndex));
  __ Ldrh(w4, MemOperand(x25, 3, PreIndex));
  __ Strh(w4, MemOperand(x26, 41, PreIndex));
  END();

  RUN();

  ASSERT_EQUAL_64(0xfedcba98, x0);
  ASSERT_EQUAL_64(0xfedcba9800000000, dst[1]);
  ASSERT_EQUAL_64(0x0123456789abcdef, x1);
  ASSERT_EQUAL_64(0x0123456789abcdef, dst[2]);
  ASSERT_EQUAL_64(0x01234567, x2);
  ASSERT_EQUAL_64(0x0123456700000000, dst[4]);
  ASSERT_EQUAL_64(0x32, x3);
  ASSERT_EQUAL_64(0x3200, dst[3]);
  ASSERT_EQUAL_64(0x9876, x4);
  ASSERT_EQUAL_64(0x987600, dst[5]);
  ASSERT_EQUAL_64(src_base + 4, x17);
  ASSERT_EQUAL_64(dst_base + 12, x18);
  ASSERT_EQUAL_64(src_base + 8, x19);
  ASSERT_EQUAL_64(dst_base + 16, x20);
  ASSERT_EQUAL_64(src_base + 12, x21);
  ASSERT_EQUAL_64(dst_base + 36, x22);
  ASSERT_EQUAL_64(src_base + 1, x23);
  ASSERT_EQUAL_64(dst_base + 25, x24);
  ASSERT_EQUAL_64(src_base + 3, x25);
  ASSERT_EQUAL_64(dst_base + 41, x26);

  TEARDOWN();
}


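// Test post-index addressing: the access uses the unmodified base address,
// and the base register is advanced by the offset afterwards.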
TEST(ldr_str_postindex) {
  SETUP();

  uint64_t src[2] = {0xfedcba9876543210, 0x0123456789abcdef};
  uint64_t dst[6] = {0, 0, 0, 0, 0, 0};
  uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
  uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);

  START();
  __ Mov(x17, src_base + 4);
  __ Mov(x18, dst_base + 12);
  __ Mov(x19, src_base + 8);
  __ Mov(x20, dst_base + 16);
  __ Mov(x21, src_base + 8);
  __ Mov(x22, dst_base + 32);
  __ Mov(x23, src_base + 1);
  __ Mov(x24, dst_base + 25);
  __ Mov(x25, src_base + 3);
  __ Mov(x26, dst_base + 41);
  __ Ldr(w0, MemOperand(x17, 4, PostIndex));
  __ Str(w0, MemOperand(x18, 12, PostIndex));
  __ Ldr(x1, MemOperand(x19, 8, PostIndex));
  __ Str(x1, MemOperand(x20, 16, PostIndex));
  __ Ldr(x2, MemOperand(x21, -8, PostIndex));
  __ Str(x2, MemOperand(x22, -32, PostIndex));
  __ Ldrb(w3, MemOperand(x23, 1, PostIndex));
  __ Strb(w3, MemOperand(x24, 5, PostIndex));
  __ Ldrh(w4, MemOperand(x25, -3, PostIndex));
  __ Strh(w4, MemOperand(x26, -41, PostIndex));
  END();

  RUN();

  ASSERT_EQUAL_64(0xfedcba98, x0);
  ASSERT_EQUAL_64(0xfedcba9800000000, dst[1]);
  ASSERT_EQUAL_64(0x0123456789abcdef, x1);
  ASSERT_EQUAL_64(0x0123456789abcdef, dst[2]);
  ASSERT_EQUAL_64(0x0123456789abcdef, x2);
  ASSERT_EQUAL_64(0x0123456789abcdef, dst[4]);
  ASSERT_EQUAL_64(0x32, x3);
  ASSERT_EQUAL_64(0x3200, dst[3]);
  ASSERT_EQUAL_64(0x9876, x4);
  ASSERT_EQUAL_64(0x987600, dst[5]);
  ASSERT_EQUAL_64(src_base + 8, x17);
  ASSERT_EQUAL_64(dst_base + 24, x18);
  ASSERT_EQUAL_64(src_base + 16, x19);
  ASSERT_EQUAL_64(dst_base + 32, x20);
  ASSERT_EQUAL_64(src_base, x21);
  ASSERT_EQUAL_64(dst_base, x22);
  ASSERT_EQUAL_64(src_base + 2, x23);
  ASSERT_EQUAL_64(dst_base + 30, x24);
  ASSERT_EQUAL_64(src_base, x25);
  ASSERT_EQUAL_64(dst_base, x26);

  TEARDOWN();
}


TEST(ldr_str_largeindex) {
  SETUP();

  // This value won't fit in the immediate offset field of ldr/str instructions.
  int largeoffset = 0xabcdef;

  int64_t data[3] = { 0x1122334455667788, 0, 0 };
  uint64_t base_addr = reinterpret_cast<uintptr_t>(data);
  uint64_t drifted_addr = base_addr - largeoffset;

  // This test checks that we can use large immediate offsets with the
  // PreIndex and PostIndex addressing modes of the MacroAssembler Ldr/Str
  // instructions.

  START();
  __ Mov(x19, drifted_addr);
  __ Ldr(x0, MemOperand(x19, largeoffset, PreIndex));

  __ Mov(x20, base_addr);
  __ Ldr(x1, MemOperand(x20, largeoffset, PostIndex));

  __ Mov(x21, drifted_addr);
  __ Str(x0, MemOperand(x21, largeoffset + 8, PreIndex));

  __ Mov(x22, base_addr + 16);
  __ Str(x0, MemOperand(x22, largeoffset, PostIndex));
  END();

  RUN();

  ASSERT_EQUAL_64(0x1122334455667788, data[0]);
  ASSERT_EQUAL_64(0x1122334455667788, data[1]);
  ASSERT_EQUAL_64(0x1122334455667788, data[2]);
  ASSERT_EQUAL_64(0x1122334455667788, x0);
  ASSERT_EQUAL_64(0x1122334455667788, x1);

  ASSERT_EQUAL_64(base_addr, x19);
  ASSERT_EQUAL_64(base_addr + largeoffset, x20);
  ASSERT_EQUAL_64(base_addr + 8, x21);
  ASSERT_EQUAL_64(base_addr + 16 + largeoffset, x22);

  TEARDOWN();
}


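// Test the sign-extending loads (ldrsb, ldrsh, ldrsw) into both W and X
// destinations.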
TEST(load_signed) {
  SETUP();

  uint32_t src[2] = {0x80008080, 0x7fff7f7f};
  uintptr_t src_base = reinterpret_cast<uintptr_t>(src);

  START();
  __ Mov(x24, src_base);
  __ Ldrsb(w0, MemOperand(x24));
  __ Ldrsb(w1, MemOperand(x24, 4));
  __ Ldrsh(w2, MemOperand(x24));
  __ Ldrsh(w3, MemOperand(x24, 4));
  __ Ldrsb(x4, MemOperand(x24));
  __ Ldrsb(x5, MemOperand(x24, 4));
  __ Ldrsh(x6, MemOperand(x24));
  __ Ldrsh(x7, MemOperand(x24, 4));
  __ Ldrsw(x8, MemOperand(x24));
  __ Ldrsw(x9, MemOperand(x24, 4));
  END();

  RUN();

  ASSERT_EQUAL_64(0xffffff80, x0);
  ASSERT_EQUAL_64(0x0000007f, x1);
  ASSERT_EQUAL_64(0xffff8080, x2);
  ASSERT_EQUAL_64(0x00007f7f, x3);
  ASSERT_EQUAL_64(0xffffffffffffff80, x4);
  ASSERT_EQUAL_64(0x000000000000007f, x5);
  ASSERT_EQUAL_64(0xffffffffffff8080, x6);
  ASSERT_EQUAL_64(0x0000000000007f7f, x7);
  ASSERT_EQUAL_64(0xffffffff80008080, x8);
  ASSERT_EQUAL_64(0x000000007fff7f7f, x9);

  TEARDOWN();
}


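// Test register-offset addressing, including sign-extended (SXTW) offsets and
// scaled (shifted) index forms.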
TEST(load_store_regoffset) {
  SETUP();

  uint32_t src[3] = {1, 2, 3};
  uint32_t dst[4] = {0, 0, 0, 0};
  uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
  uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);

  START();
  __ Mov(x16, src_base);
  __ Mov(x17, dst_base);
  __ Mov(x18, src_base + 3 * sizeof(src[0]));
  __ Mov(x19, dst_base + 3 * sizeof(dst[0]));
  __ Mov(x20, dst_base + 4 * sizeof(dst[0]));
  __ Mov(x24, 0);
  __ Mov(x25, 4);
  __ Mov(x26, -4);
  __ Mov(x27, 0xfffffffc);  // 32-bit -4.
  __ Mov(x28, 0xfffffffe);  // 32-bit -2.
  __ Mov(x29, 0xffffffff);  // 32-bit -1.

  __ Ldr(w0, MemOperand(x16, x24));
  __ Ldr(x1, MemOperand(x16, x25));
  __ Ldr(w2, MemOperand(x18, x26));
  __ Ldr(w3, MemOperand(x18, x27, SXTW));
  __ Ldr(w4, MemOperand(x18, x28, SXTW, 2));
  __ Str(w0, MemOperand(x17, x24));
  __ Str(x1, MemOperand(x17, x25));
  __ Str(w2, MemOperand(x20, x29, SXTW, 2));
  END();

  RUN();

  ASSERT_EQUAL_64(1, x0);
  ASSERT_EQUAL_64(0x0000000300000002, x1);
  ASSERT_EQUAL_64(3, x2);
  ASSERT_EQUAL_64(3, x3);
  ASSERT_EQUAL_64(2, x4);
  ASSERT_EQUAL_32(1, dst[0]);
  ASSERT_EQUAL_32(2, dst[1]);
  ASSERT_EQUAL_32(3, dst[2]);
  ASSERT_EQUAL_32(3, dst[3]);

  TEARDOWN();
}


TEST(load_store_float) {
  SETUP();

  float src[3] = {1.0, 2.0, 3.0};
  float dst[3] = {0.0, 0.0, 0.0};
  uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
  uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);

  START();
  __ Mov(x17, src_base);
  __ Mov(x18, dst_base);
  __ Mov(x19, src_base);
  __ Mov(x20, dst_base);
  __ Mov(x21, src_base);
  __ Mov(x22, dst_base);
  __ Ldr(s0, MemOperand(x17, sizeof(src[0])));
  __ Str(s0, MemOperand(x18, sizeof(dst[0]), PostIndex));
  __ Ldr(s1, MemOperand(x19, sizeof(src[0]), PostIndex));
  __ Str(s1, MemOperand(x20, 2 * sizeof(dst[0]), PreIndex));
  __ Ldr(s2, MemOperand(x21, 2 * sizeof(src[0]), PreIndex));
  __ Str(s2, MemOperand(x22, sizeof(dst[0])));
  END();

  RUN();

  ASSERT_EQUAL_FP32(2.0, s0);
  ASSERT_EQUAL_FP32(2.0, dst[0]);
  ASSERT_EQUAL_FP32(1.0, s1);
  ASSERT_EQUAL_FP32(1.0, dst[2]);
  ASSERT_EQUAL_FP32(3.0, s2);
  ASSERT_EQUAL_FP32(3.0, dst[1]);
  ASSERT_EQUAL_64(src_base, x17);
  ASSERT_EQUAL_64(dst_base + sizeof(dst[0]), x18);
  ASSERT_EQUAL_64(src_base + sizeof(src[0]), x19);
  ASSERT_EQUAL_64(dst_base + 2 * sizeof(dst[0]), x20);
  ASSERT_EQUAL_64(src_base + 2 * sizeof(src[0]), x21);
  ASSERT_EQUAL_64(dst_base, x22);

  TEARDOWN();
}


TEST(load_store_double) {
  SETUP();

  double src[3] = {1.0, 2.0, 3.0};
  double dst[3] = {0.0, 0.0, 0.0};
  uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
  uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);

  START();
  __ Mov(x17, src_base);
  __ Mov(x18, dst_base);
  __ Mov(x19, src_base);
  __ Mov(x20, dst_base);
  __ Mov(x21, src_base);
  __ Mov(x22, dst_base);
  __ Ldr(d0, MemOperand(x17, sizeof(src[0])));
  __ Str(d0, MemOperand(x18, sizeof(dst[0]), PostIndex));
  __ Ldr(d1, MemOperand(x19, sizeof(src[0]), PostIndex));
  __ Str(d1, MemOperand(x20, 2 * sizeof(dst[0]), PreIndex));
  __ Ldr(d2, MemOperand(x21, 2 * sizeof(src[0]), PreIndex));
  __ Str(d2, MemOperand(x22, sizeof(dst[0])));
  END();

  RUN();

  ASSERT_EQUAL_FP64(2.0, d0);
  ASSERT_EQUAL_FP64(2.0, dst[0]);
  ASSERT_EQUAL_FP64(1.0, d1);
  ASSERT_EQUAL_FP64(1.0, dst[2]);
  ASSERT_EQUAL_FP64(3.0, d2);
  ASSERT_EQUAL_FP64(3.0, dst[1]);
  ASSERT_EQUAL_64(src_base, x17);
  ASSERT_EQUAL_64(dst_base + sizeof(dst[0]), x18);
  ASSERT_EQUAL_64(src_base + sizeof(src[0]), x19);
  ASSERT_EQUAL_64(dst_base + 2 * sizeof(dst[0]), x20);
  ASSERT_EQUAL_64(src_base + 2 * sizeof(src[0]), x21);
  ASSERT_EQUAL_64(dst_base, x22);

  TEARDOWN();
}


TEST(ldp_stp_float) {
  SETUP();

  float src[2] = {1.0, 2.0};
  float dst[3] = {0.0, 0.0, 0.0};
  uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
  uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);

  START();
  __ Mov(x16, src_base);
  __ Mov(x17, dst_base);
  __ Ldp(s31, s0, MemOperand(x16, 2 * sizeof(src[0]), PostIndex));
  __ Stp(s0, s31, MemOperand(x17, sizeof(dst[1]), PreIndex));
  END();

  RUN();

  ASSERT_EQUAL_FP32(1.0, s31);
  ASSERT_EQUAL_FP32(2.0, s0);
  ASSERT_EQUAL_FP32(0.0, dst[0]);
  ASSERT_EQUAL_FP32(2.0, dst[1]);
  ASSERT_EQUAL_FP32(1.0, dst[2]);
  ASSERT_EQUAL_64(src_base + 2 * sizeof(src[0]), x16);
  ASSERT_EQUAL_64(dst_base + sizeof(dst[1]), x17);

  TEARDOWN();
}


TEST(ldp_stp_double) {
  SETUP();

  double src[2] = {1.0, 2.0};
  double dst[3] = {0.0, 0.0, 0.0};
  uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
  uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);

  START();
  __ Mov(x16, src_base);
  __ Mov(x17, dst_base);
  __ Ldp(d31, d0, MemOperand(x16, 2 * sizeof(src[0]), PostIndex));
  __ Stp(d0, d31, MemOperand(x17, sizeof(dst[1]), PreIndex));
  END();

  RUN();

  ASSERT_EQUAL_FP64(1.0, d31);
  ASSERT_EQUAL_FP64(2.0, d0);
  ASSERT_EQUAL_FP64(0.0, dst[0]);
  ASSERT_EQUAL_FP64(2.0, dst[1]);
  ASSERT_EQUAL_FP64(1.0, dst[2]);
  ASSERT_EQUAL_64(src_base + 2 * sizeof(src[0]), x16);
  ASSERT_EQUAL_64(dst_base + sizeof(dst[1]), x17);

  TEARDOWN();
}


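// Test load/store pair with signed, scaled immediate offsets.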
TEST(ldp_stp_offset) {
  SETUP();

  uint64_t src[3] = {0x0011223344556677, 0x8899aabbccddeeff,
                     0xffeeddccbbaa9988};
  uint64_t dst[7] = {0, 0, 0, 0, 0, 0, 0};
  uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
  uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);

  START();
  __ Mov(x16, src_base);
  __ Mov(x17, dst_base);
  __ Mov(x18, src_base + 24);
  __ Mov(x19, dst_base + 56);
  __ Ldp(w0, w1, MemOperand(x16));
  __ Ldp(w2, w3, MemOperand(x16, 4));
  __ Ldp(x4, x5, MemOperand(x16, 8));
  __ Ldp(w6, w7, MemOperand(x18, -12));
  __ Ldp(x8, x9, MemOperand(x18, -16));
  __ Stp(w0, w1, MemOperand(x17));
  __ Stp(w2, w3, MemOperand(x17, 8));
  __ Stp(x4, x5, MemOperand(x17, 16));
  __ Stp(w6, w7, MemOperand(x19, -24));
  __ Stp(x8, x9, MemOperand(x19, -16));
  END();

  RUN();

  ASSERT_EQUAL_64(0x44556677, x0);
  ASSERT_EQUAL_64(0x00112233, x1);
  ASSERT_EQUAL_64(0x0011223344556677, dst[0]);
  ASSERT_EQUAL_64(0x00112233, x2);
  ASSERT_EQUAL_64(0xccddeeff, x3);
  ASSERT_EQUAL_64(0xccddeeff00112233, dst[1]);
  ASSERT_EQUAL_64(0x8899aabbccddeeff, x4);
  ASSERT_EQUAL_64(0x8899aabbccddeeff, dst[2]);
  ASSERT_EQUAL_64(0xffeeddccbbaa9988, x5);
  ASSERT_EQUAL_64(0xffeeddccbbaa9988, dst[3]);
  ASSERT_EQUAL_64(0x8899aabb, x6);
  ASSERT_EQUAL_64(0xbbaa9988, x7);
  ASSERT_EQUAL_64(0xbbaa99888899aabb, dst[4]);
  ASSERT_EQUAL_64(0x8899aabbccddeeff, x8);
  ASSERT_EQUAL_64(0x8899aabbccddeeff, dst[5]);
  ASSERT_EQUAL_64(0xffeeddccbbaa9988, x9);
  ASSERT_EQUAL_64(0xffeeddccbbaa9988, dst[6]);
  ASSERT_EQUAL_64(src_base, x16);
  ASSERT_EQUAL_64(dst_base, x17);
  ASSERT_EQUAL_64(src_base + 24, x18);
  ASSERT_EQUAL_64(dst_base + 56, x19);

  TEARDOWN();
}


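// Non-temporal pair accesses (ldnp/stnp) behave like ldp/stp but carry a hint
// that the data is unlikely to be reused, so results should be identical.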
TEST(ldnp_stnp_offset) {
  SETUP();

  uint64_t src[3] = {0x0011223344556677, 0x8899aabbccddeeff,
                     0xffeeddccbbaa9988};
  uint64_t dst[7] = {0, 0, 0, 0, 0, 0, 0};
  uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
  uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);

  START();
  __ Mov(x16, src_base);
  __ Mov(x17, dst_base);
  __ Mov(x18, src_base + 24);
  __ Mov(x19, dst_base + 56);
  __ Ldnp(w0, w1, MemOperand(x16));
  __ Ldnp(w2, w3, MemOperand(x16, 4));
  __ Ldnp(x4, x5, MemOperand(x16, 8));
  __ Ldnp(w6, w7, MemOperand(x18, -12));
  __ Ldnp(x8, x9, MemOperand(x18, -16));
  __ Stnp(w0, w1, MemOperand(x17));
  __ Stnp(w2, w3, MemOperand(x17, 8));
  __ Stnp(x4, x5, MemOperand(x17, 16));
  __ Stnp(w6, w7, MemOperand(x19, -24));
  __ Stnp(x8, x9, MemOperand(x19, -16));
  END();

  RUN();

  ASSERT_EQUAL_64(0x44556677, x0);
  ASSERT_EQUAL_64(0x00112233, x1);
  ASSERT_EQUAL_64(0x0011223344556677, dst[0]);
  ASSERT_EQUAL_64(0x00112233, x2);
  ASSERT_EQUAL_64(0xccddeeff, x3);
  ASSERT_EQUAL_64(0xccddeeff00112233, dst[1]);
  ASSERT_EQUAL_64(0x8899aabbccddeeff, x4);
  ASSERT_EQUAL_64(0x8899aabbccddeeff, dst[2]);
  ASSERT_EQUAL_64(0xffeeddccbbaa9988, x5);
  ASSERT_EQUAL_64(0xffeeddccbbaa9988, dst[3]);
  ASSERT_EQUAL_64(0x8899aabb, x6);
  ASSERT_EQUAL_64(0xbbaa9988, x7);
  ASSERT_EQUAL_64(0xbbaa99888899aabb, dst[4]);
  ASSERT_EQUAL_64(0x8899aabbccddeeff, x8);
  ASSERT_EQUAL_64(0x8899aabbccddeeff, dst[5]);
  ASSERT_EQUAL_64(0xffeeddccbbaa9988, x9);
  ASSERT_EQUAL_64(0xffeeddccbbaa9988, dst[6]);
  ASSERT_EQUAL_64(src_base, x16);
  ASSERT_EQUAL_64(dst_base, x17);
  ASSERT_EQUAL_64(src_base + 24, x18);
  ASSERT_EQUAL_64(dst_base + 56, x19);

  TEARDOWN();
}


TEST(ldp_stp_preindex) {
  SETUP();

  uint64_t src[3] = {0x0011223344556677, 0x8899aabbccddeeff,
                     0xffeeddccbbaa9988};
  uint64_t dst[5] = {0, 0, 0, 0, 0};
  uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
  uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);

  START();
  __ Mov(x16, src_base);
  __ Mov(x17, dst_base);
  __ Mov(x18, dst_base + 16);
  __ Ldp(w0, w1, MemOperand(x16, 4, PreIndex));
  __ Mov(x19, x16);
  __ Ldp(w2, w3, MemOperand(x16, -4, PreIndex));
  __ Stp(w2, w3, MemOperand(x17, 4, PreIndex));
  __ Mov(x20, x17);
  __ Stp(w0, w1, MemOperand(x17, -4, PreIndex));
  __ Ldp(x4, x5, MemOperand(x16, 8, PreIndex));
  __ Mov(x21, x16);
  __ Ldp(x6, x7, MemOperand(x16, -8, PreIndex));
  __ Stp(x7, x6, MemOperand(x18, 8, PreIndex));
  __ Mov(x22, x18);
  __ Stp(x5, x4, MemOperand(x18, -8, PreIndex));
  END();

  RUN();

  ASSERT_EQUAL_64(0x00112233, x0);
  ASSERT_EQUAL_64(0xccddeeff, x1);
  ASSERT_EQUAL_64(0x44556677, x2);
  ASSERT_EQUAL_64(0x00112233, x3);
  ASSERT_EQUAL_64(0xccddeeff00112233, dst[0]);
  ASSERT_EQUAL_64(0x0000000000112233, dst[1]);
  ASSERT_EQUAL_64(0x8899aabbccddeeff, x4);
  ASSERT_EQUAL_64(0xffeeddccbbaa9988, x5);
  ASSERT_EQUAL_64(0x0011223344556677, x6);
  ASSERT_EQUAL_64(0x8899aabbccddeeff, x7);
  ASSERT_EQUAL_64(0xffeeddccbbaa9988, dst[2]);
  ASSERT_EQUAL_64(0x8899aabbccddeeff, dst[3]);
  ASSERT_EQUAL_64(0x0011223344556677, dst[4]);
  ASSERT_EQUAL_64(src_base, x16);
  ASSERT_EQUAL_64(dst_base, x17);
  ASSERT_EQUAL_64(dst_base + 16, x18);
  ASSERT_EQUAL_64(src_base + 4, x19);
  ASSERT_EQUAL_64(dst_base + 4, x20);
  ASSERT_EQUAL_64(src_base + 8, x21);
  ASSERT_EQUAL_64(dst_base + 24, x22);

  TEARDOWN();
}


TEST(ldp_stp_postindex) {
  SETUP();

  uint64_t src[4] = {0x0011223344556677, 0x8899aabbccddeeff,
                     0xffeeddccbbaa9988, 0x7766554433221100};
  uint64_t dst[5] = {0, 0, 0, 0, 0};
  uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
  uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);

  START();
  __ Mov(x16, src_base);
  __ Mov(x17, dst_base);
  __ Mov(x18, dst_base + 16);
  __ Ldp(w0, w1, MemOperand(x16, 4, PostIndex));
  __ Mov(x19, x16);
  __ Ldp(w2, w3, MemOperand(x16, -4, PostIndex));
  __ Stp(w2, w3, MemOperand(x17, 4, PostIndex));
  __ Mov(x20, x17);
  __ Stp(w0, w1, MemOperand(x17, -4, PostIndex));
  __ Ldp(x4, x5, MemOperand(x16, 8, PostIndex));
  __ Mov(x21, x16);
  __ Ldp(x6, x7, MemOperand(x16, -8, PostIndex));
  __ Stp(x7, x6, MemOperand(x18, 8, PostIndex));
  __ Mov(x22, x18);
  __ Stp(x5, x4, MemOperand(x18, -8, PostIndex));
  END();

  RUN();

  ASSERT_EQUAL_64(0x44556677, x0);
  ASSERT_EQUAL_64(0x00112233, x1);
  ASSERT_EQUAL_64(0x00112233, x2);
  ASSERT_EQUAL_64(0xccddeeff, x3);
  ASSERT_EQUAL_64(0x4455667700112233, dst[0]);
  ASSERT_EQUAL_64(0x0000000000112233, dst[1]);
  ASSERT_EQUAL_64(0x0011223344556677, x4);
  ASSERT_EQUAL_64(0x8899aabbccddeeff, x5);
  ASSERT_EQUAL_64(0x8899aabbccddeeff, x6);
  ASSERT_EQUAL_64(0xffeeddccbbaa9988, x7);
  ASSERT_EQUAL_64(0xffeeddccbbaa9988, dst[2]);
  ASSERT_EQUAL_64(0x8899aabbccddeeff, dst[3]);
  ASSERT_EQUAL_64(0x0011223344556677, dst[4]);
  ASSERT_EQUAL_64(src_base, x16);
  ASSERT_EQUAL_64(dst_base, x17);
  ASSERT_EQUAL_64(dst_base + 16, x18);
  ASSERT_EQUAL_64(src_base + 4, x19);
  ASSERT_EQUAL_64(dst_base + 4, x20);
  ASSERT_EQUAL_64(src_base + 8, x21);
  ASSERT_EQUAL_64(dst_base + 24, x22);

  TEARDOWN();
}


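// Ldpsw loads a pair of words and sign-extends each one to 64 bits.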
TEST(ldp_sign_extend) {
  SETUP();

  uint32_t src[2] = {0x80000000, 0x7fffffff};
  uintptr_t src_base = reinterpret_cast<uintptr_t>(src);

  START();
  __ Mov(x24, src_base);
  __ Ldpsw(x0, x1, MemOperand(x24));
  END();

  RUN();

  ASSERT_EQUAL_64(0xffffffff80000000, x0);
  ASSERT_EQUAL_64(0x000000007fffffff, x1);

  TEARDOWN();
}


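// The offsets below are not multiples of the access size, so they cannot use
// the scaled-immediate encoding; the MacroAssembler is expected to emit the
// unscaled ldur/stur forms instead.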
TEST(ldur_stur) {
  SETUP();

  int64_t src[2] = {0x0123456789abcdef, 0x0123456789abcdef};
  int64_t dst[5] = {0, 0, 0, 0, 0};
  uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
  uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);

  START();
  __ Mov(x17, src_base);
  __ Mov(x18, dst_base);
  __ Mov(x19, src_base + 16);
  __ Mov(x20, dst_base + 32);
  __ Mov(x21, dst_base + 40);
  __ Ldr(w0, MemOperand(x17, 1));
  __ Str(w0, MemOperand(x18, 2));
  __ Ldr(x1, MemOperand(x17, 3));
  __ Str(x1, MemOperand(x18, 9));
  __ Ldr(w2, MemOperand(x19, -9));
  __ Str(w2, MemOperand(x20, -5));
  __ Ldrb(w3, MemOperand(x19, -1));
  __ Strb(w3, MemOperand(x21, -1));
  END();

  RUN();

  ASSERT_EQUAL_64(0x6789abcd, x0);
  ASSERT_EQUAL_64(0x00006789abcd0000, dst[0]);
  ASSERT_EQUAL_64(0xabcdef0123456789, x1);
  ASSERT_EQUAL_64(0xcdef012345678900, dst[1]);
  ASSERT_EQUAL_64(0x000000ab, dst[2]);
  ASSERT_EQUAL_64(0xabcdef01, x2);
  ASSERT_EQUAL_64(0x00abcdef01000000, dst[3]);
  ASSERT_EQUAL_64(0x00000001, x3);
  ASSERT_EQUAL_64(0x0100000000000000, dst[4]);
  ASSERT_EQUAL_64(src_base, x17);
  ASSERT_EQUAL_64(dst_base, x18);
  ASSERT_EQUAL_64(src_base + 16, x19);
  ASSERT_EQUAL_64(dst_base + 32, x20);

  TEARDOWN();
}


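// Test PC-relative literal loads: the constants are emitted into a literal
// pool and loaded with the ldr (literal) form.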
TEST(ldr_literal) {
  SETUP();

  START();
  __ Ldr(x2, 0x1234567890abcdef);
  __ Ldr(w3, 0xfedcba09);
  __ Ldr(d13, 1.234);
  __ Ldr(s25, 2.5);
  END();

  RUN();

  ASSERT_EQUAL_64(0x1234567890abcdef, x2);
  ASSERT_EQUAL_64(0xfedcba09, x3);
  ASSERT_EQUAL_FP64(1.234, d13);
  ASSERT_EQUAL_FP32(2.5, s25);

  TEARDOWN();
}


static void LdrLiteralRangeHelper(ptrdiff_t range_,
                                  LiteralPoolEmitOption option,
                                  bool expect_dump) {
  VIXL_ASSERT(range_ > 0);
  SETUP_SIZE(range_ + 1024);

  Label label_1, label_2;

  size_t range = static_cast<size_t>(range_);
  size_t code_size = 0;
  size_t pool_guard_size;

  if (option == NoJumpRequired) {
    // Space for an explicit branch.
    pool_guard_size = sizeof(Instr);
  } else {
    pool_guard_size = 0;
  }

  START();
  // Force a pool dump so the pool starts off empty.
  __ EmitLiteralPool(JumpRequired);
  ASSERT_LITERAL_POOL_SIZE(0);

  __ Ldr(x0, 0x1234567890abcdef);
  __ Ldr(w1, 0xfedcba09);
  __ Ldr(d0, 1.234);
  __ Ldr(s1, 2.5);
  ASSERT_LITERAL_POOL_SIZE(24);

  code_size += 4 * sizeof(Instr);

  // Check that the requested range (allowing space for a branch over the pool)
  // can be handled by this test.
  VIXL_ASSERT((code_size + pool_guard_size) <= range);

  // Emit NOPs up to 'range', leaving space for the pool guard.
  while ((code_size + pool_guard_size) < range) {
    __ Nop();
    code_size += sizeof(Instr);
  }

  // Emit the guard sequence before the literal pool.
  if (option == NoJumpRequired) {
    __ B(&label_1);
    code_size += sizeof(Instr);
  }

  VIXL_ASSERT(code_size == range);
  ASSERT_LITERAL_POOL_SIZE(24);

  // Possibly generate a literal pool.
  __ CheckLiteralPool(option);
  __ Bind(&label_1);
  if (expect_dump) {
    ASSERT_LITERAL_POOL_SIZE(0);
  } else {
    ASSERT_LITERAL_POOL_SIZE(24);
  }

  // Force a pool flush to check that a second pool functions correctly.
  __ EmitLiteralPool(JumpRequired);
  ASSERT_LITERAL_POOL_SIZE(0);

  // These loads should be after the pool (and will require a new one).
  __ Ldr(x4, 0x34567890abcdef12);
  __ Ldr(w5, 0xdcba09fe);
  __ Ldr(d4, 123.4);
  __ Ldr(s5, 250.0);
  ASSERT_LITERAL_POOL_SIZE(24);
  END();

  RUN();

  // Check that the literals were loaded correctly.
  ASSERT_EQUAL_64(0x1234567890abcdef, x0);
  ASSERT_EQUAL_64(0xfedcba09, x1);
  ASSERT_EQUAL_FP64(1.234, d0);
  ASSERT_EQUAL_FP32(2.5, s1);
  ASSERT_EQUAL_64(0x34567890abcdef12, x4);
  ASSERT_EQUAL_64(0xdcba09fe, x5);
  ASSERT_EQUAL_FP64(123.4, d4);
  ASSERT_EQUAL_FP32(250.0, s5);

  TEARDOWN();
}


TEST(ldr_literal_range_1) {
  LdrLiteralRangeHelper(kRecommendedLiteralPoolRange,
                        NoJumpRequired,
                        true);
}


TEST(ldr_literal_range_2) {
  LdrLiteralRangeHelper(kRecommendedLiteralPoolRange - sizeof(Instr),
                        NoJumpRequired,
                        false);
}


TEST(ldr_literal_range_3) {
  LdrLiteralRangeHelper(2 * kRecommendedLiteralPoolRange,
                        JumpRequired,
                        true);
}


TEST(ldr_literal_range_4) {
  LdrLiteralRangeHelper(2 * kRecommendedLiteralPoolRange - sizeof(Instr),
                        JumpRequired,
                        false);
}


TEST(ldr_literal_range_5) {
  LdrLiteralRangeHelper(kLiteralPoolCheckInterval,
                        JumpRequired,
                        false);
}


TEST(ldr_literal_range_6) {
  LdrLiteralRangeHelper(kLiteralPoolCheckInterval - sizeof(Instr),
                        JumpRequired,
                        false);
}


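// Arithmetic immediates are encoded as a 12-bit unsigned value, optionally
// shifted left by 12 bits. Every immediate used below fits one of those two
// forms, so no scratch register should be needed.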
TEST(add_sub_imm) {
  SETUP();

  START();
  __ Mov(x0, 0x0);
  __ Mov(x1, 0x1111);
  __ Mov(x2, 0xffffffffffffffff);
  __ Mov(x3, 0x8000000000000000);

  __ Add(x10, x0, Operand(0x123));
  __ Add(x11, x1, Operand(0x122000));
  __ Add(x12, x0, Operand(0xabc << 12));
  __ Add(x13, x2, Operand(1));

  __ Add(w14, w0, Operand(0x123));
  __ Add(w15, w1, Operand(0x122000));
  __ Add(w16, w0, Operand(0xabc << 12));
  __ Add(w17, w2, Operand(1));

  __ Sub(x20, x0, Operand(0x1));
  __ Sub(x21, x1, Operand(0x111));
  __ Sub(x22, x1, Operand(0x1 << 12));
  __ Sub(x23, x3, Operand(1));

  __ Sub(w24, w0, Operand(0x1));
  __ Sub(w25, w1, Operand(0x111));
  __ Sub(w26, w1, Operand(0x1 << 12));
  __ Sub(w27, w3, Operand(1));
  END();

  RUN();

  ASSERT_EQUAL_64(0x123, x10);
  ASSERT_EQUAL_64(0x123111, x11);
  ASSERT_EQUAL_64(0xabc000, x12);
  ASSERT_EQUAL_64(0x0, x13);

  ASSERT_EQUAL_32(0x123, w14);
  ASSERT_EQUAL_32(0x123111, w15);
  ASSERT_EQUAL_32(0xabc000, w16);
  ASSERT_EQUAL_32(0x0, w17);

  ASSERT_EQUAL_64(0xffffffffffffffff, x20);
  ASSERT_EQUAL_64(0x1000, x21);
  ASSERT_EQUAL_64(0x111, x22);
  ASSERT_EQUAL_64(0x7fffffffffffffff, x23);

  ASSERT_EQUAL_32(0xffffffff, w24);
  ASSERT_EQUAL_32(0x1000, w25);
  ASSERT_EQUAL_32(0x111, w26);
  ASSERT_EQUAL_32(0xffffffff, w27);

  TEARDOWN();
}


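// These immediates do not fit the 12-bit (optionally shifted) encoding, so
// the MacroAssembler is expected to materialise them first, roughly:
//   mov temp, #imm
//   add xd, xn, temp
// where 'temp' is a scratch register managed by the MacroAssembler.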
TEST(add_sub_wide_imm) {
  SETUP();

  START();
  __ Mov(x0, 0x0);
  __ Mov(x1, 0x1);

  __ Add(x10, x0, Operand(0x1234567890abcdef));
  __ Add(x11, x1, Operand(0xffffffff));

  __ Add(w12, w0, Operand(0x12345678));
  __ Add(w13, w1, Operand(0xffffffff));

  __ Sub(x20, x0, Operand(0x1234567890abcdef));

  __ Sub(w21, w0, Operand(0x12345678));
  END();

  RUN();

  ASSERT_EQUAL_64(0x1234567890abcdef, x10);
  ASSERT_EQUAL_64(0x100000000, x11);

  ASSERT_EQUAL_32(0x12345678, w12);
  ASSERT_EQUAL_64(0x0, x13);

  ASSERT_EQUAL_64(-0x1234567890abcdef, x20);

  ASSERT_EQUAL_32(-0x12345678, w21);

  TEARDOWN();
}


TEST(add_sub_shifted) {
  SETUP();

  START();
  __ Mov(x0, 0);
  __ Mov(x1, 0x0123456789abcdef);
  __ Mov(x2, 0xfedcba9876543210);
  __ Mov(x3, 0xffffffffffffffff);

  __ Add(x10, x1, Operand(x2));
  __ Add(x11, x0, Operand(x1, LSL, 8));
  __ Add(x12, x0, Operand(x1, LSR, 8));
  __ Add(x13, x0, Operand(x1, ASR, 8));
  __ Add(x14, x0, Operand(x2, ASR, 8));
  __ Add(w15, w0, Operand(w1, ASR, 8));
  __ Add(w18, w3, Operand(w1, ROR, 8));
  __ Add(x19, x3, Operand(x1, ROR, 8));

  __ Sub(x20, x3, Operand(x2));
  __ Sub(x21, x3, Operand(x1, LSL, 8));
  __ Sub(x22, x3, Operand(x1, LSR, 8));
  __ Sub(x23, x3, Operand(x1, ASR, 8));
  __ Sub(x24, x3, Operand(x2, ASR, 8));
  __ Sub(w25, w3, Operand(w1, ASR, 8));
  __ Sub(w26, w3, Operand(w1, ROR, 8));
  __ Sub(x27, x3, Operand(x1, ROR, 8));
  END();

  RUN();

  ASSERT_EQUAL_64(0xffffffffffffffff, x10);
  ASSERT_EQUAL_64(0x23456789abcdef00, x11);
  ASSERT_EQUAL_64(0x000123456789abcd, x12);
  ASSERT_EQUAL_64(0x000123456789abcd, x13);
  ASSERT_EQUAL_64(0xfffedcba98765432, x14);
  ASSERT_EQUAL_64(0xff89abcd, x15);
  ASSERT_EQUAL_64(0xef89abcc, x18);
  ASSERT_EQUAL_64(0xef0123456789abcc, x19);

  ASSERT_EQUAL_64(0x0123456789abcdef, x20);
  ASSERT_EQUAL_64(0xdcba9876543210ff, x21);
  ASSERT_EQUAL_64(0xfffedcba98765432, x22);
  ASSERT_EQUAL_64(0xfffedcba98765432, x23);
  ASSERT_EQUAL_64(0x000123456789abcd, x24);
  ASSERT_EQUAL_64(0x00765432, x25);
  ASSERT_EQUAL_64(0x10765432, x26);
  ASSERT_EQUAL_64(0x10fedcba98765432, x27);

  TEARDOWN();
}


TEST(add_sub_extended) {
  SETUP();

  START();
  __ Mov(x0, 0);
  __ Mov(x1, 0x0123456789abcdef);
  __ Mov(x2, 0xfedcba9876543210);
  __ Mov(w3, 0x80);

  __ Add(x10, x0, Operand(x1, UXTB, 0));
  __ Add(x11, x0, Operand(x1, UXTB, 1));
  __ Add(x12, x0, Operand(x1, UXTH, 2));
  __ Add(x13, x0, Operand(x1, UXTW, 4));

  __ Add(x14, x0, Operand(x1, SXTB, 0));
  __ Add(x15, x0, Operand(x1, SXTB, 1));
  __ Add(x16, x0, Operand(x1, SXTH, 2));
  __ Add(x17, x0, Operand(x1, SXTW, 3));
  __ Add(x18, x0, Operand(x2, SXTB, 0));
  __ Add(x19, x0, Operand(x2, SXTB, 1));
  __ Add(x20, x0, Operand(x2, SXTH, 2));
  __ Add(x21, x0, Operand(x2, SXTW, 3));

  __ Add(x22, x1, Operand(x2, SXTB, 1));
  __ Sub(x23, x1, Operand(x2, SXTB, 1));

  __ Add(w24, w1, Operand(w2, UXTB, 2));
  __ Add(w25, w0, Operand(w1, SXTB, 0));
  __ Add(w26, w0, Operand(w1, SXTB, 1));
  __ Add(w27, w2, Operand(w1, SXTW, 3));

  __ Add(w28, w0, Operand(w1, SXTW, 3));
  __ Add(x29, x0, Operand(w1, SXTW, 3));

  __ Sub(x30, x0, Operand(w3, SXTB, 1));
  END();

  RUN();

  ASSERT_EQUAL_64(0xef, x10);
  ASSERT_EQUAL_64(0x1de, x11);
  ASSERT_EQUAL_64(0x337bc, x12);
  ASSERT_EQUAL_64(0x89abcdef0, x13);

  ASSERT_EQUAL_64(0xffffffffffffffef, x14);
  ASSERT_EQUAL_64(0xffffffffffffffde, x15);
  ASSERT_EQUAL_64(0xffffffffffff37bc, x16);
  ASSERT_EQUAL_64(0xfffffffc4d5e6f78, x17);
  ASSERT_EQUAL_64(0x10, x18);
  ASSERT_EQUAL_64(0x20, x19);
  ASSERT_EQUAL_64(0xc840, x20);
  ASSERT_EQUAL_64(0x3b2a19080, x21);

  ASSERT_EQUAL_64(0x0123456789abce0f, x22);
  ASSERT_EQUAL_64(0x0123456789abcdcf, x23);

  ASSERT_EQUAL_32(0x89abce2f, w24);
  ASSERT_EQUAL_32(0xffffffef, w25);
  ASSERT_EQUAL_32(0xffffffde, w26);
  ASSERT_EQUAL_32(0xc3b2a188, w27);

  ASSERT_EQUAL_32(0x4d5e6f78, w28);
  ASSERT_EQUAL_64(0xfffffffc4d5e6f78, x29);

  ASSERT_EQUAL_64(256, x30);

  TEARDOWN();
}


TEST(add_sub_negative) {
  SETUP();

  START();
  __ Mov(x0, 0);
  __ Mov(x1, 4687);
  __ Mov(x2, 0x1122334455667788);
  __ Mov(w3, 0x11223344);
  __ Mov(w4, 400000);

  __ Add(x10, x0, -42);
  __ Add(x11, x1, -687);
  __ Add(x12, x2, -0x88);

  __ Sub(x13, x0, -600);
  __ Sub(x14, x1, -313);
  __ Sub(x15, x2, -0x555);

  __ Add(w19, w3, -0x344);
  __ Add(w20, w4, -2000);

  __ Sub(w21, w3, -0xbc);
  __ Sub(w22, w4, -2000);
  END();

  RUN();

  ASSERT_EQUAL_64(-42, x10);
  ASSERT_EQUAL_64(4000, x11);
  ASSERT_EQUAL_64(0x1122334455667700, x12);

  ASSERT_EQUAL_64(600, x13);
  ASSERT_EQUAL_64(5000, x14);
  ASSERT_EQUAL_64(0x1122334455667cdd, x15);

  ASSERT_EQUAL_32(0x11223000, w19);
  ASSERT_EQUAL_32(398000, w20);

  ASSERT_EQUAL_32(0x11223400, w21);
  ASSERT_EQUAL_32(402000, w22);

  TEARDOWN();
}


TEST(add_sub_zero) {
  SETUP();

  START();
  __ Mov(x0, 0);
  __ Mov(x1, 0);
  __ Mov(x2, 0);

  Label blob1;
  __ Bind(&blob1);
  __ Add(x0, x0, 0);
  __ Sub(x1, x1, 0);
  __ Sub(x2, x2, xzr);
  VIXL_CHECK(__ SizeOfCodeGeneratedSince(&blob1) == 0);

  Label blob2;
  __ Bind(&blob2);
  __ Add(w3, w3, 0);
  VIXL_CHECK(__ SizeOfCodeGeneratedSince(&blob2) != 0);

  Label blob3;
  __ Bind(&blob3);
  __ Sub(w3, w3, wzr);
  VIXL_CHECK(__ SizeOfCodeGeneratedSince(&blob3) != 0);

  END();

  RUN();

  ASSERT_EQUAL_64(0, x0);
  ASSERT_EQUAL_64(0, x1);
  ASSERT_EQUAL_64(0, x2);

  TEARDOWN();
}


TEST(claim_drop_zero) {
  SETUP();

  START();

  Label start;
  __ Bind(&start);
  __ Claim(Operand(0));
  __ Drop(Operand(0));
  __ Claim(Operand(xzr));
  __ Drop(Operand(xzr));
  VIXL_CHECK(__ SizeOfCodeGeneratedSince(&start) == 0);

  END();

  RUN();

  TEARDOWN();
}


TEST(neg) {
  SETUP();

  START();
  __ Mov(x0, 0xf123456789abcdef);

  // Immediate.
  __ Neg(x1, 0x123);
  __ Neg(w2, 0x123);

  // Shifted.
  __ Neg(x3, Operand(x0, LSL, 1));
  __ Neg(w4, Operand(w0, LSL, 2));
  __ Neg(x5, Operand(x0, LSR, 3));
  __ Neg(w6, Operand(w0, LSR, 4));
  __ Neg(x7, Operand(x0, ASR, 5));
  __ Neg(w8, Operand(w0, ASR, 6));

  // Extended.
  __ Neg(w9, Operand(w0, UXTB));
  __ Neg(x10, Operand(x0, SXTB, 1));
  __ Neg(w11, Operand(w0, UXTH, 2));
  __ Neg(x12, Operand(x0, SXTH, 3));
  __ Neg(w13, Operand(w0, UXTW, 4));
  __ Neg(x14, Operand(x0, SXTW, 4));
  END();

  RUN();

  ASSERT_EQUAL_64(0xfffffffffffffedd, x1);
  ASSERT_EQUAL_64(0xfffffedd, x2);
  ASSERT_EQUAL_64(0x1db97530eca86422, x3);
  ASSERT_EQUAL_64(0xd950c844, x4);
  ASSERT_EQUAL_64(0xe1db97530eca8643, x5);
  ASSERT_EQUAL_64(0xf7654322, x6);
  ASSERT_EQUAL_64(0x0076e5d4c3b2a191, x7);
  ASSERT_EQUAL_64(0x01d950c9, x8);
  ASSERT_EQUAL_64(0xffffff11, x9);
  ASSERT_EQUAL_64(0x0000000000000022, x10);
  ASSERT_EQUAL_64(0xfffcc844, x11);
  ASSERT_EQUAL_64(0x0000000000019088, x12);
  ASSERT_EQUAL_64(0x65432110, x13);
  ASSERT_EQUAL_64(0x0000000765432110, x14);

  TEARDOWN();
}


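// In the instruction set, adc and sbc accept only a plain register operand;
// the shifted operands used below must therefore be computed separately by
// the MacroAssembler before the add or subtract with carry.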
TEST(adc_sbc_shift) {
  SETUP();

  START();
  __ Mov(x0, 0);
  __ Mov(x1, 1);
  __ Mov(x2, 0x0123456789abcdef);
  __ Mov(x3, 0xfedcba9876543210);
  __ Mov(x4, 0xffffffffffffffff);

  // Clear the C flag.
  __ Adds(x0, x0, Operand(0));

  __ Adc(x5, x2, Operand(x3));
  __ Adc(x6, x0, Operand(x1, LSL, 60));
  __ Sbc(x7, x4, Operand(x3, LSR, 4));
  __ Adc(x8, x2, Operand(x3, ASR, 4));
  __ Adc(x9, x2, Operand(x3, ROR, 8));

  __ Adc(w10, w2, Operand(w3));
  __ Adc(w11, w0, Operand(w1, LSL, 30));
  __ Sbc(w12, w4, Operand(w3, LSR, 4));
  __ Adc(w13, w2, Operand(w3, ASR, 4));
  __ Adc(w14, w2, Operand(w3, ROR, 8));

  // Set the C flag.
  __ Cmp(w0, Operand(w0));

  __ Adc(x18, x2, Operand(x3));
  __ Adc(x19, x0, Operand(x1, LSL, 60));
  __ Sbc(x20, x4, Operand(x3, LSR, 4));
  __ Adc(x21, x2, Operand(x3, ASR, 4));
  __ Adc(x22, x2, Operand(x3, ROR, 8));

  __ Adc(w23, w2, Operand(w3));
  __ Adc(w24, w0, Operand(w1, LSL, 30));
  __ Sbc(w25, w4, Operand(w3, LSR, 4));
  __ Adc(w26, w2, Operand(w3, ASR, 4));
  __ Adc(w27, w2, Operand(w3, ROR, 8));
  END();

  RUN();

  ASSERT_EQUAL_64(0xffffffffffffffff, x5);
  ASSERT_EQUAL_64(INT64_C(1) << 60, x6);
  ASSERT_EQUAL_64(0xf0123456789abcdd, x7);
  ASSERT_EQUAL_64(0x0111111111111110, x8);
  ASSERT_EQUAL_64(0x1222222222222221, x9);

  ASSERT_EQUAL_32(0xffffffff, w10);
  ASSERT_EQUAL_32(INT32_C(1) << 30, w11);
  ASSERT_EQUAL_32(0xf89abcdd, w12);
  ASSERT_EQUAL_32(0x91111110, w13);
  ASSERT_EQUAL_32(0x9a222221, w14);

  ASSERT_EQUAL_64(0xffffffffffffffff + 1, x18);
  ASSERT_EQUAL_64((INT64_C(1) << 60) + 1, x19);
  ASSERT_EQUAL_64(0xf0123456789abcdd + 1, x20);
  ASSERT_EQUAL_64(0x0111111111111110 + 1, x21);
  ASSERT_EQUAL_64(0x1222222222222221 + 1, x22);

  ASSERT_EQUAL_32(0xffffffff + 1, w23);
  ASSERT_EQUAL_32((INT32_C(1) << 30) + 1, w24);
  ASSERT_EQUAL_32(0xf89abcdd + 1, w25);
  ASSERT_EQUAL_32(0x91111110 + 1, w26);
  ASSERT_EQUAL_32(0x9a222221 + 1, w27);

  // Check that adc correctly sets the condition flags.
  START();
  __ Mov(x0, 1);
  __ Mov(x1, 0xffffffffffffffff);
  // Clear the C flag.
  __ Adds(x0, x0, Operand(0));
  __ Adcs(x10, x0, Operand(x1));
  END();

  RUN();

  ASSERT_EQUAL_NZCV(ZCFlag);
  ASSERT_EQUAL_64(0, x10);

  START();
  __ Mov(x0, 1);
  __ Mov(x1, 0x8000000000000000);
  // Clear the C flag.
  __ Adds(x0, x0, Operand(0));
  __ Adcs(x10, x0, Operand(x1, ASR, 63));
  END();

  RUN();

  ASSERT_EQUAL_NZCV(ZCFlag);
  ASSERT_EQUAL_64(0, x10);

  START();
  __ Mov(x0, 0x10);
  __ Mov(x1, 0x07ffffffffffffff);
  // Clear the C flag.
  __ Adds(x0, x0, Operand(0));
  __ Adcs(x10, x0, Operand(x1, LSL, 4));
  END();

  RUN();

  ASSERT_EQUAL_NZCV(NVFlag);
  ASSERT_EQUAL_64(0x8000000000000000, x10);

  // Check that sbc correctly sets the condition flags.
  START();
  __ Mov(x0, 0);
  __ Mov(x1, 0xffffffffffffffff);
  // Clear the C flag.
  __ Adds(x0, x0, Operand(0));
  __ Sbcs(x10, x0, Operand(x1));
  END();

  RUN();

  ASSERT_EQUAL_NZCV(ZFlag);
  ASSERT_EQUAL_64(0, x10);

  START();
  __ Mov(x0, 1);
  __ Mov(x1, 0xffffffffffffffff);
  // Clear the C flag.
  __ Adds(x0, x0, Operand(0));
  __ Sbcs(x10, x0, Operand(x1, LSR, 1));
  END();

  RUN();

  ASSERT_EQUAL_NZCV(NFlag);
  ASSERT_EQUAL_64(0x8000000000000001, x10);

  START();
  __ Mov(x0, 0);
  // Clear the C flag.
  __ Adds(x0, x0, Operand(0));
  __ Sbcs(x10, x0, Operand(0xffffffffffffffff));
  END();

  RUN();

  ASSERT_EQUAL_NZCV(ZFlag);
  ASSERT_EQUAL_64(0, x10);

  START();
  __ Mov(w0, 0x7fffffff);
  // Clear the C flag.
  __ Adds(x0, x0, Operand(0));
  __ Ngcs(w10, w0);
  END();

  RUN();

  ASSERT_EQUAL_NZCV(NFlag);
  ASSERT_EQUAL_64(0x80000000, x10);

  START();
  // Clear the C flag.
  __ Adds(x0, x0, Operand(0));
  __ Ngcs(x10, 0x7fffffffffffffff);
  END();

  RUN();

  ASSERT_EQUAL_NZCV(NFlag);
  ASSERT_EQUAL_64(0x8000000000000000, x10);

  START();
  __ Mov(x0, 0);
  // Set the C flag.
  __ Cmp(x0, Operand(x0));
  __ Sbcs(x10, x0, Operand(1));
  END();

  RUN();

  ASSERT_EQUAL_NZCV(NFlag);
  ASSERT_EQUAL_64(0xffffffffffffffff, x10);

  START();
  __ Mov(x0, 0);
  // Set the C flag.
  __ Cmp(x0, Operand(x0));
  __ Ngcs(x10, 0x7fffffffffffffff);
  END();

  RUN();

  ASSERT_EQUAL_NZCV(NFlag);
  ASSERT_EQUAL_64(0x8000000000000001, x10);

  TEARDOWN();
}


TEST(adc_sbc_extend) {
  SETUP();

  START();
  // Clear the C flag.
  __ Adds(x0, x0, Operand(0));

  __ Mov(x0, 0);
  __ Mov(x1, 1);
  __ Mov(x2, 0x0123456789abcdef);

  __ Adc(x10, x1, Operand(w2, UXTB, 1));
  __ Adc(x11, x1, Operand(x2, SXTH, 2));
  __ Sbc(x12, x1, Operand(w2, UXTW, 4));
  __ Adc(x13, x1, Operand(x2, UXTX, 4));

  __ Adc(w14, w1, Operand(w2, UXTB, 1));
  __ Adc(w15, w1, Operand(w2, SXTH, 2));
  __ Adc(w9, w1, Operand(w2, UXTW, 4));

  // Set the C flag.
  __ Cmp(w0, Operand(w0));

  __ Adc(x20, x1, Operand(w2, UXTB, 1));
  __ Adc(x21, x1, Operand(x2, SXTH, 2));
  __ Sbc(x22, x1, Operand(w2, UXTW, 4));
  __ Adc(x23, x1, Operand(x2, UXTX, 4));

  __ Adc(w24, w1, Operand(w2, UXTB, 1));
  __ Adc(w25, w1, Operand(w2, SXTH, 2));
  __ Adc(w26, w1, Operand(w2, UXTW, 4));
  END();

  RUN();

  ASSERT_EQUAL_64(0x1df, x10);
  ASSERT_EQUAL_64(0xffffffffffff37bd, x11);
  ASSERT_EQUAL_64(0xfffffff765432110, x12);
  ASSERT_EQUAL_64(0x123456789abcdef1, x13);

  ASSERT_EQUAL_32(0x1df, w14);
  ASSERT_EQUAL_32(0xffff37bd, w15);
  ASSERT_EQUAL_32(0x9abcdef1, w9);

  ASSERT_EQUAL_64(0x1df + 1, x20);
  ASSERT_EQUAL_64(0xffffffffffff37bd + 1, x21);
  ASSERT_EQUAL_64(0xfffffff765432110 + 1, x22);
  ASSERT_EQUAL_64(0x123456789abcdef1 + 1, x23);

  ASSERT_EQUAL_32(0x1df + 1, w24);
  ASSERT_EQUAL_32(0xffff37bd + 1, w25);
  ASSERT_EQUAL_32(0x9abcdef1 + 1, w26);

  // Check that adc correctly sets the condition flags.
  START();
  __ Mov(x0, 0xff);
  __ Mov(x1, 0xffffffffffffffff);
  // Clear the C flag.
  __ Adds(x0, x0, Operand(0));
  __ Adcs(x10, x0, Operand(x1, SXTX, 1));
  END();

  RUN();

  ASSERT_EQUAL_NZCV(CFlag);

  START();
  __ Mov(x0, 0x7fffffffffffffff);
  __ Mov(x1, 1);
  // Clear the C flag.
  __ Adds(x0, x0, Operand(0));
  __ Adcs(x10, x0, Operand(x1, UXTB, 2));
  END();

  RUN();

  ASSERT_EQUAL_NZCV(NVFlag);

  START();
  __ Mov(x0, 0x7fffffffffffffff);
  // Clear the C flag.
  __ Adds(x0, x0, Operand(0));
  __ Adcs(x10, x0, Operand(1));
  END();

  RUN();

  ASSERT_EQUAL_NZCV(NVFlag);

  TEARDOWN();
}


TEST(adc_sbc_wide_imm) {
  SETUP();

  START();
  __ Mov(x0, 0);

  // Clear the C flag.
  __ Adds(x0, x0, Operand(0));

  __ Adc(x7, x0, Operand(0x1234567890abcdef));
  __ Adc(w8, w0, Operand(0xffffffff));
  __ Sbc(x9, x0, Operand(0x1234567890abcdef));
  __ Sbc(w10, w0, Operand(0xffffffff));
  __ Ngc(x11, Operand(0xffffffff00000000));
  __ Ngc(w12, Operand(0xffff0000));

  // Set the C flag.
  __ Cmp(w0, Operand(w0));

  __ Adc(x18, x0, Operand(0x1234567890abcdef));
  __ Adc(w19, w0, Operand(0xffffffff));
  __ Sbc(x20, x0, Operand(0x1234567890abcdef));
  __ Sbc(w21, w0, Operand(0xffffffff));
  __ Ngc(x22, Operand(0xffffffff00000000));
  __ Ngc(w23, Operand(0xffff0000));
  END();

  RUN();

  ASSERT_EQUAL_64(0x1234567890abcdef, x7);
  ASSERT_EQUAL_64(0xffffffff, x8);
  ASSERT_EQUAL_64(0xedcba9876f543210, x9);
  ASSERT_EQUAL_64(0, x10);
  ASSERT_EQUAL_64(0xffffffff, x11);
  ASSERT_EQUAL_64(0xffff, x12);

  ASSERT_EQUAL_64(0x1234567890abcdef + 1, x18);
  ASSERT_EQUAL_64(0, x19);
  ASSERT_EQUAL_64(0xedcba9876f543211, x20);
  ASSERT_EQUAL_64(1, x21);
  ASSERT_EQUAL_64(0x0000000100000000, x22);
  ASSERT_EQUAL_64(0x0000000000010000, x23);

  TEARDOWN();
}


TEST(flags) {
  SETUP();

  START();
  __ Mov(x0, 0);
  __ Mov(x1, 0x1111111111111111);
  __ Neg(x10, Operand(x0));
  __ Neg(x11, Operand(x1));
  __ Neg(w12, Operand(w1));
  // Clear the C flag.
  __ Adds(x0, x0, Operand(0));
  __ Ngc(x13, Operand(x0));
  // Set the C flag.
  __ Cmp(x0, Operand(x0));
  __ Ngc(w14, Operand(w0));
  END();

  RUN();

  ASSERT_EQUAL_64(0, x10);
  ASSERT_EQUAL_64(-0x1111111111111111, x11);
  ASSERT_EQUAL_32(-0x11111111, w12);
  ASSERT_EQUAL_64(-1, x13);
  ASSERT_EQUAL_32(0, w14);

  START();
  __ Mov(x0, 0);
  __ Cmp(x0, Operand(x0));
  END();

  RUN();

  ASSERT_EQUAL_NZCV(ZCFlag);

  START();
  __ Mov(w0, 0);
  __ Cmp(w0, Operand(w0));
  END();

  RUN();

  ASSERT_EQUAL_NZCV(ZCFlag);

  START();
  __ Mov(x0, 0);
  __ Mov(x1, 0x1111111111111111);
  __ Cmp(x0, Operand(x1));
  END();

  RUN();

  ASSERT_EQUAL_NZCV(NFlag);

  START();
  __ Mov(w0, 0);
  __ Mov(w1, 0x11111111);
  __ Cmp(w0, Operand(w1));
  END();

  RUN();

  ASSERT_EQUAL_NZCV(NFlag);

  START();
  __ Mov(x1, 0x1111111111111111);
  __ Cmp(x1, Operand(0));
  END();

  RUN();

  ASSERT_EQUAL_NZCV(CFlag);

  START();
  __ Mov(w1, 0x11111111);
  __ Cmp(w1, Operand(0));
  END();

  RUN();

  ASSERT_EQUAL_NZCV(CFlag);

  START();
  __ Mov(x0, 1);
  __ Mov(x1, 0x7fffffffffffffff);
  __ Cmn(x1, Operand(x0));
  END();

  RUN();

  ASSERT_EQUAL_NZCV(NVFlag);

  START();
  __ Mov(w0, 1);
  __ Mov(w1, 0x7fffffff);
  __ Cmn(w1, Operand(w0));
  END();

  RUN();

  ASSERT_EQUAL_NZCV(NVFlag);

  START();
  __ Mov(x0, 1);
  __ Mov(x1, 0xffffffffffffffff);
  __ Cmn(x1, Operand(x0));
  END();

  RUN();

  ASSERT_EQUAL_NZCV(ZCFlag);

  START();
  __ Mov(w0, 1);
  __ Mov(w1, 0xffffffff);
  __ Cmn(w1, Operand(w0));
  END();

  RUN();

  ASSERT_EQUAL_NZCV(ZCFlag);

  START();
  __ Mov(w0, 0);
  __ Mov(w1, 1);
  // Clear the C flag.
  __ Adds(w0, w0, Operand(0));
  __ Ngcs(w0, Operand(w1));
  END();

  RUN();

  ASSERT_EQUAL_NZCV(NFlag);

  START();
  __ Mov(w0, 0);
  __ Mov(w1, 0);
  // Set the C flag.
  __ Cmp(w0, Operand(w0));
  __ Ngcs(w0, Operand(w1));
  END();

  RUN();

  ASSERT_EQUAL_NZCV(ZCFlag);

  TEARDOWN();
}


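// Test cmp with shifted-register operands. Each comparison below is arranged
// so that the operands are equal, and should therefore set Z and C.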
TEST(cmp_shift) {
  SETUP();

  START();
  __ Mov(x18, 0xf0000000);
  __ Mov(x19, 0xf000000010000000);
  __ Mov(x20, 0xf0000000f0000000);
  __ Mov(x21, 0x7800000078000000);
  __ Mov(x22, 0x3c0000003c000000);
  __ Mov(x23, 0x8000000780000000);
  __ Mov(x24, 0x0000000f00000000);
  __ Mov(x25, 0x00000003c0000000);
  __ Mov(x26, 0x8000000780000000);
  __ Mov(x27, 0xc0000003);

  __ Cmp(w20, Operand(w21, LSL, 1));
  __ Mrs(x0, NZCV);

  __ Cmp(x20, Operand(x22, LSL, 2));
  __ Mrs(x1, NZCV);

  __ Cmp(w19, Operand(w23, LSR, 3));
  __ Mrs(x2, NZCV);

  __ Cmp(x18, Operand(x24, LSR, 4));
  __ Mrs(x3, NZCV);

  __ Cmp(w20, Operand(w25, ASR, 2));
  __ Mrs(x4, NZCV);

  __ Cmp(x20, Operand(x26, ASR, 3));
  __ Mrs(x5, NZCV);

  __ Cmp(w27, Operand(w22, ROR, 28));
  __ Mrs(x6, NZCV);

  __ Cmp(x20, Operand(x21, ROR, 31));
  __ Mrs(x7, NZCV);
  END();

  RUN();

  ASSERT_EQUAL_32(ZCFlag, w0);
  ASSERT_EQUAL_32(ZCFlag, w1);
  ASSERT_EQUAL_32(ZCFlag, w2);
  ASSERT_EQUAL_32(ZCFlag, w3);
  ASSERT_EQUAL_32(ZCFlag, w4);
  ASSERT_EQUAL_32(ZCFlag, w5);
  ASSERT_EQUAL_32(ZCFlag, w6);
  ASSERT_EQUAL_32(ZCFlag, w7);

  TEARDOWN();
}


TEST(cmp_extend) {
  SETUP();

  START();
  __ Mov(w20, 0x2);
  __ Mov(w21, 0x1);
  __ Mov(x22, 0xffffffffffffffff);
  __ Mov(x23, 0xff);
  __ Mov(x24, 0xfffffffffffffffe);
  __ Mov(x25, 0xffff);
  __ Mov(x26, 0xffffffff);

  __ Cmp(w20, Operand(w21, LSL, 1));
  __ Mrs(x0, NZCV);

  __ Cmp(x22, Operand(x23, SXTB, 0));
  __ Mrs(x1, NZCV);

  __ Cmp(x24, Operand(x23, SXTB, 1));
  __ Mrs(x2, NZCV);

  __ Cmp(x24, Operand(x23, UXTB, 1));
  __ Mrs(x3, NZCV);

  __ Cmp(w22, Operand(w25, UXTH));
  __ Mrs(x4, NZCV);

  __ Cmp(x22, Operand(x25, SXTH));
  __ Mrs(x5, NZCV);

  __ Cmp(x22, Operand(x26, UXTW));
  __ Mrs(x6, NZCV);

  __ Cmp(x24, Operand(x26, SXTW, 1));
  __ Mrs(x7, NZCV);
  END();

  RUN();

  ASSERT_EQUAL_32(ZCFlag, w0);
  ASSERT_EQUAL_32(ZCFlag, w1);
  ASSERT_EQUAL_32(ZCFlag, w2);
  ASSERT_EQUAL_32(NCFlag, w3);
  ASSERT_EQUAL_32(NCFlag, w4);
  ASSERT_EQUAL_32(ZCFlag, w5);
  ASSERT_EQUAL_32(NCFlag, w6);
  ASSERT_EQUAL_32(ZCFlag, w7);

  TEARDOWN();
}


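// Conditional compare: if the condition passes, NZCV is set by the
// comparison; otherwise NZCV is set to the supplied immediate status flags.
// The raw ccmp is used for the al and nv conditions, which always pass.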
TEST(ccmp) {
  SETUP();

  START();
  __ Mov(w16, 0);
  __ Mov(w17, 1);
  __ Cmp(w16, w16);
  __ Ccmp(w16, w17, NCFlag, eq);
  __ Mrs(x0, NZCV);

  __ Cmp(w16, w16);
  __ Ccmp(w16, w17, NCFlag, ne);
  __ Mrs(x1, NZCV);

  __ Cmp(x16, x16);
  __ Ccmn(x16, 2, NZCVFlag, eq);
  __ Mrs(x2, NZCV);

  __ Cmp(x16, x16);
  __ Ccmn(x16, 2, NZCVFlag, ne);
  __ Mrs(x3, NZCV);

  __ ccmp(x16, x16, NZCVFlag, al);
  __ Mrs(x4, NZCV);

  __ ccmp(x16, x16, NZCVFlag, nv);
  __ Mrs(x5, NZCV);

  END();

  RUN();

  ASSERT_EQUAL_32(NFlag, w0);
  ASSERT_EQUAL_32(NCFlag, w1);
  ASSERT_EQUAL_32(NoFlag, w2);
  ASSERT_EQUAL_32(NZCVFlag, w3);
  ASSERT_EQUAL_32(ZCFlag, w4);
  ASSERT_EQUAL_32(ZCFlag, w5);

  TEARDOWN();
}


TEST(ccmp_wide_imm) {
  SETUP();

  START();
  __ Mov(w20, 0);

  __ Cmp(w20, Operand(w20));
  __ Ccmp(w20, Operand(0x12345678), NZCVFlag, eq);
  __ Mrs(x0, NZCV);

  __ Cmp(w20, Operand(w20));
  __ Ccmp(x20, Operand(0xffffffffffffffff), NZCVFlag, eq);
  __ Mrs(x1, NZCV);
  END();

  RUN();

  ASSERT_EQUAL_32(NFlag, w0);
  ASSERT_EQUAL_32(NoFlag, w1);

  TEARDOWN();
}


TEST(ccmp_shift_extend) {
  SETUP();

  START();
  __ Mov(w20, 0x2);
  __ Mov(w21, 0x1);
  __ Mov(x22, 0xffffffffffffffff);
  __ Mov(x23, 0xff);
  __ Mov(x24, 0xfffffffffffffffe);

  __ Cmp(w20, Operand(w20));
  __ Ccmp(w20, Operand(w21, LSL, 1), NZCVFlag, eq);
  __ Mrs(x0, NZCV);

  __ Cmp(w20, Operand(w20));
  __ Ccmp(x22, Operand(x23, SXTB, 0), NZCVFlag, eq);
  __ Mrs(x1, NZCV);

  __ Cmp(w20, Operand(w20));
  __ Ccmp(x24, Operand(x23, SXTB, 1), NZCVFlag, eq);
  __ Mrs(x2, NZCV);

  __ Cmp(w20, Operand(w20));
  __ Ccmp(x24, Operand(x23, UXTB, 1), NZCVFlag, eq);
  __ Mrs(x3, NZCV);

  __ Cmp(w20, Operand(w20));
  __ Ccmp(x24, Operand(x23, UXTB, 1), NZCVFlag, ne);
  __ Mrs(x4, NZCV);
  END();

  RUN();

  ASSERT_EQUAL_32(ZCFlag, w0);
  ASSERT_EQUAL_32(ZCFlag, w1);
  ASSERT_EQUAL_32(ZCFlag, w2);
  ASSERT_EQUAL_32(NCFlag, w3);
  ASSERT_EQUAL_32(NZCVFlag, w4);

  TEARDOWN();
}


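// Test conditional select and its aliases (cset, csetm, cinc, cinv, cneg).
// The raw csel is used for the al and nv conditions, which always pass.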
TEST(csel) {
  SETUP();

  START();
  __ Mov(x16, 0);
  __ Mov(x24, 0x0000000f0000000f);
  __ Mov(x25, 0x0000001f0000001f);

  __ Cmp(w16, Operand(0));
  __ Csel(w0, w24, w25, eq);
  __ Csel(w1, w24, w25, ne);
  __ Csinc(w2, w24, w25, mi);
  __ Csinc(w3, w24, w25, pl);

  __ csel(w13, w24, w25, al);
  __ csel(x14, x24, x25, nv);

  __ Cmp(x16, Operand(1));
  __ Csinv(x4, x24, x25, gt);
  __ Csinv(x5, x24, x25, le);
  __ Csneg(x6, x24, x25, hs);
  __ Csneg(x7, x24, x25, lo);

  __ Cset(w8, ne);
  __ Csetm(w9, ne);
  __ Cinc(x10, x25, ne);
  __ Cinv(x11, x24, ne);
  __ Cneg(x12, x24, ne);

  __ csel(w15, w24, w25, al);
  __ csel(x17, x24, x25, nv);

  END();

  RUN();

  ASSERT_EQUAL_64(0x0000000f, x0);
  ASSERT_EQUAL_64(0x0000001f, x1);
  ASSERT_EQUAL_64(0x00000020, x2);
  ASSERT_EQUAL_64(0x0000000f, x3);
  ASSERT_EQUAL_64(0xffffffe0ffffffe0, x4);
  ASSERT_EQUAL_64(0x0000000f0000000f, x5);
  ASSERT_EQUAL_64(0xffffffe0ffffffe1, x6);
  ASSERT_EQUAL_64(0x0000000f0000000f, x7);
  ASSERT_EQUAL_64(0x00000001, x8);
  ASSERT_EQUAL_64(0xffffffff, x9);
  ASSERT_EQUAL_64(0x0000001f00000020, x10);
  ASSERT_EQUAL_64(0xfffffff0fffffff0, x11);
  ASSERT_EQUAL_64(0xfffffff0fffffff1, x12);
  ASSERT_EQUAL_64(0x0000000f, x13);
  ASSERT_EQUAL_64(0x0000000f0000000f, x14);
  ASSERT_EQUAL_64(0x0000000f, x15);
  ASSERT_EQUAL_64(0x0000000f0000000f, x17);

  TEARDOWN();
}


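// Csel with immediate operands. Immediates of -1, 0 and 1 can be handled with
// csinv, csel and csinc against the zero register; other values are expected
// to need a scratch register.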
TEST(csel_imm) {
  SETUP();

  START();
  __ Mov(x18, 0);
  __ Mov(x19, 0x80000000);
  __ Mov(x20, 0x8000000000000000);

  __ Cmp(x18, Operand(0));
  __ Csel(w0, w19, -2, ne);
  __ Csel(w1, w19, -1, ne);
  __ Csel(w2, w19, 0, ne);
  __ Csel(w3, w19, 1, ne);
  __ Csel(w4, w19, 2, ne);
  __ Csel(w5, w19, Operand(w19, ASR, 31), ne);
  __ Csel(w6, w19, Operand(w19, ROR, 1), ne);
  __ Csel(w7, w19, 3, eq);

  __ Csel(x8, x20, -2, ne);
  __ Csel(x9, x20, -1, ne);
  __ Csel(x10, x20, 0, ne);
  __ Csel(x11, x20, 1, ne);
  __ Csel(x12, x20, 2, ne);
  __ Csel(x13, x20, Operand(x20, ASR, 63), ne);
  __ Csel(x14, x20, Operand(x20, ROR, 1), ne);
  __ Csel(x15, x20, 3, eq);

  END();

  RUN();

  ASSERT_EQUAL_32(-2, w0);
  ASSERT_EQUAL_32(-1, w1);
  ASSERT_EQUAL_32(0, w2);
  ASSERT_EQUAL_32(1, w3);
  ASSERT_EQUAL_32(2, w4);
  ASSERT_EQUAL_32(-1, w5);
  ASSERT_EQUAL_32(0x40000000, w6);
  ASSERT_EQUAL_32(0x80000000, w7);

  ASSERT_EQUAL_64(-2, x8);
  ASSERT_EQUAL_64(-1, x9);
  ASSERT_EQUAL_64(0, x10);
  ASSERT_EQUAL_64(1, x11);
  ASSERT_EQUAL_64(2, x12);
  ASSERT_EQUAL_64(-1, x13);
  ASSERT_EQUAL_64(0x4000000000000000, x14);
  ASSERT_EQUAL_64(0x8000000000000000, x15);

  TEARDOWN();
}


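// Test the variable shifts, where the shift amount comes from a register and
// is taken modulo the register width.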
3997TEST(lslv) {
3998  SETUP();
3999
4000  uint64_t value = 0x0123456789abcdef;
4001  int shift[] = {1, 3, 5, 9, 17, 33};
4002
4003  START();
4004  __ Mov(x0, value);
4005  __ Mov(w1, shift[0]);
4006  __ Mov(w2, shift[1]);
4007  __ Mov(w3, shift[2]);
4008  __ Mov(w4, shift[3]);
4009  __ Mov(w5, shift[4]);
4010  __ Mov(w6, shift[5]);
4011
  __ lslv(x0, x0, xzr);

  __ Lsl(x16, x0, x1);
  __ Lsl(x17, x0, x2);
  __ Lsl(x18, x0, x3);
  __ Lsl(x19, x0, x4);
  __ Lsl(x20, x0, x5);
  __ Lsl(x21, x0, x6);

  __ Lsl(w22, w0, w1);
  __ Lsl(w23, w0, w2);
  __ Lsl(w24, w0, w3);
  __ Lsl(w25, w0, w4);
  __ Lsl(w26, w0, w5);
  __ Lsl(w27, w0, w6);
  END();

  RUN();

  ASSERT_EQUAL_64(value, x0);
  ASSERT_EQUAL_64(value << (shift[0] & 63), x16);
  ASSERT_EQUAL_64(value << (shift[1] & 63), x17);
  ASSERT_EQUAL_64(value << (shift[2] & 63), x18);
  ASSERT_EQUAL_64(value << (shift[3] & 63), x19);
  ASSERT_EQUAL_64(value << (shift[4] & 63), x20);
  ASSERT_EQUAL_64(value << (shift[5] & 63), x21);
  ASSERT_EQUAL_32(value << (shift[0] & 31), w22);
  ASSERT_EQUAL_32(value << (shift[1] & 31), w23);
  ASSERT_EQUAL_32(value << (shift[2] & 31), w24);
  ASSERT_EQUAL_32(value << (shift[3] & 31), w25);
  ASSERT_EQUAL_32(value << (shift[4] & 31), w26);
  ASSERT_EQUAL_32(value << (shift[5] & 31), w27);

  TEARDOWN();
}


TEST(lsrv) {
  SETUP();

  uint64_t value = 0x0123456789abcdef;
  int shift[] = {1, 3, 5, 9, 17, 33};

  START();
  __ Mov(x0, value);
  __ Mov(w1, shift[0]);
  __ Mov(w2, shift[1]);
  __ Mov(w3, shift[2]);
  __ Mov(w4, shift[3]);
  __ Mov(w5, shift[4]);
  __ Mov(w6, shift[5]);

  __ lsrv(x0, x0, xzr);

  __ Lsr(x16, x0, x1);
  __ Lsr(x17, x0, x2);
  __ Lsr(x18, x0, x3);
  __ Lsr(x19, x0, x4);
  __ Lsr(x20, x0, x5);
  __ Lsr(x21, x0, x6);

  __ Lsr(w22, w0, w1);
  __ Lsr(w23, w0, w2);
  __ Lsr(w24, w0, w3);
  __ Lsr(w25, w0, w4);
  __ Lsr(w26, w0, w5);
  __ Lsr(w27, w0, w6);
  END();

  RUN();

  ASSERT_EQUAL_64(value, x0);
  ASSERT_EQUAL_64(value >> (shift[0] & 63), x16);
  ASSERT_EQUAL_64(value >> (shift[1] & 63), x17);
  ASSERT_EQUAL_64(value >> (shift[2] & 63), x18);
  ASSERT_EQUAL_64(value >> (shift[3] & 63), x19);
  ASSERT_EQUAL_64(value >> (shift[4] & 63), x20);
  ASSERT_EQUAL_64(value >> (shift[5] & 63), x21);

  value &= 0xffffffff;
  ASSERT_EQUAL_32(value >> (shift[0] & 31), w22);
  ASSERT_EQUAL_32(value >> (shift[1] & 31), w23);
  ASSERT_EQUAL_32(value >> (shift[2] & 31), w24);
  ASSERT_EQUAL_32(value >> (shift[3] & 31), w25);
  ASSERT_EQUAL_32(value >> (shift[4] & 31), w26);
  ASSERT_EQUAL_32(value >> (shift[5] & 31), w27);

  TEARDOWN();
}


TEST(asrv) {
  SETUP();

  int64_t value = 0xfedcba98fedcba98;
  int shift[] = {1, 3, 5, 9, 17, 33};

  START();
  __ Mov(x0, value);
  __ Mov(w1, shift[0]);
  __ Mov(w2, shift[1]);
  __ Mov(w3, shift[2]);
  __ Mov(w4, shift[3]);
  __ Mov(w5, shift[4]);
  __ Mov(w6, shift[5]);

  __ asrv(x0, x0, xzr);

  __ Asr(x16, x0, x1);
  __ Asr(x17, x0, x2);
  __ Asr(x18, x0, x3);
  __ Asr(x19, x0, x4);
  __ Asr(x20, x0, x5);
  __ Asr(x21, x0, x6);

  __ Asr(w22, w0, w1);
  __ Asr(w23, w0, w2);
  __ Asr(w24, w0, w3);
  __ Asr(w25, w0, w4);
  __ Asr(w26, w0, w5);
  __ Asr(w27, w0, w6);
  END();

  RUN();

  ASSERT_EQUAL_64(value, x0);
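  // Note that >> on a negative int64_t is implementation-defined in C++; the
  // expected values below assume the arithmetic shift that GCC and Clang
  // implement.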
  ASSERT_EQUAL_64(value >> (shift[0] & 63), x16);
  ASSERT_EQUAL_64(value >> (shift[1] & 63), x17);
  ASSERT_EQUAL_64(value >> (shift[2] & 63), x18);
  ASSERT_EQUAL_64(value >> (shift[3] & 63), x19);
  ASSERT_EQUAL_64(value >> (shift[4] & 63), x20);
  ASSERT_EQUAL_64(value >> (shift[5] & 63), x21);

  int32_t value32 = static_cast<int32_t>(value & 0xffffffff);
  ASSERT_EQUAL_32(value32 >> (shift[0] & 31), w22);
  ASSERT_EQUAL_32(value32 >> (shift[1] & 31), w23);
  ASSERT_EQUAL_32(value32 >> (shift[2] & 31), w24);
  ASSERT_EQUAL_32(value32 >> (shift[3] & 31), w25);
  ASSERT_EQUAL_32(value32 >> (shift[4] & 31), w26);
  ASSERT_EQUAL_32(value32 >> (shift[5] & 31), w27);

  TEARDOWN();
}


TEST(rorv) {
  SETUP();

  uint64_t value = 0x0123456789abcdef;
  int shift[] = {4, 8, 12, 16, 24, 36};

  START();
  __ Mov(x0, value);
  __ Mov(w1, shift[0]);
  __ Mov(w2, shift[1]);
  __ Mov(w3, shift[2]);
  __ Mov(w4, shift[3]);
  __ Mov(w5, shift[4]);
  __ Mov(w6, shift[5]);

  __ rorv(x0, x0, xzr);

  __ Ror(x16, x0, x1);
  __ Ror(x17, x0, x2);
  __ Ror(x18, x0, x3);
  __ Ror(x19, x0, x4);
  __ Ror(x20, x0, x5);
  __ Ror(x21, x0, x6);

  __ Ror(w22, w0, w1);
  __ Ror(w23, w0, w2);
  __ Ror(w24, w0, w3);
  __ Ror(w25, w0, w4);
  __ Ror(w26, w0, w5);
  __ Ror(w27, w0, w6);
  END();

  RUN();

  ASSERT_EQUAL_64(value, x0);
  ASSERT_EQUAL_64(0xf0123456789abcde, x16);
  ASSERT_EQUAL_64(0xef0123456789abcd, x17);
  ASSERT_EQUAL_64(0xdef0123456789abc, x18);
  ASSERT_EQUAL_64(0xcdef0123456789ab, x19);
  ASSERT_EQUAL_64(0xabcdef0123456789, x20);
  ASSERT_EQUAL_64(0x789abcdef0123456, x21);
  ASSERT_EQUAL_32(0xf89abcde, w22);
  ASSERT_EQUAL_32(0xef89abcd, w23);
  ASSERT_EQUAL_32(0xdef89abc, w24);
  ASSERT_EQUAL_32(0xcdef89ab, w25);
  ASSERT_EQUAL_32(0xabcdef89, w26);
  ASSERT_EQUAL_32(0xf89abcde, w27);

  TEARDOWN();
}


TEST(bfm) {
  SETUP();

  START();
  __ Mov(x1, 0x0123456789abcdef);

  __ Mov(x10, 0x8888888888888888);
  __ Mov(x11, 0x8888888888888888);
  __ Mov(x12, 0x8888888888888888);
  __ Mov(x13, 0x8888888888888888);
  __ Mov(w20, 0x88888888);
  __ Mov(w21, 0x88888888);

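  // bfm(rd, rn, immr, imms) moves a bitfield without clearing the rest of rd:
  // with imms >= immr it copies bits <imms:immr> of rn to the bottom of rd
  // (the Bfxil alias); otherwise it inserts the bottom (imms + 1) bits of rn
  // at bit position (register size - immr) (the Bfi alias).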
  __ bfm(x10, x1, 16, 31);
  __ bfm(x11, x1, 32, 15);

  __ bfm(w20, w1, 16, 23);
  __ bfm(w21, w1, 24, 15);

  // Aliases.
  __ Bfi(x12, x1, 16, 8);
  __ Bfxil(x13, x1, 16, 8);
  END();

  RUN();

  ASSERT_EQUAL_64(0x88888888888889ab, x10);
  ASSERT_EQUAL_64(0x8888cdef88888888, x11);

  ASSERT_EQUAL_32(0x888888ab, w20);
  ASSERT_EQUAL_32(0x88cdef88, w21);

  ASSERT_EQUAL_64(0x8888888888ef8888, x12);
  ASSERT_EQUAL_64(0x88888888888888ab, x13);

  TEARDOWN();
}


TEST(sbfm) {
  SETUP();

  START();
  __ Mov(x1, 0x0123456789abcdef);
  __ Mov(x2, 0xfedcba9876543210);

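  // sbfm is the signed counterpart of bfm: the whole destination is written,
  // with the copied field sign-extended at the top and zero-filled below.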
  __ sbfm(x10, x1, 16, 31);
  __ sbfm(x11, x1, 32, 15);
  __ sbfm(x12, x1, 32, 47);
  __ sbfm(x13, x1, 48, 35);

  __ sbfm(w14, w1, 16, 23);
  __ sbfm(w15, w1, 24, 15);
  __ sbfm(w16, w2, 16, 23);
  __ sbfm(w17, w2, 24, 15);

  // Aliases.
  __ Asr(x18, x1, 32);
  __ Asr(x19, x2, 32);
  __ Sbfiz(x20, x1, 8, 16);
  __ Sbfiz(x21, x2, 8, 16);
  __ Sbfx(x22, x1, 8, 16);
  __ Sbfx(x23, x2, 8, 16);
  __ Sxtb(x24, w1);
  __ Sxtb(x25, x2);
  __ Sxth(x26, w1);
  __ Sxth(x27, x2);
  __ Sxtw(x28, w1);
  __ Sxtw(x29, x2);
  END();

  RUN();

  ASSERT_EQUAL_64(0xffffffffffff89ab, x10);
  ASSERT_EQUAL_64(0xffffcdef00000000, x11);
  ASSERT_EQUAL_64(0x0000000000004567, x12);
  ASSERT_EQUAL_64(0x000789abcdef0000, x13);

  ASSERT_EQUAL_32(0xffffffab, w14);
  ASSERT_EQUAL_32(0xffcdef00, w15);
  ASSERT_EQUAL_32(0x00000054, w16);
  ASSERT_EQUAL_32(0x00321000, w17);

  ASSERT_EQUAL_64(0x0000000001234567, x18);
  ASSERT_EQUAL_64(0xfffffffffedcba98, x19);
  ASSERT_EQUAL_64(0xffffffffffcdef00, x20);
  ASSERT_EQUAL_64(0x0000000000321000, x21);
  ASSERT_EQUAL_64(0xffffffffffffabcd, x22);
  ASSERT_EQUAL_64(0x0000000000005432, x23);
  ASSERT_EQUAL_64(0xffffffffffffffef, x24);
  ASSERT_EQUAL_64(0x0000000000000010, x25);
  ASSERT_EQUAL_64(0xffffffffffffcdef, x26);
  ASSERT_EQUAL_64(0x0000000000003210, x27);
  ASSERT_EQUAL_64(0xffffffff89abcdef, x28);
  ASSERT_EQUAL_64(0x0000000076543210, x29);

  TEARDOWN();
}


TEST(ubfm) {
  SETUP();

  START();
  __ Mov(x1, 0x0123456789abcdef);
  __ Mov(x2, 0xfedcba9876543210);

  __ Mov(x10, 0x8888888888888888);
  __ Mov(x11, 0x8888888888888888);

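  // ubfm zero-extends the copied field instead; it is also the instruction
  // underlying Lsl and Lsr by immediate.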
  __ ubfm(x10, x1, 16, 31);
  __ ubfm(x11, x1, 32, 15);
  __ ubfm(x12, x1, 32, 47);
  __ ubfm(x13, x1, 48, 35);

  __ ubfm(w25, w1, 16, 23);
  __ ubfm(w26, w1, 24, 15);
  __ ubfm(w27, w2, 16, 23);
  __ ubfm(w28, w2, 24, 15);

  // Aliases.
  __ Lsl(x15, x1, 63);
  __ Lsl(x16, x1, 0);
  __ Lsr(x17, x1, 32);
  __ Ubfiz(x18, x1, 8, 16);
  __ Ubfx(x19, x1, 8, 16);
  __ Uxtb(x20, x1);
  __ Uxth(x21, x1);
  __ Uxtw(x22, x1);
  END();

  RUN();

  ASSERT_EQUAL_64(0x00000000000089ab, x10);
  ASSERT_EQUAL_64(0x0000cdef00000000, x11);
  ASSERT_EQUAL_64(0x0000000000004567, x12);
  ASSERT_EQUAL_64(0x000789abcdef0000, x13);

  ASSERT_EQUAL_32(0x000000ab, w25);
  ASSERT_EQUAL_32(0x00cdef00, w26);
  ASSERT_EQUAL_32(0x00000054, w27);
  ASSERT_EQUAL_32(0x00321000, w28);

  ASSERT_EQUAL_64(0x8000000000000000, x15);
  ASSERT_EQUAL_64(0x0123456789abcdef, x16);
  ASSERT_EQUAL_64(0x0000000001234567, x17);
  ASSERT_EQUAL_64(0x0000000000cdef00, x18);
  ASSERT_EQUAL_64(0x000000000000abcd, x19);
  ASSERT_EQUAL_64(0x00000000000000ef, x20);
  ASSERT_EQUAL_64(0x000000000000cdef, x21);
  ASSERT_EQUAL_64(0x0000000089abcdef, x22);

  TEARDOWN();
}


TEST(extr) {
  SETUP();

  START();
  __ Mov(x1, 0x0123456789abcdef);
  __ Mov(x2, 0xfedcba9876543210);

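  // Extr(rd, rn, rm, lsb) extracts a register-sized field from the rn:rm
  // concatenation, starting at bit <lsb> of rm. Ror by immediate is the
  // rn == rm alias of Extr.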
  __ Extr(w10, w1, w2, 0);
  __ Extr(w11, w1, w2, 1);
  __ Extr(x12, x2, x1, 2);

  __ Ror(w13, w1, 0);
  __ Ror(w14, w2, 17);
  __ Ror(w15, w1, 31);
  __ Ror(x18, x2, 1);
  __ Ror(x19, x1, 63);
  END();

  RUN();

  ASSERT_EQUAL_64(0x76543210, x10);
  ASSERT_EQUAL_64(0xbb2a1908, x11);
  ASSERT_EQUAL_64(0x0048d159e26af37b, x12);
  ASSERT_EQUAL_64(0x89abcdef, x13);
  ASSERT_EQUAL_64(0x19083b2a, x14);
  ASSERT_EQUAL_64(0x13579bdf, x15);
  ASSERT_EQUAL_64(0x7f6e5d4c3b2a1908, x18);
  ASSERT_EQUAL_64(0x02468acf13579bde, x19);

  TEARDOWN();
}


TEST(fmov_imm) {
  SETUP();

  START();
  __ Fmov(s11, 1.0);
  __ Fmov(d22, -13.0);
  __ Fmov(s1, 255.0);
  __ Fmov(d2, 12.34567);
  __ Fmov(s3, 0.0);
  __ Fmov(d4, 0.0);
  __ Fmov(s5, kFP32PositiveInfinity);
  __ Fmov(d6, kFP64NegativeInfinity);
  END();

  RUN();

  ASSERT_EQUAL_FP32(1.0, s11);
  ASSERT_EQUAL_FP64(-13.0, d22);
  ASSERT_EQUAL_FP32(255.0, s1);
  ASSERT_EQUAL_FP64(12.34567, d2);
  ASSERT_EQUAL_FP32(0.0, s3);
  ASSERT_EQUAL_FP64(0.0, d4);
  ASSERT_EQUAL_FP32(kFP32PositiveInfinity, s5);
  ASSERT_EQUAL_FP64(kFP64NegativeInfinity, d6);

  TEARDOWN();
}


TEST(fmov_reg) {
  SETUP();

  START();
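  // Fmov between a core register and an FP register of the same size
  // transfers the raw bits with no conversion, so the results below are
  // checked against raw bit patterns.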
  __ Fmov(s20, 1.0);
  __ Fmov(w10, s20);
  __ Fmov(s30, w10);
  __ Fmov(s5, s20);
  __ Fmov(d1, -13.0);
  __ Fmov(x1, d1);
  __ Fmov(d2, x1);
  __ Fmov(d4, d1);
  __ Fmov(d6, rawbits_to_double(0x0123456789abcdef));
  __ Fmov(s6, s6);
  END();

  RUN();

  ASSERT_EQUAL_32(float_to_rawbits(1.0), w10);
  ASSERT_EQUAL_FP32(1.0, s30);
  ASSERT_EQUAL_FP32(1.0, s5);
  ASSERT_EQUAL_64(double_to_rawbits(-13.0), x1);
  ASSERT_EQUAL_FP64(-13.0, d2);
  ASSERT_EQUAL_FP64(-13.0, d4);
  ASSERT_EQUAL_FP32(rawbits_to_float(0x89abcdef), s6);

  TEARDOWN();
}


TEST(fadd) {
  SETUP();

  START();
  __ Fmov(s14, -0.0f);
  __ Fmov(s15, kFP32PositiveInfinity);
  __ Fmov(s16, kFP32NegativeInfinity);
  __ Fmov(s17, 3.25f);
  __ Fmov(s18, 1.0f);
  __ Fmov(s19, 0.0f);

  __ Fmov(d26, -0.0);
  __ Fmov(d27, kFP64PositiveInfinity);
  __ Fmov(d28, kFP64NegativeInfinity);
  __ Fmov(d29, 0.0);
  __ Fmov(d30, -2.0);
  __ Fmov(d31, 2.25);

  __ Fadd(s0, s17, s18);
  __ Fadd(s1, s18, s19);
  __ Fadd(s2, s14, s18);
  __ Fadd(s3, s15, s18);
  __ Fadd(s4, s16, s18);
  __ Fadd(s5, s15, s16);
  __ Fadd(s6, s16, s15);

  __ Fadd(d7, d30, d31);
  __ Fadd(d8, d29, d31);
  __ Fadd(d9, d26, d31);
  __ Fadd(d10, d27, d31);
  __ Fadd(d11, d28, d31);
  __ Fadd(d12, d27, d28);
  __ Fadd(d13, d28, d27);
  END();

  RUN();

  ASSERT_EQUAL_FP32(4.25, s0);
  ASSERT_EQUAL_FP32(1.0, s1);
  ASSERT_EQUAL_FP32(1.0, s2);
  ASSERT_EQUAL_FP32(kFP32PositiveInfinity, s3);
  ASSERT_EQUAL_FP32(kFP32NegativeInfinity, s4);
  ASSERT_EQUAL_FP32(kFP32DefaultNaN, s5);
  ASSERT_EQUAL_FP32(kFP32DefaultNaN, s6);
  ASSERT_EQUAL_FP64(0.25, d7);
  ASSERT_EQUAL_FP64(2.25, d8);
  ASSERT_EQUAL_FP64(2.25, d9);
  ASSERT_EQUAL_FP64(kFP64PositiveInfinity, d10);
  ASSERT_EQUAL_FP64(kFP64NegativeInfinity, d11);
  ASSERT_EQUAL_FP64(kFP64DefaultNaN, d12);
  ASSERT_EQUAL_FP64(kFP64DefaultNaN, d13);

  TEARDOWN();
}


TEST(fsub) {
  SETUP();

  START();
  __ Fmov(s14, -0.0f);
  __ Fmov(s15, kFP32PositiveInfinity);
  __ Fmov(s16, kFP32NegativeInfinity);
  __ Fmov(s17, 3.25f);
  __ Fmov(s18, 1.0f);
  __ Fmov(s19, 0.0f);

  __ Fmov(d26, -0.0);
  __ Fmov(d27, kFP64PositiveInfinity);
  __ Fmov(d28, kFP64NegativeInfinity);
  __ Fmov(d29, 0.0);
  __ Fmov(d30, -2.0);
  __ Fmov(d31, 2.25);

  __ Fsub(s0, s17, s18);
  __ Fsub(s1, s18, s19);
  __ Fsub(s2, s14, s18);
  __ Fsub(s3, s18, s15);
  __ Fsub(s4, s18, s16);
  __ Fsub(s5, s15, s15);
  __ Fsub(s6, s16, s16);

  __ Fsub(d7, d30, d31);
  __ Fsub(d8, d29, d31);
  __ Fsub(d9, d26, d31);
  __ Fsub(d10, d31, d27);
  __ Fsub(d11, d31, d28);
  __ Fsub(d12, d27, d27);
  __ Fsub(d13, d28, d28);
  END();

  RUN();

  ASSERT_EQUAL_FP32(2.25, s0);
  ASSERT_EQUAL_FP32(1.0, s1);
  ASSERT_EQUAL_FP32(-1.0, s2);
  ASSERT_EQUAL_FP32(kFP32NegativeInfinity, s3);
  ASSERT_EQUAL_FP32(kFP32PositiveInfinity, s4);
  ASSERT_EQUAL_FP32(kFP32DefaultNaN, s5);
  ASSERT_EQUAL_FP32(kFP32DefaultNaN, s6);
  ASSERT_EQUAL_FP64(-4.25, d7);
  ASSERT_EQUAL_FP64(-2.25, d8);
  ASSERT_EQUAL_FP64(-2.25, d9);
  ASSERT_EQUAL_FP64(kFP64NegativeInfinity, d10);
  ASSERT_EQUAL_FP64(kFP64PositiveInfinity, d11);
  ASSERT_EQUAL_FP64(kFP64DefaultNaN, d12);
  ASSERT_EQUAL_FP64(kFP64DefaultNaN, d13);

  TEARDOWN();
}


TEST(fmul) {
  SETUP();

  START();
  __ Fmov(s14, -0.0f);
  __ Fmov(s15, kFP32PositiveInfinity);
  __ Fmov(s16, kFP32NegativeInfinity);
  __ Fmov(s17, 3.25f);
  __ Fmov(s18, 2.0f);
  __ Fmov(s19, 0.0f);
  __ Fmov(s20, -2.0f);

  __ Fmov(d26, -0.0);
  __ Fmov(d27, kFP64PositiveInfinity);
  __ Fmov(d28, kFP64NegativeInfinity);
  __ Fmov(d29, 0.0);
  __ Fmov(d30, -2.0);
  __ Fmov(d31, 2.25);

  __ Fmul(s0, s17, s18);
  __ Fmul(s1, s18, s19);
  __ Fmul(s2, s14, s14);
  __ Fmul(s3, s15, s20);
  __ Fmul(s4, s16, s20);
  __ Fmul(s5, s15, s19);
  __ Fmul(s6, s19, s16);

  __ Fmul(d7, d30, d31);
  __ Fmul(d8, d29, d31);
  __ Fmul(d9, d26, d26);
  __ Fmul(d10, d27, d30);
  __ Fmul(d11, d28, d30);
  __ Fmul(d12, d27, d29);
  __ Fmul(d13, d29, d28);
  END();

  RUN();

  ASSERT_EQUAL_FP32(6.5, s0);
  ASSERT_EQUAL_FP32(0.0, s1);
  ASSERT_EQUAL_FP32(0.0, s2);
  ASSERT_EQUAL_FP32(kFP32NegativeInfinity, s3);
  ASSERT_EQUAL_FP32(kFP32PositiveInfinity, s4);
  ASSERT_EQUAL_FP32(kFP32DefaultNaN, s5);
  ASSERT_EQUAL_FP32(kFP32DefaultNaN, s6);
  ASSERT_EQUAL_FP64(-4.5, d7);
  ASSERT_EQUAL_FP64(0.0, d8);
  ASSERT_EQUAL_FP64(0.0, d9);
  ASSERT_EQUAL_FP64(kFP64NegativeInfinity, d10);
  ASSERT_EQUAL_FP64(kFP64PositiveInfinity, d11);
  ASSERT_EQUAL_FP64(kFP64DefaultNaN, d12);
  ASSERT_EQUAL_FP64(kFP64DefaultNaN, d13);

  TEARDOWN();
}


static void FmaddFmsubHelper(double n, double m, double a,
                             double fmadd, double fmsub,
                             double fnmadd, double fnmsub) {
  SETUP();
  START();

  __ Fmov(d0, n);
  __ Fmov(d1, m);
  __ Fmov(d2, a);
  __ Fmadd(d28, d0, d1, d2);
  __ Fmsub(d29, d0, d1, d2);
  __ Fnmadd(d30, d0, d1, d2);
  __ Fnmsub(d31, d0, d1, d2);

  END();
  RUN();

  ASSERT_EQUAL_FP64(fmadd, d28);
  ASSERT_EQUAL_FP64(fmsub, d29);
  ASSERT_EQUAL_FP64(fnmadd, d30);
  ASSERT_EQUAL_FP64(fnmsub, d31);

  TEARDOWN();
}


TEST(fmadd_fmsub_double) {
  // It's hard to check the result of fused operations because the only way to
  // calculate the expected result is to use fma(), which is what the simulator
  // uses anyway.
  // TODO(jbramley): Add tests to check behaviour against a hardware trace.
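  //
  // For reference, the expected results here can be reproduced on the host
  // with fma() (C99 <math.h>), which rounds once, like the A64 fused
  // instructions:
  //
  //   fmadd:   a + (n * m)  =  fma( n, m,  a)
  //   fmsub:   a - (n * m)  =  fma(-n, m,  a)
  //   fnmadd: -a - (n * m)  =  fma(-n, m, -a)
  //   fnmsub: -a + (n * m)  =  fma( n, m, -a)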

  // Basic operation.
  FmaddFmsubHelper(1.0, 2.0, 3.0, 5.0, 1.0, -5.0, -1.0);
  FmaddFmsubHelper(-1.0, 2.0, 3.0, 1.0, 5.0, -1.0, -5.0);

  // Check the sign of exact zeroes.
  //               n     m     a     fmadd  fmsub  fnmadd fnmsub
  FmaddFmsubHelper(-0.0, +0.0, -0.0, -0.0,  +0.0,  +0.0,  +0.0);
  FmaddFmsubHelper(+0.0, +0.0, -0.0, +0.0,  -0.0,  +0.0,  +0.0);
  FmaddFmsubHelper(+0.0, +0.0, +0.0, +0.0,  +0.0,  -0.0,  +0.0);
  FmaddFmsubHelper(-0.0, +0.0, +0.0, +0.0,  +0.0,  +0.0,  -0.0);
  FmaddFmsubHelper(+0.0, -0.0, -0.0, -0.0,  +0.0,  +0.0,  +0.0);
  FmaddFmsubHelper(-0.0, -0.0, -0.0, +0.0,  -0.0,  +0.0,  +0.0);
  FmaddFmsubHelper(-0.0, -0.0, +0.0, +0.0,  +0.0,  -0.0,  +0.0);
  FmaddFmsubHelper(+0.0, -0.0, +0.0, +0.0,  +0.0,  +0.0,  -0.0);

  // Check NaN generation.
  FmaddFmsubHelper(kFP64PositiveInfinity, 0.0, 42.0,
                   kFP64DefaultNaN, kFP64DefaultNaN,
                   kFP64DefaultNaN, kFP64DefaultNaN);
  FmaddFmsubHelper(0.0, kFP64PositiveInfinity, 42.0,
                   kFP64DefaultNaN, kFP64DefaultNaN,
                   kFP64DefaultNaN, kFP64DefaultNaN);
  FmaddFmsubHelper(kFP64PositiveInfinity, 1.0, kFP64PositiveInfinity,
                   kFP64PositiveInfinity,   //  inf + ( inf * 1) = inf
                   kFP64DefaultNaN,         //  inf + (-inf * 1) = NaN
                   kFP64NegativeInfinity,   // -inf + (-inf * 1) = -inf
                   kFP64DefaultNaN);        // -inf + ( inf * 1) = NaN
  FmaddFmsubHelper(kFP64NegativeInfinity, 1.0, kFP64PositiveInfinity,
                   kFP64DefaultNaN,         //  inf + (-inf * 1) = NaN
                   kFP64PositiveInfinity,   //  inf + ( inf * 1) = inf
                   kFP64DefaultNaN,         // -inf + ( inf * 1) = NaN
                   kFP64NegativeInfinity);  // -inf + (-inf * 1) = -inf
}


static void FmaddFmsubHelper(float n, float m, float a,
                             float fmadd, float fmsub,
                             float fnmadd, float fnmsub) {
  SETUP();
  START();

  __ Fmov(s0, n);
  __ Fmov(s1, m);
  __ Fmov(s2, a);
  __ Fmadd(s28, s0, s1, s2);
  __ Fmsub(s29, s0, s1, s2);
  __ Fnmadd(s30, s0, s1, s2);
  __ Fnmsub(s31, s0, s1, s2);

  END();
  RUN();

  ASSERT_EQUAL_FP32(fmadd, s28);
  ASSERT_EQUAL_FP32(fmsub, s29);
  ASSERT_EQUAL_FP32(fnmadd, s30);
  ASSERT_EQUAL_FP32(fnmsub, s31);

  TEARDOWN();
}


TEST(fmadd_fmsub_float) {
  // It's hard to check the result of fused operations because the only way to
  // calculate the expected result is to use fma(), which is what the simulator
  // uses anyway.
  // TODO(jbramley): Add tests to check behaviour against a hardware trace.
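  // (The fma() equivalences noted in fmadd_fmsub_double apply here too, using
  // fmaf() for single precision.)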

  // Basic operation.
  FmaddFmsubHelper(1.0f, 2.0f, 3.0f, 5.0f, 1.0f, -5.0f, -1.0f);
  FmaddFmsubHelper(-1.0f, 2.0f, 3.0f, 1.0f, 5.0f, -1.0f, -5.0f);

  // Check the sign of exact zeroes.
  //               n      m      a      fmadd  fmsub  fnmadd fnmsub
  FmaddFmsubHelper(-0.0f, +0.0f, -0.0f, -0.0f, +0.0f, +0.0f, +0.0f);
  FmaddFmsubHelper(+0.0f, +0.0f, -0.0f, +0.0f, -0.0f, +0.0f, +0.0f);
  FmaddFmsubHelper(+0.0f, +0.0f, +0.0f, +0.0f, +0.0f, -0.0f, +0.0f);
  FmaddFmsubHelper(-0.0f, +0.0f, +0.0f, +0.0f, +0.0f, +0.0f, -0.0f);
  FmaddFmsubHelper(+0.0f, -0.0f, -0.0f, -0.0f, +0.0f, +0.0f, +0.0f);
  FmaddFmsubHelper(-0.0f, -0.0f, -0.0f, +0.0f, -0.0f, +0.0f, +0.0f);
  FmaddFmsubHelper(-0.0f, -0.0f, +0.0f, +0.0f, +0.0f, -0.0f, +0.0f);
  FmaddFmsubHelper(+0.0f, -0.0f, +0.0f, +0.0f, +0.0f, +0.0f, -0.0f);

  // Check NaN generation.
  FmaddFmsubHelper(kFP32PositiveInfinity, 0.0f, 42.0f,
                   kFP32DefaultNaN, kFP32DefaultNaN,
                   kFP32DefaultNaN, kFP32DefaultNaN);
  FmaddFmsubHelper(0.0f, kFP32PositiveInfinity, 42.0f,
                   kFP32DefaultNaN, kFP32DefaultNaN,
                   kFP32DefaultNaN, kFP32DefaultNaN);
  FmaddFmsubHelper(kFP32PositiveInfinity, 1.0f, kFP32PositiveInfinity,
                   kFP32PositiveInfinity,   //  inf + ( inf * 1) = inf
                   kFP32DefaultNaN,         //  inf + (-inf * 1) = NaN
                   kFP32NegativeInfinity,   // -inf + (-inf * 1) = -inf
                   kFP32DefaultNaN);        // -inf + ( inf * 1) = NaN
  FmaddFmsubHelper(kFP32NegativeInfinity, 1.0f, kFP32PositiveInfinity,
                   kFP32DefaultNaN,         //  inf + (-inf * 1) = NaN
                   kFP32PositiveInfinity,   //  inf + ( inf * 1) = inf
                   kFP32DefaultNaN,         // -inf + ( inf * 1) = NaN
                   kFP32NegativeInfinity);  // -inf + (-inf * 1) = -inf
}


TEST(fmadd_fmsub_double_nans) {
  // Make sure that NaN propagation works correctly.
  double s1 = rawbits_to_double(0x7ff5555511111111);
  double s2 = rawbits_to_double(0x7ff5555522222222);
  double sa = rawbits_to_double(0x7ff55555aaaaaaaa);
  double q1 = rawbits_to_double(0x7ffaaaaa11111111);
  double q2 = rawbits_to_double(0x7ffaaaaa22222222);
  double qa = rawbits_to_double(0x7ffaaaaaaaaaaaaa);
  VIXL_ASSERT(IsSignallingNaN(s1));
  VIXL_ASSERT(IsSignallingNaN(s2));
  VIXL_ASSERT(IsSignallingNaN(sa));
  VIXL_ASSERT(IsQuietNaN(q1));
  VIXL_ASSERT(IsQuietNaN(q2));
  VIXL_ASSERT(IsQuietNaN(qa));

  // The input NaNs after passing through ProcessNaN.
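  // (ProcessNaN quietens a signalling NaN by setting the most significant
  // mantissa bit, bit 51 for doubles, leaving the rest of the payload intact.)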
  double s1_proc = rawbits_to_double(0x7ffd555511111111);
  double s2_proc = rawbits_to_double(0x7ffd555522222222);
  double sa_proc = rawbits_to_double(0x7ffd5555aaaaaaaa);
  double q1_proc = q1;
  double q2_proc = q2;
  double qa_proc = qa;
  VIXL_ASSERT(IsQuietNaN(s1_proc));
  VIXL_ASSERT(IsQuietNaN(s2_proc));
  VIXL_ASSERT(IsQuietNaN(sa_proc));
  VIXL_ASSERT(IsQuietNaN(q1_proc));
  VIXL_ASSERT(IsQuietNaN(q2_proc));
  VIXL_ASSERT(IsQuietNaN(qa_proc));

  // The input NaNs negated by flipping the sign bit, as ARMv8 hardware does.
  double s1_proc_neg = rawbits_to_double(0xfffd555511111111);
  double sa_proc_neg = rawbits_to_double(0xfffd5555aaaaaaaa);
  double q1_proc_neg = rawbits_to_double(0xfffaaaaa11111111);
  double qa_proc_neg = rawbits_to_double(0xfffaaaaaaaaaaaaa);
  VIXL_ASSERT(IsQuietNaN(s1_proc_neg));
  VIXL_ASSERT(IsQuietNaN(sa_proc_neg));
  VIXL_ASSERT(IsQuietNaN(q1_proc_neg));
  VIXL_ASSERT(IsQuietNaN(qa_proc_neg));

  // Quiet NaNs are propagated.
  FmaddFmsubHelper(q1, 0, 0, q1_proc, q1_proc_neg, q1_proc_neg, q1_proc);
  FmaddFmsubHelper(0, q2, 0, q2_proc, q2_proc, q2_proc, q2_proc);
  FmaddFmsubHelper(0, 0, qa, qa_proc, qa_proc, qa_proc_neg, qa_proc_neg);
  FmaddFmsubHelper(q1, q2, 0, q1_proc, q1_proc_neg, q1_proc_neg, q1_proc);
  FmaddFmsubHelper(0, q2, qa, qa_proc, qa_proc, qa_proc_neg, qa_proc_neg);
  FmaddFmsubHelper(q1, 0, qa, qa_proc, qa_proc, qa_proc_neg, qa_proc_neg);
  FmaddFmsubHelper(q1, q2, qa, qa_proc, qa_proc, qa_proc_neg, qa_proc_neg);

  // Signalling NaNs are propagated, and made quiet.
  FmaddFmsubHelper(s1, 0, 0, s1_proc, s1_proc_neg, s1_proc_neg, s1_proc);
  FmaddFmsubHelper(0, s2, 0, s2_proc, s2_proc, s2_proc, s2_proc);
  FmaddFmsubHelper(0, 0, sa, sa_proc, sa_proc, sa_proc_neg, sa_proc_neg);
  FmaddFmsubHelper(s1, s2, 0, s1_proc, s1_proc_neg, s1_proc_neg, s1_proc);
  FmaddFmsubHelper(0, s2, sa, sa_proc, sa_proc, sa_proc_neg, sa_proc_neg);
  FmaddFmsubHelper(s1, 0, sa, sa_proc, sa_proc, sa_proc_neg, sa_proc_neg);
  FmaddFmsubHelper(s1, s2, sa, sa_proc, sa_proc, sa_proc_neg, sa_proc_neg);

  // Signalling NaNs take precedence over quiet NaNs.
  FmaddFmsubHelper(s1, q2, qa, s1_proc, s1_proc_neg, s1_proc_neg, s1_proc);
  FmaddFmsubHelper(q1, s2, qa, s2_proc, s2_proc, s2_proc, s2_proc);
  FmaddFmsubHelper(q1, q2, sa, sa_proc, sa_proc, sa_proc_neg, sa_proc_neg);
  FmaddFmsubHelper(s1, s2, qa, s1_proc, s1_proc_neg, s1_proc_neg, s1_proc);
  FmaddFmsubHelper(q1, s2, sa, sa_proc, sa_proc, sa_proc_neg, sa_proc_neg);
  FmaddFmsubHelper(s1, q2, sa, sa_proc, sa_proc, sa_proc_neg, sa_proc_neg);
  FmaddFmsubHelper(s1, s2, sa, sa_proc, sa_proc, sa_proc_neg, sa_proc_neg);

  // A NaN generated by the intermediate op1 * op2 overrides a quiet NaN in a.
  FmaddFmsubHelper(0, kFP64PositiveInfinity, qa,
                   kFP64DefaultNaN, kFP64DefaultNaN,
                   kFP64DefaultNaN, kFP64DefaultNaN);
  FmaddFmsubHelper(kFP64PositiveInfinity, 0, qa,
                   kFP64DefaultNaN, kFP64DefaultNaN,
                   kFP64DefaultNaN, kFP64DefaultNaN);
  FmaddFmsubHelper(0, kFP64NegativeInfinity, qa,
                   kFP64DefaultNaN, kFP64DefaultNaN,
                   kFP64DefaultNaN, kFP64DefaultNaN);
  FmaddFmsubHelper(kFP64NegativeInfinity, 0, qa,
                   kFP64DefaultNaN, kFP64DefaultNaN,
                   kFP64DefaultNaN, kFP64DefaultNaN);
}


TEST(fmadd_fmsub_float_nans) {
  // Make sure that NaN propagation works correctly.
  float s1 = rawbits_to_float(0x7f951111);
  float s2 = rawbits_to_float(0x7f952222);
  float sa = rawbits_to_float(0x7f95aaaa);
  float q1 = rawbits_to_float(0x7fea1111);
  float q2 = rawbits_to_float(0x7fea2222);
  float qa = rawbits_to_float(0x7feaaaaa);
  VIXL_ASSERT(IsSignallingNaN(s1));
  VIXL_ASSERT(IsSignallingNaN(s2));
  VIXL_ASSERT(IsSignallingNaN(sa));
  VIXL_ASSERT(IsQuietNaN(q1));
  VIXL_ASSERT(IsQuietNaN(q2));
  VIXL_ASSERT(IsQuietNaN(qa));

  // The input NaNs after passing through ProcessNaN.
  float s1_proc = rawbits_to_float(0x7fd51111);
  float s2_proc = rawbits_to_float(0x7fd52222);
  float sa_proc = rawbits_to_float(0x7fd5aaaa);
  float q1_proc = q1;
  float q2_proc = q2;
  float qa_proc = qa;
  VIXL_ASSERT(IsQuietNaN(s1_proc));
  VIXL_ASSERT(IsQuietNaN(s2_proc));
  VIXL_ASSERT(IsQuietNaN(sa_proc));
  VIXL_ASSERT(IsQuietNaN(q1_proc));
  VIXL_ASSERT(IsQuietNaN(q2_proc));
  VIXL_ASSERT(IsQuietNaN(qa_proc));

  // The input NaNs negated by flipping the sign bit, as ARMv8 hardware does.
  float s1_proc_neg = rawbits_to_float(0xffd51111);
  float sa_proc_neg = rawbits_to_float(0xffd5aaaa);
  float q1_proc_neg = rawbits_to_float(0xffea1111);
  float qa_proc_neg = rawbits_to_float(0xffeaaaaa);
  VIXL_ASSERT(IsQuietNaN(s1_proc_neg));
  VIXL_ASSERT(IsQuietNaN(sa_proc_neg));
  VIXL_ASSERT(IsQuietNaN(q1_proc_neg));
  VIXL_ASSERT(IsQuietNaN(qa_proc_neg));

  // Quiet NaNs are propagated.
  FmaddFmsubHelper(q1, 0, 0, q1_proc, q1_proc_neg, q1_proc_neg, q1_proc);
  FmaddFmsubHelper(0, q2, 0, q2_proc, q2_proc, q2_proc, q2_proc);
  FmaddFmsubHelper(0, 0, qa, qa_proc, qa_proc, qa_proc_neg, qa_proc_neg);
  FmaddFmsubHelper(q1, q2, 0, q1_proc, q1_proc_neg, q1_proc_neg, q1_proc);
  FmaddFmsubHelper(0, q2, qa, qa_proc, qa_proc, qa_proc_neg, qa_proc_neg);
  FmaddFmsubHelper(q1, 0, qa, qa_proc, qa_proc, qa_proc_neg, qa_proc_neg);
  FmaddFmsubHelper(q1, q2, qa, qa_proc, qa_proc, qa_proc_neg, qa_proc_neg);

  // Signalling NaNs are propagated, and made quiet.
  FmaddFmsubHelper(s1, 0, 0, s1_proc, s1_proc_neg, s1_proc_neg, s1_proc);
  FmaddFmsubHelper(0, s2, 0, s2_proc, s2_proc, s2_proc, s2_proc);
  FmaddFmsubHelper(0, 0, sa, sa_proc, sa_proc, sa_proc_neg, sa_proc_neg);
  FmaddFmsubHelper(s1, s2, 0, s1_proc, s1_proc_neg, s1_proc_neg, s1_proc);
  FmaddFmsubHelper(0, s2, sa, sa_proc, sa_proc, sa_proc_neg, sa_proc_neg);
  FmaddFmsubHelper(s1, 0, sa, sa_proc, sa_proc, sa_proc_neg, sa_proc_neg);
  FmaddFmsubHelper(s1, s2, sa, sa_proc, sa_proc, sa_proc_neg, sa_proc_neg);

  // Signalling NaNs take precedence over quiet NaNs.
  FmaddFmsubHelper(s1, q2, qa, s1_proc, s1_proc_neg, s1_proc_neg, s1_proc);
  FmaddFmsubHelper(q1, s2, qa, s2_proc, s2_proc, s2_proc, s2_proc);
  FmaddFmsubHelper(q1, q2, sa, sa_proc, sa_proc, sa_proc_neg, sa_proc_neg);
  FmaddFmsubHelper(s1, s2, qa, s1_proc, s1_proc_neg, s1_proc_neg, s1_proc);
  FmaddFmsubHelper(q1, s2, sa, sa_proc, sa_proc, sa_proc_neg, sa_proc_neg);
  FmaddFmsubHelper(s1, q2, sa, sa_proc, sa_proc, sa_proc_neg, sa_proc_neg);
  FmaddFmsubHelper(s1, s2, sa, sa_proc, sa_proc, sa_proc_neg, sa_proc_neg);

  // A NaN generated by the intermediate op1 * op2 overrides a quiet NaN in a.
  FmaddFmsubHelper(0, kFP32PositiveInfinity, qa,
                   kFP32DefaultNaN, kFP32DefaultNaN,
                   kFP32DefaultNaN, kFP32DefaultNaN);
  FmaddFmsubHelper(kFP32PositiveInfinity, 0, qa,
                   kFP32DefaultNaN, kFP32DefaultNaN,
                   kFP32DefaultNaN, kFP32DefaultNaN);
  FmaddFmsubHelper(0, kFP32NegativeInfinity, qa,
                   kFP32DefaultNaN, kFP32DefaultNaN,
                   kFP32DefaultNaN, kFP32DefaultNaN);
  FmaddFmsubHelper(kFP32NegativeInfinity, 0, qa,
                   kFP32DefaultNaN, kFP32DefaultNaN,
                   kFP32DefaultNaN, kFP32DefaultNaN);
}


TEST(fdiv) {
  SETUP();

  START();
  __ Fmov(s14, -0.0f);
  __ Fmov(s15, kFP32PositiveInfinity);
  __ Fmov(s16, kFP32NegativeInfinity);
  __ Fmov(s17, 3.25f);
  __ Fmov(s18, 2.0f);
  __ Fmov(s19, 2.0f);
  __ Fmov(s20, -2.0f);

  __ Fmov(d26, -0.0);
  __ Fmov(d27, kFP64PositiveInfinity);
  __ Fmov(d28, kFP64NegativeInfinity);
  __ Fmov(d29, 0.0);
  __ Fmov(d30, -2.0);
  __ Fmov(d31, 2.25);

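  // Dividing infinity by infinity, or zero by zero, is invalid and produces
  // the default NaN: s5, s6, d12 and d13 below.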
  __ Fdiv(s0, s17, s18);
  __ Fdiv(s1, s18, s19);
  __ Fdiv(s2, s14, s18);
  __ Fdiv(s3, s18, s15);
  __ Fdiv(s4, s18, s16);
  __ Fdiv(s5, s15, s16);
  __ Fdiv(s6, s14, s14);

  __ Fdiv(d7, d31, d30);
  __ Fdiv(d8, d29, d31);
  __ Fdiv(d9, d26, d31);
  __ Fdiv(d10, d31, d27);
  __ Fdiv(d11, d31, d28);
  __ Fdiv(d12, d28, d27);
  __ Fdiv(d13, d29, d29);
  END();

  RUN();

  ASSERT_EQUAL_FP32(1.625f, s0);
  ASSERT_EQUAL_FP32(1.0f, s1);
  ASSERT_EQUAL_FP32(-0.0f, s2);
  ASSERT_EQUAL_FP32(0.0f, s3);
  ASSERT_EQUAL_FP32(-0.0f, s4);
  ASSERT_EQUAL_FP32(kFP32DefaultNaN, s5);
  ASSERT_EQUAL_FP32(kFP32DefaultNaN, s6);
  ASSERT_EQUAL_FP64(-1.125, d7);
  ASSERT_EQUAL_FP64(0.0, d8);
  ASSERT_EQUAL_FP64(-0.0, d9);
  ASSERT_EQUAL_FP64(0.0, d10);
  ASSERT_EQUAL_FP64(-0.0, d11);
  ASSERT_EQUAL_FP64(kFP64DefaultNaN, d12);
  ASSERT_EQUAL_FP64(kFP64DefaultNaN, d13);

  TEARDOWN();
}

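// These overloads compute the expected result of an FP min/max operation on
// the host. With the default quiet_nan_substitute of zero they model Fmin and
// Fmax. Substituting the identity element of the operation, +infinity for min
// or -infinity for max, makes a lone quiet NaN operand transparent, which
// models Fminnm and Fmaxnm.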
static float MinMaxHelper(float n,
                          float m,
                          bool min,
                          float quiet_nan_substitute = 0.0f) {
  const uint32_t kFP32QuietNaNMask = 0x00400000;
  uint32_t raw_n = float_to_rawbits(n);
  uint32_t raw_m = float_to_rawbits(m);

  if (isnan(n) && ((raw_n & kFP32QuietNaNMask) == 0)) {
    // n is signalling NaN.
    return rawbits_to_float(raw_n | kFP32QuietNaNMask);
  } else if (isnan(m) && ((raw_m & kFP32QuietNaNMask) == 0)) {
    // m is signalling NaN.
    return rawbits_to_float(raw_m | kFP32QuietNaNMask);
  } else if (quiet_nan_substitute == 0.0) {
    if (isnan(n)) {
      // n is quiet NaN.
      return n;
    } else if (isnan(m)) {
      // m is quiet NaN.
      return m;
    }
  } else {
    // Substitute n or m if one is quiet, but not both.
    if (isnan(n) && !isnan(m)) {
      // n is quiet NaN: replace with substitute.
      n = quiet_nan_substitute;
    } else if (!isnan(n) && isnan(m)) {
      // m is quiet NaN: replace with substitute.
      m = quiet_nan_substitute;
    }
  }

  if ((n == 0.0) && (m == 0.0) &&
      (copysign(1.0, n) != copysign(1.0, m))) {
    return min ? -0.0f : 0.0f;
  }

  return min ? fminf(n, m) : fmaxf(n, m);
}


static double MinMaxHelper(double n,
                           double m,
                           bool min,
                           double quiet_nan_substitute = 0.0) {
  const uint64_t kFP64QuietNaNMask = 0x0008000000000000;
  uint64_t raw_n = double_to_rawbits(n);
  uint64_t raw_m = double_to_rawbits(m);

  if (isnan(n) && ((raw_n & kFP64QuietNaNMask) == 0)) {
    // n is signalling NaN.
    return rawbits_to_double(raw_n | kFP64QuietNaNMask);
  } else if (isnan(m) && ((raw_m & kFP64QuietNaNMask) == 0)) {
    // m is signalling NaN.
    return rawbits_to_double(raw_m | kFP64QuietNaNMask);
  } else if (quiet_nan_substitute == 0.0) {
    if (isnan(n)) {
      // n is quiet NaN.
      return n;
    } else if (isnan(m)) {
      // m is quiet NaN.
      return m;
    }
  } else {
    // Substitute n or m if one is quiet, but not both.
    if (isnan(n) && !isnan(m)) {
      // n is quiet NaN: replace with substitute.
      n = quiet_nan_substitute;
    } else if (!isnan(n) && isnan(m)) {
      // m is quiet NaN: replace with substitute.
      m = quiet_nan_substitute;
    }
  }

  if ((n == 0.0) && (m == 0.0) &&
      (copysign(1.0, n) != copysign(1.0, m))) {
    return min ? -0.0 : 0.0;
  }

  return min ? fmin(n, m) : fmax(n, m);
}


static void FminFmaxDoubleHelper(double n, double m, double min, double max,
                                 double minnm, double maxnm) {
  SETUP();

  START();
  __ Fmov(d0, n);
  __ Fmov(d1, m);
  __ Fmin(d28, d0, d1);
  __ Fmax(d29, d0, d1);
  __ Fminnm(d30, d0, d1);
  __ Fmaxnm(d31, d0, d1);
  END();

  RUN();

  ASSERT_EQUAL_FP64(min, d28);
  ASSERT_EQUAL_FP64(max, d29);
  ASSERT_EQUAL_FP64(minnm, d30);
  ASSERT_EQUAL_FP64(maxnm, d31);

  TEARDOWN();
}

TEST(fmax_fmin_d) {
  // Use non-standard NaNs to check that the payload bits are preserved.
  double snan = rawbits_to_double(0x7ff5555512345678);
  double qnan = rawbits_to_double(0x7ffaaaaa87654321);

  double snan_processed = rawbits_to_double(0x7ffd555512345678);
  double qnan_processed = qnan;

  VIXL_ASSERT(IsSignallingNaN(snan));
  VIXL_ASSERT(IsQuietNaN(qnan));
  VIXL_ASSERT(IsQuietNaN(snan_processed));
  VIXL_ASSERT(IsQuietNaN(qnan_processed));

  // Bootstrap tests.
  FminFmaxDoubleHelper(0, 0, 0, 0, 0, 0);
  FminFmaxDoubleHelper(0, 1, 0, 1, 0, 1);
  FminFmaxDoubleHelper(kFP64PositiveInfinity, kFP64NegativeInfinity,
                       kFP64NegativeInfinity, kFP64PositiveInfinity,
                       kFP64NegativeInfinity, kFP64PositiveInfinity);
  FminFmaxDoubleHelper(snan, 0,
                       snan_processed, snan_processed,
                       snan_processed, snan_processed);
  FminFmaxDoubleHelper(0, snan,
                       snan_processed, snan_processed,
                       snan_processed, snan_processed);
  FminFmaxDoubleHelper(qnan, 0,
                       qnan_processed, qnan_processed,
                       0, 0);
  FminFmaxDoubleHelper(0, qnan,
                       qnan_processed, qnan_processed,
                       0, 0);
  FminFmaxDoubleHelper(qnan, snan,
                       snan_processed, snan_processed,
                       snan_processed, snan_processed);
  FminFmaxDoubleHelper(snan, qnan,
                       snan_processed, snan_processed,
                       snan_processed, snan_processed);

  // Iterate over all combinations of inputs.
  double inputs[] = { DBL_MAX, DBL_MIN, 1.0, 0.0,
                      -DBL_MAX, -DBL_MIN, -1.0, -0.0,
                      kFP64PositiveInfinity, kFP64NegativeInfinity,
                      kFP64QuietNaN, kFP64SignallingNaN };

  const int count = sizeof(inputs) / sizeof(inputs[0]);

  for (int in = 0; in < count; in++) {
    double n = inputs[in];
    for (int im = 0; im < count; im++) {
      double m = inputs[im];
      FminFmaxDoubleHelper(n, m,
                           MinMaxHelper(n, m, true),
                           MinMaxHelper(n, m, false),
                           MinMaxHelper(n, m, true, kFP64PositiveInfinity),
                           MinMaxHelper(n, m, false, kFP64NegativeInfinity));
    }
  }
}


static void FminFmaxFloatHelper(float n, float m, float min, float max,
                                float minnm, float maxnm) {
  SETUP();

  START();
  __ Fmov(s0, n);
  __ Fmov(s1, m);
  __ Fmin(s28, s0, s1);
  __ Fmax(s29, s0, s1);
  __ Fminnm(s30, s0, s1);
  __ Fmaxnm(s31, s0, s1);
  END();

  RUN();

  ASSERT_EQUAL_FP32(min, s28);
  ASSERT_EQUAL_FP32(max, s29);
  ASSERT_EQUAL_FP32(minnm, s30);
  ASSERT_EQUAL_FP32(maxnm, s31);

  TEARDOWN();
}


TEST(fmax_fmin_s) {
  // Use non-standard NaNs to check that the payload bits are preserved.
  float snan = rawbits_to_float(0x7f951234);
  float qnan = rawbits_to_float(0x7fea8765);

  float snan_processed = rawbits_to_float(0x7fd51234);
  float qnan_processed = qnan;

  VIXL_ASSERT(IsSignallingNaN(snan));
  VIXL_ASSERT(IsQuietNaN(qnan));
  VIXL_ASSERT(IsQuietNaN(snan_processed));
  VIXL_ASSERT(IsQuietNaN(qnan_processed));

  // Bootstrap tests.
  FminFmaxFloatHelper(0, 0, 0, 0, 0, 0);
  FminFmaxFloatHelper(0, 1, 0, 1, 0, 1);
  FminFmaxFloatHelper(kFP32PositiveInfinity, kFP32NegativeInfinity,
                      kFP32NegativeInfinity, kFP32PositiveInfinity,
                      kFP32NegativeInfinity, kFP32PositiveInfinity);
  FminFmaxFloatHelper(snan, 0,
                      snan_processed, snan_processed,
                      snan_processed, snan_processed);
  FminFmaxFloatHelper(0, snan,
                      snan_processed, snan_processed,
                      snan_processed, snan_processed);
  FminFmaxFloatHelper(qnan, 0,
                      qnan_processed, qnan_processed,
                      0, 0);
  FminFmaxFloatHelper(0, qnan,
                      qnan_processed, qnan_processed,
                      0, 0);
  FminFmaxFloatHelper(qnan, snan,
                      snan_processed, snan_processed,
                      snan_processed, snan_processed);
  FminFmaxFloatHelper(snan, qnan,
                      snan_processed, snan_processed,
                      snan_processed, snan_processed);

  // Iterate over all combinations of inputs.
  float inputs[] = { FLT_MAX, FLT_MIN, 1.0, 0.0,
                     -FLT_MAX, -FLT_MIN, -1.0, -0.0,
                     kFP32PositiveInfinity, kFP32NegativeInfinity,
                     kFP32QuietNaN, kFP32SignallingNaN };

  const int count = sizeof(inputs) / sizeof(inputs[0]);

  for (int in = 0; in < count; in++) {
    float n = inputs[in];
    for (int im = 0; im < count; im++) {
      float m = inputs[im];
      FminFmaxFloatHelper(n, m,
                          MinMaxHelper(n, m, true),
                          MinMaxHelper(n, m, false),
                          MinMaxHelper(n, m, true, kFP32PositiveInfinity),
                          MinMaxHelper(n, m, false, kFP32NegativeInfinity));
    }
  }
}


TEST(fccmp) {
  SETUP();

  START();
  __ Fmov(s16, 0.0);
  __ Fmov(s17, 0.5);
  __ Fmov(d18, -0.5);
  __ Fmov(d19, -1.0);
  __ Mov(x20, 0);

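  // Fccmp performs the floating-point comparison and sets NZCV if the
  // condition holds; otherwise it sets NZCV to the supplied flags immediate.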
  __ Cmp(x20, 0);
  __ Fccmp(s16, s16, NoFlag, eq);
  __ Mrs(x0, NZCV);

  __ Cmp(x20, 0);
  __ Fccmp(s16, s16, VFlag, ne);
  __ Mrs(x1, NZCV);

  __ Cmp(x20, 0);
  __ Fccmp(s16, s17, CFlag, ge);
  __ Mrs(x2, NZCV);

  __ Cmp(x20, 0);
  __ Fccmp(s16, s17, CVFlag, lt);
  __ Mrs(x3, NZCV);

  __ Cmp(x20, 0);
  __ Fccmp(d18, d18, ZFlag, le);
  __ Mrs(x4, NZCV);

  __ Cmp(x20, 0);
  __ Fccmp(d18, d18, ZVFlag, gt);
  __ Mrs(x5, NZCV);

  __ Cmp(x20, 0);
  __ Fccmp(d18, d19, ZCVFlag, ls);
  __ Mrs(x6, NZCV);

  __ Cmp(x20, 0);
  __ Fccmp(d18, d19, NFlag, hi);
  __ Mrs(x7, NZCV);

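  // The al and nv conditions always hold, so these comparisons are performed
  // unconditionally. The raw forms are used here because the Fccmp macro is
  // not expected to accept al or nv.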
  __ fccmp(s16, s16, NFlag, al);
  __ Mrs(x8, NZCV);

  __ fccmp(d18, d18, NFlag, nv);
  __ Mrs(x9, NZCV);
  END();

  RUN();

  ASSERT_EQUAL_32(ZCFlag, w0);
  ASSERT_EQUAL_32(VFlag, w1);
  ASSERT_EQUAL_32(NFlag,