// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <cmath>
#include <limits>

#include "src/v8.h"

#include "src/macro-assembler.h"
#include "src/arm64/simulator-arm64.h"
#include "src/arm64/decoder-arm64-inl.h"
#include "src/arm64/disasm-arm64.h"
#include "src/arm64/utils-arm64.h"
#include "test/cctest/cctest.h"
#include "test/cctest/test-utils-arm64.h"

using namespace v8::internal;

// Test infrastructure.
//
// Tests are functions which accept no parameters and have no return values.
// The testing code should not perform an explicit return once completed. For
// example, to test the mov immediate instruction, a very simple test would be:
//
//   TEST(mov_x0_one) {
//     SETUP();
//
//     START();
//     __ mov(x0, Operand(1));
//     END();
//
//     RUN();
//
//     ASSERT_EQUAL_64(1, x0);
//
//     TEARDOWN();
//   }
//
// Within a START ... END block, all registers but sp can be modified. sp has
// to be explicitly saved/restored. The END() macro replaces the function
// return so it may appear multiple times in a test if the test has multiple
// exit points.
//
// Once the test has been run, all integer and floating point registers as
// well as flags are accessible through a RegisterDump instance; see
// utils-arm64.cc for more info on RegisterDump.
//
// We provide some helper asserts to handle common cases:
//
//   ASSERT_EQUAL_32(int32_t, int32_t)
//   ASSERT_EQUAL_FP32(float, float)
//   ASSERT_EQUAL_32(int32_t, W register)
//   ASSERT_EQUAL_FP32(float, S register)
//   ASSERT_EQUAL_64(int64_t, int64_t)
//   ASSERT_EQUAL_FP64(double, double)
//   ASSERT_EQUAL_64(int64_t, X register)
//   ASSERT_EQUAL_64(X register, X register)
//   ASSERT_EQUAL_FP64(double, D register)
//
// e.g. ASSERT_EQUAL_FP64(0.5, d30);
//
// If more advanced computation is required before the assert, then access the
// RegisterDump named core directly:
//
//   ASSERT_EQUAL_64(0x1234, core.xreg(0) & 0xffff);
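//
// Flag state can be checked the same way through ASSERT_EQUAL_NZCV, defined
// below. As a hypothetical sketch (the test name is illustrative), a flags
// test looks like:
//
//   TEST(cmp_zero_flags) {
//     SETUP();
//
//     START();
//     __ Cmp(xzr, Operand(0));
//     END();
//
//     RUN();
//
//     // 0 - 0 sets Z and C and clears N and V.
//     ASSERT_EQUAL_NZCV(ZCFlag);
//
//     TEARDOWN();
//   }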


#if 0  // TODO(all): enable.
static v8::Persistent<v8::Context> env;

static void InitializeVM() {
  if (env.IsEmpty()) {
    env = v8::Context::New();
  }
}
#endif

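// The '__' shorthand below makes the code-generating test bodies read like
// assembly listings.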
#define __ masm.

#define BUF_SIZE 8192
#define SETUP() SETUP_SIZE(BUF_SIZE)

#define INIT_V8()                                                              \
  CcTest::InitializeVM();

#ifdef USE_SIMULATOR

// Run tests with the simulator.
#define SETUP_SIZE(buf_size)                    \
  Isolate* isolate = Isolate::Current();        \
  HandleScope scope(isolate);                   \
  ASSERT(isolate != NULL);                      \
  byte* buf = new byte[buf_size];               \
  MacroAssembler masm(isolate, buf, buf_size);  \
  Decoder<DispatchingDecoderVisitor>* decoder = \
      new Decoder<DispatchingDecoderVisitor>(); \
  Simulator simulator(decoder);                 \
  PrintDisassembler* pdis = NULL;               \
  RegisterDump core;
/*  if (CcTest::trace_sim()) {                                                 \
    pdis = new PrintDisassembler(stdout);                                      \
    decoder->PrependVisitor(pdis);                                             \
  }                                                                            \
  */

// Reset the assembler and simulator, so that instructions can be generated,
// but don't actually emit any code. This can be used by tests that need to
// emit instructions at the start of the buffer. Note that START_AFTER_RESET
// must be called before any callee-saved register is modified, and before an
// END is encountered.
//
// Most tests should call START, rather than call RESET directly.
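//
// A sketch of the intended usage (see TEST(branch_at_start) below):
//
//   RESET();
//   __ B(&target);        // Emitted at the very start of the buffer.
//   START_AFTER_RESET();
//   ...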
#define RESET()                                                                \
  __ Reset();                                                                  \
  simulator.ResetState();

#define START_AFTER_RESET()                                                    \
  __ SetStackPointer(csp);                                                     \
  __ PushCalleeSavedRegisters();                                               \
  __ Debug("Start test.", __LINE__, TRACE_ENABLE | LOG_ALL);

#define START()                                                                \
  RESET();                                                                     \
  START_AFTER_RESET();

#define RUN()                                                                  \
  simulator.RunFrom(reinterpret_cast<Instruction*>(buf))

#define END()                                                                  \
  __ Debug("End test.", __LINE__, TRACE_DISABLE | LOG_ALL);                    \
  core.Dump(&masm);                                                            \
  __ PopCalleeSavedRegisters();                                                \
  __ Ret();                                                                    \
  __ GetCode(NULL);

#define TEARDOWN()                                                             \
  delete pdis;                                                                 \
  delete[] buf;

#else  // ifdef USE_SIMULATOR.
// Run the test on real hardware or models.
#define SETUP_SIZE(buf_size)                                                   \
  Isolate* isolate = Isolate::Current();                                       \
  HandleScope scope(isolate);                                                  \
  ASSERT(isolate != NULL);                                                     \
  byte* buf = new byte[buf_size];                                              \
  MacroAssembler masm(isolate, buf, buf_size);                                 \
  RegisterDump core;

#define RESET()                                                                \
  __ Reset();                                                                  \
  /* Reset the machine state (like simulator.ResetState()). */                 \
  __ Msr(NZCV, xzr);                                                           \
  __ Msr(FPCR, xzr);


#define START_AFTER_RESET()                                                    \
  __ SetStackPointer(csp);                                                     \
  __ PushCalleeSavedRegisters();

#define START()                                                                \
  RESET();                                                                     \
  START_AFTER_RESET();

#define RUN()                                                                  \
  CPU::FlushICache(buf, masm.SizeOfGeneratedCode());                           \
  {                                                                            \
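    /* Copy the buffer address into a function pointer with memcpy rather  */  \
    /* than a cast, since converting directly between object and function  */  \
    /* pointers is not portable C++.                                       */  \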
    void (*test_function)(void);                                               \
    memcpy(&test_function, &buf, sizeof(buf));                                 \
    test_function();                                                           \
  }

#define END()                                                                  \
  core.Dump(&masm);                                                            \
  __ PopCalleeSavedRegisters();                                                \
  __ Ret();                                                                    \
  __ GetCode(NULL);

#define TEARDOWN()                                                             \
  delete[] buf;

#endif  // ifdef USE_SIMULATOR.

#define ASSERT_EQUAL_NZCV(expected)                                            \
  CHECK(EqualNzcv(expected, core.flags_nzcv()))

#define ASSERT_EQUAL_REGISTERS(expected)                                       \
  CHECK(EqualRegisters(&expected, &core))

#define ASSERT_EQUAL_32(expected, result)                                      \
  CHECK(Equal32(static_cast<uint32_t>(expected), &core, result))

#define ASSERT_EQUAL_FP32(expected, result)                                    \
  CHECK(EqualFP32(expected, &core, result))

#define ASSERT_EQUAL_64(expected, result)                                      \
  CHECK(Equal64(expected, &core, result))

#define ASSERT_EQUAL_FP64(expected, result)                                    \
  CHECK(EqualFP64(expected, &core, result))

#ifdef DEBUG
#define ASSERT_LITERAL_POOL_SIZE(expected)                                     \
  CHECK((expected) == (__ LiteralPoolSize()))
#else
#define ASSERT_LITERAL_POOL_SIZE(expected)                                     \
  ((void) 0)
#endif


TEST(stack_ops) {
  INIT_V8();
  SETUP();

  START();
  // Save csp.
  __ Mov(x29, csp);

  // Set the csp to a known value.
  __ Mov(x16, 0x1000);
  __ Mov(csp, x16);
  __ Mov(x0, csp);

  // Add immediate to the csp, and move the result to a normal register.
  __ Add(csp, csp, Operand(0x50));
  __ Mov(x1, csp);

  // Add extended to the csp, and move the result to a normal register.
  __ Mov(x17, 0xfff);
  __ Add(csp, csp, Operand(x17, SXTB));
  __ Mov(x2, csp);

  // Set csp using a logical instruction, and move the result to a normal
  // register.
  __ Orr(csp, xzr, Operand(0x1fff));
  __ Mov(x3, csp);

  // Write wcsp using a logical instruction.
  __ Orr(wcsp, wzr, Operand(0xfffffff8L));
  __ Mov(x4, csp);

  // Write csp, and read back wcsp.
  __ Orr(csp, xzr, Operand(0xfffffff8L));
  __ Mov(w5, wcsp);

  // Restore csp.
  __ Mov(csp, x29);
  END();

  RUN();

  ASSERT_EQUAL_64(0x1000, x0);
  ASSERT_EQUAL_64(0x1050, x1);
  ASSERT_EQUAL_64(0x104f, x2);
  ASSERT_EQUAL_64(0x1fff, x3);
  ASSERT_EQUAL_64(0xfffffff8, x4);
  ASSERT_EQUAL_64(0xfffffff8, x5);

  TEARDOWN();
}


TEST(mvn) {
  INIT_V8();
  SETUP();

  START();
  __ Mvn(w0, 0xfff);
  __ Mvn(x1, 0xfff);
  __ Mvn(w2, Operand(w0, LSL, 1));
  __ Mvn(x3, Operand(x1, LSL, 2));
  __ Mvn(w4, Operand(w0, LSR, 3));
  __ Mvn(x5, Operand(x1, LSR, 4));
  __ Mvn(w6, Operand(w0, ASR, 11));
  __ Mvn(x7, Operand(x1, ASR, 12));
  __ Mvn(w8, Operand(w0, ROR, 13));
  __ Mvn(x9, Operand(x1, ROR, 14));
  __ Mvn(w10, Operand(w2, UXTB));
  __ Mvn(x11, Operand(x2, SXTB, 1));
  __ Mvn(w12, Operand(w2, UXTH, 2));
  __ Mvn(x13, Operand(x2, SXTH, 3));
  __ Mvn(x14, Operand(w2, UXTW, 4));
  __ Mvn(x15, Operand(w2, SXTW, 4));
  END();

  RUN();

  ASSERT_EQUAL_64(0xfffff000, x0);
  ASSERT_EQUAL_64(0xfffffffffffff000UL, x1);
  ASSERT_EQUAL_64(0x00001fff, x2);
  ASSERT_EQUAL_64(0x0000000000003fffUL, x3);
  ASSERT_EQUAL_64(0xe00001ff, x4);
  ASSERT_EQUAL_64(0xf0000000000000ffUL, x5);
  ASSERT_EQUAL_64(0x00000001, x6);
  ASSERT_EQUAL_64(0x0, x7);
  ASSERT_EQUAL_64(0x7ff80000, x8);
  ASSERT_EQUAL_64(0x3ffc000000000000UL, x9);
  ASSERT_EQUAL_64(0xffffff00, x10);
  ASSERT_EQUAL_64(0x0000000000000001UL, x11);
  ASSERT_EQUAL_64(0xffff8003, x12);
  ASSERT_EQUAL_64(0xffffffffffff0007UL, x13);
  ASSERT_EQUAL_64(0xfffffffffffe000fUL, x14);
  ASSERT_EQUAL_64(0xfffffffffffe000fUL, x15);

  TEARDOWN();
}


TEST(mov) {
  INIT_V8();
  SETUP();

  START();
  __ Mov(x0, 0xffffffffffffffffL);
  __ Mov(x1, 0xffffffffffffffffL);
  __ Mov(x2, 0xffffffffffffffffL);
  __ Mov(x3, 0xffffffffffffffffL);

  __ Mov(x0, 0x0123456789abcdefL);

  __ movz(x1, 0xabcdL << 16);
  __ movk(x2, 0xabcdL << 32);
  __ movn(x3, 0xabcdL << 48);

  __ Mov(x4, 0x0123456789abcdefL);
  __ Mov(x5, x4);

  __ Mov(w6, -1);

  // Test that moves back to the same register have the desired effect. This
  // is a no-op for X registers, and a truncation for W registers.
  __ Mov(x7, 0x0123456789abcdefL);
  __ Mov(x7, x7);
  __ Mov(x8, 0x0123456789abcdefL);
  __ Mov(w8, w8);
  __ Mov(x9, 0x0123456789abcdefL);
  __ Mov(x9, Operand(x9));
  __ Mov(x10, 0x0123456789abcdefL);
  __ Mov(w10, Operand(w10));

  __ Mov(w11, 0xfff);
  __ Mov(x12, 0xfff);
  __ Mov(w13, Operand(w11, LSL, 1));
  __ Mov(x14, Operand(x12, LSL, 2));
  __ Mov(w15, Operand(w11, LSR, 3));
  __ Mov(x18, Operand(x12, LSR, 4));
  __ Mov(w19, Operand(w11, ASR, 11));
  __ Mov(x20, Operand(x12, ASR, 12));
  __ Mov(w21, Operand(w11, ROR, 13));
  __ Mov(x22, Operand(x12, ROR, 14));
  __ Mov(w23, Operand(w13, UXTB));
  __ Mov(x24, Operand(x13, SXTB, 1));
  __ Mov(w25, Operand(w13, UXTH, 2));
  __ Mov(x26, Operand(x13, SXTH, 3));
  __ Mov(x27, Operand(w13, UXTW, 4));
  END();

  RUN();

  ASSERT_EQUAL_64(0x0123456789abcdefL, x0);
  ASSERT_EQUAL_64(0x00000000abcd0000L, x1);
  ASSERT_EQUAL_64(0xffffabcdffffffffL, x2);
  ASSERT_EQUAL_64(0x5432ffffffffffffL, x3);
  ASSERT_EQUAL_64(x4, x5);
  ASSERT_EQUAL_32(-1, w6);
  ASSERT_EQUAL_64(0x0123456789abcdefL, x7);
  ASSERT_EQUAL_32(0x89abcdefL, w8);
  ASSERT_EQUAL_64(0x0123456789abcdefL, x9);
  ASSERT_EQUAL_32(0x89abcdefL, w10);
  ASSERT_EQUAL_64(0x00000fff, x11);
  ASSERT_EQUAL_64(0x0000000000000fffUL, x12);
  ASSERT_EQUAL_64(0x00001ffe, x13);
  ASSERT_EQUAL_64(0x0000000000003ffcUL, x14);
  ASSERT_EQUAL_64(0x000001ff, x15);
  ASSERT_EQUAL_64(0x00000000000000ffUL, x18);
  ASSERT_EQUAL_64(0x00000001, x19);
  ASSERT_EQUAL_64(0x0, x20);
  ASSERT_EQUAL_64(0x7ff80000, x21);
  ASSERT_EQUAL_64(0x3ffc000000000000UL, x22);
  ASSERT_EQUAL_64(0x000000fe, x23);
  ASSERT_EQUAL_64(0xfffffffffffffffcUL, x24);
  ASSERT_EQUAL_64(0x00007ff8, x25);
  ASSERT_EQUAL_64(0x000000000000fff0UL, x26);
  ASSERT_EQUAL_64(0x000000000001ffe0UL, x27);

  TEARDOWN();
}


TEST(mov_imm_w) {
  INIT_V8();
  SETUP();

  START();
  __ Mov(w0, 0xffffffffL);
  __ Mov(w1, 0xffff1234L);
  __ Mov(w2, 0x1234ffffL);
  __ Mov(w3, 0x00000000L);
  __ Mov(w4, 0x00001234L);
  __ Mov(w5, 0x12340000L);
  __ Mov(w6, 0x12345678L);
  END();

  RUN();

  ASSERT_EQUAL_64(0xffffffffL, x0);
  ASSERT_EQUAL_64(0xffff1234L, x1);
  ASSERT_EQUAL_64(0x1234ffffL, x2);
  ASSERT_EQUAL_64(0x00000000L, x3);
  ASSERT_EQUAL_64(0x00001234L, x4);
  ASSERT_EQUAL_64(0x12340000L, x5);
  ASSERT_EQUAL_64(0x12345678L, x6);

  TEARDOWN();
}


TEST(mov_imm_x) {
  INIT_V8();
  SETUP();

  START();
  __ Mov(x0, 0xffffffffffffffffL);
  __ Mov(x1, 0xffffffffffff1234L);
  __ Mov(x2, 0xffffffff12345678L);
  __ Mov(x3, 0xffff1234ffff5678L);
  __ Mov(x4, 0x1234ffffffff5678L);
  __ Mov(x5, 0x1234ffff5678ffffL);
  __ Mov(x6, 0x12345678ffffffffL);
  __ Mov(x7, 0x1234ffffffffffffL);
  __ Mov(x8, 0x123456789abcffffL);
  __ Mov(x9, 0x12345678ffff9abcL);
  __ Mov(x10, 0x1234ffff56789abcL);
  __ Mov(x11, 0xffff123456789abcL);
  __ Mov(x12, 0x0000000000000000L);
  __ Mov(x13, 0x0000000000001234L);
  __ Mov(x14, 0x0000000012345678L);
  __ Mov(x15, 0x0000123400005678L);
  __ Mov(x18, 0x1234000000005678L);
  __ Mov(x19, 0x1234000056780000L);
  __ Mov(x20, 0x1234567800000000L);
  __ Mov(x21, 0x1234000000000000L);
  __ Mov(x22, 0x123456789abc0000L);
  __ Mov(x23, 0x1234567800009abcL);
  __ Mov(x24, 0x1234000056789abcL);
  __ Mov(x25, 0x0000123456789abcL);
  __ Mov(x26, 0x123456789abcdef0L);
  __ Mov(x27, 0xffff000000000001L);
  __ Mov(x28, 0x8000ffff00000000L);
  END();

  RUN();

  ASSERT_EQUAL_64(0xffffffffffff1234L, x1);
  ASSERT_EQUAL_64(0xffffffff12345678L, x2);
  ASSERT_EQUAL_64(0xffff1234ffff5678L, x3);
  ASSERT_EQUAL_64(0x1234ffffffff5678L, x4);
  ASSERT_EQUAL_64(0x1234ffff5678ffffL, x5);
  ASSERT_EQUAL_64(0x12345678ffffffffL, x6);
  ASSERT_EQUAL_64(0x1234ffffffffffffL, x7);
  ASSERT_EQUAL_64(0x123456789abcffffL, x8);
  ASSERT_EQUAL_64(0x12345678ffff9abcL, x9);
  ASSERT_EQUAL_64(0x1234ffff56789abcL, x10);
  ASSERT_EQUAL_64(0xffff123456789abcL, x11);
  ASSERT_EQUAL_64(0x0000000000000000L, x12);
  ASSERT_EQUAL_64(0x0000000000001234L, x13);
  ASSERT_EQUAL_64(0x0000000012345678L, x14);
  ASSERT_EQUAL_64(0x0000123400005678L, x15);
  ASSERT_EQUAL_64(0x1234000000005678L, x18);
  ASSERT_EQUAL_64(0x1234000056780000L, x19);
  ASSERT_EQUAL_64(0x1234567800000000L, x20);
  ASSERT_EQUAL_64(0x1234000000000000L, x21);
  ASSERT_EQUAL_64(0x123456789abc0000L, x22);
  ASSERT_EQUAL_64(0x1234567800009abcL, x23);
  ASSERT_EQUAL_64(0x1234000056789abcL, x24);
  ASSERT_EQUAL_64(0x0000123456789abcL, x25);
  ASSERT_EQUAL_64(0x123456789abcdef0L, x26);
  ASSERT_EQUAL_64(0xffff000000000001L, x27);
  ASSERT_EQUAL_64(0x8000ffff00000000L, x28);

  TEARDOWN();
}


TEST(orr) {
  INIT_V8();
  SETUP();

  START();
  __ Mov(x0, 0xf0f0);
  __ Mov(x1, 0xf00000ff);

  __ Orr(x2, x0, Operand(x1));
  __ Orr(w3, w0, Operand(w1, LSL, 28));
  __ Orr(x4, x0, Operand(x1, LSL, 32));
  __ Orr(x5, x0, Operand(x1, LSR, 4));
  __ Orr(w6, w0, Operand(w1, ASR, 4));
  __ Orr(x7, x0, Operand(x1, ASR, 4));
  __ Orr(w8, w0, Operand(w1, ROR, 12));
  __ Orr(x9, x0, Operand(x1, ROR, 12));
  __ Orr(w10, w0, Operand(0xf));
  __ Orr(x11, x0, Operand(0xf0000000f0000000L));
  END();

  RUN();

  ASSERT_EQUAL_64(0xf000f0ff, x2);
  ASSERT_EQUAL_64(0xf000f0f0, x3);
  ASSERT_EQUAL_64(0xf00000ff0000f0f0L, x4);
  ASSERT_EQUAL_64(0x0f00f0ff, x5);
  ASSERT_EQUAL_64(0xff00f0ff, x6);
  ASSERT_EQUAL_64(0x0f00f0ff, x7);
  ASSERT_EQUAL_64(0x0ffff0f0, x8);
  ASSERT_EQUAL_64(0x0ff00000000ff0f0L, x9);
  ASSERT_EQUAL_64(0xf0ff, x10);
  ASSERT_EQUAL_64(0xf0000000f000f0f0L, x11);

  TEARDOWN();
}


TEST(orr_extend) {
  INIT_V8();
  SETUP();

  START();
  __ Mov(x0, 1);
  __ Mov(x1, 0x8000000080008080UL);
  __ Orr(w6, w0, Operand(w1, UXTB));
  __ Orr(x7, x0, Operand(x1, UXTH, 1));
  __ Orr(w8, w0, Operand(w1, UXTW, 2));
  __ Orr(x9, x0, Operand(x1, UXTX, 3));
  __ Orr(w10, w0, Operand(w1, SXTB));
  __ Orr(x11, x0, Operand(x1, SXTH, 1));
  __ Orr(x12, x0, Operand(x1, SXTW, 2));
  __ Orr(x13, x0, Operand(x1, SXTX, 3));
  END();

  RUN();

  ASSERT_EQUAL_64(0x00000081, x6);
  ASSERT_EQUAL_64(0x00010101, x7);
  ASSERT_EQUAL_64(0x00020201, x8);
  ASSERT_EQUAL_64(0x0000000400040401UL, x9);
  ASSERT_EQUAL_64(0x00000000ffffff81UL, x10);
  ASSERT_EQUAL_64(0xffffffffffff0101UL, x11);
  ASSERT_EQUAL_64(0xfffffffe00020201UL, x12);
  ASSERT_EQUAL_64(0x0000000400040401UL, x13);

  TEARDOWN();
}


TEST(bitwise_wide_imm) {
  INIT_V8();
  SETUP();

  START();
  __ Mov(x0, 0);
  __ Mov(x1, 0xf0f0f0f0f0f0f0f0UL);

  __ Orr(x10, x0, Operand(0x1234567890abcdefUL));
  __ Orr(w11, w1, Operand(0x90abcdef));
  END();

  RUN();

  ASSERT_EQUAL_64(0, x0);
  ASSERT_EQUAL_64(0xf0f0f0f0f0f0f0f0UL, x1);
  ASSERT_EQUAL_64(0x1234567890abcdefUL, x10);
  ASSERT_EQUAL_64(0xf0fbfdffUL, x11);

  TEARDOWN();
}


TEST(orn) {
  INIT_V8();
  SETUP();

  START();
  __ Mov(x0, 0xf0f0);
  __ Mov(x1, 0xf00000ff);

  __ Orn(x2, x0, Operand(x1));
  __ Orn(w3, w0, Operand(w1, LSL, 4));
  __ Orn(x4, x0, Operand(x1, LSL, 4));
  __ Orn(x5, x0, Operand(x1, LSR, 1));
  __ Orn(w6, w0, Operand(w1, ASR, 1));
  __ Orn(x7, x0, Operand(x1, ASR, 1));
  __ Orn(w8, w0, Operand(w1, ROR, 16));
  __ Orn(x9, x0, Operand(x1, ROR, 16));
  __ Orn(w10, w0, Operand(0xffff));
  __ Orn(x11, x0, Operand(0xffff0000ffffL));
  END();

  RUN();

  ASSERT_EQUAL_64(0xffffffff0ffffff0L, x2);
  ASSERT_EQUAL_64(0xfffff0ff, x3);
  ASSERT_EQUAL_64(0xfffffff0fffff0ffL, x4);
  ASSERT_EQUAL_64(0xffffffff87fffff0L, x5);
  ASSERT_EQUAL_64(0x07fffff0, x6);
  ASSERT_EQUAL_64(0xffffffff87fffff0L, x7);
  ASSERT_EQUAL_64(0xff00ffff, x8);
  ASSERT_EQUAL_64(0xff00ffffffffffffL, x9);
  ASSERT_EQUAL_64(0xfffff0f0, x10);
  ASSERT_EQUAL_64(0xffff0000fffff0f0L, x11);

  TEARDOWN();
}


TEST(orn_extend) {
  INIT_V8();
  SETUP();

  START();
  __ Mov(x0, 1);
  __ Mov(x1, 0x8000000080008081UL);
  __ Orn(w6, w0, Operand(w1, UXTB));
  __ Orn(x7, x0, Operand(x1, UXTH, 1));
  __ Orn(w8, w0, Operand(w1, UXTW, 2));
  __ Orn(x9, x0, Operand(x1, UXTX, 3));
  __ Orn(w10, w0, Operand(w1, SXTB));
  __ Orn(x11, x0, Operand(x1, SXTH, 1));
  __ Orn(x12, x0, Operand(x1, SXTW, 2));
  __ Orn(x13, x0, Operand(x1, SXTX, 3));
  END();

  RUN();

  ASSERT_EQUAL_64(0xffffff7f, x6);
  ASSERT_EQUAL_64(0xfffffffffffefefdUL, x7);
  ASSERT_EQUAL_64(0xfffdfdfb, x8);
  ASSERT_EQUAL_64(0xfffffffbfffbfbf7UL, x9);
  ASSERT_EQUAL_64(0x0000007f, x10);
  ASSERT_EQUAL_64(0x0000fefd, x11);
  ASSERT_EQUAL_64(0x00000001fffdfdfbUL, x12);
  ASSERT_EQUAL_64(0xfffffffbfffbfbf7UL, x13);

  TEARDOWN();
}


TEST(and_) {
  INIT_V8();
  SETUP();

  START();
  __ Mov(x0, 0xfff0);
  __ Mov(x1, 0xf00000ff);

  __ And(x2, x0, Operand(x1));
  __ And(w3, w0, Operand(w1, LSL, 4));
  __ And(x4, x0, Operand(x1, LSL, 4));
  __ And(x5, x0, Operand(x1, LSR, 1));
  __ And(w6, w0, Operand(w1, ASR, 20));
  __ And(x7, x0, Operand(x1, ASR, 20));
  __ And(w8, w0, Operand(w1, ROR, 28));
  __ And(x9, x0, Operand(x1, ROR, 28));
  __ And(w10, w0, Operand(0xff00));
  __ And(x11, x0, Operand(0xff));
  END();

  RUN();

  ASSERT_EQUAL_64(0x000000f0, x2);
  ASSERT_EQUAL_64(0x00000ff0, x3);
  ASSERT_EQUAL_64(0x00000ff0, x4);
  ASSERT_EQUAL_64(0x00000070, x5);
  ASSERT_EQUAL_64(0x0000ff00, x6);
  ASSERT_EQUAL_64(0x00000f00, x7);
  ASSERT_EQUAL_64(0x00000ff0, x8);
  ASSERT_EQUAL_64(0x00000000, x9);
  ASSERT_EQUAL_64(0x0000ff00, x10);
  ASSERT_EQUAL_64(0x000000f0, x11);

  TEARDOWN();
}


TEST(and_extend) {
  INIT_V8();
  SETUP();

  START();
  __ Mov(x0, 0xffffffffffffffffUL);
  __ Mov(x1, 0x8000000080008081UL);
  __ And(w6, w0, Operand(w1, UXTB));
  __ And(x7, x0, Operand(x1, UXTH, 1));
  __ And(w8, w0, Operand(w1, UXTW, 2));
  __ And(x9, x0, Operand(x1, UXTX, 3));
  __ And(w10, w0, Operand(w1, SXTB));
  __ And(x11, x0, Operand(x1, SXTH, 1));
  __ And(x12, x0, Operand(x1, SXTW, 2));
  __ And(x13, x0, Operand(x1, SXTX, 3));
  END();

  RUN();

  ASSERT_EQUAL_64(0x00000081, x6);
  ASSERT_EQUAL_64(0x00010102, x7);
  ASSERT_EQUAL_64(0x00020204, x8);
  ASSERT_EQUAL_64(0x0000000400040408UL, x9);
  ASSERT_EQUAL_64(0xffffff81, x10);
  ASSERT_EQUAL_64(0xffffffffffff0102UL, x11);
  ASSERT_EQUAL_64(0xfffffffe00020204UL, x12);
  ASSERT_EQUAL_64(0x0000000400040408UL, x13);

  TEARDOWN();
}


TEST(ands) {
  INIT_V8();
  SETUP();

  START();
  __ Mov(x1, 0xf00000ff);
  __ Ands(w0, w1, Operand(w1));
  END();

  RUN();

  ASSERT_EQUAL_NZCV(NFlag);
  ASSERT_EQUAL_64(0xf00000ff, x0);

  START();
  __ Mov(x0, 0xfff0);
  __ Mov(x1, 0xf00000ff);
  __ Ands(w0, w0, Operand(w1, LSR, 4));
  END();

  RUN();

  ASSERT_EQUAL_NZCV(ZFlag);
  ASSERT_EQUAL_64(0x00000000, x0);

  START();
  __ Mov(x0, 0x8000000000000000L);
  __ Mov(x1, 0x00000001);
  __ Ands(x0, x0, Operand(x1, ROR, 1));
  END();

  RUN();

  ASSERT_EQUAL_NZCV(NFlag);
  ASSERT_EQUAL_64(0x8000000000000000L, x0);

  START();
  __ Mov(x0, 0xfff0);
  __ Ands(w0, w0, Operand(0xf));
  END();

  RUN();

  ASSERT_EQUAL_NZCV(ZFlag);
  ASSERT_EQUAL_64(0x00000000, x0);

  START();
  __ Mov(x0, 0xff000000);
  __ Ands(w0, w0, Operand(0x80000000));
  END();

  RUN();

  ASSERT_EQUAL_NZCV(NFlag);
  ASSERT_EQUAL_64(0x80000000, x0);

  TEARDOWN();
}


TEST(bic) {
  INIT_V8();
  SETUP();

  START();
  __ Mov(x0, 0xfff0);
  __ Mov(x1, 0xf00000ff);

  __ Bic(x2, x0, Operand(x1));
  __ Bic(w3, w0, Operand(w1, LSL, 4));
  __ Bic(x4, x0, Operand(x1, LSL, 4));
  __ Bic(x5, x0, Operand(x1, LSR, 1));
  __ Bic(w6, w0, Operand(w1, ASR, 20));
  __ Bic(x7, x0, Operand(x1, ASR, 20));
  __ Bic(w8, w0, Operand(w1, ROR, 28));
  __ Bic(x9, x0, Operand(x1, ROR, 24));
  __ Bic(x10, x0, Operand(0x1f));
  __ Bic(x11, x0, Operand(0x100));

  // Test bic into csp when the constant cannot be encoded in the immediate
  // field.
  // Use x20 to preserve csp. We check the result via x21 because the
  // test infrastructure requires that csp be restored to its original value.
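  // Note: 0xffffff & ~0xabcdef = 0x543210, which is the value checked below.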
  __ Mov(x20, csp);
  __ Mov(x0, 0xffffff);
  __ Bic(csp, x0, Operand(0xabcdef));
  __ Mov(x21, csp);
  __ Mov(csp, x20);
  END();

  RUN();

  ASSERT_EQUAL_64(0x0000ff00, x2);
  ASSERT_EQUAL_64(0x0000f000, x3);
  ASSERT_EQUAL_64(0x0000f000, x4);
  ASSERT_EQUAL_64(0x0000ff80, x5);
  ASSERT_EQUAL_64(0x000000f0, x6);
  ASSERT_EQUAL_64(0x0000f0f0, x7);
  ASSERT_EQUAL_64(0x0000f000, x8);
  ASSERT_EQUAL_64(0x0000ff00, x9);
  ASSERT_EQUAL_64(0x0000ffe0, x10);
  ASSERT_EQUAL_64(0x0000fef0, x11);

  ASSERT_EQUAL_64(0x543210, x21);

  TEARDOWN();
}


TEST(bic_extend) {
  INIT_V8();
  SETUP();

  START();
  __ Mov(x0, 0xffffffffffffffffUL);
  __ Mov(x1, 0x8000000080008081UL);
  __ Bic(w6, w0, Operand(w1, UXTB));
  __ Bic(x7, x0, Operand(x1, UXTH, 1));
  __ Bic(w8, w0, Operand(w1, UXTW, 2));
  __ Bic(x9, x0, Operand(x1, UXTX, 3));
  __ Bic(w10, w0, Operand(w1, SXTB));
  __ Bic(x11, x0, Operand(x1, SXTH, 1));
  __ Bic(x12, x0, Operand(x1, SXTW, 2));
  __ Bic(x13, x0, Operand(x1, SXTX, 3));
  END();

  RUN();

  ASSERT_EQUAL_64(0xffffff7e, x6);
  ASSERT_EQUAL_64(0xfffffffffffefefdUL, x7);
  ASSERT_EQUAL_64(0xfffdfdfb, x8);
  ASSERT_EQUAL_64(0xfffffffbfffbfbf7UL, x9);
  ASSERT_EQUAL_64(0x0000007e, x10);
  ASSERT_EQUAL_64(0x0000fefd, x11);
  ASSERT_EQUAL_64(0x00000001fffdfdfbUL, x12);
  ASSERT_EQUAL_64(0xfffffffbfffbfbf7UL, x13);

  TEARDOWN();
}


TEST(bics) {
  INIT_V8();
  SETUP();

  START();
  __ Mov(x1, 0xffff);
  __ Bics(w0, w1, Operand(w1));
  END();

  RUN();

  ASSERT_EQUAL_NZCV(ZFlag);
  ASSERT_EQUAL_64(0x00000000, x0);

  START();
  __ Mov(x0, 0xffffffff);
  __ Bics(w0, w0, Operand(w0, LSR, 1));
  END();

  RUN();

  ASSERT_EQUAL_NZCV(NFlag);
  ASSERT_EQUAL_64(0x80000000, x0);

  START();
  __ Mov(x0, 0x8000000000000000L);
  __ Mov(x1, 0x00000001);
  __ Bics(x0, x0, Operand(x1, ROR, 1));
  END();

  RUN();

  ASSERT_EQUAL_NZCV(ZFlag);
  ASSERT_EQUAL_64(0x00000000, x0);

  START();
  __ Mov(x0, 0xffffffffffffffffL);
  __ Bics(x0, x0, Operand(0x7fffffffffffffffL));
  END();

  RUN();

  ASSERT_EQUAL_NZCV(NFlag);
  ASSERT_EQUAL_64(0x8000000000000000L, x0);

  START();
  __ Mov(w0, 0xffff0000);
  __ Bics(w0, w0, Operand(0xfffffff0));
  END();

  RUN();

  ASSERT_EQUAL_NZCV(ZFlag);
  ASSERT_EQUAL_64(0x00000000, x0);

  TEARDOWN();
}


TEST(eor) {
  INIT_V8();
  SETUP();

  START();
  __ Mov(x0, 0xfff0);
  __ Mov(x1, 0xf00000ff);

  __ Eor(x2, x0, Operand(x1));
  __ Eor(w3, w0, Operand(w1, LSL, 4));
  __ Eor(x4, x0, Operand(x1, LSL, 4));
  __ Eor(x5, x0, Operand(x1, LSR, 1));
  __ Eor(w6, w0, Operand(w1, ASR, 20));
  __ Eor(x7, x0, Operand(x1, ASR, 20));
  __ Eor(w8, w0, Operand(w1, ROR, 28));
  __ Eor(x9, x0, Operand(x1, ROR, 28));
  __ Eor(w10, w0, Operand(0xff00ff00));
  __ Eor(x11, x0, Operand(0xff00ff00ff00ff00L));
  END();

  RUN();

  ASSERT_EQUAL_64(0xf000ff0f, x2);
  ASSERT_EQUAL_64(0x0000f000, x3);
  ASSERT_EQUAL_64(0x0000000f0000f000L, x4);
  ASSERT_EQUAL_64(0x7800ff8f, x5);
  ASSERT_EQUAL_64(0xffff00f0, x6);
  ASSERT_EQUAL_64(0x0000f0f0, x7);
  ASSERT_EQUAL_64(0x0000f00f, x8);
  ASSERT_EQUAL_64(0x00000ff00000ffffL, x9);
  ASSERT_EQUAL_64(0xff0000f0, x10);
  ASSERT_EQUAL_64(0xff00ff00ff0000f0L, x11);

  TEARDOWN();
}


TEST(eor_extend) {
  INIT_V8();
  SETUP();

  START();
  __ Mov(x0, 0x1111111111111111UL);
  __ Mov(x1, 0x8000000080008081UL);
  __ Eor(w6, w0, Operand(w1, UXTB));
  __ Eor(x7, x0, Operand(x1, UXTH, 1));
  __ Eor(w8, w0, Operand(w1, UXTW, 2));
  __ Eor(x9, x0, Operand(x1, UXTX, 3));
  __ Eor(w10, w0, Operand(w1, SXTB));
  __ Eor(x11, x0, Operand(x1, SXTH, 1));
  __ Eor(x12, x0, Operand(x1, SXTW, 2));
  __ Eor(x13, x0, Operand(x1, SXTX, 3));
  END();

  RUN();

  ASSERT_EQUAL_64(0x11111190, x6);
  ASSERT_EQUAL_64(0x1111111111101013UL, x7);
  ASSERT_EQUAL_64(0x11131315, x8);
  ASSERT_EQUAL_64(0x1111111511151519UL, x9);
  ASSERT_EQUAL_64(0xeeeeee90, x10);
  ASSERT_EQUAL_64(0xeeeeeeeeeeee1013UL, x11);
  ASSERT_EQUAL_64(0xeeeeeeef11131315UL, x12);
  ASSERT_EQUAL_64(0x1111111511151519UL, x13);

  TEARDOWN();
}


TEST(eon) {
  INIT_V8();
  SETUP();

  START();
  __ Mov(x0, 0xfff0);
  __ Mov(x1, 0xf00000ff);

  __ Eon(x2, x0, Operand(x1));
  __ Eon(w3, w0, Operand(w1, LSL, 4));
  __ Eon(x4, x0, Operand(x1, LSL, 4));
  __ Eon(x5, x0, Operand(x1, LSR, 1));
  __ Eon(w6, w0, Operand(w1, ASR, 20));
  __ Eon(x7, x0, Operand(x1, ASR, 20));
  __ Eon(w8, w0, Operand(w1, ROR, 28));
  __ Eon(x9, x0, Operand(x1, ROR, 28));
  __ Eon(w10, w0, Operand(0x03c003c0));
  __ Eon(x11, x0, Operand(0x0000100000001000L));
  END();

  RUN();

  ASSERT_EQUAL_64(0xffffffff0fff00f0L, x2);
  ASSERT_EQUAL_64(0xffff0fff, x3);
  ASSERT_EQUAL_64(0xfffffff0ffff0fffL, x4);
  ASSERT_EQUAL_64(0xffffffff87ff0070L, x5);
  ASSERT_EQUAL_64(0x0000ff0f, x6);
  ASSERT_EQUAL_64(0xffffffffffff0f0fL, x7);
  ASSERT_EQUAL_64(0xffff0ff0, x8);
  ASSERT_EQUAL_64(0xfffff00fffff0000L, x9);
  ASSERT_EQUAL_64(0xfc3f03cf, x10);
  ASSERT_EQUAL_64(0xffffefffffff100fL, x11);

  TEARDOWN();
}


TEST(eon_extend) {
  INIT_V8();
  SETUP();

  START();
  __ Mov(x0, 0x1111111111111111UL);
  __ Mov(x1, 0x8000000080008081UL);
  __ Eon(w6, w0, Operand(w1, UXTB));
  __ Eon(x7, x0, Operand(x1, UXTH, 1));
  __ Eon(w8, w0, Operand(w1, UXTW, 2));
  __ Eon(x9, x0, Operand(x1, UXTX, 3));
  __ Eon(w10, w0, Operand(w1, SXTB));
  __ Eon(x11, x0, Operand(x1, SXTH, 1));
  __ Eon(x12, x0, Operand(x1, SXTW, 2));
  __ Eon(x13, x0, Operand(x1, SXTX, 3));
  END();

  RUN();

  ASSERT_EQUAL_64(0xeeeeee6f, x6);
  ASSERT_EQUAL_64(0xeeeeeeeeeeefefecUL, x7);
  ASSERT_EQUAL_64(0xeeececea, x8);
  ASSERT_EQUAL_64(0xeeeeeeeaeeeaeae6UL, x9);
  ASSERT_EQUAL_64(0x1111116f, x10);
  ASSERT_EQUAL_64(0x111111111111efecUL, x11);
  ASSERT_EQUAL_64(0x11111110eeececeaUL, x12);
  ASSERT_EQUAL_64(0xeeeeeeeaeeeaeae6UL, x13);

  TEARDOWN();
}


TEST(mul) {
  INIT_V8();
  SETUP();

  START();
  __ Mov(x16, 0);
  __ Mov(x17, 1);
  __ Mov(x18, 0xffffffff);
  __ Mov(x19, 0xffffffffffffffffUL);

  __ Mul(w0, w16, w16);
  __ Mul(w1, w16, w17);
  __ Mul(w2, w17, w18);
  __ Mul(w3, w18, w19);
  __ Mul(x4, x16, x16);
  __ Mul(x5, x17, x18);
  __ Mul(x6, x18, x19);
  __ Mul(x7, x19, x19);
  __ Smull(x8, w17, w18);
  __ Smull(x9, w18, w18);
  __ Smull(x10, w19, w19);
  __ Mneg(w11, w16, w16);
  __ Mneg(w12, w16, w17);
  __ Mneg(w13, w17, w18);
  __ Mneg(w14, w18, w19);
  __ Mneg(x20, x16, x16);
  __ Mneg(x21, x17, x18);
  __ Mneg(x22, x18, x19);
  __ Mneg(x23, x19, x19);
  END();

  RUN();

  ASSERT_EQUAL_64(0, x0);
  ASSERT_EQUAL_64(0, x1);
  ASSERT_EQUAL_64(0xffffffff, x2);
  ASSERT_EQUAL_64(1, x3);
  ASSERT_EQUAL_64(0, x4);
  ASSERT_EQUAL_64(0xffffffff, x5);
  ASSERT_EQUAL_64(0xffffffff00000001UL, x6);
  ASSERT_EQUAL_64(1, x7);
  ASSERT_EQUAL_64(0xffffffffffffffffUL, x8);
  ASSERT_EQUAL_64(1, x9);
  ASSERT_EQUAL_64(1, x10);
  ASSERT_EQUAL_64(0, x11);
  ASSERT_EQUAL_64(0, x12);
  ASSERT_EQUAL_64(1, x13);
  ASSERT_EQUAL_64(0xffffffff, x14);
  ASSERT_EQUAL_64(0, x20);
  ASSERT_EQUAL_64(0xffffffff00000001UL, x21);
  ASSERT_EQUAL_64(0xffffffff, x22);
  ASSERT_EQUAL_64(0xffffffffffffffffUL, x23);

  TEARDOWN();
}


static void SmullHelper(int64_t expected, int64_t a, int64_t b) {
  SETUP();
  START();
  __ Mov(w0, a);
  __ Mov(w1, b);
  __ Smull(x2, w0, w1);
  END();
  RUN();
  ASSERT_EQUAL_64(expected, x2);
  TEARDOWN();
}


TEST(smull) {
  INIT_V8();
  SmullHelper(0, 0, 0);
  SmullHelper(1, 1, 1);
  SmullHelper(-1, -1, 1);
  SmullHelper(1, -1, -1);
  SmullHelper(0xffffffff80000000, 0x80000000, 1);
  SmullHelper(0x0000000080000000, 0x00010000, 0x00008000);
}


TEST(madd) {
  INIT_V8();
  SETUP();

  START();
  __ Mov(x16, 0);
  __ Mov(x17, 1);
  __ Mov(x18, 0xffffffff);
  __ Mov(x19, 0xffffffffffffffffUL);

  __ Madd(w0, w16, w16, w16);
  __ Madd(w1, w16, w16, w17);
  __ Madd(w2, w16, w16, w18);
  __ Madd(w3, w16, w16, w19);
  __ Madd(w4, w16, w17, w17);
  __ Madd(w5, w17, w17, w18);
  __ Madd(w6, w17, w17, w19);
  __ Madd(w7, w17, w18, w16);
  __ Madd(w8, w17, w18, w18);
  __ Madd(w9, w18, w18, w17);
  __ Madd(w10, w18, w19, w18);
  __ Madd(w11, w19, w19, w19);

  __ Madd(x12, x16, x16, x16);
  __ Madd(x13, x16, x16, x17);
  __ Madd(x14, x16, x16, x18);
  __ Madd(x15, x16, x16, x19);
  __ Madd(x20, x16, x17, x17);
  __ Madd(x21, x17, x17, x18);
  __ Madd(x22, x17, x17, x19);
  __ Madd(x23, x17, x18, x16);
  __ Madd(x24, x17, x18, x18);
  __ Madd(x25, x18, x18, x17);
  __ Madd(x26, x18, x19, x18);
  __ Madd(x27, x19, x19, x19);

  END();

  RUN();

  ASSERT_EQUAL_64(0, x0);
  ASSERT_EQUAL_64(1, x1);
  ASSERT_EQUAL_64(0xffffffff, x2);
  ASSERT_EQUAL_64(0xffffffff, x3);
  ASSERT_EQUAL_64(1, x4);
  ASSERT_EQUAL_64(0, x5);
  ASSERT_EQUAL_64(0, x6);
  ASSERT_EQUAL_64(0xffffffff, x7);
  ASSERT_EQUAL_64(0xfffffffe, x8);
  ASSERT_EQUAL_64(2, x9);
  ASSERT_EQUAL_64(0, x10);
  ASSERT_EQUAL_64(0, x11);

  ASSERT_EQUAL_64(0, x12);
  ASSERT_EQUAL_64(1, x13);
  ASSERT_EQUAL_64(0xffffffff, x14);
  ASSERT_EQUAL_64(0xffffffffffffffffUL, x15);
  ASSERT_EQUAL_64(1, x20);
  ASSERT_EQUAL_64(0x100000000UL, x21);
  ASSERT_EQUAL_64(0, x22);
  ASSERT_EQUAL_64(0xffffffff, x23);
  ASSERT_EQUAL_64(0x1fffffffe, x24);
  ASSERT_EQUAL_64(0xfffffffe00000002UL, x25);
  ASSERT_EQUAL_64(0, x26);
  ASSERT_EQUAL_64(0, x27);

  TEARDOWN();
}


TEST(msub) {
  INIT_V8();
  SETUP();

  START();
  __ Mov(x16, 0);
  __ Mov(x17, 1);
  __ Mov(x18, 0xffffffff);
  __ Mov(x19, 0xffffffffffffffffUL);

  __ Msub(w0, w16, w16, w16);
  __ Msub(w1, w16, w16, w17);
  __ Msub(w2, w16, w16, w18);
  __ Msub(w3, w16, w16, w19);
  __ Msub(w4, w16, w17, w17);
  __ Msub(w5, w17, w17, w18);
  __ Msub(w6, w17, w17, w19);
  __ Msub(w7, w17, w18, w16);
  __ Msub(w8, w17, w18, w18);
  __ Msub(w9, w18, w18, w17);
  __ Msub(w10, w18, w19, w18);
  __ Msub(w11, w19, w19, w19);

  __ Msub(x12, x16, x16, x16);
  __ Msub(x13, x16, x16, x17);
  __ Msub(x14, x16, x16, x18);
  __ Msub(x15, x16, x16, x19);
  __ Msub(x20, x16, x17, x17);
  __ Msub(x21, x17, x17, x18);
  __ Msub(x22, x17, x17, x19);
  __ Msub(x23, x17, x18, x16);
  __ Msub(x24, x17, x18, x18);
  __ Msub(x25, x18, x18, x17);
  __ Msub(x26, x18, x19, x18);
  __ Msub(x27, x19, x19, x19);

  END();

  RUN();

  ASSERT_EQUAL_64(0, x0);
  ASSERT_EQUAL_64(1, x1);
  ASSERT_EQUAL_64(0xffffffff, x2);
  ASSERT_EQUAL_64(0xffffffff, x3);
  ASSERT_EQUAL_64(1, x4);
  ASSERT_EQUAL_64(0xfffffffe, x5);
  ASSERT_EQUAL_64(0xfffffffe, x6);
  ASSERT_EQUAL_64(1, x7);
  ASSERT_EQUAL_64(0, x8);
  ASSERT_EQUAL_64(0, x9);
  ASSERT_EQUAL_64(0xfffffffe, x10);
  ASSERT_EQUAL_64(0xfffffffe, x11);

  ASSERT_EQUAL_64(0, x12);
  ASSERT_EQUAL_64(1, x13);
  ASSERT_EQUAL_64(0xffffffff, x14);
  ASSERT_EQUAL_64(0xffffffffffffffffUL, x15);
  ASSERT_EQUAL_64(1, x20);
  ASSERT_EQUAL_64(0xfffffffeUL, x21);
  ASSERT_EQUAL_64(0xfffffffffffffffeUL, x22);
  ASSERT_EQUAL_64(0xffffffff00000001UL, x23);
  ASSERT_EQUAL_64(0, x24);
  ASSERT_EQUAL_64(0x200000000UL, x25);
  ASSERT_EQUAL_64(0x1fffffffeUL, x26);
  ASSERT_EQUAL_64(0xfffffffffffffffeUL, x27);

  TEARDOWN();
}


TEST(smulh) {
  INIT_V8();
  SETUP();

  START();
  __ Mov(x20, 0);
  __ Mov(x21, 1);
  __ Mov(x22, 0x0000000100000000L);
  __ Mov(x23, 0x12345678);
  __ Mov(x24, 0x0123456789abcdefL);
  __ Mov(x25, 0x0000000200000000L);
  __ Mov(x26, 0x8000000000000000UL);
  __ Mov(x27, 0xffffffffffffffffUL);
  __ Mov(x28, 0x5555555555555555UL);
  __ Mov(x29, 0xaaaaaaaaaaaaaaaaUL);

  __ Smulh(x0, x20, x24);
  __ Smulh(x1, x21, x24);
  __ Smulh(x2, x22, x23);
  __ Smulh(x3, x22, x24);
  __ Smulh(x4, x24, x25);
  __ Smulh(x5, x23, x27);
  __ Smulh(x6, x26, x26);
  __ Smulh(x7, x26, x27);
  __ Smulh(x8, x27, x27);
  __ Smulh(x9, x28, x28);
  __ Smulh(x10, x28, x29);
  __ Smulh(x11, x29, x29);
  END();

  RUN();

  ASSERT_EQUAL_64(0, x0);
  ASSERT_EQUAL_64(0, x1);
  ASSERT_EQUAL_64(0, x2);
  ASSERT_EQUAL_64(0x01234567, x3);
  ASSERT_EQUAL_64(0x02468acf, x4);
  ASSERT_EQUAL_64(0xffffffffffffffffUL, x5);
  ASSERT_EQUAL_64(0x4000000000000000UL, x6);
  ASSERT_EQUAL_64(0, x7);
  ASSERT_EQUAL_64(0, x8);
  ASSERT_EQUAL_64(0x1c71c71c71c71c71UL, x9);
  ASSERT_EQUAL_64(0xe38e38e38e38e38eUL, x10);
  ASSERT_EQUAL_64(0x1c71c71c71c71c72UL, x11);

  TEARDOWN();
}


TEST(smaddl_umaddl) {
  INIT_V8();
  SETUP();

  START();
  __ Mov(x17, 1);
  __ Mov(x18, 0xffffffff);
  __ Mov(x19, 0xffffffffffffffffUL);
  __ Mov(x20, 4);
  __ Mov(x21, 0x200000000UL);

  __ Smaddl(x9, w17, w18, x20);
  __ Smaddl(x10, w18, w18, x20);
  __ Smaddl(x11, w19, w19, x20);
  __ Smaddl(x12, w19, w19, x21);
  __ Umaddl(x13, w17, w18, x20);
  __ Umaddl(x14, w18, w18, x20);
  __ Umaddl(x15, w19, w19, x20);
  __ Umaddl(x22, w19, w19, x21);
  END();

  RUN();

  ASSERT_EQUAL_64(3, x9);
  ASSERT_EQUAL_64(5, x10);
  ASSERT_EQUAL_64(5, x11);
  ASSERT_EQUAL_64(0x200000001UL, x12);
  ASSERT_EQUAL_64(0x100000003UL, x13);
  ASSERT_EQUAL_64(0xfffffffe00000005UL, x14);
  ASSERT_EQUAL_64(0xfffffffe00000005UL, x15);
  ASSERT_EQUAL_64(0x1, x22);

  TEARDOWN();
}


TEST(smsubl_umsubl) {
  INIT_V8();
  SETUP();

  START();
  __ Mov(x17, 1);
  __ Mov(x18, 0xffffffff);
  __ Mov(x19, 0xffffffffffffffffUL);
  __ Mov(x20, 4);
  __ Mov(x21, 0x200000000UL);

  __ Smsubl(x9, w17, w18, x20);
  __ Smsubl(x10, w18, w18, x20);
  __ Smsubl(x11, w19, w19, x20);
  __ Smsubl(x12, w19, w19, x21);
  __ Umsubl(x13, w17, w18, x20);
  __ Umsubl(x14, w18, w18, x20);
  __ Umsubl(x15, w19, w19, x20);
  __ Umsubl(x22, w19, w19, x21);
  END();

  RUN();

  ASSERT_EQUAL_64(5, x9);
  ASSERT_EQUAL_64(3, x10);
  ASSERT_EQUAL_64(3, x11);
  ASSERT_EQUAL_64(0x1ffffffffUL, x12);
  ASSERT_EQUAL_64(0xffffffff00000005UL, x13);
  ASSERT_EQUAL_64(0x200000003UL, x14);
  ASSERT_EQUAL_64(0x200000003UL, x15);
  ASSERT_EQUAL_64(0x3ffffffffUL, x22);

  TEARDOWN();
}


TEST(div) {
  INIT_V8();
  SETUP();

  START();
  __ Mov(x16, 1);
  __ Mov(x17, 0xffffffff);
  __ Mov(x18, 0xffffffffffffffffUL);
  __ Mov(x19, 0x80000000);
  __ Mov(x20, 0x8000000000000000UL);
  __ Mov(x21, 2);

  __ Udiv(w0, w16, w16);
  __ Udiv(w1, w17, w16);
  __ Sdiv(w2, w16, w16);
  __ Sdiv(w3, w16, w17);
  __ Sdiv(w4, w17, w18);

  __ Udiv(x5, x16, x16);
  __ Udiv(x6, x17, x18);
  __ Sdiv(x7, x16, x16);
  __ Sdiv(x8, x16, x17);
  __ Sdiv(x9, x17, x18);

  __ Udiv(w10, w19, w21);
  __ Sdiv(w11, w19, w21);
  __ Udiv(x12, x19, x21);
  __ Sdiv(x13, x19, x21);
  __ Udiv(x14, x20, x21);
  __ Sdiv(x15, x20, x21);

  __ Udiv(w22, w19, w17);
  __ Sdiv(w23, w19, w17);
  __ Udiv(x24, x20, x18);
  __ Sdiv(x25, x20, x18);

  __ Udiv(x26, x16, x21);
  __ Sdiv(x27, x16, x21);
  __ Udiv(x28, x18, x21);
  __ Sdiv(x29, x18, x21);

  __ Mov(x17, 0);
  __ Udiv(w18, w16, w17);
  __ Sdiv(w19, w16, w17);
  __ Udiv(x20, x16, x17);
  __ Sdiv(x21, x16, x17);
  END();

  RUN();

  ASSERT_EQUAL_64(1, x0);
  ASSERT_EQUAL_64(0xffffffff, x1);
  ASSERT_EQUAL_64(1, x2);
  ASSERT_EQUAL_64(0xffffffff, x3);
  ASSERT_EQUAL_64(1, x4);
  ASSERT_EQUAL_64(1, x5);
  ASSERT_EQUAL_64(0, x6);
  ASSERT_EQUAL_64(1, x7);
  ASSERT_EQUAL_64(0, x8);
  ASSERT_EQUAL_64(0xffffffff00000001UL, x9);
  ASSERT_EQUAL_64(0x40000000, x10);
  ASSERT_EQUAL_64(0xc0000000, x11);
  ASSERT_EQUAL_64(0x40000000, x12);
  ASSERT_EQUAL_64(0x40000000, x13);
  ASSERT_EQUAL_64(0x4000000000000000UL, x14);
  ASSERT_EQUAL_64(0xc000000000000000UL, x15);
  ASSERT_EQUAL_64(0, x22);
  ASSERT_EQUAL_64(0x80000000, x23);
  ASSERT_EQUAL_64(0, x24);
  ASSERT_EQUAL_64(0x8000000000000000UL, x25);
  ASSERT_EQUAL_64(0, x26);
  ASSERT_EQUAL_64(0, x27);
  ASSERT_EQUAL_64(0x7fffffffffffffffUL, x28);
  ASSERT_EQUAL_64(0, x29);
  ASSERT_EQUAL_64(0, x18);
  ASSERT_EQUAL_64(0, x19);
  ASSERT_EQUAL_64(0, x20);
  ASSERT_EQUAL_64(0, x21);

  TEARDOWN();
}


TEST(rbit_rev) {
  INIT_V8();
  SETUP();

  START();
  __ Mov(x24, 0xfedcba9876543210UL);
  __ Rbit(w0, w24);
  __ Rbit(x1, x24);
  __ Rev16(w2, w24);
  __ Rev16(x3, x24);
  __ Rev(w4, w24);
  __ Rev32(x5, x24);
  __ Rev(x6, x24);
  END();

  RUN();

  ASSERT_EQUAL_64(0x084c2a6e, x0);
  ASSERT_EQUAL_64(0x084c2a6e195d3b7fUL, x1);
  ASSERT_EQUAL_64(0x54761032, x2);
  ASSERT_EQUAL_64(0xdcfe98ba54761032UL, x3);
  ASSERT_EQUAL_64(0x10325476, x4);
  ASSERT_EQUAL_64(0x98badcfe10325476UL, x5);
  ASSERT_EQUAL_64(0x1032547698badcfeUL, x6);

  TEARDOWN();
}


TEST(clz_cls) {
  INIT_V8();
  SETUP();

  START();
  __ Mov(x24, 0x0008000000800000UL);
  __ Mov(x25, 0xff800000fff80000UL);
  __ Mov(x26, 0);
  __ Clz(w0, w24);
  __ Clz(x1, x24);
  __ Clz(w2, w25);
  __ Clz(x3, x25);
  __ Clz(w4, w26);
  __ Clz(x5, x26);
  __ Cls(w6, w24);
  __ Cls(x7, x24);
  __ Cls(w8, w25);
  __ Cls(x9, x25);
  __ Cls(w10, w26);
  __ Cls(x11, x26);
  END();

  RUN();

  ASSERT_EQUAL_64(8, x0);
  ASSERT_EQUAL_64(12, x1);
  ASSERT_EQUAL_64(0, x2);
  ASSERT_EQUAL_64(0, x3);
  ASSERT_EQUAL_64(32, x4);
  ASSERT_EQUAL_64(64, x5);
  ASSERT_EQUAL_64(7, x6);
  ASSERT_EQUAL_64(11, x7);
  ASSERT_EQUAL_64(12, x8);
  ASSERT_EQUAL_64(8, x9);
  ASSERT_EQUAL_64(31, x10);
  ASSERT_EQUAL_64(63, x11);

  TEARDOWN();
}


TEST(label) {
  INIT_V8();
  SETUP();

  Label label_1, label_2, label_3, label_4;

  START();
  __ Mov(x0, 0x1);
  __ Mov(x1, 0x0);
  __ Mov(x22, lr);    // Save lr.

  __ B(&label_1);
  __ B(&label_1);
  __ B(&label_1);     // Multiple branches to the same label.
  __ Mov(x0, 0x0);
  __ Bind(&label_2);
  __ B(&label_3);     // Forward branch.
  __ Mov(x0, 0x0);
  __ Bind(&label_1);
  __ B(&label_2);     // Backward branch.
  __ Mov(x0, 0x0);
  __ Bind(&label_3);
  __ Bl(&label_4);
  END();

  __ Bind(&label_4);
  __ Mov(x1, 0x1);
  __ Mov(lr, x22);
  END();

  RUN();

  ASSERT_EQUAL_64(0x1, x0);
  ASSERT_EQUAL_64(0x1, x1);

  TEARDOWN();
}


TEST(branch_at_start) {
  INIT_V8();
  SETUP();

  Label good, exit;

  // Test that branches can exist at the start of the buffer. (This is a
  // boundary condition in the label-handling code.) To achieve this, we have
  // to work around the code generated by START.
  RESET();
  __ B(&good);

  START_AFTER_RESET();
  __ Mov(x0, 0x0);
  END();

  __ Bind(&exit);
  START_AFTER_RESET();
  __ Mov(x0, 0x1);
  END();

  __ Bind(&good);
  __ B(&exit);
  END();

  RUN();

  ASSERT_EQUAL_64(0x1, x0);
  TEARDOWN();
}


TEST(adr) {
  INIT_V8();
  SETUP();

  Label label_1, label_2, label_3, label_4;

  START();
  __ Mov(x0, 0x0);        // Set to non-zero to indicate failure.
  __ Adr(x1, &label_3);   // Set to zero to indicate success.

  __ Adr(x2, &label_1);   // Multiple forward references to the same label.
  __ Adr(x3, &label_1);
  __ Adr(x4, &label_1);

  __ Bind(&label_2);
  __ Eor(x5, x2, Operand(x3));  // Ensure that x2, x3 and x4 are identical.
  __ Eor(x6, x2, Operand(x4));
  __ Orr(x0, x0, Operand(x5));
  __ Orr(x0, x0, Operand(x6));
  __ Br(x2);  // label_1, label_3

  __ Bind(&label_3);
  __ Adr(x2, &label_3);   // Self-reference (offset 0).
  __ Eor(x1, x1, Operand(x2));
  __ Adr(x2, &label_4);   // Simple forward reference.
  __ Br(x2);  // label_4

  __ Bind(&label_1);
  __ Adr(x2, &label_3);   // Multiple reverse references to the same label.
  __ Adr(x3, &label_3);
  __ Adr(x4, &label_3);
  __ Adr(x5, &label_2);   // Simple reverse reference.
  __ Br(x5);  // label_2

  __ Bind(&label_4);
  END();

  RUN();

  ASSERT_EQUAL_64(0x0, x0);
  ASSERT_EQUAL_64(0x0, x1);

  TEARDOWN();
}


TEST(adr_far) {
  INIT_V8();

  int max_range = 1 << (Instruction::ImmPCRelRangeBitwidth - 1);
  SETUP_SIZE(max_range + 1000 * kInstructionSize);

  Label done, fail;
  Label test_near, near_forward, near_backward;
  Label test_far, far_forward, far_backward;

  START();
  __ Mov(x0, 0x0);

  __ Bind(&test_near);
  __ Adr(x10, &near_forward, MacroAssembler::kAdrFar);
  __ Br(x10);
  __ B(&fail);
  __ Bind(&near_backward);
  __ Orr(x0, x0, 1 << 1);
  __ B(&test_far);

  __ Bind(&near_forward);
  __ Orr(x0, x0, 1 << 0);
  __ Adr(x10, &near_backward, MacroAssembler::kAdrFar);
  __ Br(x10);

  __ Bind(&test_far);
  __ Adr(x10, &far_forward, MacroAssembler::kAdrFar);
  __ Br(x10);
  __ B(&fail);
  __ Bind(&far_backward);
  __ Orr(x0, x0, 1 << 3);
  __ B(&done);

  for (unsigned i = 0; i < max_range / kInstructionSize + 1; ++i) {
    if (i % 100 == 0) {
      // If we do land in this code, we do not want to execute so many nops
      // before reaching the end of the test (especially if tracing is
      // activated).
      __ b(&fail);
    } else {
      __ nop();
    }
  }

  __ Bind(&far_forward);
  __ Orr(x0, x0, 1 << 2);
  __ Adr(x10, &far_backward, MacroAssembler::kAdrFar);
  __ Br(x10);

  __ B(&done);
  __ Bind(&fail);
  __ Orr(x0, x0, 1 << 4);
  __ Bind(&done);

  END();

  RUN();

  ASSERT_EQUAL_64(0xf, x0);

  TEARDOWN();
}


TEST(branch_cond) {
  INIT_V8();
  SETUP();

  Label wrong;

  START();
  __ Mov(x0, 0x1);
  __ Mov(x1, 0x1);
  __ Mov(x2, 0x8000000000000000L);

  // For each 'cmp' instruction below, the condition codes tested against
  // &wrong do not hold, so those branches fall through; any other condition
  // code would branch.
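  // For example, after Cmp(x1, 0) with x1 == 1 the flags are NZCV = 0b0010
  // (only C is set), so 'eq', 'lo', 'mi', 'vs', 'ls', 'lt' and 'le' all fail
  // while 'ne' passes.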

  __ Cmp(x1, 0);
  __ B(&wrong, eq);
  __ B(&wrong, lo);
  __ B(&wrong, mi);
  __ B(&wrong, vs);
  __ B(&wrong, ls);
  __ B(&wrong, lt);
  __ B(&wrong, le);
  Label ok_1;
  __ B(&ok_1, ne);
  __ Mov(x0, 0x0);
  __ Bind(&ok_1);

  __ Cmp(x1, 1);
  __ B(&wrong, ne);
  __ B(&wrong, lo);
  __ B(&wrong, mi);
  __ B(&wrong, vs);
  __ B(&wrong, hi);
  __ B(&wrong, lt);
  __ B(&wrong, gt);
  Label ok_2;
  __ B(&ok_2, pl);
  __ Mov(x0, 0x0);
  __ Bind(&ok_2);

  __ Cmp(x1, 2);
  __ B(&wrong, eq);
  __ B(&wrong, hs);
  __ B(&wrong, pl);
  __ B(&wrong, vs);
  __ B(&wrong, hi);
  __ B(&wrong, ge);
  __ B(&wrong, gt);
  Label ok_3;
  __ B(&ok_3, vc);
  __ Mov(x0, 0x0);
  __ Bind(&ok_3);

  __ Cmp(x2, 1);
  __ B(&wrong, eq);
  __ B(&wrong, lo);
  __ B(&wrong, mi);
  __ B(&wrong, vc);
  __ B(&wrong, ls);
  __ B(&wrong, ge);
  __ B(&wrong, gt);
  Label ok_4;
  __ B(&ok_4, le);
  __ Mov(x0, 0x0);
  __ Bind(&ok_4);

  Label ok_5;
  __ b(&ok_5, al);
  __ Mov(x0, 0x0);
  __ Bind(&ok_5);

  Label ok_6;
  __ b(&ok_6, nv);
  __ Mov(x0, 0x0);
  __ Bind(&ok_6);

  END();

  __ Bind(&wrong);
  __ Mov(x0, 0x0);
  END();

  RUN();

  ASSERT_EQUAL_64(0x1, x0);

  TEARDOWN();
}


TEST(branch_to_reg) {
  INIT_V8();
  SETUP();

  // Test br.
  Label fn1, after_fn1;

  START();
  __ Mov(x29, lr);

  __ Mov(x1, 0);
  __ B(&after_fn1);

  __ Bind(&fn1);
  __ Mov(x0, lr);
  __ Mov(x1, 42);
  __ Br(x0);

  __ Bind(&after_fn1);
  __ Bl(&fn1);

  // Test blr.
  Label fn2, after_fn2;

  __ Mov(x2, 0);
  __ B(&after_fn2);

  __ Bind(&fn2);
  __ Mov(x0, lr);
  __ Mov(x2, 84);
  __ Blr(x0);

  __ Bind(&after_fn2);
  __ Bl(&fn2);
  __ Mov(x3, lr);

  __ Mov(lr, x29);
  END();

  RUN();

  ASSERT_EQUAL_64(core.xreg(3) + kInstructionSize, x0);
  ASSERT_EQUAL_64(42, x1);
  ASSERT_EQUAL_64(84, x2);

  TEARDOWN();
}


TEST(compare_branch) {
  INIT_V8();
  SETUP();

  START();
  __ Mov(x0, 0);
  __ Mov(x1, 0);
  __ Mov(x2, 0);
  __ Mov(x3, 0);
  __ Mov(x4, 0);
  __ Mov(x5, 0);
  __ Mov(x16, 0);
  __ Mov(x17, 42);

  Label zt, zt_end;
  __ Cbz(w16, &zt);
  __ B(&zt_end);
  __ Bind(&zt);
  __ Mov(x0, 1);
  __ Bind(&zt_end);

  Label zf, zf_end;
  __ Cbz(x17, &zf);
  __ B(&zf_end);
  __ Bind(&zf);
  __ Mov(x1, 1);
  __ Bind(&zf_end);

  Label nzt, nzt_end;
  __ Cbnz(w17, &nzt);
  __ B(&nzt_end);
  __ Bind(&nzt);
  __ Mov(x2, 1);
  __ Bind(&nzt_end);

  Label nzf, nzf_end;
  __ Cbnz(x16, &nzf);
  __ B(&nzf_end);
  __ Bind(&nzf);
  __ Mov(x3, 1);
  __ Bind(&nzf_end);

  __ Mov(x18, 0xffffffff00000000UL);

  Label a, a_end;
  __ Cbz(w18, &a);
  __ B(&a_end);
  __ Bind(&a);
  __ Mov(x4, 1);
  __ Bind(&a_end);

  Label b, b_end;
  __ Cbnz(w18, &b);
  __ B(&b_end);
  __ Bind(&b);
  __ Mov(x5, 1);
  __ Bind(&b_end);

  END();

  RUN();

  ASSERT_EQUAL_64(1, x0);
  ASSERT_EQUAL_64(0, x1);
  ASSERT_EQUAL_64(1, x2);
  ASSERT_EQUAL_64(0, x3);
  ASSERT_EQUAL_64(1, x4);
  ASSERT_EQUAL_64(0, x5);

  TEARDOWN();
}


TEST(test_branch) {
  INIT_V8();
  SETUP();

  START();
  __ Mov(x0, 0);
  __ Mov(x1, 0);
  __ Mov(x2, 0);
  __ Mov(x3, 0);
  __ Mov(x16, 0xaaaaaaaaaaaaaaaaUL);

  Label bz, bz_end;
  __ Tbz(w16, 0, &bz);
  __ B(&bz_end);
  __ Bind(&bz);
  __ Mov(x0, 1);
  __ Bind(&bz_end);

  Label bo, bo_end;
  __ Tbz(x16, 63, &bo);
  __ B(&bo_end);
  __ Bind(&bo);
  __ Mov(x1, 1);
  __ Bind(&bo_end);

  Label nbz, nbz_end;
  __ Tbnz(x16, 61, &nbz);
  __ B(&nbz_end);
  __ Bind(&nbz);
  __ Mov(x2, 1);
  __ Bind(&nbz_end);

  Label nbo, nbo_end;
  __ Tbnz(w16, 2, &nbo);
  __ B(&nbo_end);
  __ Bind(&nbo);
  __ Mov(x3, 1);
  __ Bind(&nbo_end);
  END();

  RUN();

  ASSERT_EQUAL_64(1, x0);
  ASSERT_EQUAL_64(0, x1);
  ASSERT_EQUAL_64(1, x2);
  ASSERT_EQUAL_64(0, x3);

  TEARDOWN();
}


TEST(far_branch_backward) {
  INIT_V8();

  // Test that the MacroAssembler correctly resolves backward branches to labels
  // that are outside the immediate range of branch instructions.
  int max_range =
    std::max(Instruction::ImmBranchRange(TestBranchType),
             std::max(Instruction::ImmBranchRange(CompareBranchType),
                      Instruction::ImmBranchRange(CondBranchType)));

  SETUP_SIZE(max_range + 1000 * kInstructionSize);

  START();

  Label done, fail;
  Label test_tbz, test_cbz, test_bcond;
  Label success_tbz, success_cbz, success_bcond;

  __ Mov(x0, 0);
  __ Mov(x1, 1);
  __ Mov(x10, 0);

  __ B(&test_tbz);
  __ Bind(&success_tbz);
  __ Orr(x0, x0, 1 << 0);
  __ B(&test_cbz);
  __ Bind(&success_cbz);
  __ Orr(x0, x0, 1 << 1);
  __ B(&test_bcond);
  __ Bind(&success_bcond);
  __ Orr(x0, x0, 1 << 2);

  __ B(&done);

  // Generate enough code to overflow the immediate range of the three types of
  // branches below.
  for (unsigned i = 0; i < max_range / kInstructionSize + 1; ++i) {
    if (i % 100 == 0) {
      // If we do land in this code, we do not want to execute so many nops
      // before reaching the end of the test (especially if tracing is
      // activated).
      __ B(&fail);
    } else {
      __ Nop();
    }
  }
  __ B(&fail);

  __ Bind(&test_tbz);
  __ Tbz(x10, 7, &success_tbz);
  __ Bind(&test_cbz);
  __ Cbz(x10, &success_cbz);
  __ Bind(&test_bcond);
  __ Cmp(x10, 0);
  __ B(eq, &success_bcond);

  // For each out-of-range branch instruction, at least two instructions should
  // have been generated.
  CHECK_GE(7 * kInstructionSize, __ SizeOfCodeGeneratedSince(&test_tbz));
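  // A plausible reading of the bound: an out-of-range Tbz or Cbz can be
  // rewritten as an inverted conditional branch over an unconditional b (two
  // instructions each), and the B(eq) likewise, giving at most
  // 2 + 2 + (1 + 2) = 7 instructions including the Cmp.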

  __ Bind(&fail);
  __ Mov(x1, 0);
  __ Bind(&done);

  END();

  RUN();

  ASSERT_EQUAL_64(0x7, x0);
  ASSERT_EQUAL_64(0x1, x1);

  TEARDOWN();
}


TEST(far_branch_simple_veneer) {
  INIT_V8();

  // Test that the MacroAssembler correctly emits veneers for forward branches
  // to labels that are outside the immediate range of branch instructions.
  int max_range =
    std::max(Instruction::ImmBranchRange(TestBranchType),
             std::max(Instruction::ImmBranchRange(CompareBranchType),
                      Instruction::ImmBranchRange(CondBranchType)));

  SETUP_SIZE(max_range + 1000 * kInstructionSize);

  START();

  Label done, fail;
  Label test_tbz, test_cbz, test_bcond;
  Label success_tbz, success_cbz, success_bcond;

  __ Mov(x0, 0);
  __ Mov(x1, 1);
  __ Mov(x10, 0);

  __ Bind(&test_tbz);
  __ Tbz(x10, 7, &success_tbz);
  __ Bind(&test_cbz);
  __ Cbz(x10, &success_cbz);
  __ Bind(&test_bcond);
  __ Cmp(x10, 0);
  __ B(eq, &success_bcond);

  // Generate enough code to overflow the immediate range of the three types of
  // branches below.
  for (unsigned i = 0; i < max_range / kInstructionSize + 1; ++i) {
    if (i % 100 == 0) {
      // If we do land in this code, we do not want to execute so many nops
      // before reaching the end of the test (especially if tracing is
      // activated).
      // Also, the branches give the MacroAssembler the opportunity to emit the
      // veneers.
      __ B(&fail);
    } else {
      __ Nop();
    }
  }
  __ B(&fail);

  __ Bind(&success_tbz);
  __ Orr(x0, x0, 1 << 0);
  __ B(&test_cbz);
  __ Bind(&success_cbz);
  __ Orr(x0, x0, 1 << 1);
  __ B(&test_bcond);
  __ Bind(&success_bcond);
  __ Orr(x0, x0, 1 << 2);

  __ B(&done);
  __ Bind(&fail);
  __ Mov(x1, 0);
  __ Bind(&done);

  END();

  RUN();

  ASSERT_EQUAL_64(0x7, x0);
  ASSERT_EQUAL_64(0x1, x1);

  TEARDOWN();
}


TEST(far_branch_veneer_link_chain) {
  INIT_V8();

  // Test that the MacroAssembler correctly emits veneers for forward branches
  // that target out-of-range labels and are part of a chain of multiple
  // branches to that label.
  //
  // We test the three situations with the different types of instruction:
  // (1)- When the branch is at the start of the chain with tbz.
  // (2)- When the branch is in the middle of the chain with cbz.
  // (3)- When the branch is at the end of the chain with bcond.
  int max_range =
    std::max(Instruction::ImmBranchRange(TestBranchType),
             std::max(Instruction::ImmBranchRange(CompareBranchType),
                      Instruction::ImmBranchRange(CondBranchType)));

  SETUP_SIZE(max_range + 1000 * kInstructionSize);

  START();

  Label skip, fail, done;
  Label test_tbz, test_cbz, test_bcond;
  Label success_tbz, success_cbz, success_bcond;

  __ Mov(x0, 0);
  __ Mov(x1, 1);
  __ Mov(x10, 0);

  __ B(&skip);
  // Branches at the start of the chain for situations (2) and (3).
  __ B(&success_cbz);
  __ B(&success_bcond);
  __ Nop();
  __ B(&success_bcond);
  __ B(&success_cbz);
  __ Bind(&skip);

  __ Bind(&test_tbz);
  __ Tbz(x10, 7, &success_tbz);
  __ Bind(&test_cbz);
  __ Cbz(x10, &success_cbz);
  __ Bind(&test_bcond);
  __ Cmp(x10, 0);
  __ B(eq, &success_bcond);

  skip.Unuse();
  __ B(&skip);
  // Branches at the end of the chain for situations (1) and (2).
  __ B(&success_cbz);
  __ B(&success_tbz);
  __ Nop();
  __ B(&success_tbz);
  __ B(&success_cbz);
  __ Bind(&skip);

  // Generate enough code to overflow the immediate range of the three types of
  // branches below.
  for (unsigned i = 0; i < max_range / kInstructionSize + 1; ++i) {
    if (i % 100 == 0) {
      // If we do land in this code, we do not want to execute so many nops
      // before reaching the end of the test (especially if tracing is
      // activated).
      // Also, the branches give the MacroAssembler the opportunity to emit the
      // veneers.
      __ B(&fail);
    } else {
      __ Nop();
    }
  }
  __ B(&fail);

  __ Bind(&success_tbz);
  __ Orr(x0, x0, 1 << 0);
  __ B(&test_cbz);
  __ Bind(&success_cbz);
  __ Orr(x0, x0, 1 << 1);
  __ B(&test_bcond);
  __ Bind(&success_bcond);
  __ Orr(x0, x0, 1 << 2);

  __ B(&done);
  __ Bind(&fail);
  __ Mov(x1, 0);
  __ Bind(&done);

  END();

  RUN();

  ASSERT_EQUAL_64(0x7, x0);
  ASSERT_EQUAL_64(0x1, x1);

  TEARDOWN();
}


TEST(far_branch_veneer_broken_link_chain) {
  INIT_V8();

  // Check that the MacroAssembler correctly handles the situation where a
  // branch is removed from the link chain of a label and the two links on
  // either side of the removed branch cannot be linked together (out of
  // range).
  //
  // We test with tbz because it has a small range.
  int max_range = Instruction::ImmBranchRange(TestBranchType);
  int inter_range = max_range / 2 + max_range / 10;
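  // inter_range is roughly 60% of the tbz range, so two consecutive blocks of
  // filler (about 120% of the range) cannot be crossed by a single tbz, while
  // one block can.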

  SETUP_SIZE(3 * inter_range + 1000 * kInstructionSize);

  START();

  Label skip, fail, done;
  Label test_1, test_2, test_3;
  Label far_target;

  __ Mov(x0, 0);  // Indicates the origin of the branch.
  __ Mov(x1, 1);
  __ Mov(x10, 0);

  // First instruction in the label chain.
  __ Bind(&test_1);
  __ Mov(x0, 1);
  __ B(&far_target);

  for (unsigned i = 0; i < inter_range / kInstructionSize; ++i) {
    if (i % 100 == 0) {
      // Do not allow generating veneers. They should not be needed.
      __ b(&fail);
    } else {
      __ Nop();
    }
  }

  // Will need a veneer to reach the target.
  __ Bind(&test_2);
  __ Mov(x0, 2);
  __ Tbz(x10, 7, &far_target);

  for (unsigned i = 0; i < inter_range / kInstructionSize; ++i) {
    if (i % 100 == 0) {
      // Do not allow generating veneers. They should not be needed.
      __ b(&fail);
    } else {
      __ Nop();
    }
  }

  // Does not need a veneer to reach the target, but the initial branch
  // instruction is out of range.
  __ Bind(&test_3);
  __ Mov(x0, 3);
  __ Tbz(x10, 7, &far_target);

  for (unsigned i = 0; i < inter_range / kInstructionSize; ++i) {
    if (i % 100 == 0) {
      // Allow generating veneers.
      __ B(&fail);
    } else {
      __ Nop();
    }
  }

  __ B(&fail);

  __ Bind(&far_target);
  __ Cmp(x0, 1);
  __ B(eq, &test_2);
  __ Cmp(x0, 2);
  __ B(eq, &test_3);

  __ B(&done);
  __ Bind(&fail);
  __ Mov(x1, 0);
  __ Bind(&done);

  END();

  RUN();

  ASSERT_EQUAL_64(0x3, x0);
  ASSERT_EQUAL_64(0x1, x1);

  TEARDOWN();
}


TEST(branch_type) {
  INIT_V8();

  SETUP();

  Label fail, done;

  START();
  __ Mov(x0, 0x0);
  __ Mov(x10, 0x7);
  __ Mov(x11, 0x0);

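  // The B() overloads used below take a BranchType: reg_zero/reg_not_zero are
  // expected to assemble to cbz/cbnz and reg_bit_clear/reg_bit_set to
  // tbz/tbnz, while 'always' and 'never' are pseudo branch types.
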
  // Test non-taken branches.
  __ Cmp(x10, 0x7);
  __ B(&fail, ne);
  __ B(&fail, never);
  __ B(&fail, reg_zero, x10);
  __ B(&fail, reg_not_zero, x11);
  __ B(&fail, reg_bit_clear, x10, 0);
  __ B(&fail, reg_bit_set, x10, 3);

  // Test taken branches.
  Label l1, l2, l3, l4, l5;
  __ Cmp(x10, 0x7);
  __ B(&l1, eq);
  __ B(&fail);
  __ Bind(&l1);
  __ B(&l2, always);
  __ B(&fail);
  __ Bind(&l2);
  __ B(&l3, reg_not_zero, x10);
  __ B(&fail);
  __ Bind(&l3);
  __ B(&l4, reg_bit_clear, x10, 15);
  __ B(&fail);
  __ Bind(&l4);
  __ B(&l5, reg_bit_set, x10, 1);
  __ B(&fail);
  __ Bind(&l5);

  __ B(&done);

  __ Bind(&fail);
  __ Mov(x0, 0x1);

  __ Bind(&done);

  END();

  RUN();

  ASSERT_EQUAL_64(0x0, x0);

  TEARDOWN();
}


TEST(ldr_str_offset) {
  INIT_V8();
  SETUP();

  uint64_t src[2] = {0xfedcba9876543210UL, 0x0123456789abcdefUL};
  uint64_t dst[5] = {0, 0, 0, 0, 0};
  uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
  uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);

  START();
  __ Mov(x17, src_base);
  __ Mov(x18, dst_base);
  __ Ldr(w0, MemOperand(x17));
  __ Str(w0, MemOperand(x18));
  __ Ldr(w1, MemOperand(x17, 4));
  __ Str(w1, MemOperand(x18, 12));
  __ Ldr(x2, MemOperand(x17, 8));
  __ Str(x2, MemOperand(x18, 16));
  __ Ldrb(w3, MemOperand(x17, 1));
  __ Strb(w3, MemOperand(x18, 25));
  __ Ldrh(w4, MemOperand(x17, 2));
  __ Strh(w4, MemOperand(x18, 33));
  END();

  RUN();

  ASSERT_EQUAL_64(0x76543210, x0);
  ASSERT_EQUAL_64(0x76543210, dst[0]);
  ASSERT_EQUAL_64(0xfedcba98, x1);
  ASSERT_EQUAL_64(0xfedcba9800000000UL, dst[1]);
  ASSERT_EQUAL_64(0x0123456789abcdefUL, x2);
  ASSERT_EQUAL_64(0x0123456789abcdefUL, dst[2]);
  ASSERT_EQUAL_64(0x32, x3);
  ASSERT_EQUAL_64(0x3200, dst[3]);
  ASSERT_EQUAL_64(0x7654, x4);
  ASSERT_EQUAL_64(0x765400, dst[4]);
  ASSERT_EQUAL_64(src_base, x17);
  ASSERT_EQUAL_64(dst_base, x18);

  TEARDOWN();
}


TEST(ldr_str_wide) {
  INIT_V8();
  SETUP();

  uint32_t src[8192];
  uint32_t dst[8192];
  uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
  uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);
  memset(src, 0xaa, 8192 * sizeof(src[0]));
  memset(dst, 0xaa, 8192 * sizeof(dst[0]));
  src[0] = 0;
  src[6144] = 6144;
  src[8191] = 8191;
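  // The sentinel values let each addressing mode be verified independently:
  // the plain offset form reaches src[8191], the post-index form reads src[0]
  // before updating the base, and the pre-index form updates the base before
  // reading src[6144].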

  START();
  __ Mov(x22, src_base);
  __ Mov(x23, dst_base);
  __ Mov(x24, src_base);
  __ Mov(x25, dst_base);
  __ Mov(x26, src_base);
  __ Mov(x27, dst_base);

  __ Ldr(w0, MemOperand(x22, 8191 * sizeof(src[0])));
  __ Str(w0, MemOperand(x23, 8191 * sizeof(dst[0])));
  __ Ldr(w1, MemOperand(x24, 4096 * sizeof(src[0]), PostIndex));
  __ Str(w1, MemOperand(x25, 4096 * sizeof(dst[0]), PostIndex));
  __ Ldr(w2, MemOperand(x26, 6144 * sizeof(src[0]), PreIndex));
  __ Str(w2, MemOperand(x27, 6144 * sizeof(dst[0]), PreIndex));
  END();

  RUN();

  ASSERT_EQUAL_32(8191, w0);
  ASSERT_EQUAL_32(8191, dst[8191]);
  ASSERT_EQUAL_64(src_base, x22);
  ASSERT_EQUAL_64(dst_base, x23);
  ASSERT_EQUAL_32(0, w1);
  ASSERT_EQUAL_32(0, dst[0]);
  ASSERT_EQUAL_64(src_base + 4096 * sizeof(src[0]), x24);
  ASSERT_EQUAL_64(dst_base + 4096 * sizeof(dst[0]), x25);
  ASSERT_EQUAL_32(6144, w2);
  ASSERT_EQUAL_32(6144, dst[6144]);
  ASSERT_EQUAL_64(src_base + 6144 * sizeof(src[0]), x26);
  ASSERT_EQUAL_64(dst_base + 6144 * sizeof(dst[0]), x27);

  TEARDOWN();
}


TEST(ldr_str_preindex) {
  INIT_V8();
  SETUP();

  uint64_t src[2] = {0xfedcba9876543210UL, 0x0123456789abcdefUL};
  uint64_t dst[6] = {0, 0, 0, 0, 0, 0};
  uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
  uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);

  START();
  __ Mov(x17, src_base);
  __ Mov(x18, dst_base);
  __ Mov(x19, src_base);
  __ Mov(x20, dst_base);
  __ Mov(x21, src_base + 16);
  __ Mov(x22, dst_base + 40);
  __ Mov(x23, src_base);
  __ Mov(x24, dst_base);
  __ Mov(x25, src_base);
  __ Mov(x26, dst_base);
  __ Ldr(w0, MemOperand(x17, 4, PreIndex));
  __ Str(w0, MemOperand(x18, 12, PreIndex));
  __ Ldr(x1, MemOperand(x19, 8, PreIndex));
  __ Str(x1, MemOperand(x20, 16, PreIndex));
  __ Ldr(w2, MemOperand(x21, -4, PreIndex));
  __ Str(w2, MemOperand(x22, -4, PreIndex));
  __ Ldrb(w3, MemOperand(x23, 1, PreIndex));
  __ Strb(w3, MemOperand(x24, 25, PreIndex));
  __ Ldrh(w4, MemOperand(x25, 3, PreIndex));
  __ Strh(w4, MemOperand(x26, 41, PreIndex));
  END();

  RUN();

  ASSERT_EQUAL_64(0xfedcba98, x0);
  ASSERT_EQUAL_64(0xfedcba9800000000UL, dst[1]);
  ASSERT_EQUAL_64(0x0123456789abcdefUL, x1);
  ASSERT_EQUAL_64(0x0123456789abcdefUL, dst[2]);
  ASSERT_EQUAL_64(0x01234567, x2);
  ASSERT_EQUAL_64(0x0123456700000000UL, dst[4]);
  ASSERT_EQUAL_64(0x32, x3);
  ASSERT_EQUAL_64(0x3200, dst[3]);
  ASSERT_EQUAL_64(0x9876, x4);
  ASSERT_EQUAL_64(0x987600, dst[5]);
  ASSERT_EQUAL_64(src_base + 4, x17);
  ASSERT_EQUAL_64(dst_base + 12, x18);
  ASSERT_EQUAL_64(src_base + 8, x19);
  ASSERT_EQUAL_64(dst_base + 16, x20);
  ASSERT_EQUAL_64(src_base + 12, x21);
  ASSERT_EQUAL_64(dst_base + 36, x22);
  ASSERT_EQUAL_64(src_base + 1, x23);
  ASSERT_EQUAL_64(dst_base + 25, x24);
  ASSERT_EQUAL_64(src_base + 3, x25);
  ASSERT_EQUAL_64(dst_base + 41, x26);

  TEARDOWN();
}


TEST(ldr_str_postindex) {
  INIT_V8();
  SETUP();

  uint64_t src[2] = {0xfedcba9876543210UL, 0x0123456789abcdefUL};
  uint64_t dst[6] = {0, 0, 0, 0, 0, 0};
  uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
  uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);

  START();
  __ Mov(x17, src_base + 4);
  __ Mov(x18, dst_base + 12);
  __ Mov(x19, src_base + 8);
  __ Mov(x20, dst_base + 16);
  __ Mov(x21, src_base + 8);
  __ Mov(x22, dst_base + 32);
  __ Mov(x23, src_base + 1);
  __ Mov(x24, dst_base + 25);
  __ Mov(x25, src_base + 3);
  __ Mov(x26, dst_base + 41);
  __ Ldr(w0, MemOperand(x17, 4, PostIndex));
  __ Str(w0, MemOperand(x18, 12, PostIndex));
  __ Ldr(x1, MemOperand(x19, 8, PostIndex));
  __ Str(x1, MemOperand(x20, 16, PostIndex));
  __ Ldr(x2, MemOperand(x21, -8, PostIndex));
  __ Str(x2, MemOperand(x22, -32, PostIndex));
  __ Ldrb(w3, MemOperand(x23, 1, PostIndex));
  __ Strb(w3, MemOperand(x24, 5, PostIndex));
  __ Ldrh(w4, MemOperand(x25, -3, PostIndex));
  __ Strh(w4, MemOperand(x26, -41, PostIndex));
  END();

  RUN();

  ASSERT_EQUAL_64(0xfedcba98, x0);
  ASSERT_EQUAL_64(0xfedcba9800000000UL, dst[1]);
  ASSERT_EQUAL_64(0x0123456789abcdefUL, x1);
  ASSERT_EQUAL_64(0x0123456789abcdefUL, dst[2]);
  ASSERT_EQUAL_64(0x0123456789abcdefUL, x2);
  ASSERT_EQUAL_64(0x0123456789abcdefUL, dst[4]);
  ASSERT_EQUAL_64(0x32, x3);
  ASSERT_EQUAL_64(0x3200, dst[3]);
  ASSERT_EQUAL_64(0x9876, x4);
  ASSERT_EQUAL_64(0x987600, dst[5]);
  ASSERT_EQUAL_64(src_base + 8, x17);
  ASSERT_EQUAL_64(dst_base + 24, x18);
  ASSERT_EQUAL_64(src_base + 16, x19);
  ASSERT_EQUAL_64(dst_base + 32, x20);
  ASSERT_EQUAL_64(src_base, x21);
  ASSERT_EQUAL_64(dst_base, x22);
  ASSERT_EQUAL_64(src_base + 2, x23);
  ASSERT_EQUAL_64(dst_base + 30, x24);
  ASSERT_EQUAL_64(src_base, x25);
  ASSERT_EQUAL_64(dst_base, x26);

  TEARDOWN();
}


TEST(load_signed) {
  INIT_V8();
  SETUP();

  uint32_t src[2] = {0x80008080, 0x7fff7f7f};
  uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
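  // src[0] holds negative byte (0x80), half-word (0x8080) and word
  // (0x80008080) patterns, while src[1] holds positive ones, so each load
  // below exercises both signs of the extension.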

  START();
  __ Mov(x24, src_base);
  __ Ldrsb(w0, MemOperand(x24));
  __ Ldrsb(w1, MemOperand(x24, 4));
  __ Ldrsh(w2, MemOperand(x24));
  __ Ldrsh(w3, MemOperand(x24, 4));
  __ Ldrsb(x4, MemOperand(x24));
  __ Ldrsb(x5, MemOperand(x24, 4));
  __ Ldrsh(x6, MemOperand(x24));
  __ Ldrsh(x7, MemOperand(x24, 4));
  __ Ldrsw(x8, MemOperand(x24));
  __ Ldrsw(x9, MemOperand(x24, 4));
  END();

  RUN();

  ASSERT_EQUAL_64(0xffffff80, x0);
  ASSERT_EQUAL_64(0x0000007f, x1);
  ASSERT_EQUAL_64(0xffff8080, x2);
  ASSERT_EQUAL_64(0x00007f7f, x3);
  ASSERT_EQUAL_64(0xffffffffffffff80UL, x4);
  ASSERT_EQUAL_64(0x000000000000007fUL, x5);
  ASSERT_EQUAL_64(0xffffffffffff8080UL, x6);
  ASSERT_EQUAL_64(0x0000000000007f7fUL, x7);
  ASSERT_EQUAL_64(0xffffffff80008080UL, x8);
  ASSERT_EQUAL_64(0x000000007fff7f7fUL, x9);

  TEARDOWN();
}


TEST(load_store_regoffset) {
  INIT_V8();
  SETUP();

  uint32_t src[3] = {1, 2, 3};
  uint32_t dst[4] = {0, 0, 0, 0};
  uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
  uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);

  START();
  __ Mov(x16, src_base);
  __ Mov(x17, dst_base);
  __ Mov(x18, src_base + 3 * sizeof(src[0]));
  __ Mov(x19, dst_base + 3 * sizeof(dst[0]));
  __ Mov(x20, dst_base + 4 * sizeof(dst[0]));
  __ Mov(x24, 0);
  __ Mov(x25, 4);
  __ Mov(x26, -4);
  __ Mov(x27, 0xfffffffc);  // 32-bit -4.
  __ Mov(x28, 0xfffffffe);  // 32-bit -2.
  __ Mov(x29, 0xffffffff);  // 32-bit -1.

  __ Ldr(w0, MemOperand(x16, x24));
  __ Ldr(x1, MemOperand(x16, x25));
  __ Ldr(w2, MemOperand(x18, x26));
  __ Ldr(w3, MemOperand(x18, x27, SXTW));
  __ Ldr(w4, MemOperand(x18, x28, SXTW, 2));
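  // In the extended forms above, the 32-bit offset register is first sign-
  // extended and then scaled; for example, <x28, SXTW, 2> contributes
  // (int64_t(-2) << 2) = -8 to the address.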
  __ Str(w0, MemOperand(x17, x24));
  __ Str(x1, MemOperand(x17, x25));
  __ Str(w2, MemOperand(x20, x29, SXTW, 2));
  END();

  RUN();

  ASSERT_EQUAL_64(1, x0);
  ASSERT_EQUAL_64(0x0000000300000002UL, x1);
  ASSERT_EQUAL_64(3, x2);
  ASSERT_EQUAL_64(3, x3);
  ASSERT_EQUAL_64(2, x4);
  ASSERT_EQUAL_32(1, dst[0]);
  ASSERT_EQUAL_32(2, dst[1]);
  ASSERT_EQUAL_32(3, dst[2]);
  ASSERT_EQUAL_32(3, dst[3]);

  TEARDOWN();
}


TEST(load_store_float) {
  INIT_V8();
  SETUP();

  float src[3] = {1.0, 2.0, 3.0};
  float dst[3] = {0.0, 0.0, 0.0};
  uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
  uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);

  START();
  __ Mov(x17, src_base);
  __ Mov(x18, dst_base);
  __ Mov(x19, src_base);
  __ Mov(x20, dst_base);
  __ Mov(x21, src_base);
  __ Mov(x22, dst_base);
  __ Ldr(s0, MemOperand(x17, sizeof(src[0])));
  __ Str(s0, MemOperand(x18, sizeof(dst[0]), PostIndex));
  __ Ldr(s1, MemOperand(x19, sizeof(src[0]), PostIndex));
  __ Str(s1, MemOperand(x20, 2 * sizeof(dst[0]), PreIndex));
  __ Ldr(s2, MemOperand(x21, 2 * sizeof(src[0]), PreIndex));
  __ Str(s2, MemOperand(x22, sizeof(dst[0])));
  END();

  RUN();

  ASSERT_EQUAL_FP32(2.0, s0);
  ASSERT_EQUAL_FP32(2.0, dst[0]);
  ASSERT_EQUAL_FP32(1.0, s1);
  ASSERT_EQUAL_FP32(1.0, dst[2]);
  ASSERT_EQUAL_FP32(3.0, s2);
  ASSERT_EQUAL_FP32(3.0, dst[1]);
  ASSERT_EQUAL_64(src_base, x17);
  ASSERT_EQUAL_64(dst_base + sizeof(dst[0]), x18);
  ASSERT_EQUAL_64(src_base + sizeof(src[0]), x19);
  ASSERT_EQUAL_64(dst_base + 2 * sizeof(dst[0]), x20);
  ASSERT_EQUAL_64(src_base + 2 * sizeof(src[0]), x21);
  ASSERT_EQUAL_64(dst_base, x22);

  TEARDOWN();
}


TEST(load_store_double) {
  INIT_V8();
  SETUP();

  double src[3] = {1.0, 2.0, 3.0};
  double dst[3] = {0.0, 0.0, 0.0};
  uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
  uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);

  START();
  __ Mov(x17, src_base);
  __ Mov(x18, dst_base);
  __ Mov(x19, src_base);
  __ Mov(x20, dst_base);
  __ Mov(x21, src_base);
  __ Mov(x22, dst_base);
  __ Ldr(d0, MemOperand(x17, sizeof(src[0])));
  __ Str(d0, MemOperand(x18, sizeof(dst[0]), PostIndex));
  __ Ldr(d1, MemOperand(x19, sizeof(src[0]), PostIndex));
  __ Str(d1, MemOperand(x20, 2 * sizeof(dst[0]), PreIndex));
  __ Ldr(d2, MemOperand(x21, 2 * sizeof(src[0]), PreIndex));
  __ Str(d2, MemOperand(x22, sizeof(dst[0])));
  END();

  RUN();

  ASSERT_EQUAL_FP64(2.0, d0);
  ASSERT_EQUAL_FP64(2.0, dst[0]);
  ASSERT_EQUAL_FP64(1.0, d1);
  ASSERT_EQUAL_FP64(1.0, dst[2]);
  ASSERT_EQUAL_FP64(3.0, d2);
  ASSERT_EQUAL_FP64(3.0, dst[1]);
  ASSERT_EQUAL_64(src_base, x17);
  ASSERT_EQUAL_64(dst_base + sizeof(dst[0]), x18);
  ASSERT_EQUAL_64(src_base + sizeof(src[0]), x19);
  ASSERT_EQUAL_64(dst_base + 2 * sizeof(dst[0]), x20);
  ASSERT_EQUAL_64(src_base + 2 * sizeof(src[0]), x21);
  ASSERT_EQUAL_64(dst_base, x22);

  TEARDOWN();
}


TEST(ldp_stp_float) {
  INIT_V8();
  SETUP();

  float src[2] = {1.0, 2.0};
  float dst[3] = {0.0, 0.0, 0.0};
  uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
  uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);

  START();
  __ Mov(x16, src_base);
  __ Mov(x17, dst_base);
  __ Ldp(s31, s0, MemOperand(x16, 2 * sizeof(src[0]), PostIndex));
  __ Stp(s0, s31, MemOperand(x17, sizeof(dst[1]), PreIndex));
  END();

  RUN();

  ASSERT_EQUAL_FP32(1.0, s31);
  ASSERT_EQUAL_FP32(2.0, s0);
  ASSERT_EQUAL_FP32(0.0, dst[0]);
  ASSERT_EQUAL_FP32(2.0, dst[1]);
  ASSERT_EQUAL_FP32(1.0, dst[2]);
  ASSERT_EQUAL_64(src_base + 2 * sizeof(src[0]), x16);
  ASSERT_EQUAL_64(dst_base + sizeof(dst[1]), x17);

  TEARDOWN();
}


TEST(ldp_stp_double) {
  INIT_V8();
  SETUP();

  double src[2] = {1.0, 2.0};
  double dst[3] = {0.0, 0.0, 0.0};
  uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
  uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);

  START();
  __ Mov(x16, src_base);
  __ Mov(x17, dst_base);
  __ Ldp(d31, d0, MemOperand(x16, 2 * sizeof(src[0]), PostIndex));
  __ Stp(d0, d31, MemOperand(x17, sizeof(dst[1]), PreIndex));
  END();

  RUN();

  ASSERT_EQUAL_FP64(1.0, d31);
  ASSERT_EQUAL_FP64(2.0, d0);
  ASSERT_EQUAL_FP64(0.0, dst[0]);
  ASSERT_EQUAL_FP64(2.0, dst[1]);
  ASSERT_EQUAL_FP64(1.0, dst[2]);
  ASSERT_EQUAL_64(src_base + 2 * sizeof(src[0]), x16);
  ASSERT_EQUAL_64(dst_base + sizeof(dst[1]), x17);

  TEARDOWN();
}


TEST(ldp_stp_offset) {
  INIT_V8();
  SETUP();

  uint64_t src[3] = {0x0011223344556677UL, 0x8899aabbccddeeffUL,
                     0xffeeddccbbaa9988UL};
  uint64_t dst[7] = {0, 0, 0, 0, 0, 0, 0};
  uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
  uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);

  START();
  __ Mov(x16, src_base);
  __ Mov(x17, dst_base);
  __ Mov(x18, src_base + 24);
  __ Mov(x19, dst_base + 56);
  __ Ldp(w0, w1, MemOperand(x16));
  __ Ldp(w2, w3, MemOperand(x16, 4));
  __ Ldp(x4, x5, MemOperand(x16, 8));
  __ Ldp(w6, w7, MemOperand(x18, -12));
  __ Ldp(x8, x9, MemOperand(x18, -16));
  __ Stp(w0, w1, MemOperand(x17));
  __ Stp(w2, w3, MemOperand(x17, 8));
  __ Stp(x4, x5, MemOperand(x17, 16));
  __ Stp(w6, w7, MemOperand(x19, -24));
  __ Stp(x8, x9, MemOperand(x19, -16));
  END();

  RUN();

  ASSERT_EQUAL_64(0x44556677, x0);
  ASSERT_EQUAL_64(0x00112233, x1);
  ASSERT_EQUAL_64(0x0011223344556677UL, dst[0]);
  ASSERT_EQUAL_64(0x00112233, x2);
  ASSERT_EQUAL_64(0xccddeeff, x3);
  ASSERT_EQUAL_64(0xccddeeff00112233UL, dst[1]);
  ASSERT_EQUAL_64(0x8899aabbccddeeffUL, x4);
  ASSERT_EQUAL_64(0x8899aabbccddeeffUL, dst[2]);
  ASSERT_EQUAL_64(0xffeeddccbbaa9988UL, x5);
  ASSERT_EQUAL_64(0xffeeddccbbaa9988UL, dst[3]);
  ASSERT_EQUAL_64(0x8899aabb, x6);
  ASSERT_EQUAL_64(0xbbaa9988, x7);
  ASSERT_EQUAL_64(0xbbaa99888899aabbUL, dst[4]);
  ASSERT_EQUAL_64(0x8899aabbccddeeffUL, x8);
  ASSERT_EQUAL_64(0x8899aabbccddeeffUL, dst[5]);
  ASSERT_EQUAL_64(0xffeeddccbbaa9988UL, x9);
  ASSERT_EQUAL_64(0xffeeddccbbaa9988UL, dst[6]);
  ASSERT_EQUAL_64(src_base, x16);
  ASSERT_EQUAL_64(dst_base, x17);
  ASSERT_EQUAL_64(src_base + 24, x18);
  ASSERT_EQUAL_64(dst_base + 56, x19);

  TEARDOWN();
}


TEST(ldnp_stnp_offset) {
  INIT_V8();
  SETUP();

  uint64_t src[3] = {0x0011223344556677UL, 0x8899aabbccddeeffUL,
                     0xffeeddccbbaa9988UL};
  uint64_t dst[7] = {0, 0, 0, 0, 0, 0, 0};
  uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
  uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);

  START();
  __ Mov(x16, src_base);
  __ Mov(x17, dst_base);
  __ Mov(x18, src_base + 24);
  __ Mov(x19, dst_base + 56);
  __ Ldnp(w0, w1, MemOperand(x16));
  __ Ldnp(w2, w3, MemOperand(x16, 4));
  __ Ldnp(x4, x5, MemOperand(x16, 8));
  __ Ldnp(w6, w7, MemOperand(x18, -12));
  __ Ldnp(x8, x9, MemOperand(x18, -16));
  __ Stnp(w0, w1, MemOperand(x17));
  __ Stnp(w2, w3, MemOperand(x17, 8));
  __ Stnp(x4, x5, MemOperand(x17, 16));
  __ Stnp(w6, w7, MemOperand(x19, -24));
  __ Stnp(x8, x9, MemOperand(x19, -16));
  END();

  RUN();

  ASSERT_EQUAL_64(0x44556677, x0);
  ASSERT_EQUAL_64(0x00112233, x1);
  ASSERT_EQUAL_64(0x0011223344556677UL, dst[0]);
  ASSERT_EQUAL_64(0x00112233, x2);
  ASSERT_EQUAL_64(0xccddeeff, x3);
  ASSERT_EQUAL_64(0xccddeeff00112233UL, dst[1]);
  ASSERT_EQUAL_64(0x8899aabbccddeeffUL, x4);
  ASSERT_EQUAL_64(0x8899aabbccddeeffUL, dst[2]);
  ASSERT_EQUAL_64(0xffeeddccbbaa9988UL, x5);
  ASSERT_EQUAL_64(0xffeeddccbbaa9988UL, dst[3]);
  ASSERT_EQUAL_64(0x8899aabb, x6);
  ASSERT_EQUAL_64(0xbbaa9988, x7);
  ASSERT_EQUAL_64(0xbbaa99888899aabbUL, dst[4]);
  ASSERT_EQUAL_64(0x8899aabbccddeeffUL, x8);
  ASSERT_EQUAL_64(0x8899aabbccddeeffUL, dst[5]);
  ASSERT_EQUAL_64(0xffeeddccbbaa9988UL, x9);
  ASSERT_EQUAL_64(0xffeeddccbbaa9988UL, dst[6]);
  ASSERT_EQUAL_64(src_base, x16);
  ASSERT_EQUAL_64(dst_base, x17);
  ASSERT_EQUAL_64(src_base + 24, x18);
  ASSERT_EQUAL_64(dst_base + 56, x19);

  TEARDOWN();
}


TEST(ldp_stp_preindex) {
  INIT_V8();
  SETUP();

  uint64_t src[3] = {0x0011223344556677UL, 0x8899aabbccddeeffUL,
                     0xffeeddccbbaa9988UL};
  uint64_t dst[5] = {0, 0, 0, 0, 0};
  uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
  uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);

  START();
  __ Mov(x16, src_base);
  __ Mov(x17, dst_base);
  __ Mov(x18, dst_base + 16);
  __ Ldp(w0, w1, MemOperand(x16, 4, PreIndex));
  __ Mov(x19, x16);
  __ Ldp(w2, w3, MemOperand(x16, -4, PreIndex));
  __ Stp(w2, w3, MemOperand(x17, 4, PreIndex));
  __ Mov(x20, x17);
  __ Stp(w0, w1, MemOperand(x17, -4, PreIndex));
  __ Ldp(x4, x5, MemOperand(x16, 8, PreIndex));
  __ Mov(x21, x16);
  __ Ldp(x6, x7, MemOperand(x16, -8, PreIndex));
  __ Stp(x7, x6, MemOperand(x18, 8, PreIndex));
  __ Mov(x22, x18);
  __ Stp(x5, x4, MemOperand(x18, -8, PreIndex));
  END();

  RUN();

  ASSERT_EQUAL_64(0x00112233, x0);
  ASSERT_EQUAL_64(0xccddeeff, x1);
  ASSERT_EQUAL_64(0x44556677, x2);
  ASSERT_EQUAL_64(0x00112233, x3);
  ASSERT_EQUAL_64(0xccddeeff00112233UL, dst[0]);
  ASSERT_EQUAL_64(0x0000000000112233UL, dst[1]);
  ASSERT_EQUAL_64(0x8899aabbccddeeffUL, x4);
  ASSERT_EQUAL_64(0xffeeddccbbaa9988UL, x5);
  ASSERT_EQUAL_64(0x0011223344556677UL, x6);
  ASSERT_EQUAL_64(0x8899aabbccddeeffUL, x7);
  ASSERT_EQUAL_64(0xffeeddccbbaa9988UL, dst[2]);
  ASSERT_EQUAL_64(0x8899aabbccddeeffUL, dst[3]);
  ASSERT_EQUAL_64(0x0011223344556677UL, dst[4]);
  ASSERT_EQUAL_64(src_base, x16);
  ASSERT_EQUAL_64(dst_base, x17);
  ASSERT_EQUAL_64(dst_base + 16, x18);
  ASSERT_EQUAL_64(src_base + 4, x19);
  ASSERT_EQUAL_64(dst_base + 4, x20);
  ASSERT_EQUAL_64(src_base + 8, x21);
  ASSERT_EQUAL_64(dst_base + 24, x22);

  TEARDOWN();
}


TEST(ldp_stp_postindex) {
  INIT_V8();
  SETUP();

  uint64_t src[4] = {0x0011223344556677UL, 0x8899aabbccddeeffUL,
                     0xffeeddccbbaa9988UL, 0x7766554433221100UL};
  uint64_t dst[5] = {0, 0, 0, 0, 0};
  uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
  uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);

  START();
  __ Mov(x16, src_base);
  __ Mov(x17, dst_base);
  __ Mov(x18, dst_base + 16);
  __ Ldp(w0, w1, MemOperand(x16, 4, PostIndex));
  __ Mov(x19, x16);
  __ Ldp(w2, w3, MemOperand(x16, -4, PostIndex));
  __ Stp(w2, w3, MemOperand(x17, 4, PostIndex));
  __ Mov(x20, x17);
  __ Stp(w0, w1, MemOperand(x17, -4, PostIndex));
  __ Ldp(x4, x5, MemOperand(x16, 8, PostIndex));
  __ Mov(x21, x16);
  __ Ldp(x6, x7, MemOperand(x16, -8, PostIndex));
  __ Stp(x7, x6, MemOperand(x18, 8, PostIndex));
  __ Mov(x22, x18);
  __ Stp(x5, x4, MemOperand(x18, -8, PostIndex));
  END();

  RUN();

  ASSERT_EQUAL_64(0x44556677, x0);
  ASSERT_EQUAL_64(0x00112233, x1);
  ASSERT_EQUAL_64(0x00112233, x2);
  ASSERT_EQUAL_64(0xccddeeff, x3);
  ASSERT_EQUAL_64(0x4455667700112233UL, dst[0]);
  ASSERT_EQUAL_64(0x0000000000112233UL, dst[1]);
  ASSERT_EQUAL_64(0x0011223344556677UL, x4);
  ASSERT_EQUAL_64(0x8899aabbccddeeffUL, x5);
  ASSERT_EQUAL_64(0x8899aabbccddeeffUL, x6);
  ASSERT_EQUAL_64(0xffeeddccbbaa9988UL, x7);
  ASSERT_EQUAL_64(0xffeeddccbbaa9988UL, dst[2]);
  ASSERT_EQUAL_64(0x8899aabbccddeeffUL, dst[3]);
  ASSERT_EQUAL_64(0x0011223344556677UL, dst[4]);
  ASSERT_EQUAL_64(src_base, x16);
  ASSERT_EQUAL_64(dst_base, x17);
  ASSERT_EQUAL_64(dst_base + 16, x18);
  ASSERT_EQUAL_64(src_base + 4, x19);
  ASSERT_EQUAL_64(dst_base + 4, x20);
  ASSERT_EQUAL_64(src_base + 8, x21);
  ASSERT_EQUAL_64(dst_base + 24, x22);

  TEARDOWN();
}


TEST(ldp_sign_extend) {
  INIT_V8();
  SETUP();

  uint32_t src[2] = {0x80000000, 0x7fffffff};
  uintptr_t src_base = reinterpret_cast<uintptr_t>(src);

  START();
  __ Mov(x24, src_base);
  __ Ldpsw(x0, x1, MemOperand(x24));
  END();

  RUN();

  ASSERT_EQUAL_64(0xffffffff80000000UL, x0);
  ASSERT_EQUAL_64(0x000000007fffffffUL, x1);

  TEARDOWN();
}


TEST(ldur_stur) {
  INIT_V8();
  SETUP();

  int64_t src[2] = {0x0123456789abcdefUL, 0x0123456789abcdefUL};
  int64_t dst[5] = {0, 0, 0, 0, 0};
  uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
  uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);

  START();
  __ Mov(x17, src_base);
  __ Mov(x18, dst_base);
  __ Mov(x19, src_base + 16);
  __ Mov(x20, dst_base + 32);
  __ Mov(x21, dst_base + 40);
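  // The offsets below are either negative or not multiples of the access
  // size, so they cannot use the scaled-immediate encodings; the macro
  // assembler is expected to fall back to the unscaled-offset forms (ldur,
  // stur and variants).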
  __ Ldr(w0, MemOperand(x17, 1));
  __ Str(w0, MemOperand(x18, 2));
  __ Ldr(x1, MemOperand(x17, 3));
  __ Str(x1, MemOperand(x18, 9));
  __ Ldr(w2, MemOperand(x19, -9));
  __ Str(w2, MemOperand(x20, -5));
  __ Ldrb(w3, MemOperand(x19, -1));
  __ Strb(w3, MemOperand(x21, -1));
  END();

  RUN();

  ASSERT_EQUAL_64(0x6789abcd, x0);
  ASSERT_EQUAL_64(0x6789abcd0000L, dst[0]);
  ASSERT_EQUAL_64(0xabcdef0123456789L, x1);
  ASSERT_EQUAL_64(0xcdef012345678900L, dst[1]);
  ASSERT_EQUAL_64(0x000000ab, dst[2]);
  ASSERT_EQUAL_64(0xabcdef01, x2);
  ASSERT_EQUAL_64(0x00abcdef01000000L, dst[3]);
  ASSERT_EQUAL_64(0x00000001, x3);
  ASSERT_EQUAL_64(0x0100000000000000L, dst[4]);
  ASSERT_EQUAL_64(src_base, x17);
  ASSERT_EQUAL_64(dst_base, x18);
  ASSERT_EQUAL_64(src_base + 16, x19);
  ASSERT_EQUAL_64(dst_base + 32, x20);

  TEARDOWN();
}


#if 0  // TODO(all) enable.
// TODO(rodolph): Adapt w16 Literal tests for RelocInfo.
TEST(ldr_literal) {
  INIT_V8();
  SETUP();

  START();
  __ Ldr(x2, 0x1234567890abcdefUL);
  __ Ldr(w3, 0xfedcba09);
  __ Ldr(d13, 1.234);
  __ Ldr(s25, 2.5);
  END();

  RUN();

  ASSERT_EQUAL_64(0x1234567890abcdefUL, x2);
  ASSERT_EQUAL_64(0xfedcba09, x3);
  ASSERT_EQUAL_FP64(1.234, d13);
  ASSERT_EQUAL_FP32(2.5, s25);

  TEARDOWN();
}


static void LdrLiteralRangeHelper(ptrdiff_t range_,
                                  LiteralPoolEmitOption option,
                                  bool expect_dump) {
  ASSERT(range_ > 0);
  SETUP_SIZE(range_ + 1024);

  Label label_1, label_2;

  size_t range = static_cast<size_t>(range_);
  size_t code_size = 0;
  size_t pool_guard_size;

  if (option == NoJumpRequired) {
    // Space for an explicit branch.
    pool_guard_size = sizeof(Instr);
  } else {
    pool_guard_size = 0;
  }

  START();
  // Force a pool dump so the pool starts off empty.
  __ EmitLiteralPool(JumpRequired);
  ASSERT_LITERAL_POOL_SIZE(0);

  __ Ldr(x0, 0x1234567890abcdefUL);
  __ Ldr(w1, 0xfedcba09);
  __ Ldr(d0, 1.234);
  __ Ldr(s1, 2.5);
  ASSERT_LITERAL_POOL_SIZE(4);

  code_size += 4 * sizeof(Instr);

  // Check that the requested range (allowing space for a branch over the pool)
  // can be handled by this test.
  ASSERT((code_size + pool_guard_size) <= range);

  // Emit NOPs up to 'range', leaving space for the pool guard.
  while ((code_size + pool_guard_size) < range) {
    __ Nop();
    code_size += sizeof(Instr);
  }

  // Emit the guard sequence before the literal pool.
  if (option == NoJumpRequired) {
    __ B(&label_1);
    code_size += sizeof(Instr);
  }

  ASSERT(code_size == range);
  ASSERT_LITERAL_POOL_SIZE(4);

  // Possibly generate a literal pool.
  __ CheckLiteralPool(option);
  __ Bind(&label_1);
  if (expect_dump) {
    ASSERT_LITERAL_POOL_SIZE(0);
  } else {
    ASSERT_LITERAL_POOL_SIZE(4);
  }

  // Force a pool flush to check that a second pool functions correctly.
  __ EmitLiteralPool(JumpRequired);
  ASSERT_LITERAL_POOL_SIZE(0);

  // These loads should be after the pool (and will require a new one).
  __ Ldr(x4, 0x34567890abcdef12UL);
  __ Ldr(w5, 0xdcba09fe);
  __ Ldr(d4, 123.4);
  __ Ldr(s5, 250.0);
  ASSERT_LITERAL_POOL_SIZE(4);
  END();

  RUN();

  // Check that the literals loaded correctly.
  ASSERT_EQUAL_64(0x1234567890abcdefUL, x0);
  ASSERT_EQUAL_64(0xfedcba09, x1);
  ASSERT_EQUAL_FP64(1.234, d0);
  ASSERT_EQUAL_FP32(2.5, s1);
  ASSERT_EQUAL_64(0x34567890abcdef12UL, x4);
  ASSERT_EQUAL_64(0xdcba09fe, x5);
  ASSERT_EQUAL_FP64(123.4, d4);
  ASSERT_EQUAL_FP32(250.0, s5);

  TEARDOWN();
}


TEST(ldr_literal_range_1) {
  INIT_V8();
  LdrLiteralRangeHelper(kRecommendedLiteralPoolRange,
                        NoJumpRequired,
                        true);
}


TEST(ldr_literal_range_2) {
  INIT_V8();
  LdrLiteralRangeHelper(kRecommendedLiteralPoolRange - sizeof(Instr),
                        NoJumpRequired,
                        false);
}


TEST(ldr_literal_range_3) {
  INIT_V8();
  LdrLiteralRangeHelper(2 * kRecommendedLiteralPoolRange,
                        JumpRequired,
                        true);
}


TEST(ldr_literal_range_4) {
  INIT_V8();
  LdrLiteralRangeHelper(2 * kRecommendedLiteralPoolRange - sizeof(Instr),
                        JumpRequired,
                        false);
}


TEST(ldr_literal_range_5) {
  INIT_V8();
  LdrLiteralRangeHelper(kLiteralPoolCheckInterval,
                        JumpRequired,
                        false);
}


TEST(ldr_literal_range_6) {
  INIT_V8();
  LdrLiteralRangeHelper(kLiteralPoolCheckInterval - sizeof(Instr),
                        JumpRequired,
                        false);
}
#endif

TEST(add_sub_imm) {
  INIT_V8();
  SETUP();

  START();
  __ Mov(x0, 0x0);
  __ Mov(x1, 0x1111);
  __ Mov(x2, 0xffffffffffffffffL);
  __ Mov(x3, 0x8000000000000000L);

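  // All of the immediates below fit the arithmetic-immediate encoding: a
  // 12-bit value, optionally shifted left by 12 bits.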
  __ Add(x10, x0, Operand(0x123));
  __ Add(x11, x1, Operand(0x122000));
  __ Add(x12, x0, Operand(0xabc << 12));
  __ Add(x13, x2, Operand(1));

  __ Add(w14, w0, Operand(0x123));
  __ Add(w15, w1, Operand(0x122000));
  __ Add(w16, w0, Operand(0xabc << 12));
  __ Add(w17, w2, Operand(1));

  __ Sub(x20, x0, Operand(0x1));
  __ Sub(x21, x1, Operand(0x111));
  __ Sub(x22, x1, Operand(0x1 << 12));
  __ Sub(x23, x3, Operand(1));

  __ Sub(w24, w0, Operand(0x1));
  __ Sub(w25, w1, Operand(0x111));
  __ Sub(w26, w1, Operand(0x1 << 12));
  __ Sub(w27, w3, Operand(1));
  END();

  RUN();

  ASSERT_EQUAL_64(0x123, x10);
  ASSERT_EQUAL_64(0x123111, x11);
  ASSERT_EQUAL_64(0xabc000, x12);
  ASSERT_EQUAL_64(0x0, x13);

  ASSERT_EQUAL_32(0x123, w14);
  ASSERT_EQUAL_32(0x123111, w15);
  ASSERT_EQUAL_32(0xabc000, w16);
  ASSERT_EQUAL_32(0x0, w17);

  ASSERT_EQUAL_64(0xffffffffffffffffL, x20);
  ASSERT_EQUAL_64(0x1000, x21);
  ASSERT_EQUAL_64(0x111, x22);
  ASSERT_EQUAL_64(0x7fffffffffffffffL, x23);

  ASSERT_EQUAL_32(0xffffffff, w24);
  ASSERT_EQUAL_32(0x1000, w25);
  ASSERT_EQUAL_32(0x111, w26);
  ASSERT_EQUAL_32(0xffffffff, w27);

  TEARDOWN();
}


TEST(add_sub_wide_imm) {
  INIT_V8();
  SETUP();

  START();
  __ Mov(x0, 0x0);
  __ Mov(x1, 0x1);

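  // These immediates do not fit the 12-bit (optionally shifted) add/sub
  // immediate field, so the macro assembler is expected to materialize them
  // in a scratch register first.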
  __ Add(x10, x0, Operand(0x1234567890abcdefUL));
  __ Add(x11, x1, Operand(0xffffffff));

  __ Add(w12, w0, Operand(0x12345678));
  __ Add(w13, w1, Operand(0xffffffff));

  __ Sub(x20, x0, Operand(0x1234567890abcdefUL));

  __ Sub(w21, w0, Operand(0x12345678));
  END();

  RUN();

  ASSERT_EQUAL_64(0x1234567890abcdefUL, x10);
  ASSERT_EQUAL_64(0x100000000UL, x11);

  ASSERT_EQUAL_32(0x12345678, w12);
  ASSERT_EQUAL_64(0x0, x13);

  ASSERT_EQUAL_64(-0x1234567890abcdefUL, x20);

  ASSERT_EQUAL_32(-0x12345678, w21);

  TEARDOWN();
}


TEST(add_sub_shifted) {
  INIT_V8();
  SETUP();

  START();
  __ Mov(x0, 0);
  __ Mov(x1, 0x0123456789abcdefL);
  __ Mov(x2, 0xfedcba9876543210L);
  __ Mov(x3, 0xffffffffffffffffL);

  __ Add(x10, x1, Operand(x2));
  __ Add(x11, x0, Operand(x1, LSL, 8));
  __ Add(x12, x0, Operand(x1, LSR, 8));
  __ Add(x13, x0, Operand(x1, ASR, 8));
  __ Add(x14, x0, Operand(x2, ASR, 8));
  __ Add(w15, w0, Operand(w1, ASR, 8));
  __ Add(w18, w3, Operand(w1, ROR, 8));
  __ Add(x19, x3, Operand(x1, ROR, 8));

  __ Sub(x20, x3, Operand(x2));
  __ Sub(x21, x3, Operand(x1, LSL, 8));
  __ Sub(x22, x3, Operand(x1, LSR, 8));
  __ Sub(x23, x3, Operand(x1, ASR, 8));
  __ Sub(x24, x3, Operand(x2, ASR, 8));
  __ Sub(w25, w3, Operand(w1, ASR, 8));
  __ Sub(w26, w3, Operand(w1, ROR, 8));
  __ Sub(x27, x3, Operand(x1, ROR, 8));
  END();

  RUN();

  ASSERT_EQUAL_64(0xffffffffffffffffL, x10);
  ASSERT_EQUAL_64(0x23456789abcdef00L, x11);
  ASSERT_EQUAL_64(0x000123456789abcdL, x12);
  ASSERT_EQUAL_64(0x000123456789abcdL, x13);
  ASSERT_EQUAL_64(0xfffedcba98765432L, x14);
  ASSERT_EQUAL_64(0xff89abcd, x15);
  ASSERT_EQUAL_64(0xef89abcc, x18);
  ASSERT_EQUAL_64(0xef0123456789abccL, x19);

  ASSERT_EQUAL_64(0x0123456789abcdefL, x20);
  ASSERT_EQUAL_64(0xdcba9876543210ffL, x21);
  ASSERT_EQUAL_64(0xfffedcba98765432L, x22);
  ASSERT_EQUAL_64(0xfffedcba98765432L, x23);
  ASSERT_EQUAL_64(0x000123456789abcdL, x24);
  ASSERT_EQUAL_64(0x00765432, x25);
  ASSERT_EQUAL_64(0x10765432, x26);
  ASSERT_EQUAL_64(0x10fedcba98765432L, x27);

  TEARDOWN();
}


TEST(add_sub_extended) {
  INIT_V8();
  SETUP();

  START();
  __ Mov(x0, 0);
  __ Mov(x1, 0x0123456789abcdefL);
  __ Mov(x2, 0xfedcba9876543210L);
  __ Mov(w3, 0x80);

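  // An extended register operand truncates the register to the named width,
  // sign- or zero-extends it, and then shifts left by up to four bits; for
  // example, Operand(x1, SXTB, 1) is (int64_t)(int8_t)x1 << 1.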
  __ Add(x10, x0, Operand(x1, UXTB, 0));
  __ Add(x11, x0, Operand(x1, UXTB, 1));
  __ Add(x12, x0, Operand(x1, UXTH, 2));
  __ Add(x13, x0, Operand(x1, UXTW, 4));

  __ Add(x14, x0, Operand(x1, SXTB, 0));
  __ Add(x15, x0, Operand(x1, SXTB, 1));
  __ Add(x16, x0, Operand(x1, SXTH, 2));
  __ Add(x17, x0, Operand(x1, SXTW, 3));
  __ Add(x18, x0, Operand(x2, SXTB, 0));
  __ Add(x19, x0, Operand(x2, SXTB, 1));
  __ Add(x20, x0, Operand(x2, SXTH, 2));
  __ Add(x21, x0, Operand(x2, SXTW, 3));

  __ Add(x22, x1, Operand(x2, SXTB, 1));
  __ Sub(x23, x1, Operand(x2, SXTB, 1));

  __ Add(w24, w1, Operand(w2, UXTB, 2));
  __ Add(w25, w0, Operand(w1, SXTB, 0));
  __ Add(w26, w0, Operand(w1, SXTB, 1));
  __ Add(w27, w2, Operand(w1, SXTW, 3));

  __ Add(w28, w0, Operand(w1, SXTW, 3));
  __ Add(x29, x0, Operand(w1, SXTW, 3));

  __ Sub(x30, x0, Operand(w3, SXTB, 1));
  END();

  RUN();

  ASSERT_EQUAL_64(0xefL, x10);
  ASSERT_EQUAL_64(0x1deL, x11);
  ASSERT_EQUAL_64(0x337bcL, x12);
  ASSERT_EQUAL_64(0x89abcdef0L, x13);

  ASSERT_EQUAL_64(0xffffffffffffffefL, x14);
  ASSERT_EQUAL_64(0xffffffffffffffdeL, x15);
  ASSERT_EQUAL_64(0xffffffffffff37bcL, x16);
  ASSERT_EQUAL_64(0xfffffffc4d5e6f78L, x17);
  ASSERT_EQUAL_64(0x10L, x18);
  ASSERT_EQUAL_64(0x20L, x19);
  ASSERT_EQUAL_64(0xc840L, x20);
  ASSERT_EQUAL_64(0x3b2a19080L, x21);

  ASSERT_EQUAL_64(0x0123456789abce0fL, x22);
  ASSERT_EQUAL_64(0x0123456789abcdcfL, x23);

  ASSERT_EQUAL_32(0x89abce2f, w24);
  ASSERT_EQUAL_32(0xffffffef, w25);
  ASSERT_EQUAL_32(0xffffffde, w26);
  ASSERT_EQUAL_32(0xc3b2a188, w27);

  ASSERT_EQUAL_32(0x4d5e6f78, w28);
  ASSERT_EQUAL_64(0xfffffffc4d5e6f78L, x29);

  ASSERT_EQUAL_64(256, x30);

  TEARDOWN();
}


TEST(add_sub_negative) {
  INIT_V8();
  SETUP();

  START();
  __ Mov(x0, 0);
  __ Mov(x1, 4687);
  __ Mov(x2, 0x1122334455667788);
  __ Mov(w3, 0x11223344);
  __ Mov(w4, 400000);

  __ Add(x10, x0, -42);
  __ Add(x11, x1, -687);
  __ Add(x12, x2, -0x88);

  __ Sub(x13, x0, -600);
  __ Sub(x14, x1, -313);
  __ Sub(x15, x2, -0x555);

  __ Add(w19, w3, -0x344);
  __ Add(w20, w4, -2000);

  __ Sub(w21, w3, -0xbc);
  __ Sub(w22, w4, -2000);
  END();

  RUN();

  ASSERT_EQUAL_64(-42, x10);
  ASSERT_EQUAL_64(4000, x11);
  ASSERT_EQUAL_64(0x1122334455667700, x12);

  ASSERT_EQUAL_64(600, x13);
  ASSERT_EQUAL_64(5000, x14);
  ASSERT_EQUAL_64(0x1122334455667cdd, x15);

  ASSERT_EQUAL_32(0x11223000, w19);
  ASSERT_EQUAL_32(398000, w20);

  ASSERT_EQUAL_32(0x11223400, w21);
  ASSERT_EQUAL_32(402000, w22);

  TEARDOWN();
}


TEST(add_sub_zero) {
  INIT_V8();
  SETUP();

  START();
  __ Mov(x0, 0);
  __ Mov(x1, 0);
  __ Mov(x2, 0);

  Label blob1;
  __ Bind(&blob1);
  __ Add(x0, x0, 0);
  __ Sub(x1, x1, 0);
  __ Sub(x2, x2, xzr);
  CHECK_EQ(0, __ SizeOfCodeGeneratedSince(&blob1));

  Label blob2;
  __ Bind(&blob2);
  __ Add(w3, w3, 0);
  CHECK_NE(0, __ SizeOfCodeGeneratedSince(&blob2));

  Label blob3;
  __ Bind(&blob3);
  __ Sub(w3, w3, wzr);
  CHECK_NE(0, __ SizeOfCodeGeneratedSince(&blob3));

  END();

  RUN();

  ASSERT_EQUAL_64(0, x0);
  ASSERT_EQUAL_64(0, x1);
  ASSERT_EQUAL_64(0, x2);

  TEARDOWN();
}


TEST(claim_drop_zero) {
  INIT_V8();
  SETUP();

  START();

  Label start;
  __ Bind(&start);
  __ Claim(0);
  __ Drop(0);
  __ Claim(xzr, 8);
  __ Drop(xzr, 8);
  __ Claim(xzr, 0);
  __ Drop(xzr, 0);
  __ Claim(x7, 0);
  __ Drop(x7, 0);
  __ ClaimBySMI(xzr, 8);
  __ DropBySMI(xzr, 8);
  __ ClaimBySMI(xzr, 0);
  __ DropBySMI(xzr, 0);
  CHECK_EQ(0, __ SizeOfCodeGeneratedSince(&start));

  END();

  RUN();

  TEARDOWN();
}


TEST(neg) {
  INIT_V8();
  SETUP();

  START();
  __ Mov(x0, 0xf123456789abcdefL);

  // Immediate.
  __ Neg(x1, 0x123);
  __ Neg(w2, 0x123);

  // Shifted.
  __ Neg(x3, Operand(x0, LSL, 1));
  __ Neg(w4, Operand(w0, LSL, 2));
  __ Neg(x5, Operand(x0, LSR, 3));
  __ Neg(w6, Operand(w0, LSR, 4));
  __ Neg(x7, Operand(x0, ASR, 5));
  __ Neg(w8, Operand(w0, ASR, 6));

  // Extended.
  __ Neg(w9, Operand(w0, UXTB));
  __ Neg(x10, Operand(x0, SXTB, 1));
  __ Neg(w11, Operand(w0, UXTH, 2));
  __ Neg(x12, Operand(x0, SXTH, 3));
  __ Neg(w13, Operand(w0, UXTW, 4));
  __ Neg(x14, Operand(x0, SXTW, 4));
  END();

  RUN();

  ASSERT_EQUAL_64(0xfffffffffffffeddUL, x1);
  ASSERT_EQUAL_64(0xfffffedd, x2);
  ASSERT_EQUAL_64(0x1db97530eca86422UL, x3);
  ASSERT_EQUAL_64(0xd950c844, x4);
  ASSERT_EQUAL_64(0xe1db97530eca8643UL, x5);
  ASSERT_EQUAL_64(0xf7654322, x6);
  ASSERT_EQUAL_64(0x0076e5d4c3b2a191UL, x7);
  ASSERT_EQUAL_64(0x01d950c9, x8);
  ASSERT_EQUAL_64(0xffffff11, x9);
  ASSERT_EQUAL_64(0x0000000000000022UL, x10);
  ASSERT_EQUAL_64(0xfffcc844, x11);
  ASSERT_EQUAL_64(0x0000000000019088UL, x12);
  ASSERT_EQUAL_64(0x65432110, x13);
  ASSERT_EQUAL_64(0x0000000765432110UL, x14);

  TEARDOWN();
}


TEST(adc_sbc_shift) {
  INIT_V8();
  SETUP();

  START();
  __ Mov(x0, 0);
  __ Mov(x1, 1);
  __ Mov(x2, 0x0123456789abcdefL);
  __ Mov(x3, 0xfedcba9876543210L);
  __ Mov(x4, 0xffffffffffffffffL);

  // Clear the C flag.
  __ Adds(x0, x0, Operand(0));
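  // (Adds with a zero operand cannot produce a carry out, so it leaves C
  // clear; conversely, comparing a register with itself subtracts without
  // borrow, which sets C. These two idioms recur throughout the flag tests.)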

  __ Adc(x5, x2, Operand(x3));
  __ Adc(x6, x0, Operand(x1, LSL, 60));
  __ Sbc(x7, x4, Operand(x3, LSR, 4));
  __ Adc(x8, x2, Operand(x3, ASR, 4));
  __ Adc(x9, x2, Operand(x3, ROR, 8));

  __ Adc(w10, w2, Operand(w3));
  __ Adc(w11, w0, Operand(w1, LSL, 30));
  __ Sbc(w12, w4, Operand(w3, LSR, 4));
  __ Adc(w13, w2, Operand(w3, ASR, 4));
  __ Adc(w14, w2, Operand(w3, ROR, 8));

  // Set the C flag.
  __ Cmp(w0, Operand(w0));

  __ Adc(x18, x2, Operand(x3));
  __ Adc(x19, x0, Operand(x1, LSL, 60));
  __ Sbc(x20, x4, Operand(x3, LSR, 4));
  __ Adc(x21, x2, Operand(x3, ASR, 4));
  __ Adc(x22, x2, Operand(x3, ROR, 8));

  __ Adc(w23, w2, Operand(w3));
  __ Adc(w24, w0, Operand(w1, LSL, 30));
  __ Sbc(w25, w4, Operand(w3, LSR, 4));
  __ Adc(w26, w2, Operand(w3, ASR, 4));
  __ Adc(w27, w2, Operand(w3, ROR, 8));
  END();

  RUN();

  ASSERT_EQUAL_64(0xffffffffffffffffL, x5);
  ASSERT_EQUAL_64(1L << 60, x6);
  ASSERT_EQUAL_64(0xf0123456789abcddL, x7);
  ASSERT_EQUAL_64(0x0111111111111110L, x8);
  ASSERT_EQUAL_64(0x1222222222222221L, x9);

  ASSERT_EQUAL_32(0xffffffff, w10);
  ASSERT_EQUAL_32(1 << 30, w11);
  ASSERT_EQUAL_32(0xf89abcdd, w12);
  ASSERT_EQUAL_32(0x91111110, w13);
  ASSERT_EQUAL_32(0x9a222221, w14);

  ASSERT_EQUAL_64(0xffffffffffffffffL + 1, x18);
  ASSERT_EQUAL_64((1L << 60) + 1, x19);
  ASSERT_EQUAL_64(0xf0123456789abcddL + 1, x20);
  ASSERT_EQUAL_64(0x0111111111111110L + 1, x21);
  ASSERT_EQUAL_64(0x1222222222222221L + 1, x22);

  ASSERT_EQUAL_32(0xffffffff + 1, w23);
  ASSERT_EQUAL_32((1 << 30) + 1, w24);
  ASSERT_EQUAL_32(0xf89abcdd + 1, w25);
  ASSERT_EQUAL_32(0x91111110 + 1, w26);
  ASSERT_EQUAL_32(0x9a222221 + 1, w27);

  // Check that adc correctly sets the condition flags.
  START();
  __ Mov(x0, 1);
  __ Mov(x1, 0xffffffffffffffffL);
  // Clear the C flag.
  __ Adds(x0, x0, Operand(0));
  __ Adcs(x10, x0, Operand(x1));
  END();

  RUN();

  ASSERT_EQUAL_NZCV(ZCFlag);
  ASSERT_EQUAL_64(0, x10);

  START();
  __ Mov(x0, 1);
  __ Mov(x1, 0x8000000000000000L);
  // Clear the C flag.
  __ Adds(x0, x0, Operand(0));
  __ Adcs(x10, x0, Operand(x1, ASR, 63));
  END();

  RUN();

  ASSERT_EQUAL_NZCV(ZCFlag);
  ASSERT_EQUAL_64(0, x10);

  START();
  __ Mov(x0, 0x10);
  __ Mov(x1, 0x07ffffffffffffffL);
  // Clear the C flag.
  __ Adds(x0, x0, Operand(0));
  __ Adcs(x10, x0, Operand(x1, LSL, 4));
  END();

  RUN();

  ASSERT_EQUAL_NZCV(NVFlag);
  ASSERT_EQUAL_64(0x8000000000000000L, x10);

  // Check that sbc correctly sets the condition flags.
  START();
  __ Mov(x0, 0);
  __ Mov(x1, 0xffffffffffffffffL);
  // Clear the C flag.
  __ Adds(x0, x0, Operand(0));
  __ Sbcs(x10, x0, Operand(x1));
  END();

  RUN();

  ASSERT_EQUAL_NZCV(ZFlag);
  ASSERT_EQUAL_64(0, x10);

  START();
  __ Mov(x0, 1);
  __ Mov(x1, 0xffffffffffffffffL);
  // Clear the C flag.
  __ Adds(x0, x0, Operand(0));
  __ Sbcs(x10, x0, Operand(x1, LSR, 1));
  END();

  RUN();

  ASSERT_EQUAL_NZCV(NFlag);
  ASSERT_EQUAL_64(0x8000000000000001L, x10);

  START();
  __ Mov(x0, 0);
  // Clear the C flag.
  __ Adds(x0, x0, Operand(0));
  __ Sbcs(x10, x0, Operand(0xffffffffffffffffL));
  END();

  RUN();

  ASSERT_EQUAL_NZCV(ZFlag);
  ASSERT_EQUAL_64(0, x10);

  START();
  __ Mov(w0, 0x7fffffff);
  // Clear the C flag.
  __ Adds(x0, x0, Operand(0));
  __ Ngcs(w10, w0);
  END();

  RUN();

  ASSERT_EQUAL_NZCV(NFlag);
  ASSERT_EQUAL_64(0x80000000, x10);

  START();
  // Clear the C flag.
  __ Adds(x0, x0, Operand(0));
  __ Ngcs(x10, 0x7fffffffffffffffL);
  END();

  RUN();

  ASSERT_EQUAL_NZCV(NFlag);
  ASSERT_EQUAL_64(0x8000000000000000L, x10);

  START();
  __ Mov(x0, 0);
  // Set the C flag.
  __ Cmp(x0, Operand(x0));
  __ Sbcs(x10, x0, Operand(1));
  END();

  RUN();

  ASSERT_EQUAL_NZCV(NFlag);
  ASSERT_EQUAL_64(0xffffffffffffffffL, x10);

  START();
  __ Mov(x0, 0);
  // Set the C flag.
  __ Cmp(x0, Operand(x0));
  __ Ngcs(x10, 0x7fffffffffffffffL);
  END();

  RUN();

  ASSERT_EQUAL_NZCV(NFlag);
  ASSERT_EQUAL_64(0x8000000000000001L, x10);

  TEARDOWN();
}


TEST(adc_sbc_extend) {
  INIT_V8();
  SETUP();

  START();
  // Clear the C flag.
  __ Adds(x0, x0, Operand(0));

  __ Mov(x0, 0);
  __ Mov(x1, 1);
  __ Mov(x2, 0x0123456789abcdefL);

  __ Adc(x10, x1, Operand(w2, UXTB, 1));
  __ Adc(x11, x1, Operand(x2, SXTH, 2));
  __ Sbc(x12, x1, Operand(w2, UXTW, 4));
  __ Adc(x13, x1, Operand(x2, UXTX, 4));

  __ Adc(w14, w1, Operand(w2, UXTB, 1));
  __ Adc(w15, w1, Operand(w2, SXTH, 2));
  __ Adc(w9, w1, Operand(w2, UXTW, 4));

  // Set the C flag.
  __ Cmp(w0, Operand(w0));

  __ Adc(x20, x1, Operand(w2, UXTB, 1));
  __ Adc(x21, x1, Operand(x2, SXTH, 2));
  __ Sbc(x22, x1, Operand(w2, UXTW, 4));
  __ Adc(x23, x1, Operand(x2, UXTX, 4));

  __ Adc(w24, w1, Operand(w2, UXTB, 1));
  __ Adc(w25, w1, Operand(w2, SXTH, 2));
  __ Adc(w26, w1, Operand(w2, UXTW, 4));
  END();

  RUN();

  ASSERT_EQUAL_64(0x1df, x10);
  ASSERT_EQUAL_64(0xffffffffffff37bdL, x11);
  ASSERT_EQUAL_64(0xfffffff765432110L, x12);
  ASSERT_EQUAL_64(0x123456789abcdef1L, x13);

  ASSERT_EQUAL_32(0x1df, w14);
  ASSERT_EQUAL_32(0xffff37bd, w15);
  ASSERT_EQUAL_32(0x9abcdef1, w9);

  ASSERT_EQUAL_64(0x1df + 1, x20);
  ASSERT_EQUAL_64(0xffffffffffff37bdL + 1, x21);
  ASSERT_EQUAL_64(0xfffffff765432110L + 1, x22);
  ASSERT_EQUAL_64(0x123456789abcdef1L + 1, x23);

  ASSERT_EQUAL_32(0x1df + 1, w24);
  ASSERT_EQUAL_32(0xffff37bd + 1, w25);
  ASSERT_EQUAL_32(0x9abcdef1 + 1, w26);

  // Check that adc correctly sets the condition flags.
  START();
  __ Mov(x0, 0xff);
  __ Mov(x1, 0xffffffffffffffffL);
  // Clear the C flag.
  __ Adds(x0, x0, Operand(0));
  __ Adcs(x10, x0, Operand(x1, SXTX, 1));
  END();

  RUN();

  ASSERT_EQUAL_NZCV(CFlag);

  START();
  __ Mov(x0, 0x7fffffffffffffffL);
  __ Mov(x1, 1);
  // Clear the C flag.
  __ Adds(x0, x0, Operand(0));
  __ Adcs(x10, x0, Operand(x1, UXTB, 2));
  END();

  RUN();

  ASSERT_EQUAL_NZCV(NVFlag);

  START();
  __ Mov(x0, 0x7fffffffffffffffL);
  // Clear the C flag.
  __ Adds(x0, x0, Operand(0));
  __ Adcs(x10, x0, Operand(1));
  END();

  RUN();

  ASSERT_EQUAL_NZCV(NVFlag);

  TEARDOWN();
}


TEST(adc_sbc_wide_imm) {
  INIT_V8();
  SETUP();

  START();
  __ Mov(x0, 0);

  // Clear the C flag.
  __ Adds(x0, x0, Operand(0));

  __ Adc(x7, x0, Operand(0x1234567890abcdefUL));
  __ Adc(w8, w0, Operand(0xffffffff));
  __ Sbc(x9, x0, Operand(0x1234567890abcdefUL));
  __ Sbc(w10, w0, Operand(0xffffffff));
  __ Ngc(x11, Operand(0xffffffff00000000UL));
  __ Ngc(w12, Operand(0xffff0000));
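  // Ngc computes a negation without carry-in: rd = ~operand + C. With C
  // clear it yields the bitwise NOT of the operand; with C set it yields the
  // two's complement negation.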
3981
3982  // Set the C flag.
3983  __ Cmp(w0, Operand(w0));
3984
3985  __ Adc(x18, x0, Operand(0x1234567890abcdefUL));
3986  __ Adc(w19, w0, Operand(0xffffffff));
3987  __ Sbc(x20, x0, Operand(0x1234567890abcdefUL));
3988  __ Sbc(w21, w0, Operand(0xffffffff));
3989  __ Ngc(x22, Operand(0xffffffff00000000UL));
3990  __ Ngc(w23, Operand(0xffff0000));
3991  END();
3992
3993  RUN();
3994
3995  ASSERT_EQUAL_64(0x1234567890abcdefUL, x7);
3996  ASSERT_EQUAL_64(0xffffffff, x8);
3997  ASSERT_EQUAL_64(0xedcba9876f543210UL, x9);
3998  ASSERT_EQUAL_64(0, x10);
3999  ASSERT_EQUAL_64(0xffffffff, x11);
4000  ASSERT_EQUAL_64(0xffff, x12);
4001
4002  ASSERT_EQUAL_64(0x1234567890abcdefUL + 1, x18);
4003  ASSERT_EQUAL_64(0, x19);
4004  ASSERT_EQUAL_64(0xedcba9876f543211UL, x20);
4005  ASSERT_EQUAL_64(1, x21);
4006  ASSERT_EQUAL_64(0x100000000UL, x22);
4007  ASSERT_EQUAL_64(0x10000, x23);
4008
4009  TEARDOWN();
4010}
4011
4012
4013TEST(flags) {
4014  INIT_V8();
4015  SETUP();
4016
4017  START();
4018  __ Mov(x0, 0);
4019  __ Mov(x1, 0x1111111111111111L);
  __ Neg(x10, Operand(x0));
  __ Neg(x11, Operand(x1));
  __ Neg(w12, Operand(w1));
  // Clear the C flag.
  __ Adds(x0, x0, Operand(0));
  __ Ngc(x13, Operand(x0));
  // Set the C flag.
  __ Cmp(x0, Operand(x0));
  __ Ngc(w14, Operand(w0));
  END();

  RUN();

  ASSERT_EQUAL_64(0, x10);
  ASSERT_EQUAL_64(-0x1111111111111111L, x11);
  ASSERT_EQUAL_32(-0x11111111, w12);
  ASSERT_EQUAL_64(-1L, x13);
  ASSERT_EQUAL_32(0, w14);

  START();
  __ Mov(x0, 0);
  __ Cmp(x0, Operand(x0));
  END();

  RUN();

  ASSERT_EQUAL_NZCV(ZCFlag);

  START();
  __ Mov(w0, 0);
  __ Cmp(w0, Operand(w0));
  END();

  RUN();

  ASSERT_EQUAL_NZCV(ZCFlag);

  START();
  __ Mov(x0, 0);
  __ Mov(x1, 0x1111111111111111L);
  __ Cmp(x0, Operand(x1));
  END();

  RUN();

  ASSERT_EQUAL_NZCV(NFlag);

  START();
  __ Mov(w0, 0);
  __ Mov(w1, 0x11111111);
  __ Cmp(w0, Operand(w1));
  END();

  RUN();

  ASSERT_EQUAL_NZCV(NFlag);

  START();
  __ Mov(x1, 0x1111111111111111L);
  __ Cmp(x1, Operand(0));
  END();

  RUN();

  ASSERT_EQUAL_NZCV(CFlag);

  START();
  __ Mov(w1, 0x11111111);
  __ Cmp(w1, Operand(0));
  END();

  RUN();

  ASSERT_EQUAL_NZCV(CFlag);

  START();
  __ Mov(x0, 1);
  __ Mov(x1, 0x7fffffffffffffffL);
  __ Cmn(x1, Operand(x0));
  END();

  RUN();

  ASSERT_EQUAL_NZCV(NVFlag);

  START();
  __ Mov(w0, 1);
  __ Mov(w1, 0x7fffffff);
  __ Cmn(w1, Operand(w0));
  END();

  RUN();

  ASSERT_EQUAL_NZCV(NVFlag);

  START();
  __ Mov(x0, 1);
  __ Mov(x1, 0xffffffffffffffffL);
  __ Cmn(x1, Operand(x0));
  END();

  RUN();

  ASSERT_EQUAL_NZCV(ZCFlag);

  START();
  __ Mov(w0, 1);
  __ Mov(w1, 0xffffffff);
  __ Cmn(w1, Operand(w0));
  END();

  RUN();

  ASSERT_EQUAL_NZCV(ZCFlag);

  START();
  __ Mov(w0, 0);
  __ Mov(w1, 1);
  // Clear the C flag.
  __ Adds(w0, w0, Operand(0));
  __ Ngcs(w0, Operand(w1));
  END();

  RUN();

  ASSERT_EQUAL_NZCV(NFlag);

  START();
  __ Mov(w0, 0);
  __ Mov(w1, 0);
  // Set the C flag.
  __ Cmp(w0, Operand(w0));
  __ Ngcs(w0, Operand(w1));
  END();

  RUN();

  ASSERT_EQUAL_NZCV(ZCFlag);

  TEARDOWN();
}


TEST(cmp_shift) {
  INIT_V8();
  SETUP();

  START();
  __ Mov(x18, 0xf0000000);
  __ Mov(x19, 0xf000000010000000UL);
  __ Mov(x20, 0xf0000000f0000000UL);
  __ Mov(x21, 0x7800000078000000UL);
  __ Mov(x22, 0x3c0000003c000000UL);
  __ Mov(x23, 0x8000000780000000UL);
  __ Mov(x24, 0x0000000f00000000UL);
  __ Mov(x25, 0x00000003c0000000UL);
  __ Mov(x26, 0x8000000780000000UL);
  __ Mov(x27, 0xc0000003);

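  // Each comparison below is constructed so that the shifted right-hand
  // operand equals the left-hand register, so every result should be "equal":
  // Z and C set, N and V clear.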
  __ Cmp(w20, Operand(w21, LSL, 1));
  __ Mrs(x0, NZCV);

  __ Cmp(x20, Operand(x22, LSL, 2));
  __ Mrs(x1, NZCV);

  __ Cmp(w19, Operand(w23, LSR, 3));
  __ Mrs(x2, NZCV);

  __ Cmp(x18, Operand(x24, LSR, 4));
  __ Mrs(x3, NZCV);

  __ Cmp(w20, Operand(w25, ASR, 2));
  __ Mrs(x4, NZCV);

  __ Cmp(x20, Operand(x26, ASR, 3));
  __ Mrs(x5, NZCV);

  __ Cmp(w27, Operand(w22, ROR, 28));
  __ Mrs(x6, NZCV);

  __ Cmp(x20, Operand(x21, ROR, 31));
  __ Mrs(x7, NZCV);
  END();

  RUN();

  ASSERT_EQUAL_32(ZCFlag, w0);
  ASSERT_EQUAL_32(ZCFlag, w1);
  ASSERT_EQUAL_32(ZCFlag, w2);
  ASSERT_EQUAL_32(ZCFlag, w3);
  ASSERT_EQUAL_32(ZCFlag, w4);
  ASSERT_EQUAL_32(ZCFlag, w5);
  ASSERT_EQUAL_32(ZCFlag, w6);
  ASSERT_EQUAL_32(ZCFlag, w7);

  TEARDOWN();
}


TEST(cmp_extend) {
  INIT_V8();
  SETUP();

  START();
  __ Mov(w20, 0x2);
  __ Mov(w21, 0x1);
  __ Mov(x22, 0xffffffffffffffffUL);
  __ Mov(x23, 0xff);
  __ Mov(x24, 0xfffffffffffffffeUL);
  __ Mov(x25, 0xffff);
  __ Mov(x26, 0xffffffff);

  __ Cmp(w20, Operand(w21, LSL, 1));
  __ Mrs(x0, NZCV);

  __ Cmp(x22, Operand(x23, SXTB, 0));
  __ Mrs(x1, NZCV);

  __ Cmp(x24, Operand(x23, SXTB, 1));
  __ Mrs(x2, NZCV);

  __ Cmp(x24, Operand(x23, UXTB, 1));
  __ Mrs(x3, NZCV);

  __ Cmp(w22, Operand(w25, UXTH));
  __ Mrs(x4, NZCV);

  __ Cmp(x22, Operand(x25, SXTH));
  __ Mrs(x5, NZCV);

  __ Cmp(x22, Operand(x26, UXTW));
  __ Mrs(x6, NZCV);

  __ Cmp(x24, Operand(x26, SXTW, 1));
  __ Mrs(x7, NZCV);
  END();

  RUN();

  ASSERT_EQUAL_32(ZCFlag, w0);
  ASSERT_EQUAL_32(ZCFlag, w1);
  ASSERT_EQUAL_32(ZCFlag, w2);
  ASSERT_EQUAL_32(NCFlag, w3);
  ASSERT_EQUAL_32(NCFlag, w4);
  ASSERT_EQUAL_32(ZCFlag, w5);
  ASSERT_EQUAL_32(NCFlag, w6);
  ASSERT_EQUAL_32(ZCFlag, w7);

  TEARDOWN();
}


TEST(ccmp) {
  INIT_V8();
  SETUP();

  START();
  __ Mov(w16, 0);
  __ Mov(w17, 1);
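  // Ccmp/Ccmn perform the comparison and set NZCV from it when the condition
  // holds; otherwise they write the supplied nzcv immediate directly.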
  __ Cmp(w16, w16);
  __ Ccmp(w16, w17, NCFlag, eq);
  __ Mrs(x0, NZCV);

  __ Cmp(w16, w16);
  __ Ccmp(w16, w17, NCFlag, ne);
  __ Mrs(x1, NZCV);

  __ Cmp(x16, x16);
  __ Ccmn(x16, 2, NZCVFlag, eq);
  __ Mrs(x2, NZCV);

  __ Cmp(x16, x16);
  __ Ccmn(x16, 2, NZCVFlag, ne);
  __ Mrs(x3, NZCV);

  __ ccmp(x16, x16, NZCVFlag, al);
  __ Mrs(x4, NZCV);

  __ ccmp(x16, x16, NZCVFlag, nv);
  __ Mrs(x5, NZCV);

  END();

  RUN();

  ASSERT_EQUAL_32(NFlag, w0);
  ASSERT_EQUAL_32(NCFlag, w1);
  ASSERT_EQUAL_32(NoFlag, w2);
  ASSERT_EQUAL_32(NZCVFlag, w3);
  ASSERT_EQUAL_32(ZCFlag, w4);
  ASSERT_EQUAL_32(ZCFlag, w5);

  TEARDOWN();
}


TEST(ccmp_wide_imm) {
  INIT_V8();
  SETUP();

  START();
  __ Mov(w20, 0);

  __ Cmp(w20, Operand(w20));
  __ Ccmp(w20, Operand(0x12345678), NZCVFlag, eq);
  __ Mrs(x0, NZCV);

  __ Cmp(w20, Operand(w20));
  __ Ccmp(x20, Operand(0xffffffffffffffffUL), NZCVFlag, eq);
  __ Mrs(x1, NZCV);
  END();

  RUN();

  ASSERT_EQUAL_32(NFlag, w0);
  ASSERT_EQUAL_32(NoFlag, w1);

  TEARDOWN();
}


TEST(ccmp_shift_extend) {
  INIT_V8();
  SETUP();

  START();
  __ Mov(w20, 0x2);
  __ Mov(w21, 0x1);
  __ Mov(x22, 0xffffffffffffffffUL);
  __ Mov(x23, 0xff);
  __ Mov(x24, 0xfffffffffffffffeUL);

  __ Cmp(w20, Operand(w20));
  __ Ccmp(w20, Operand(w21, LSL, 1), NZCVFlag, eq);
  __ Mrs(x0, NZCV);

  __ Cmp(w20, Operand(w20));
  __ Ccmp(x22, Operand(x23, SXTB, 0), NZCVFlag, eq);
  __ Mrs(x1, NZCV);

  __ Cmp(w20, Operand(w20));
  __ Ccmp(x24, Operand(x23, SXTB, 1), NZCVFlag, eq);
  __ Mrs(x2, NZCV);

  __ Cmp(w20, Operand(w20));
  __ Ccmp(x24, Operand(x23, UXTB, 1), NZCVFlag, eq);
  __ Mrs(x3, NZCV);

  __ Cmp(w20, Operand(w20));
  __ Ccmp(x24, Operand(x23, UXTB, 1), NZCVFlag, ne);
  __ Mrs(x4, NZCV);
  END();

  RUN();

  ASSERT_EQUAL_32(ZCFlag, w0);
  ASSERT_EQUAL_32(ZCFlag, w1);
  ASSERT_EQUAL_32(ZCFlag, w2);
  ASSERT_EQUAL_32(NCFlag, w3);
  ASSERT_EQUAL_32(NZCVFlag, w4);

  TEARDOWN();
}


TEST(csel) {
  INIT_V8();
  SETUP();

  START();
  __ Mov(x16, 0);
  __ Mov(x24, 0x0000000f0000000fUL);
  __ Mov(x25, 0x0000001f0000001fUL);
  __ Mov(x26, 0);
  __ Mov(x27, 0);

  __ Cmp(w16, 0);
  __ Csel(w0, w24, w25, eq);
  __ Csel(w1, w24, w25, ne);
  __ Csinc(w2, w24, w25, mi);
  __ Csinc(w3, w24, w25, pl);

  __ csel(w13, w24, w25, al);
  __ csel(x14, x24, x25, nv);

  __ Cmp(x16, 1);
  __ Csinv(x4, x24, x25, gt);
  __ Csinv(x5, x24, x25, le);
  __ Csneg(x6, x24, x25, hs);
  __ Csneg(x7, x24, x25, lo);

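  // Conditional aliases: Cset writes 1 if the condition holds, else 0; Csetm
  // writes -1 or 0; Cinc, Cinv and Cneg write rn incremented, inverted or
  // negated if the condition holds, else rn unchanged.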
  __ Cset(w8, ne);
  __ Csetm(w9, ne);
  __ Cinc(x10, x25, ne);
  __ Cinv(x11, x24, ne);
  __ Cneg(x12, x24, ne);

  __ csel(w15, w24, w25, al);
  __ csel(x18, x24, x25, nv);

  __ CzeroX(x24, ne);
  __ CzeroX(x25, eq);

  __ CmovX(x26, x25, ne);
  __ CmovX(x27, x25, eq);
  END();

  RUN();

  ASSERT_EQUAL_64(0x0000000f, x0);
  ASSERT_EQUAL_64(0x0000001f, x1);
  ASSERT_EQUAL_64(0x00000020, x2);
  ASSERT_EQUAL_64(0x0000000f, x3);
  ASSERT_EQUAL_64(0xffffffe0ffffffe0UL, x4);
  ASSERT_EQUAL_64(0x0000000f0000000fUL, x5);
  ASSERT_EQUAL_64(0xffffffe0ffffffe1UL, x6);
  ASSERT_EQUAL_64(0x0000000f0000000fUL, x7);
  ASSERT_EQUAL_64(0x00000001, x8);
  ASSERT_EQUAL_64(0xffffffff, x9);
  ASSERT_EQUAL_64(0x0000001f00000020UL, x10);
  ASSERT_EQUAL_64(0xfffffff0fffffff0UL, x11);
  ASSERT_EQUAL_64(0xfffffff0fffffff1UL, x12);
  ASSERT_EQUAL_64(0x0000000f, x13);
  ASSERT_EQUAL_64(0x0000000f0000000fUL, x14);
  ASSERT_EQUAL_64(0x0000000f, x15);
  ASSERT_EQUAL_64(0x0000000f0000000fUL, x18);
  ASSERT_EQUAL_64(0, x24);
  ASSERT_EQUAL_64(0x0000001f0000001fUL, x25);
  ASSERT_EQUAL_64(0x0000001f0000001fUL, x26);
  ASSERT_EQUAL_64(0, x27);

  TEARDOWN();
}


TEST(csel_imm) {
  INIT_V8();
  SETUP();

  START();
  __ Mov(x18, 0);
  __ Mov(x19, 0x80000000);
  __ Mov(x20, 0x8000000000000000UL);

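  // Csel has no immediate encoding; small immediates like 0, 1 and -1 are
  // presumably synthesized via the zero register and the Csinc/Csinv/Csneg
  // forms, while other values need a scratch register.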
  __ Cmp(x18, Operand(0));
  __ Csel(w0, w19, -2, ne);
  __ Csel(w1, w19, -1, ne);
  __ Csel(w2, w19, 0, ne);
  __ Csel(w3, w19, 1, ne);
  __ Csel(w4, w19, 2, ne);
  __ Csel(w5, w19, Operand(w19, ASR, 31), ne);
  __ Csel(w6, w19, Operand(w19, ROR, 1), ne);
  __ Csel(w7, w19, 3, eq);

  __ Csel(x8, x20, -2, ne);
  __ Csel(x9, x20, -1, ne);
  __ Csel(x10, x20, 0, ne);
  __ Csel(x11, x20, 1, ne);
  __ Csel(x12, x20, 2, ne);
  __ Csel(x13, x20, Operand(x20, ASR, 63), ne);
  __ Csel(x14, x20, Operand(x20, ROR, 1), ne);
  __ Csel(x15, x20, 3, eq);

  END();

  RUN();

  ASSERT_EQUAL_32(-2, w0);
  ASSERT_EQUAL_32(-1, w1);
  ASSERT_EQUAL_32(0, w2);
  ASSERT_EQUAL_32(1, w3);
  ASSERT_EQUAL_32(2, w4);
  ASSERT_EQUAL_32(-1, w5);
  ASSERT_EQUAL_32(0x40000000, w6);
  ASSERT_EQUAL_32(0x80000000, w7);

  ASSERT_EQUAL_64(-2, x8);
  ASSERT_EQUAL_64(-1, x9);
  ASSERT_EQUAL_64(0, x10);
  ASSERT_EQUAL_64(1, x11);
  ASSERT_EQUAL_64(2, x12);
  ASSERT_EQUAL_64(-1, x13);
  ASSERT_EQUAL_64(0x4000000000000000UL, x14);
  ASSERT_EQUAL_64(0x8000000000000000UL, x15);

  TEARDOWN();
}


TEST(lslv) {
  INIT_V8();
  SETUP();

  uint64_t value = 0x0123456789abcdefUL;
  int shift[] = {1, 3, 5, 9, 17, 33};

  START();
  __ Mov(x0, value);
  __ Mov(w1, shift[0]);
  __ Mov(w2, shift[1]);
  __ Mov(w3, shift[2]);
  __ Mov(w4, shift[3]);
  __ Mov(w5, shift[4]);
  __ Mov(w6, shift[5]);

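  // A variable shift by zero (via xzr) must leave the value unchanged.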
  __ lslv(x0, x0, xzr);

  __ Lsl(x16, x0, x1);
  __ Lsl(x17, x0, x2);
  __ Lsl(x18, x0, x3);
  __ Lsl(x19, x0, x4);
  __ Lsl(x20, x0, x5);
  __ Lsl(x21, x0, x6);

  __ Lsl(w22, w0, w1);
  __ Lsl(w23, w0, w2);
  __ Lsl(w24, w0, w3);
  __ Lsl(w25, w0, w4);
  __ Lsl(w26, w0, w5);
  __ Lsl(w27, w0, w6);
  END();

  RUN();

  ASSERT_EQUAL_64(value, x0);
  ASSERT_EQUAL_64(value << (shift[0] & 63), x16);
  ASSERT_EQUAL_64(value << (shift[1] & 63), x17);
  ASSERT_EQUAL_64(value << (shift[2] & 63), x18);
  ASSERT_EQUAL_64(value << (shift[3] & 63), x19);
  ASSERT_EQUAL_64(value << (shift[4] & 63), x20);
  ASSERT_EQUAL_64(value << (shift[5] & 63), x21);
  ASSERT_EQUAL_32(value << (shift[0] & 31), w22);
  ASSERT_EQUAL_32(value << (shift[1] & 31), w23);
  ASSERT_EQUAL_32(value << (shift[2] & 31), w24);
  ASSERT_EQUAL_32(value << (shift[3] & 31), w25);
  ASSERT_EQUAL_32(value << (shift[4] & 31), w26);
  ASSERT_EQUAL_32(value << (shift[5] & 31), w27);

  TEARDOWN();
}


TEST(lsrv) {
  INIT_V8();
  SETUP();

  uint64_t value = 0x0123456789abcdefUL;
  int shift[] = {1, 3, 5, 9, 17, 33};

  START();
  __ Mov(x0, value);
  __ Mov(w1, shift[0]);
  __ Mov(w2, shift[1]);
  __ Mov(w3, shift[2]);
  __ Mov(w4, shift[3]);
  __ Mov(w5, shift[4]);
  __ Mov(w6, shift[5]);

  __ lsrv(x0, x0, xzr);

  __ Lsr(x16, x0, x1);
  __ Lsr(x17, x0, x2);
  __ Lsr(x18, x0, x3);
  __ Lsr(x19, x0, x4);
  __ Lsr(x20, x0, x5);
  __ Lsr(x21, x0, x6);

  __ Lsr(w22, w0, w1);
  __ Lsr(w23, w0, w2);
  __ Lsr(w24, w0, w3);
  __ Lsr(w25, w0, w4);
  __ Lsr(w26, w0, w5);
  __ Lsr(w27, w0, w6);
  END();

  RUN();

  ASSERT_EQUAL_64(value, x0);
  ASSERT_EQUAL_64(value >> (shift[0] & 63), x16);
  ASSERT_EQUAL_64(value >> (shift[1] & 63), x17);
  ASSERT_EQUAL_64(value >> (shift[2] & 63), x18);
  ASSERT_EQUAL_64(value >> (shift[3] & 63), x19);
  ASSERT_EQUAL_64(value >> (shift[4] & 63), x20);
  ASSERT_EQUAL_64(value >> (shift[5] & 63), x21);

  value &= 0xffffffffUL;
  ASSERT_EQUAL_32(value >> (shift[0] & 31), w22);
  ASSERT_EQUAL_32(value >> (shift[1] & 31), w23);
  ASSERT_EQUAL_32(value >> (shift[2] & 31), w24);
  ASSERT_EQUAL_32(value >> (shift[3] & 31), w25);
  ASSERT_EQUAL_32(value >> (shift[4] & 31), w26);
  ASSERT_EQUAL_32(value >> (shift[5] & 31), w27);

  TEARDOWN();
}


TEST(asrv) {
  INIT_V8();
  SETUP();

  int64_t value = 0xfedcba98fedcba98UL;
  int shift[] = {1, 3, 5, 9, 17, 33};

  START();
  __ Mov(x0, value);
  __ Mov(w1, shift[0]);
  __ Mov(w2, shift[1]);
  __ Mov(w3, shift[2]);
  __ Mov(w4, shift[3]);
  __ Mov(w5, shift[4]);
  __ Mov(w6, shift[5]);

  __ asrv(x0, x0, xzr);

  __ Asr(x16, x0, x1);
  __ Asr(x17, x0, x2);
  __ Asr(x18, x0, x3);
  __ Asr(x19, x0, x4);
  __ Asr(x20, x0, x5);
  __ Asr(x21, x0, x6);

  __ Asr(w22, w0, w1);
  __ Asr(w23, w0, w2);
  __ Asr(w24, w0, w3);
  __ Asr(w25, w0, w4);
  __ Asr(w26, w0, w5);
  __ Asr(w27, w0, w6);
  END();

  RUN();

  ASSERT_EQUAL_64(value, x0);
  ASSERT_EQUAL_64(value >> (shift[0] & 63), x16);
  ASSERT_EQUAL_64(value >> (shift[1] & 63), x17);
  ASSERT_EQUAL_64(value >> (shift[2] & 63), x18);
  ASSERT_EQUAL_64(value >> (shift[3] & 63), x19);
  ASSERT_EQUAL_64(value >> (shift[4] & 63), x20);
  ASSERT_EQUAL_64(value >> (shift[5] & 63), x21);

  int32_t value32 = static_cast<int32_t>(value & 0xffffffffUL);
  ASSERT_EQUAL_32(value32 >> (shift[0] & 31), w22);
  ASSERT_EQUAL_32(value32 >> (shift[1] & 31), w23);
  ASSERT_EQUAL_32(value32 >> (shift[2] & 31), w24);
  ASSERT_EQUAL_32(value32 >> (shift[3] & 31), w25);
  ASSERT_EQUAL_32(value32 >> (shift[4] & 31), w26);
  ASSERT_EQUAL_32(value32 >> (shift[5] & 31), w27);

  TEARDOWN();
}


TEST(rorv) {
  INIT_V8();
  SETUP();

  uint64_t value = 0x0123456789abcdefUL;
  int shift[] = {4, 8, 12, 16, 24, 36};

  START();
  __ Mov(x0, value);
  __ Mov(w1, shift[0]);
  __ Mov(w2, shift[1]);
  __ Mov(w3, shift[2]);
  __ Mov(w4, shift[3]);
  __ Mov(w5, shift[4]);
  __ Mov(w6, shift[5]);

  __ rorv(x0, x0, xzr);

  __ Ror(x16, x0, x1);
  __ Ror(x17, x0, x2);
  __ Ror(x18, x0, x3);
  __ Ror(x19, x0, x4);
  __ Ror(x20, x0, x5);
  __ Ror(x21, x0, x6);

  __ Ror(w22, w0, w1);
  __ Ror(w23, w0, w2);
  __ Ror(w24, w0, w3);
  __ Ror(w25, w0, w4);
  __ Ror(w26, w0, w5);
  __ Ror(w27, w0, w6);
  END();

  RUN();

  ASSERT_EQUAL_64(value, x0);
  ASSERT_EQUAL_64(0xf0123456789abcdeUL, x16);
  ASSERT_EQUAL_64(0xef0123456789abcdUL, x17);
  ASSERT_EQUAL_64(0xdef0123456789abcUL, x18);
  ASSERT_EQUAL_64(0xcdef0123456789abUL, x19);
  ASSERT_EQUAL_64(0xabcdef0123456789UL, x20);
  ASSERT_EQUAL_64(0x789abcdef0123456UL, x21);
  ASSERT_EQUAL_32(0xf89abcde, w22);
  ASSERT_EQUAL_32(0xef89abcd, w23);
  ASSERT_EQUAL_32(0xdef89abc, w24);
  ASSERT_EQUAL_32(0xcdef89ab, w25);
  ASSERT_EQUAL_32(0xabcdef89, w26);
  ASSERT_EQUAL_32(0xf89abcde, w27);

  TEARDOWN();
}


TEST(bfm) {
  INIT_V8();
  SETUP();

  START();
  __ Mov(x1, 0x0123456789abcdefL);

  __ Mov(x10, 0x8888888888888888L);
  __ Mov(x11, 0x8888888888888888L);
  __ Mov(x12, 0x8888888888888888L);
  __ Mov(x13, 0x8888888888888888L);
  __ Mov(w20, 0x88888888);
  __ Mov(w21, 0x88888888);

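  // bfm with immr <= imms copies the field src[imms:immr] into the low bits
  // of the destination; with immr > imms it copies src[imms:0] into the
  // destination starting at bit (regsize - immr). Other bits are unchanged.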
  __ bfm(x10, x1, 16, 31);
  __ bfm(x11, x1, 32, 15);

  __ bfm(w20, w1, 16, 23);
  __ bfm(w21, w1, 24, 15);

  // Aliases.
  __ Bfi(x12, x1, 16, 8);
  __ Bfxil(x13, x1, 16, 8);
  END();

  RUN();

  ASSERT_EQUAL_64(0x88888888888889abL, x10);
  ASSERT_EQUAL_64(0x8888cdef88888888L, x11);

  ASSERT_EQUAL_32(0x888888ab, w20);
  ASSERT_EQUAL_32(0x88cdef88, w21);

  ASSERT_EQUAL_64(0x8888888888ef8888L, x12);
  ASSERT_EQUAL_64(0x88888888888888abL, x13);

  TEARDOWN();
}


TEST(sbfm) {
  INIT_V8();
  SETUP();

  START();
  __ Mov(x1, 0x0123456789abcdefL);
  __ Mov(x2, 0xfedcba9876543210L);

  __ sbfm(x10, x1, 16, 31);
  __ sbfm(x11, x1, 32, 15);
  __ sbfm(x12, x1, 32, 47);
  __ sbfm(x13, x1, 48, 35);

  __ sbfm(w14, w1, 16, 23);
  __ sbfm(w15, w1, 24, 15);
  __ sbfm(w16, w2, 16, 23);
  __ sbfm(w17, w2, 24, 15);

  // Aliases.
  __ Asr(x18, x1, 32);
  __ Asr(x19, x2, 32);
  __ Sbfiz(x20, x1, 8, 16);
  __ Sbfiz(x21, x2, 8, 16);
  __ Sbfx(x22, x1, 8, 16);
  __ Sbfx(x23, x2, 8, 16);
  __ Sxtb(x24, w1);
  __ Sxtb(x25, x2);
  __ Sxth(x26, w1);
  __ Sxth(x27, x2);
  __ Sxtw(x28, w1);
  __ Sxtw(x29, x2);
  END();

  RUN();

  ASSERT_EQUAL_64(0xffffffffffff89abL, x10);
  ASSERT_EQUAL_64(0xffffcdef00000000L, x11);
  ASSERT_EQUAL_64(0x4567L, x12);
  ASSERT_EQUAL_64(0x789abcdef0000L, x13);

  ASSERT_EQUAL_32(0xffffffab, w14);
  ASSERT_EQUAL_32(0xffcdef00, w15);
  ASSERT_EQUAL_32(0x54, w16);
  ASSERT_EQUAL_32(0x00321000, w17);

  ASSERT_EQUAL_64(0x01234567L, x18);
  ASSERT_EQUAL_64(0xfffffffffedcba98L, x19);
  ASSERT_EQUAL_64(0xffffffffffcdef00L, x20);
  ASSERT_EQUAL_64(0x321000L, x21);
  ASSERT_EQUAL_64(0xffffffffffffabcdL, x22);
  ASSERT_EQUAL_64(0x5432L, x23);
  ASSERT_EQUAL_64(0xffffffffffffffefL, x24);
  ASSERT_EQUAL_64(0x10, x25);
  ASSERT_EQUAL_64(0xffffffffffffcdefL, x26);
  ASSERT_EQUAL_64(0x3210, x27);
  ASSERT_EQUAL_64(0xffffffff89abcdefL, x28);
  ASSERT_EQUAL_64(0x76543210, x29);

  TEARDOWN();
}


TEST(ubfm) {
  INIT_V8();
  SETUP();

  START();
  __ Mov(x1, 0x0123456789abcdefL);
  __ Mov(x2, 0xfedcba9876543210L);

  __ Mov(x10, 0x8888888888888888L);
  __ Mov(x11, 0x8888888888888888L);

  __ ubfm(x10, x1, 16, 31);
  __ ubfm(x11, x1, 32, 15);
  __ ubfm(x12, x1, 32, 47);
  __ ubfm(x13, x1, 48, 35);

  __ ubfm(w25, w1, 16, 23);
  __ ubfm(w26, w1, 24, 15);
  __ ubfm(w27, w2, 16, 23);
  __ ubfm(w28, w2, 24, 15);

  // Aliases.
  __ Lsl(x15, x1, 63);
  __ Lsl(x16, x1, 0);
  __ Lsr(x17, x1, 32);
  __ Ubfiz(x18, x1, 8, 16);
  __ Ubfx(x19, x1, 8, 16);
  __ Uxtb(x20, x1);
  __ Uxth(x21, x1);
  __ Uxtw(x22, x1);
  END();

  RUN();

  ASSERT_EQUAL_64(0x00000000000089abL, x10);
  ASSERT_EQUAL_64(0x0000cdef00000000L, x11);
  ASSERT_EQUAL_64(0x4567L, x12);
  ASSERT_EQUAL_64(0x789abcdef0000L, x13);

  ASSERT_EQUAL_32(0x000000ab, w25);
  ASSERT_EQUAL_32(0x00cdef00, w26);
  ASSERT_EQUAL_32(0x54, w27);
  ASSERT_EQUAL_32(0x00321000, w28);

  ASSERT_EQUAL_64(0x8000000000000000L, x15);
  ASSERT_EQUAL_64(0x0123456789abcdefL, x16);
  ASSERT_EQUAL_64(0x01234567L, x17);
  ASSERT_EQUAL_64(0xcdef00L, x18);
  ASSERT_EQUAL_64(0xabcdL, x19);
  ASSERT_EQUAL_64(0xefL, x20);
  ASSERT_EQUAL_64(0xcdefL, x21);
  ASSERT_EQUAL_64(0x89abcdefL, x22);

  TEARDOWN();
}


TEST(extr) {
  INIT_V8();
  SETUP();

  START();
  __ Mov(x1, 0x0123456789abcdefL);
  __ Mov(x2, 0xfedcba9876543210L);

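  // Extr extracts a register-sized field from the concatenation rn:rm,
  // starting at bit <lsb> of rm. Ror with an immediate is the alias
  // Extr(rd, rs, rs, shift).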
  __ Extr(w10, w1, w2, 0);
  __ Extr(x11, x1, x2, 0);
  __ Extr(w12, w1, w2, 1);
  __ Extr(x13, x2, x1, 2);

  __ Ror(w20, w1, 0);
  __ Ror(x21, x1, 0);
  __ Ror(w22, w2, 17);
  __ Ror(w23, w1, 31);
  __ Ror(x24, x2, 1);
  __ Ror(x25, x1, 63);
  END();

  RUN();

  ASSERT_EQUAL_64(0x76543210, x10);
  ASSERT_EQUAL_64(0xfedcba9876543210L, x11);
  ASSERT_EQUAL_64(0xbb2a1908, x12);
  ASSERT_EQUAL_64(0x0048d159e26af37bUL, x13);
  ASSERT_EQUAL_64(0x89abcdef, x20);
  ASSERT_EQUAL_64(0x0123456789abcdefL, x21);
  ASSERT_EQUAL_64(0x19083b2a, x22);
  ASSERT_EQUAL_64(0x13579bdf, x23);
  ASSERT_EQUAL_64(0x7f6e5d4c3b2a1908UL, x24);
  ASSERT_EQUAL_64(0x02468acf13579bdeUL, x25);

  TEARDOWN();
}


TEST(fmov_imm) {
  INIT_V8();
  SETUP();

  START();
  __ Fmov(s11, 1.0);
  __ Fmov(d22, -13.0);
  __ Fmov(s1, 255.0);
  __ Fmov(d2, 12.34567);
  __ Fmov(s3, 0.0);
  __ Fmov(d4, 0.0);
  __ Fmov(s5, kFP32PositiveInfinity);
  __ Fmov(d6, kFP64NegativeInfinity);
  END();

  RUN();

  ASSERT_EQUAL_FP32(1.0, s11);
  ASSERT_EQUAL_FP64(-13.0, d22);
  ASSERT_EQUAL_FP32(255.0, s1);
  ASSERT_EQUAL_FP64(12.34567, d2);
  ASSERT_EQUAL_FP32(0.0, s3);
  ASSERT_EQUAL_FP64(0.0, d4);
  ASSERT_EQUAL_FP32(kFP32PositiveInfinity, s5);
  ASSERT_EQUAL_FP64(kFP64NegativeInfinity, d6);

  TEARDOWN();
}


TEST(fmov_reg) {
  INIT_V8();
  SETUP();

  START();
  __ Fmov(s20, 1.0);
  __ Fmov(w10, s20);
  __ Fmov(s30, w10);
  __ Fmov(s5, s20);
  __ Fmov(d1, -13.0);
  __ Fmov(x1, d1);
  __ Fmov(d2, x1);
  __ Fmov(d4, d1);
  __ Fmov(d6, rawbits_to_double(0x0123456789abcdefL));
  __ Fmov(s6, s6);
  END();

  RUN();

  ASSERT_EQUAL_32(float_to_rawbits(1.0), w10);
  ASSERT_EQUAL_FP32(1.0, s30);
  ASSERT_EQUAL_FP32(1.0, s5);
  ASSERT_EQUAL_64(double_to_rawbits(-13.0), x1);
  ASSERT_EQUAL_FP64(-13.0, d2);
  ASSERT_EQUAL_FP64(-13.0, d4);
  ASSERT_EQUAL_FP32(rawbits_to_float(0x89abcdef), s6);

  TEARDOWN();
}


TEST(fadd) {
  INIT_V8();
  SETUP();

  START();
  __ Fmov(s14, -0.0f);
  __ Fmov(s15, kFP32PositiveInfinity);
  __ Fmov(s16, kFP32NegativeInfinity);
  __ Fmov(s17, 3.25f);
  __ Fmov(s18, 1.0f);
  __ Fmov(s19, 0.0f);

  __ Fmov(d26, -0.0);
  __ Fmov(d27, kFP64PositiveInfinity);
  __ Fmov(d28, kFP64NegativeInfinity);
  __ Fmov(d29, 0.0);
  __ Fmov(d30, -2.0);
  __ Fmov(d31, 2.25);

  __ Fadd(s0, s17, s18);
  __ Fadd(s1, s18, s19);
  __ Fadd(s2, s14, s18);
  __ Fadd(s3, s15, s18);
  __ Fadd(s4, s16, s18);
  __ Fadd(s5, s15, s16);
  __ Fadd(s6, s16, s15);

  __ Fadd(d7, d30, d31);
  __ Fadd(d8, d29, d31);
  __ Fadd(d9, d26, d31);
  __ Fadd(d10, d27, d31);
  __ Fadd(d11, d28, d31);
  __ Fadd(d12, d27, d28);
  __ Fadd(d13, d28, d27);
  END();

  RUN();

  ASSERT_EQUAL_FP32(4.25, s0);
  ASSERT_EQUAL_FP32(1.0, s1);
  ASSERT_EQUAL_FP32(1.0, s2);
  ASSERT_EQUAL_FP32(kFP32PositiveInfinity, s3);
  ASSERT_EQUAL_FP32(kFP32NegativeInfinity, s4);
  ASSERT_EQUAL_FP32(kFP32DefaultNaN, s5);
  ASSERT_EQUAL_FP32(kFP32DefaultNaN, s6);
  ASSERT_EQUAL_FP64(0.25, d7);
  ASSERT_EQUAL_FP64(2.25, d8);
  ASSERT_EQUAL_FP64(2.25, d9);
  ASSERT_EQUAL_FP64(kFP64PositiveInfinity, d10);
  ASSERT_EQUAL_FP64(kFP64NegativeInfinity, d11);
  ASSERT_EQUAL_FP64(kFP64DefaultNaN, d12);
  ASSERT_EQUAL_FP64(kFP64DefaultNaN, d13);

  TEARDOWN();
}


TEST(fsub) {
  INIT_V8();
  SETUP();

  START();
  __ Fmov(s14, -0.0f);
  __ Fmov(s15, kFP32PositiveInfinity);
  __ Fmov(s16, kFP32NegativeInfinity);
  __ Fmov(s17, 3.25f);
  __ Fmov(s18, 1.0f);
  __ Fmov(s19, 0.0f);

  __ Fmov(d26, -0.0);
  __ Fmov(d27, kFP64PositiveInfinity);
  __ Fmov(d28, kFP64NegativeInfinity);
  __ Fmov(d29, 0.0);
  __ Fmov(d30, -2.0);
  __ Fmov(d31, 2.25);

  __ Fsub(s0, s17, s18);
  __ Fsub(s1, s18, s19);
  __ Fsub(s2, s14, s18);
  __ Fsub(s3, s18, s15);
  __ Fsub(s4, s18, s16);
  __ Fsub(s5, s15, s15);
  __ Fsub(s6, s16, s16);

  __ Fsub(d7, d30, d31);
  __ Fsub(d8, d29, d31);
  __ Fsub(d9, d26, d31);
  __ Fsub(d10, d31, d27);
  __ Fsub(d11, d31, d28);
  __ Fsub(d12, d27, d27);
  __ Fsub(d13, d28, d28);
  END();

  RUN();

  ASSERT_EQUAL_FP32(2.25, s0);
  ASSERT_EQUAL_FP32(1.0, s1);
  ASSERT_EQUAL_FP32(-1.0, s2);
  ASSERT_EQUAL_FP32(kFP32NegativeInfinity, s3);
  ASSERT_EQUAL_FP32(kFP32PositiveInfinity, s4);
  ASSERT_EQUAL_FP32(kFP32DefaultNaN, s5);
  ASSERT_EQUAL_FP32(kFP32DefaultNaN, s6);
  ASSERT_EQUAL_FP64(-4.25, d7);
  ASSERT_EQUAL_FP64(-2.25, d8);
  ASSERT_EQUAL_FP64(-2.25, d9);
  ASSERT_EQUAL_FP64(kFP64NegativeInfinity, d10);
  ASSERT_EQUAL_FP64(kFP64PositiveInfinity, d11);
  ASSERT_EQUAL_FP64(kFP64DefaultNaN, d12);
  ASSERT_EQUAL_FP64(kFP64DefaultNaN, d13);

  TEARDOWN();
}


TEST(fmul) {
  INIT_V8();
  SETUP();

  START();
  __ Fmov(s14, -0.0f);
  __ Fmov(s15, kFP32PositiveInfinity);
  __ Fmov(s16, kFP32NegativeInfinity);
  __ Fmov(s17, 3.25f);
  __ Fmov(s18, 2.0f);
  __ Fmov(s19, 0.0f);
  __ Fmov(s20, -2.0f);

  __ Fmov(d26, -0.0);
  __ Fmov(d27, kFP64PositiveInfinity);
  __ Fmov(d28, kFP64NegativeInfinity);
  __ Fmov(d29, 0.0);
  __ Fmov(d30, -2.0);
  __ Fmov(d31, 2.25);

  __ Fmul(s0, s17, s18);
  __ Fmul(s1, s18, s19);
  __ Fmul(s2, s14, s14);
  __ Fmul(s3, s15, s20);
  __ Fmul(s4, s16, s20);
  __ Fmul(s5, s15, s19);
  __ Fmul(s6, s19, s16);

  __ Fmul(d7, d30, d31);
  __ Fmul(d8, d29, d31);
  __ Fmul(d9, d26, d26);
  __ Fmul(d10, d27, d30);
  __ Fmul(d11, d28, d30);
  __ Fmul(d12, d27, d29);
  __ Fmul(d13, d29, d28);
  END();

  RUN();

  ASSERT_EQUAL_FP32(6.5, s0);
  ASSERT_EQUAL_FP32(0.0, s1);
  ASSERT_EQUAL_FP32(0.0, s2);
  ASSERT_EQUAL_FP32(kFP32NegativeInfinity, s3);
  ASSERT_EQUAL_FP32(kFP32PositiveInfinity, s4);
  ASSERT_EQUAL_FP32(kFP32DefaultNaN, s5);
  ASSERT_EQUAL_FP32(kFP32DefaultNaN, s6);
  ASSERT_EQUAL_FP64(-4.5, d7);
  ASSERT_EQUAL_FP64(0.0, d8);
  ASSERT_EQUAL_FP64(0.0, d9);
  ASSERT_EQUAL_FP64(kFP64NegativeInfinity, d10);
  ASSERT_EQUAL_FP64(kFP64PositiveInfinity, d11);
  ASSERT_EQUAL_FP64(kFP64DefaultNaN, d12);
  ASSERT_EQUAL_FP64(kFP64DefaultNaN, d13);

  TEARDOWN();
}


static void FmaddFmsubHelper(double n, double m, double a,
                             double fmadd, double fmsub,
                             double fnmadd, double fnmsub) {
  SETUP();
  START();

  __ Fmov(d0, n);
  __ Fmov(d1, m);
  __ Fmov(d2, a);
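  // On ARM64: Fmadd computes (n * m) + a, Fmsub computes a - (n * m),
  // Fnmadd computes -(n * m) - a and Fnmsub computes (n * m) - a, each with
  // a single rounding step.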
  __ Fmadd(d28, d0, d1, d2);
  __ Fmsub(d29, d0, d1, d2);
  __ Fnmadd(d30, d0, d1, d2);
  __ Fnmsub(d31, d0, d1, d2);

  END();
  RUN();

  ASSERT_EQUAL_FP64(fmadd, d28);
  ASSERT_EQUAL_FP64(fmsub, d29);
  ASSERT_EQUAL_FP64(fnmadd, d30);
  ASSERT_EQUAL_FP64(fnmsub, d31);

  TEARDOWN();
}


TEST(fmadd_fmsub_double) {
  INIT_V8();

  // It is hard to check the result of fused operations because the only way
  // to compute the reference result is with fma, which is exactly what the
  // simulator uses.
  // TODO(jbramley): Add tests to check behaviour against a hardware trace.
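
  // For finite inputs the reference values could in principle be derived
  // with the C library's single-rounding fma() along these lines (a sketch
  // only; since the simulator is built on the same primitive, this would not
  // be an independent oracle):
  //
  //   double fmadd  = fma(n, m, a);    // Fmadd
  //   double fmsub  = fma(-n, m, a);   // Fmsub
  //   double fnmadd = fma(-n, m, -a);  // Fnmadd
  //   double fnmsub = fma(n, m, -a);   // Fnmsub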

  // Basic operation.
  FmaddFmsubHelper(1.0, 2.0, 3.0, 5.0, 1.0, -5.0, -1.0);
  FmaddFmsubHelper(-1.0, 2.0, 3.0, 1.0, 5.0, -1.0, -5.0);

  // Check the sign of exact zeroes.
  //               n     m     a     fmadd  fmsub  fnmadd fnmsub
  FmaddFmsubHelper(-0.0, +0.0, -0.0, -0.0,  +0.0,  +0.0,  +0.0);
  FmaddFmsubHelper(+0.0, +0.0, -0.0, +0.0,  -0.0,  +0.0,  +0.0);
  FmaddFmsubHelper(+0.0, +0.0, +0.0, +0.0,  +0.0,  -0.0,  +0.0);
  FmaddFmsubHelper(-0.0, +0.0, +0.0, +0.0,  +0.0,  +0.0,  -0.0);
  FmaddFmsubHelper(+0.0, -0.0, -0.0, -0.0,  +0.0,  +0.0,  +0.0);
  FmaddFmsubHelper(-0.0, -0.0, -0.0, +0.0,  -0.0,  +0.0,  +0.0);
  FmaddFmsubHelper(-0.0, -0.0, +0.0, +0.0,  +0.0,  -0.0,  +0.0);
  FmaddFmsubHelper(+0.0, -0.0, +0.0, +0.0,  +0.0,  +0.0,  -0.0);

  // Check NaN generation.
  FmaddFmsubHelper(kFP64PositiveInfinity, 0.0, 42.0,
                   kFP64DefaultNaN, kFP64DefaultNaN,
                   kFP64DefaultNaN, kFP64DefaultNaN);
  FmaddFmsubHelper(0.0, kFP64PositiveInfinity, 42.0,
                   kFP64DefaultNaN, kFP64DefaultNaN,
                   kFP64DefaultNaN, kFP64DefaultNaN);
  FmaddFmsubHelper(kFP64PositiveInfinity, 1.0, kFP64PositiveInfinity,
                   kFP64PositiveInfinity,   //  inf + ( inf * 1) = inf
                   kFP64DefaultNaN,         //  inf + (-inf * 1) = NaN
                   kFP64NegativeInfinity,   // -inf + (-inf * 1) = -inf
                   kFP64DefaultNaN);        // -inf + ( inf * 1) = NaN
  FmaddFmsubHelper(kFP64NegativeInfinity, 1.0, kFP64PositiveInfinity,
                   kFP64DefaultNaN,         //  inf + (-inf * 1) = NaN
                   kFP64PositiveInfinity,   //  inf + ( inf * 1) = inf
                   kFP64DefaultNaN,         // -inf + ( inf * 1) = NaN
                   kFP64NegativeInfinity);  // -inf + (-inf * 1) = -inf
}


static void FmaddFmsubHelper(float n, float m, float a,
                             float fmadd, float fmsub,
                             float fnmadd, float fnmsub) {
  SETUP();
  START();

  __ Fmov(s0, n);
  __ Fmov(s1, m);
  __ Fmov(s2, a);
  __ Fmadd(s28, s0, s1, s2);
  __ Fmsub(s29, s0, s1, s2);
  __ Fnmadd(s30, s0, s1, s2);
  __ Fnmsub(s31, s0, s1, s2);

  END();
  RUN();

  ASSERT_EQUAL_FP32(fmadd, s28);
  ASSERT_EQUAL_FP32(fmsub, s29);
  ASSERT_EQUAL_FP32(fnmadd, s30);
  ASSERT_EQUAL_FP32(fnmsub, s31);

  TEARDOWN();
}


TEST(fmadd_fmsub_float) {
  INIT_V8();
  // It is hard to check the result of fused operations because the only way
  // to compute the reference result is with fma, which is exactly what the
  // simulator uses.
  // TODO(jbramley): Add tests to check behaviour against a hardware trace.

  // Basic operation.
  FmaddFmsubHelper(1.0f, 2.0f, 3.0f, 5.0f, 1.0f, -5.0f, -1.0f);
  FmaddFmsubHelper(-1.0f, 2.0f, 3.0f, 1.0f, 5.0f, -1.0f, -5.0f);

  // Check the sign of exact zeroes.
  //               n      m      a      fmadd  fmsub  fnmadd fnmsub
  FmaddFmsubHelper(-0.0f, +0.0f, -0.0f, -0.0f, +0.0f, +0.0f, +0.0f);
  FmaddFmsubHelper(+0.0f, +0.0f, -0.0f, +0.0f, -0.0f, +0.0f, +0.0f);
  FmaddFmsubHelper(+0.0f, +0.0f, +0.0f, +0.0f, +0.0f, -0.0f, +0.0f);
  FmaddFmsubHelper(-0.0f, +0.0f, +0.0f, +0.0f, +0.0f, +0.0f, -0.0f);
  FmaddFmsubHelper(+0.0f, -0.0f, -0.0f, -0.0f, +0.0f, +0.0f, +0.0f);
  FmaddFmsubHelper(-0.0f, -0.0f, -0.0f, +0.0f, -0.0f, +0.0f, +0.0f);
  FmaddFmsubHelper(-0.0f, -0.0f, +0.0f, +0.0f, +0.0f, -0.0f, +0.0f);
  FmaddFmsubHelper(+0.0f, -0.0f, +0.0f, +0.0f, +0.0f, +0.0f, -0.0f);

  // Check NaN generation.
  FmaddFmsubHelper(kFP32PositiveInfinity, 0.0f, 42.0f,
                   kFP32DefaultNaN, kFP32DefaultNaN,
                   kFP32DefaultNaN, kFP32DefaultNaN);
  FmaddFmsubHelper(0.0f, kFP32PositiveInfinity, 42.0f,
                   kFP32DefaultNaN, kFP32DefaultNaN,
                   kFP32DefaultNaN, kFP32DefaultNaN);
  FmaddFmsubHelper(kFP32PositiveInfinity, 1.0f, kFP32PositiveInfinity,
                   kFP32PositiveInfinity,   //  inf + ( inf * 1) = inf
                   kFP32DefaultNaN,         //  inf + (-inf * 1) = NaN
                   kFP32NegativeInfinity,   // -inf + (-inf * 1) = -inf
                   kFP32DefaultNaN);        // -inf + ( inf * 1) = NaN
  FmaddFmsubHelper(kFP32NegativeInfinity, 1.0f, kFP32PositiveInfinity,
                   kFP32DefaultNaN,         //  inf + (-inf * 1) = NaN
                   kFP32PositiveInfinity,   //  inf + ( inf * 1) = inf
                   kFP32DefaultNaN,         // -inf + ( inf * 1) = NaN
                   kFP32NegativeInfinity);  // -inf + (-inf * 1) = -inf
}


TEST(fmadd_fmsub_double_nans) {
  INIT_V8();
  // Make sure that NaN propagation works correctly.
  double s1 = rawbits_to_double(0x7ff5555511111111);
  double s2 = rawbits_to_double(0x7ff5555522222222);
  double sa = rawbits_to_double(0x7ff55555aaaaaaaa);
  double q1 = rawbits_to_double(0x7ffaaaaa11111111);
  double q2 = rawbits_to_double(0x7ffaaaaa22222222);
  double qa = rawbits_to_double(0x7ffaaaaaaaaaaaaa);
  ASSERT(IsSignallingNaN(s1));
  ASSERT(IsSignallingNaN(s2));
  ASSERT(IsSignallingNaN(sa));
  ASSERT(IsQuietNaN(q1));
  ASSERT(IsQuietNaN(q2));
  ASSERT(IsQuietNaN(qa));

  // The input NaNs after passing through ProcessNaN.
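  // ProcessNaN quietens a signalling NaN by setting the top bit of the
  // mantissa (bit 51 for doubles), so 0x7ff5... becomes 0x7ffd...; quiet
  // NaNs pass through unchanged.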
  double s1_proc = rawbits_to_double(0x7ffd555511111111);
  double s2_proc = rawbits_to_double(0x7ffd555522222222);
  double sa_proc = rawbits_to_double(0x7ffd5555aaaaaaaa);
  double q1_proc = q1;
  double q2_proc = q2;
  double qa_proc = qa;
  ASSERT(IsQuietNaN(s1_proc));
  ASSERT(IsQuietNaN(s2_proc));
  ASSERT(IsQuietNaN(sa_proc));
  ASSERT(IsQuietNaN(q1_proc));
  ASSERT(IsQuietNaN(q2_proc));
  ASSERT(IsQuietNaN(qa_proc));

  // The input NaNs negated, as ARMv8 hardware would negate them.
  double s1_proc_neg = rawbits_to_double(0xfffd555511111111);
  double sa_proc_neg = rawbits_to_double(0xfffd5555aaaaaaaa);
  double q1_proc_neg = rawbits_to_double(0xfffaaaaa11111111);
  double qa_proc_neg = rawbits_to_double(0xfffaaaaaaaaaaaaa);
  ASSERT(IsQuietNaN(s1_proc_neg));
  ASSERT(IsQuietNaN(sa_proc_neg));
  ASSERT(IsQuietNaN(q1_proc_neg));
  ASSERT(IsQuietNaN(qa_proc_neg));

  // Quiet NaNs are propagated.
  FmaddFmsubHelper(q1, 0, 0, q1_proc, q1_proc_neg, q1_proc_neg, q1_proc);
  FmaddFmsubHelper(0, q2, 0, q2_proc, q2_proc, q2_proc, q2_proc);
  FmaddFmsubHelper(0, 0, qa, qa_proc, qa_proc, qa_proc_neg, qa_proc_neg);
  FmaddFmsubHelper(q1, q2, 0, q1_proc, q1_proc_neg, q1_proc_neg, q1_proc);
  FmaddFmsubHelper(0, q2, qa, qa_proc, qa_proc, qa_proc_neg, qa_proc_neg);
  FmaddFmsubHelper(q1, 0, qa, qa_proc, qa_proc, qa_proc_neg, qa_proc_neg);
  FmaddFmsubHelper(q1, q2, qa, qa_proc, qa_proc, qa_proc_neg, qa_proc_neg);

  // Signalling NaNs are propagated, and made quiet.
  FmaddFmsubHelper(s1, 0, 0, s1_proc, s1_proc_neg, s1_proc_neg, s1_proc);
  FmaddFmsubHelper(0, s2, 0, s2_proc, s2_proc, s2_proc, s2_proc);
  FmaddFmsubHelper(0, 0, sa, sa_proc, sa_proc, sa_proc_neg, sa_proc_neg);
  FmaddFmsubHelper(s1, s2, 0, s1_proc, s1_proc_neg, s1_proc_neg, s1_proc);
  FmaddFmsubHelper(0, s2, sa, sa_proc, sa_proc, sa_proc_neg, sa_proc_neg);
  FmaddFmsubHelper(s1, 0, sa, sa_proc, sa_proc, sa_proc_neg, sa_proc_neg);
  FmaddFmsubHelper(s1, s2, sa, sa_proc, sa_proc, sa_proc_neg, sa_proc_neg);

  // Signalling NaNs take precedence over quiet NaNs.
  FmaddFmsubHelper(s1, q2, qa, s1_proc, s1_proc_neg, s1_proc_neg, s1_proc);
  FmaddFmsubHelper(q1, s2, qa, s2_proc, s2_proc, s2_proc, s2_proc);
  FmaddFmsubHelper(q1, q2, sa, sa_proc, sa_proc, sa_proc_neg, sa_proc_neg);
  FmaddFmsubHelper(s1, s2, qa, s1_proc, s1_proc_neg, s1_proc_neg, s1_proc);
  FmaddFmsubHelper(q1, s2, sa, sa_proc, sa_proc, sa_proc_neg, sa_proc_neg);
  FmaddFmsubHelper(s1, q2, sa, sa_proc, sa_proc, sa_proc_neg, sa_proc_neg);
  FmaddFmsubHelper(s1, s2, sa, sa_proc, sa_proc, sa_proc_neg, sa_proc_neg);

  // A NaN generated by the intermediate op1 * op2 overrides a quiet NaN in a.
  FmaddFmsubHelper(0, kFP64PositiveInfinity, qa,
                   kFP64DefaultNaN, kFP64DefaultNaN,
                   kFP64DefaultNaN, kFP64DefaultNaN);
  FmaddFmsubHelper(kFP64PositiveInfinity, 0, qa,
                   kFP64DefaultNaN, kFP64DefaultNaN,
                   kFP64DefaultNaN, kFP64DefaultNaN);
  FmaddFmsubHelper(0, kFP64NegativeInfinity, qa,
                   kFP64DefaultNaN, kFP64DefaultNaN,
                   kFP64DefaultNaN, kFP64DefaultNaN);
  FmaddFmsubHelper(kFP64NegativeInfinity, 0, qa,
                   kFP64DefaultNaN, kFP64DefaultNaN,
                   kFP64DefaultNaN, kFP64DefaultNaN);
}


TEST(fmadd_fmsub_float_nans) {
  INIT_V8();
  // Make sure that NaN propagation works correctly.
  float s1 = rawbits_to_float(0x7f951111);
  float s2 = rawbits_to_float(0x7f952222);
  float sa = rawbits_to_float(0x7f95aaaa);
  float q1 = rawbits_to_float(0x7fea1111);
  float q2 = rawbits_to_float(0x7fea2222);
  float qa = rawbits_to_float(0x7feaaaaa);
  ASSERT(IsSignallingNaN(s1));
  ASSERT(IsSignallingNaN(s2));
  ASSERT(IsSignallingNaN(sa));
  ASSERT(IsQuietNaN(q1));
  ASSERT(IsQuietNaN(q2));
  ASSERT(IsQuietNaN(qa));

  // The input NaNs after passing through ProcessNaN.
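  // As for doubles, ProcessNaN sets the top mantissa bit (bit 22 for
  // floats), so 0x7f95... becomes 0x7fd5...; quiet NaNs are unchanged.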
  float s1_proc = rawbits_to_float(0x7fd51111);
  float s2_proc = rawbits_to_float(0x7fd52222);
  float sa_proc = rawbits_to_float(0x7fd5aaaa);
  float q1_proc = q1;
  float q2_proc = q2;
  float qa_proc = qa;
  ASSERT(IsQuietNaN(s1_proc));
  ASSERT(IsQuietNaN(s2_proc));
  ASSERT(IsQuietNaN(sa_proc));
  ASSERT(IsQuietNaN(q1_proc));
  ASSERT(IsQuietNaN(q2_proc));
  ASSERT(IsQuietNaN(qa_proc));

  // The input NaNs negated, as ARMv8 hardware would negate them.
  float s1_proc_neg = rawbits_to_float(0xffd51111);
  float sa_proc_neg = rawbits_to_float(0xffd5aaaa);
  float q1_proc_neg = rawbits_to_float(0xffea1111);
  float qa_proc_neg = rawbits_to_float(0xffeaaaaa);
  ASSERT(IsQuietNaN(s1_proc_neg));
  ASSERT(IsQuietNaN(sa_proc_neg));
  ASSERT(IsQuietNaN(q1_proc_neg));
  ASSERT(IsQuietNaN(qa_proc_neg));

  // Quiet NaNs are propagated.
  FmaddFmsubHelper(q1, 0, 0, q1_proc, q1_proc_neg, q1_proc_neg, q1_proc);
  FmaddFmsubHelper(0, q2, 0, q2_proc, q2_proc, q2_proc, q2_proc);
  FmaddFmsubHelper(0, 0, qa, qa_proc, qa_proc, qa_proc_neg, qa_proc_neg);
  FmaddFmsubHelper(q1, q2, 0, q1_proc, q1_proc_neg, q1_proc_neg, q1_proc);
  FmaddFmsubHelper(0, q2, qa, qa_proc, qa_proc, qa_proc_neg, qa_proc_neg);
  FmaddFmsubHelper(q1, 0, qa, qa_proc, qa_proc, qa_proc_neg, qa_proc_neg);
  FmaddFmsubHelper(q1, q2, qa, qa_proc, qa_proc, qa_proc_neg, qa_proc_neg);

  // Signalling NaNs are propagated, and made quiet.
  FmaddFmsubHelper(s1, 0, 0, s1_proc, s1_proc_neg, s1_proc_neg, s1_proc);
  FmaddFmsubHelper(0, s2, 0, s2_proc, s2_proc, s2_proc, s2_proc);
  FmaddFmsubHelper(0, 0, sa, sa_proc, sa_proc, sa_proc_neg, sa_proc_neg);
  FmaddFmsubHelper(s1, s2, 0, s1_proc, s1_proc_neg, s1_proc_neg, s1_proc);
  FmaddFmsubHelper(0, s2, sa, sa_proc, sa_proc, sa_proc_neg, sa_proc_neg);
  FmaddFmsubHelper(s1, 0, sa, sa_proc, sa_proc, sa_proc_neg, sa_proc_neg);
  FmaddFmsubHelper(s1, s2, sa, sa_proc, sa_proc, sa_proc_neg, sa_proc_neg);

  // Signalling NaNs take precedence over quiet NaNs.
  FmaddFmsubHelper(s1, q2, qa, s1_proc, s1_proc_neg, s1_proc_neg, s1_proc);
  FmaddFmsubHelper(q1, s2, qa, s2_proc, s2_proc, s2_proc, s2_proc);
  FmaddFmsubHelper(q1, q2, sa, sa_proc, sa_proc, sa_proc_neg, sa_proc_neg);
  FmaddFmsubHelper(s1, s2, qa, s1_proc, s1_proc_neg, s1_proc_neg, s1_proc);
  FmaddFmsubHelper(q1, s2, sa, sa_proc, sa_proc, sa_proc_neg, sa_proc_neg);
  FmaddFmsubHelper(s1, q2, sa, sa_proc, sa_proc, sa_proc_neg, sa_proc_neg);
  FmaddFmsubHelper(s1, s2, sa, sa_proc, sa_proc, sa_proc_neg, sa_proc_neg);

  // A NaN generated by the intermediate op1 * op2 overrides a quiet NaN in a.
  FmaddFmsubHelper(0, kFP32PositiveInfinity, qa,
                   kFP32DefaultNaN, kFP32DefaultNaN,
                   kFP32DefaultNaN, kFP32DefaultNaN);
  FmaddFmsubHelper(kFP32PositiveInfinity, 0, qa,
                   kFP32DefaultNaN, kFP32DefaultNaN,
                   kFP32DefaultNaN, kFP32DefaultNaN);
  FmaddFmsubHelper(0, kFP32NegativeInfinity, qa,
                   kFP32DefaultNaN, kFP32DefaultNaN,
                   kFP32DefaultNaN, kFP32DefaultNaN);
  FmaddFmsubHelper(kFP32NegativeInfinity, 0, qa,
                   kFP32DefaultNaN, kFP32DefaultNaN,
                   kFP32DefaultNaN, kFP32DefaultNaN);
}


TEST(fdiv) {
  INIT_V8();
  SETUP();

  START();
  __ Fmov(s14, -0.0f);
  __ Fmov(s15, kFP32PositiveInfinity);
  __ Fmov(s16, kFP32NegativeInfinity);
  __ Fmov(s17, 3.25f);
  __ Fmov(s18, 2.0f);
  __ Fmov(s19, 2.0f);
  __ Fmov(s20, -2.0f);

  __ Fmov(d26, -0.0);
  __ Fmov(d27, kFP64PositiveInfinity);
  __ Fmov(d28, kFP64NegativeInfinity);
  __ Fmov(d29, 0.0);
  __ Fmov(d30, -2.0);
  __ Fmov(d31, 2.25);

  __ Fdiv(s0, s17, s18);
  __ Fdiv(s1, s18, s19);
  __ Fdiv(s2, s14, s18);
  __ Fdiv(s3, s18, s15);
  __ Fdiv(s4, s18, s16);
  __ Fdiv(s5, s15, s16);
  __ Fdiv(s6, s14, s14);

  __ Fdiv(d7, d31, d30);
  __ Fdiv(d8, d29, d31);
  __ Fdiv(d9, d26, d31);
  __ Fdiv(d10, d31, d27);
  __ Fdiv(d11, d31, d28);
  __ Fdiv(d12, d28, d27);
  __ Fdiv(d13, d29, d29);
  END();

  RUN();

  ASSERT_EQUAL_FP32(1.625f, s0);
  ASSERT_EQUAL_FP32(1.0f, s1);
  ASSERT_EQUAL_FP32(-0.0f, s2);
  ASSERT_EQUAL_FP32(0.0f, s3);
  ASSERT_EQUAL_FP32(-0.0f, s4);
  ASSERT_EQUAL_FP32(kFP32DefaultNaN, s5);
  ASSERT_EQUAL_FP32(kFP32DefaultNaN, s6);
  ASSERT_EQUAL_FP64(-1.125, d7);
  ASSERT_EQUAL_FP64(0.0, d8);
  ASSERT_EQUAL_FP64(-0.0, d9);
  ASSERT_EQUAL_FP64(0.0, d10);
  ASSERT_EQUAL_FP64(-0.0, d11);
  ASSERT_EQUAL_FP64(kFP64DefaultNaN, d12);
  ASSERT_EQUAL_FP64(kFP64DefaultNaN, d13);

  TEARDOWN();
}


static float MinMaxHelper(float n,
                          float m,
                          bool min,
                          float quiet_nan_substitute = 0.0) {
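  // quiet_nan_substitute models Fminnm/Fmaxnm: when exactly one operand is a
  // quiet NaN it behaves as if that operand were the substitute (+infinity
  // for min, -infinity for max), so the numeric operand wins.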
  uint32_t raw_n = float_to_rawbits(n);
  uint32_t raw_m = float_to_rawbits(m);

  if (std::isnan(n) && ((raw_n & kSQuietNanMask) == 0)) {
    // n is signalling NaN.
    return rawbits_to_float(raw_n | kSQuietNanMask);
  } else if (std::isnan(m) && ((raw_m & kSQuietNanMask) == 0)) {
    // m is signalling NaN.
    return rawbits_to_float(raw_m | kSQuietNanMask);
  } else if (quiet_nan_substitute == 0.0) {
    if (std::isnan(n)) {
      // n is quiet NaN.
      return n;
    } else if (std::isnan(m)) {
      // m is quiet NaN.
      return m;
    }
  } else {
    // Substitute n or m if one is quiet, but not both.
    if (std::isnan(n) && !std::isnan(m)) {
      // n is quiet NaN: replace with substitute.
      n = quiet_nan_substitute;
    } else if (!std::isnan(n) && std::isnan(m)) {
      // m is quiet NaN: replace with substitute.
      m = quiet_nan_substitute;
    }
  }

  if ((n == 0.0) && (m == 0.0) &&
      (copysign(1.0, n) != copysign(1.0, m))) {
    return min ? -0.0 : 0.0;
  }

  return min ? fminf(n, m) : fmaxf(n, m);
}


static double MinMaxHelper(double n,
                           double m,
                           bool min,
                           double quiet_nan_substitute = 0.0) {
  uint64_t raw_n = double_to_rawbits(n);
  uint64_t raw_m = double_to_rawbits(m);

  if (std::isnan(n) && ((raw_n & kDQuietNanMask) == 0)) {
    // n is signalling NaN.
    return rawbits_to_double(raw_n | kDQuietNanMask);
  } else if (std::isnan(m) && ((raw_m & kDQuietNanMask) == 0)) {
    // m is signalling NaN.
    return rawbits_to_double(raw_m | kDQuietNanMask);
  } else if (quiet_nan_substitute == 0.0) {
    if (std::isnan(n)) {
      // n is quiet NaN.
      return n;
    } else if (std::isnan(m)) {
      // m is quiet NaN.
      return m;
    }
  } else {
    // Substitute n or m if one is quiet, but not both.
    if (std::isnan(n) && !std::isnan(m)) {
      // n is quiet NaN: replace with substitute.
      n = quiet_nan_substitute;
    } else if (!std::isnan(n) && std::isnan(m)) {
      // m is quiet NaN: replace with substitute.
      m = quiet_nan_substitute;
    }
  }

  if ((n == 0.0) && (m == 0.0) &&
      (copysign(1.0, n) != copysign(1.0, m))) {
    return min ? -0.0 : 0.0;
  }

  return min ? fmin(n, m) : fmax(n, m);
}


static void FminFmaxDoubleHelper(double n, double m, double min, double max,
                                 double minnm, double maxnm) {
  SETUP();

  START();
  __ Fmov(d0, n);
  __ Fmov(d1, m);
  __ Fmin(d28, d0, d1);
  __ Fmax(d29, d0, d1);
  __ Fminnm(d30, d0, d1);
  __ Fmaxnm(d31, d0, d1);
  END();

  RUN();

  ASSERT_EQUAL_FP64(min, d28);
  ASSERT_EQUAL_FP64(max, d29);
  ASSERT_EQUAL_FP64(minnm, d30);
  ASSERT_EQUAL_FP64(maxnm, d31);

  TEARDOWN();
}


TEST(fmax_fmin_d) {
  INIT_V8();
  // Use non-standard NaNs to check that the payload bits are preserved.
  double snan = rawbits_to_double(0x7ff5555512345678);
  double qnan = rawbits_to_double(0x7ffaaaaa87654321);

  double snan_processed = rawbits_to_double(0x7ffd555512345678);
  double qnan_processed = qnan;

  ASSERT(IsSignallingNaN(snan));
  ASSERT(IsQuietNaN(qnan));
  ASSERT(IsQuietNaN(snan_processed));
  ASSERT(IsQuietNaN(qnan_processed));

  // Bootstrap tests.
  FminFmaxDoubleHelper(0, 0, 0, 0, 0, 0);
  FminFmaxDoubleHelper(0, 1, 0, 1, 0, 1);
  FminFmaxDoubleHelper(kFP64PositiveInfinity, kFP64NegativeInfinity,
                       kFP64NegativeInfinity, kFP64PositiveInfinity,
                       kFP64NegativeInfinity, kFP64PositiveInfinity);
  FminFmaxDoubleHelper(snan, 0,
                       snan_processed, snan_processed,
                       snan_processed, snan_processed);
  FminFmaxDoubleHelper(0, snan,
                       snan_processed, snan_processed,
                       snan_processed, snan_processed);
  FminFmaxDoubleHelper(qnan, 0,
                       qnan_processed, qnan_processed,
                       0, 0);
  FminFmaxDoubleHelper(0, qnan,
                       qnan_processed, qnan_processed,
                       0, 0);
  FminFmaxDoubleHelper(qnan, snan,
                       snan_processed, snan_processed,
                       snan_processed, snan_processed);
  FminFmaxDoubleHelper(snan, qnan,
                       snan_processed, snan_processed,
                       snan_processed, snan_processed);

  // Iterate over all combinations of inputs.
  double inputs[] = { DBL_MAX, DBL_MIN, 1.0, 0.0,
                      -DBL_MAX, -DBL_MIN, -1.0, -0.0,
                      kFP64PositiveInfinity, kFP64NegativeInfinity,
                      kFP64QuietNaN, kFP64SignallingNaN };

  const int count = sizeof(inputs) / sizeof(inputs[0]);

  for (int in = 0; in < count; in++) {
    double n = inputs[in];
    for (int im = 0; im < count; im++) {
      double m = inputs[im];
      FminFmaxDoubleHelper(n, m,
                           MinMaxHelper(n, m, true),
                           MinMaxHelper(n, m, false),
                           MinMaxHelper(n, m, true, kFP64PositiveInfinity),
                           MinMaxHelper(n, m, false, kFP64NegativeInfinity));
    }
  }
}


static void FminFmaxFloatHelper(float n, float m, float min, float max,
                                float minnm, float maxnm) {
  SETUP();

  START();
  __ Fmov(s0, n);
  __ Fmov(s1, m);
  __ Fmin(s28, s0, s1);
  __ Fmax(s29, s0, s1);
  __ Fminnm(s30, s0, s1);
  __ Fmaxnm(s31, s0, s1);
  END();

  RUN();

  ASSERT_EQUAL_FP32(min, s28);
  ASSERT_EQUAL_FP32(max, s29);
  ASSERT_EQUAL_FP32(minnm, s30);
  ASSERT_EQUAL_FP32(maxnm, s31);

  TEARDOWN();
}


TEST(fmax_fmin_s) {
  INIT_V8();
  // Use non-standard NaNs to check that the payload bits are preserved.
  float snan = rawbits_to_float(0x7f951234);
  float qnan = rawbits_to_float(0x7fea8765);

  float snan_processed = rawbits_to_float(0x7fd51234);
  float qnan_processed = qnan;

  ASSERT(IsSignallingNaN(snan));
  ASSERT(IsQuietNaN(qnan));
  ASSERT(IsQuietNaN(snan_processed));
  ASSERT(IsQuietNaN(qnan_processed));

  // Bootstrap tests.
  FminFmaxFloatHelper(0, 0, 0, 0, 0, 0);
  FminFmaxFloatHelper(0, 1, 0, 1, 0, 1);
  FminFmaxFloatHelper(kFP32PositiveInfinity, kFP32NegativeInfinity,
                      kFP32NegativeInfinity, kFP32PositiveInfinity,
                      kFP32NegativeInfinity, kFP32PositiveInfinity);
  FminFmaxFloatHelper(snan, 0,
                      snan_processed, snan_processed,
                      snan_processed, snan_processed);
  FminFmaxFloatHelper(0, snan,
                      snan_processed, snan_processed,
                      snan_processed, snan_processed);
  FminFmaxFloatHelper(qnan, 0,
                      qnan_processed, qnan_processed,
                      0, 0);
  FminFmaxFloatHelper(0, qnan,
                      qnan_processed, qnan_processed,
                      0, 0);
  FminFmaxFloatHelper(qnan, snan,
                      snan_processed, snan_processed,
                      snan_processed, snan_processed);
  FminFmaxFloatHelper(snan, qnan,
                      snan_processed, snan_processed,
                      snan_processed, snan_processed);

  // Iterate over all combinations of inputs.
  float inputs[] = { FLT_MAX, FLT_MIN, 1.0, 0.0,
                     -FLT_MAX, -FLT_MIN, -1.0, -0.0,
                     kFP32PositiveInfinity, kFP32NegativeInfinity,
                     kFP32QuietNaN, kFP32SignallingNaN };

  const int count = sizeof(inputs) / sizeof(inputs[0]);

  for (int in = 0; in < count; in++) {
    float n = inputs[in];
    for (int im = 0; im < count; im++) {
      float m = inputs[im];
      FminFmaxFloatHelper(n, m,
                          MinMaxHelper(n, m, true),
                          MinMaxHelper(n, m, false),
                          MinMaxHelper(n, m, true, kFP32PositiveInfinity),
                          MinMaxHelper(n, m, false, kFP32NegativeInfinity));
    }
  }
}


TEST(fccmp) {
  INIT_V8();
  SETUP();

  START();
  __ Fmov(s16, 0.0);
  __ Fmov(s17, 0.5);
  __ Fmov(d18, -0.5);
  __ Fmov(d19, -1.0);
  __ Mov(x20, 0);

  __ Cmp(x20, 0);
  __ Fccmp(s16, s16, NoFlag, eq);
  __ Mrs(x0, NZCV);

  __ Cmp(x20, 0);
  __ Fccmp(s16, s16, VFlag, ne);
  __ Mrs(x1, NZCV);

  __ Cmp(x20, 0);
  __ Fccmp(s16, s17, CFlag, ge);
  __ Mrs(x2, NZCV);

  __ Cmp(x20, 0);
  __ Fccmp(s16, s17, CVFlag, lt);
  __ Mrs(x3, NZCV);

  __ Cmp(x20, 0);
  __ Fccmp(d18, d18, ZFlag, le);
  __ Mrs(x4, NZCV);

  __ Cmp(x20, 0);
  __ Fccmp(d18, d18, ZVFlag, gt);
  __ Mrs(x5, NZCV);

  __ Cmp(x20, 0);
  __ Fccmp(d18, d19, ZCVFlag, ls);
  __ Mrs(x6, NZCV);

  __ Cmp(x20, 0);
  __ Fccmp(d18, d19, NFlag, hi);
  __ Mrs(x7, NZCV);

  __ fccmp(s16, s16, NFlag, al);
  __ Mrs(x8, NZCV);

  __ fccmp(d18, d18, NFlag, nv);
  __ Mrs(x9, NZCV);

  END();

  RUN();

  ASSERT_EQUAL_32(ZCFlag, w0);
  ASSERT_EQUAL_32(VFlag, w1);
  ASSERT_EQUAL_32(NFlag, w2);
  ASSERT_EQUAL_32(CVFlag, w3);
  ASSERT_EQUAL_32(ZCFlag, w4);
  ASSERT_EQUAL_32(ZVFlag, w5);
  ASSERT_EQUAL_32(CFlag, w6);
  ASSERT_EQUAL_32(NFlag, w7);
  ASSERT_EQUAL_32(ZCFlag, w8);
  ASSERT_EQUAL_32(ZCFlag, w9);

  TEARDOWN();
}


TEST(fcmp) {
  INIT_V8();
  SETUP();

  START();

  // Some of these tests require a floating-point scratch register assigned to
  // the macro assembler, but most do not.
  {
    // We're going to mess around with the available scratch registers in this
    // test. A UseScratchRegisterScope will make sure that they are restored to
    // the default values once we're finished.
    UseScratchRegisterScope temps(&masm);
    masm.FPTmpList()->set_list(0);

    __ Fmov(s8, 0.0);
    __ Fmov(s9, 0.5);
    __ Mov(w18, 0x7f800001);  // Single precision NaN.
    __ Fmov(s18, w18);

    __ Fcmp(s8, s8);
    __ Mrs(x0, NZCV);
    __ Fcmp(s8, s9);
    __ Mrs(x1, NZCV);
    __ Fcmp(s9, s8);
    __ Mrs(x2, NZCV);
    __ Fcmp(s8, s18);
    __ Mrs(x3, NZCV);
    __ Fcmp(s18, s18);
    __ Mrs(x4, NZCV);
    __ Fcmp(s8, 0.0);
    __ Mrs(x5, NZCV);
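    // Fcmp can compare against the immediate 0.0 directly; any other literal
    // has to be loaded into a scratch FP register first, so make d0 available
    // for these comparisons.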
    masm.FPTmpList()->set_list(d0.Bit());
    __ Fcmp(s8, 255.0);
    masm.FPTmpList()->set_list(0);
    __ Mrs(x6, NZCV);

    __ Fmov(d19, 0.0);
    __ Fmov(d20, 0.5);
    __ Mov(x21, 0x7ff0000000000001UL);   // Double precision NaN.
    __ Fmov(d21, x21);

    __ Fcmp(d19, d19);
    __ Mrs(x10, NZCV);
    __ Fcmp(d19, d20);
    __ Mrs(x11, NZCV);
    __ Fcmp(d20, d19);
    __ Mrs(x12, NZCV);
    __ Fcmp(d19, d21);
    __ Mrs(x13, NZCV);
    __ Fcmp(d21, d21);
    __ Mrs(x14, NZCV);
    __ Fcmp(d19, 0.0);
    __ Mrs(x15, NZCV);
    masm.FPTmpList()->set_list(d0.Bit());
    __ Fcmp(d19, 12.3456);
    masm.FPTmpList()->set_list(0);
    __ Mrs(x16, NZCV);
  }

  END();

  RUN();

  ASSERT_EQUAL_32(ZCFlag, w0);
  ASSERT_EQUAL_32(NFlag, w1);
  ASSERT_EQUAL_32(CFlag, w2);
  ASSERT_EQUAL_32(CVFlag, w3);
  ASSERT_EQUAL_32(CVFlag, w4);
  ASSERT_EQUAL_32(ZCFlag, w5);
  ASSERT_EQUAL_32(NFlag, w6);
  ASSERT_EQUAL_32(ZCFlag, w10);
  ASSERT_EQUAL_32(NFlag, w11);
  ASSERT_EQUAL_32(CFlag, w12);
  ASSERT_EQUAL_32(CVFlag, w13);
  ASSERT_EQUAL_32(CVFlag, w14);
  ASSERT_EQUAL_32(ZCFlag, w15);
  ASSERT_EQUAL_32(NFlag, w16);

  TEARDOWN();
}


TEST(fcsel) {
  INIT_V8();
  SETUP();

  START();
  __ Mov(x16, 0);
  __ Fmov(s16, 1.0);
  __ Fmov(s17, 2.0);
  __ Fmov(d18, 3.0);
  __ Fmov(d19, 4.0);

  __ Cmp(x16, 0);
  __ Fcsel(s0, s16, s17, eq);
  __ Fcsel(s1, s16, s17, ne);
  __ Fcsel(d2, d18, d19, eq);
  __ Fcsel(d3, d18, d19, ne);
  __ fcsel(s4, s16, s17, al);
  __ fcsel(d5, d18, d19, nv);
  END();

  RUN();

  ASSERT_EQUAL_FP32(1.0, s0);
  ASSERT_EQUAL_FP32(2.0, s1);
  ASSERT_EQUAL_FP64(3.0, d2);
  ASSERT_EQUAL_FP64(4.0, d3);
  ASSERT_EQUAL_FP32(1.0, s4);
  ASSERT_EQUAL_FP64(3.0, d5);

  TEARDOWN();
}


TEST(fneg) {
  INIT_V8();
  SETUP();

  START();
  __ Fmov(s16, 1.0);
  __ Fmov(s17, 0.0);
  __ Fmov(s18, kFP32PositiveInfinity);
  __ Fmov(d19, 1.0);
  __ Fmov(d20, 0.0);
  __ Fmov(d21, kFP64PositiveInfinity);

  __ Fneg(s0, s16);
  __ Fneg(s1, s0);
  __ Fneg(s2, s17);
  __ Fneg(s3, s2);
  __ Fneg(s4, s18);
  __ Fneg(s5, s4);
  __ Fneg(d6, d19);
  __ Fneg(d7, d6);
  __ Fneg(d8, d20);
  __ Fneg(d9, d8);
  __ Fneg(d10, d21);
  __ Fneg(d11, d10);
  END();

  RUN();

  ASSERT_EQUAL_FP32(-1.0, s0);
  ASSERT_EQUAL_FP32(1.0, s1);
  ASSERT_EQUAL_FP32(-0.0, s2);
  ASSERT_EQUAL_FP32(0.0, s3);
  ASSERT_EQUAL_FP32(kFP32NegativeInfinity, s4);
  ASSERT_EQUAL_FP32(kFP32PositiveInfinity, s5);
  ASSERT_EQUAL_FP64(-1.0, d6);
  ASSERT_EQUAL_FP64(1.0, d7);
  ASSERT_EQUAL_FP64(-0.0, d8);
  ASSERT_EQUAL_FP64(0.0, d9);
  ASSERT_EQUAL_FP64(kFP64NegativeInfinity, d10);
  ASSERT_EQUAL_FP64(kFP64PositiveInfinity, d11);

  TEARDOWN();
}


TEST(fabs) {
  INIT_V8();
  SETUP();

  START();
  __ Fmov(s16, -1.0);
  __ Fmov(s17, -0.0);
  __ Fmov(s18, kFP32NegativeInfinity);
  __ Fmov(d19, -1.0);
  __ Fmov(d20, -0.0);
  __ Fmov(d21, kFP64NegativeInfinity);

  __ Fabs(s0, s16);
  __ Fabs(s1, s0);
  __ Fabs(s2, s17);
  __ Fabs(s3, s18);
  __ Fabs(d4, d19);
  __ Fabs(d5, d4);
  __ Fabs(d6, d20);
  __ Fabs(d7, d21);
  END();

  RUN();

  ASSERT_EQUAL_FP32(1.0, s0);
  ASSERT_EQUAL_FP32(1.0, s1);
  ASSERT_EQUAL_FP32(0.0, s2);
  ASSERT_EQUAL_FP32(kFP32PositiveInfinity, s3);
  ASSERT_EQUAL_FP64(1.0, d4);
  ASSERT_EQUAL_FP64(1.0, d5);
  ASSERT_EQUAL_FP64(0.0, d6);
  ASSERT_EQUAL_FP64(kFP64PositiveInfinity, d7);

  TEARDOWN();
}


TEST(fsqrt) {
  INIT_V8();
  SETUP();

  START();
  __ Fmov(s16, 0.0);
  __ Fmov(s17, 1.0);
  __ Fmov(s18, 0.25);
  __ Fmov(s19, 65536.0);
  __ Fmov(s20, -0.0);
  __ Fmov(s21, kFP32PositiveInfinity);
  __ Fmov(s22, -1.0);
  __ Fmov(d23, 0.0);
  __ Fmov(d24, 1.0);
  __ Fmov(d25, 0.25);
  __ Fmov(d26, 4294967296.0);
  __ Fmov(d27, -0.0);
  __ Fmov(d28, kFP64PositiveInfinity);
  __ Fmov(d29, -1.0);

  __ Fsqrt(s0, s16);
  __ Fsqrt(s1, s17);
  __ Fsqrt(s2, s18);
  __ Fsqrt(s3, s19);
  __ Fsqrt(s4, s20);
  __ Fsqrt(s5, s21);
  __ Fsqrt(s6, s22);
  __ Fsqrt(d7, d23);
  __ Fsqrt(d8, d24);
  __ Fsqrt(d9, d25);
  __ Fsqrt(d10, d26);
  __ Fsqrt(d11, d27);
  __ Fsqrt(d12, d28);
  __ Fsqrt(d13, d29);
  END();

  RUN();

  ASSERT_EQUAL_FP32(0.0, s0);
  ASSERT_EQUAL_FP32(1.0, s1);
  ASSERT_EQUAL_FP32(0.5, s2);
  ASSERT_EQUAL_FP32(256.0, s3);
  ASSERT_EQUAL_FP32(-0.0, s4);
  ASSERT_EQUAL_FP32(kFP32PositiveInfinity, s5);
  ASSERT_EQUAL_FP32(kFP32DefaultNaN, s6);
  ASSERT_EQUAL_FP64(0.0, d7);
  ASSERT_EQUAL_FP64(1.0, d8);
  ASSERT_EQUAL_FP64(0.5, d9);
  ASSERT_EQUAL_FP64(65536.0, d10);
  ASSERT_EQUAL_FP64(-0.0, d11);
  ASSERT_EQUAL_FP64(kFP64PositiveInfinity, d12);
  ASSERT_EQUAL_FP64(kFP64DefaultNaN, d13);

  TEARDOWN();
}


TEST(frinta) {
  INIT_V8();
  SETUP();

  START();
  __ Fmov(s16, 1.0);
  __ Fmov(s17, 1.1);
  __ Fmov(s18, 1.5);
  __ Fmov(s19, 1.9);
  __ Fmov(s20, 2.5);
  __ Fmov(s21, -1.5);
  __ Fmov(s22, -2.5);
  __ Fmov(s23, kFP32PositiveInfinity);
  __ Fmov(s24, kFP32NegativeInfinity);
  __ Fmov(s25, 0.0);
  __ Fmov(s26, -0.0);
  __ Fmov(s27, -0.2);

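  // Frinta rounds to the nearest integer, with ties away from zero, so
  // 1.5 -> 2.0, 2.5 -> 3.0 and -2.5 -> -3.0.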
  __ Frinta(s0, s16);
  __ Frinta(s1, s17);
  __ Frinta(s2, s18);
  __ Frinta(s3, s19);
  __ Frinta(s4, s20);
  __ Frinta(s5, s21);
  __ Frinta(s6, s22);
  __ Frinta(s7, s23);
  __ Frinta(s8, s24);
  __ Frinta(s9, s25);
  __ Frinta(s10, s26);
  __ Frinta(s11, s27);

  __ Fmov(d16, 1.0);
  __ Fmov(d17, 1.1);
  __ Fmov(d18, 1.5);
  __ Fmov(d19, 1.9);
  __ Fmov(d20, 2.5);
  __ Fmov(d21, -1.5);
  __ Fmov(d22, -2.5);
  __ Fmov(d23, kFP64PositiveInfinity);
  __ Fmov(d24, kFP64NegativeInfinity);
  __ Fmov(d25, 0.0);
  __ Fmov(d26, -0.0);
  __ Fmov(d27, -0.2);

  __ Frinta(d12, d16);
  __ Frinta(d13, d17);
  __ Frinta(d14, d18);
  __ Frinta(d15, d19);
  __ Frinta(d16, d20);
  __ Frinta(d17, d21);
  __ Frinta(d18, d22);
  __ Frinta(d19, d23);
  __ Frinta(d20, d24);
  __ Frinta(d21, d25);
  __ Frinta(d22, d26);
  __ Frinta(d23, d27);
  END();

  RUN();

  ASSERT_EQUAL_FP32(1.0, s0);
  ASSERT_EQUAL_FP32(1.0, s1);
  ASSERT_EQUAL_FP32(2.0, s2);
  ASSERT_EQUAL_FP32(2.0, s3);
  ASSERT_EQUAL_FP32(3.0, s4);
  ASSERT_EQUAL_FP32(-2.0, s5);
  ASSERT_EQUAL_FP32(-3.0, s6);
  ASSERT_EQUAL_FP32(kFP32PositiveInfinity, s7);
  ASSERT_EQUAL_FP32(kFP32NegativeInfinity, s8);
  ASSERT_EQUAL_FP32(0.0, s9);
  ASSERT_EQUAL_FP32(-0.0, s10);
  ASSERT_EQUAL_FP32(-0.0, s11);
  ASSERT_EQUAL_FP64(1.0, d12);
  ASSERT_EQUAL_FP64(1.0, d13);
  ASSERT_EQUAL_FP64(2.0, d14);
  ASSERT_EQUAL_FP64(2.0, d15);
  ASSERT_EQUAL_FP64(3.0, d16);
  ASSERT_EQUAL_FP64(-2.0, d17);
  ASSERT_EQUAL_FP64(-3.0, d18);
  ASSERT_EQUAL_FP64(kFP64PositiveInfinity, d19);
  ASSERT_EQUAL_FP64(kFP64NegativeInfinity, d20);
  ASSERT_EQUAL_FP64(0.0, d21);
  ASSERT_EQUAL_FP64(-0.0, d22);
  ASSERT_EQUAL_FP64(-0.0, d23);

  TEARDOWN();
}


TEST(frintm) {
  INIT_V8();
  SETUP();

  START();
  __ Fmov(s16, 1.0);
  __ Fmov(s17, 1.1);
  __ Fmov(s18, 1.5);
  __ Fmov(s19, 1.9);
  __ Fmov(s20, 2.5);
  __ Fmov(s21, -1.5);
  __ Fmov(s22, -2.5);
  __ Fmov(s23, kFP32PositiveInfinity);
  __ Fmov(s24, kFP32NegativeInfinity);
  __ Fmov(s25, 0.0);
  __ Fmov(s26, -0.0);
  __ Fmov(s27, -0.2);

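  // Frintm rounds towards minus infinity (like floor), so 1.9 -> 1.0 and
  // -0.2 -> -1.0.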
  __ Frintm(s0, s16);
  __ Frintm(s1, s17);
  __ Frintm(s2, s18);
  __ Frintm(s3, s19);
  __ Frintm(s4, s20);
  __ Frintm(s5, s21);
  __ Frintm(s6, s22);
  __ Frintm(s7, s23);
  __ Frintm(s8, s24);
  __ Frintm(s9, s25);
  __ Frintm(s10, s26);
  __ Frintm(s11, s27);

  __ Fmov(d16, 1.0);
  __ Fmov(d17, 1.1);
  __ Fmov(d18, 1.5);
  __ Fmov(d19, 1.9);
  __ Fmov(d20, 2.5);
  __ Fmov(d21, -1.5);
  __ Fmov(d22, -2.5);
  __ Fmov(d23, kFP64PositiveInfinity);
  __ Fmov(d24, kFP64NegativeInfinity);
  __ Fmov(d25, 0.0);
  __ Fmov(d26, -0.0);
  __ Fmov(d27, -0.2);

  __ Frintm(d12, d16);
  __ Frintm(d13, d17);
  __ Frintm(d14, d18);
  __ Frintm(d15, d19);
  __ Frintm(d16, d20);
  __ Frintm(d17, d21);
  __ Frintm(d18, d22);
  __ Frintm(d19, d23);
  __ Frintm(d20, d24);
  __ Frintm(d21, d25);
  __ Frintm(d22, d26);
  __ Frintm(d23, d27);
  END();

  RUN();

  ASSERT_EQUAL_FP32(1.0, s0);
  ASSERT_EQUAL_FP32(1.0, s1);
  ASSERT_EQUAL_FP32(1.0, s2);
  ASSERT_EQUAL_FP32(1.0, s3);
  ASSERT_EQUAL_FP32(2.0, s4);
  ASSERT_EQUAL_FP32(-2.0, s5);
  ASSERT_EQUAL_FP32(-3.0, s6);
  ASSERT_EQUAL_FP32(kFP32PositiveInfinity, s7);
  ASSERT_EQUAL_FP32(kFP32NegativeInfinity, s8);
  ASSERT_EQUAL_FP32(0.0, s9);
  ASSERT_EQUAL_FP32(-0.0, s10);
  ASSERT_EQUAL_FP32(-1.0, s11);
  ASSERT_EQUAL_FP64(1.0, d12);
  ASSERT_EQUAL_FP64(1.0, d13);
  ASSERT_EQUAL_FP64(1.0, d14);
  ASSERT_EQUAL_FP64(1.0, d15);
  ASSERT_EQUAL_FP64(2.0, d16);
  ASSERT_EQUAL_FP64(-2.0, d17);
  ASSERT_EQUAL_FP64(-3.0, d18);
  ASSERT_EQUAL_FP64(kFP64PositiveInfinity, d19);
  ASSERT_EQUAL_FP64(kFP64NegativeInfinity, d20);
  ASSERT_EQUAL_FP64(0.0, d21);
  ASSERT_EQUAL_FP64(-0.0, d22);
  ASSERT_EQUAL_FP64(-1.0, d23);

  TEARDOWN();
}


TEST(frintn) {
  INIT_V8();
  SETUP();

  START();
  __ Fmov(s16, 1.0);
  __ Fmov(s17, 1.1);
  __ Fmov(s18, 1.5);
  __ Fmov(s19, 1.9);
  __ Fmov(s20, 2.5);
  __ Fmov(s21, -1.5);
  __ Fmov(s22, -2.5);
  __ Fmov(s23, kFP32PositiveInfinity);
  __ Fmov(s24, kFP32NegativeInfinity);
  __ Fmov(s25, 0.0);
  __ Fmov(s26, -0.0);
  __ Fmov(s27, -0.2);

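  // Frintn rounds to the nearest integer, with ties to even, so 1.5 -> 2.0
  // but 2.5 -> 2.0, and -2.5 -> -2.0.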
  __ Frintn(s0, s16);
  __ Frintn(s1, s17);
  __ Frintn(s2, s18);
  __ Frintn(s3, s19);
  __ Frintn(s4, s20);
  __ Frintn(s5, s21);
  __ Frintn(s6, s22);
  __ Frintn(s7, s23);
  __ Frintn(s8, s24);
  __ Frintn(s9, s25);
  __ Frintn(s10, s26);
  __ Frintn(s11, s27);

  __ Fmov(d16, 1.0);
  __ Fmov(d17, 1.1);
  __ Fmov(d18, 1.5);
  __ Fmov(d19, 1.9);
  __ Fmov(d20, 2.5);
  __ Fmov(d21, -1.5);
  __ Fmov(d22, -2.5);
  __ Fmov(d23, kFP64PositiveInfinity);
  __ Fmov(d24, kFP64NegativeInfinity);
  __ Fmov(d25, 0.0);
  __ Fmov(d26, -0.0);
  __ Fmov(d27, -0.2);

  __ Frintn(d12, d16);
  __ Frintn(d13, d17);
  __ Frintn(d14, d18);
  __ Frintn(d15, d19);
  __ Frintn(d16, d20);
  __ Frintn(d17, d21);
  __ Frintn(d18, d22);
  __ Frintn(d19, d23);
  __ Frintn(d20, d24);
  __ Frintn(d21, d25);
  __ Frintn(d22, d26);
  __ Frintn(d23, d27);
  END();

  RUN();

  ASSERT_EQUAL_FP32(1.0, s0);
  ASSERT_EQUAL_FP32(1.0, s1);
  ASSERT_EQUAL_FP32(2.0, s2);
  ASSERT_EQUAL_FP32(2.0, s3);
  ASSERT_EQUAL_FP32(2.0, s4);
  ASSERT_EQUAL_FP32(-2.0, s5);
  ASSERT_EQUAL_FP32(-2.0, s6);
  ASSERT_EQUAL_FP32(kFP32PositiveInfinity, s7);
  ASSERT_EQUAL_FP32(kFP32NegativeInfinity, s8);
  ASSERT_EQUAL_FP32(0.0, s9);
  ASSERT_EQUAL_FP32(-0.0, s10);
  ASSERT_EQUAL_FP32(-0.0, s11);
  ASSERT_EQUAL_FP64(1.0, d12);
  ASSERT_EQUAL_FP64(1.0, d13);
  ASSERT_EQUAL_FP64(2.0, d14);
  ASSERT_EQUAL_FP64(2.0, d15);
  ASSERT_EQUAL_FP64(2.0, d16);
  ASSERT_EQUAL_FP64(-2.0, d17);
  ASSERT_EQUAL_FP64(-2.0, d18);
  ASSERT_EQUAL_FP64(kFP64PositiveInfinity, d19);
  ASSERT_EQUAL_FP64(kFP64NegativeInfinity, d20);
  ASSERT_EQUAL_FP64(0.0, d21);
  ASSERT_EQUAL_FP64(-0.0, d22);
  ASSERT_EQUAL_FP64(-0.0, d23);

  TEARDOWN();
}


TEST(frintz) {
  INIT_V8();
  SETUP();

  START();
  __ Fmov(s16, 1.0);
  __ Fmov(s17, 1.1);
  __ Fmov(s18, 1.5);
  __ Fmov(s19, 1.9);
  __ Fmov(s20, 2.5);
  __ Fmov(s21, -1.5);
  __ Fmov(s22, -2.5);
  __ Fmov(s23, kFP32PositiveInfinity);
  __ Fmov(s24, kFP32NegativeInfinity);
  __ Fmov(s25, 0.0);
  __ Fmov(s26, -0.0);

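  // Frintz rounds towards zero (truncation), so 1.9 -> 1.0 and -1.5 -> -1.0.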
  __ Frintz(s0, s16);
  __ Frintz(s1, s17);
  __ Frintz(s2, s18);
  __ Frintz(s3, s19);
  __ Frintz(s4, s20);
  __ Frintz(s5, s21);
  __ Frintz(s6, s22);
  __ Frintz(s7, s23);
  __ Frintz(s8, s24);
  __ Frintz(s9, s25);
  __ Frintz(s10, s26);

  __ Fmov(d16, 1.0);
  __ Fmov(d17, 1.1);
  __ Fmov(d18, 1.5);
  __ Fmov(d19, 1.9);
  __ Fmov(d20, 2.5);
  __ Fmov(d21, -1.5);
  __ Fmov(d22, -2.5);
  __ Fmov(d23, kFP64PositiveInfinity);
  __ Fmov(d24, kFP64NegativeInfinity);
  __ Fmov(d25, 0.0);
  __ Fmov(d26, -0.0);

  __ Frintz(d11, d16);
  __ Frintz(d12, d17);
  __ Frintz(d13, d18);
  __ Frintz(d14, d19);
  __ Frintz(d15, d20);
  __ Frintz(d16, d21);
  __ Frintz(d17, d22);
  __ Frintz(d18, d23);
  __ Frintz(d19, d24);
  __ Frintz(d20, d25);
  __ Frintz(d21, d26);
  END();

  RUN();

  ASSERT_EQUAL_FP32(1.0, s0);
  ASSERT_EQUAL_FP32(1.0, s1);
  ASSERT_EQUAL_FP32(1.0, s2);
  ASSERT_EQUAL_FP32(1.0, s3);
  ASSERT_EQUAL_FP32(2.0, s4);
  ASSERT_EQUAL_FP32(-1.0, s5);
  ASSERT_EQUAL_FP32(-2.0, s6);
  ASSERT_EQUAL_FP32(kFP32PositiveInfinity, s7);
  ASSERT_EQUAL_FP32(kFP32NegativeInfinity, s8);
  ASSERT_EQUAL_FP32(0.0, s9);
  ASSERT_EQUAL_FP32(-0.0, s10);
  ASSERT_EQUAL_FP64(1.0, d11);
  ASSERT_EQUAL_FP64(1.0, d12);
  ASSERT_EQUAL_FP64(1.0, d13);
  ASSERT_EQUAL_FP64(1.0, d14);
  ASSERT_EQUAL_FP64(2.0, d15);
  ASSERT_EQUAL_FP64(-1.0, d16);
  ASSERT_EQUAL_FP64(-2.0, d17);
  ASSERT_EQUAL_FP64(kFP64PositiveInfinity, d18);
  ASSERT_EQUAL_FP64(kFP64NegativeInfinity, d19);
  ASSERT_EQUAL_FP64(0.0, d20);
  ASSERT_EQUAL_FP64(-0.0, d21);

  TEARDOWN();
}


TEST(fcvt_ds) {
  INIT_V8();
  SETUP();

  START();
  __ Fmov(s16, 1.0);
  __ Fmov(s17, 1.1);
  __ Fmov(s18, 1.5);
  __ Fmov(s19, 1.9);
  __ Fmov(s20, 2.5);
  __ Fmov(s21, -1.5);
  __ Fmov(s22, -2.5);
  __ Fmov(s23, kFP32PositiveInfinity);
  __ Fmov(s24, kFP32NegativeInfinity);
  __ Fmov(s25, 0.0);
  __ Fmov(s26, -0.0);
  __ Fmov(s27, FLT_MAX);
  __ Fmov(s28, FLT_MIN);
  __ Fmov(s29, rawbits_to_float(0x7fc12345));   // Quiet NaN.
  __ Fmov(s30, rawbits_to_float(0x7f812345));   // Signalling NaN.

  __ Fcvt(d0, s16);
  __ Fcvt(d1, s17);
  __ Fcvt(d2, s18);
  __ Fcvt(d3, s19);
  __ Fcvt(d4, s20);
  __ Fcvt(d5, s21);
  __ Fcvt(d6, s22);
  __ Fcvt(d7, s23);
  __ Fcvt(d8, s24);
  __ Fcvt(d9, s25);
  __ Fcvt(d10, s26);
  __ Fcvt(d11, s27);
  __ Fcvt(d12, s28);
  __ Fcvt(d13, s29);
  __ Fcvt(d14, s30);
  END();

  RUN();

  ASSERT_EQUAL_FP64(1.0f, d0);
  ASSERT_EQUAL_FP64(1.1f, d1);
  ASSERT_EQUAL_FP64(1.5f, d2);
  ASSERT_EQUAL_FP64(1.9f, d3);
  ASSERT_EQUAL_FP64(2.5f, d4);
  ASSERT_EQUAL_FP64(-1.5f, d5);
  ASSERT_EQUAL_FP64(-2.5f, d6);
  ASSERT_EQUAL_FP64(kFP64PositiveInfinity, d7);
  ASSERT_EQUAL_FP64(kFP64NegativeInfinity, d8);
  ASSERT_EQUAL_FP64(0.0f, d9);
  ASSERT_EQUAL_FP64(-0.0f, d10);
  ASSERT_EQUAL_FP64(FLT_MAX, d11);
  ASSERT_EQUAL_FP64(FLT_MIN, d12);

  // Check that the NaN payload is preserved according to ARM64 conversion
  // rules:
  //  - The sign bit is preserved.
  //  - The top bit of the mantissa is forced to 1 (making it a quiet NaN).
  //  - The remaining mantissa bits are copied until they run out.
  //  - The low-order bits that haven't already been assigned are set to 0.
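  //
  // For the quiet NaN in s29 (0x7fc12345), these rules give:
  //   sign     : 0
  //   exponent : 0x7ff (all ones)
  //   mantissa : 0x412345 << 29 = 0x82468a0000000 (quiet bit already set)
  // i.e. 0x7ff82468a0000000. The signalling NaN in s30 differs only in the
  // quiet bit, which the conversion forces to 1, so it produces the same
  // double.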
  ASSERT_EQUAL_FP64(rawbits_to_double(0x7ff82468a0000000), d13);
  ASSERT_EQUAL_FP64(rawbits_to_double(0x7ff82468a0000000), d14);

  TEARDOWN();
}


TEST(fcvt_sd) {
  INIT_V8();
  // There are a huge number of corner-cases to check, so this test iterates
  // through a list. The list is then negated and checked again (since the sign
  // is irrelevant in ties-to-even rounding), so the list shouldn't include any
  // negative values.
  //
  // Note that this test only checks ties-to-even rounding, because that is all
  // that the simulator supports.
  struct {double in; float expected;} test[] = {
    // Check some simple conversions.
    {0.0, 0.0f},
    {1.0, 1.0f},
    {1.5, 1.5f},
    {2.0, 2.0f},
    {FLT_MAX, FLT_MAX},
    //  - The smallest normalized float.
    {pow(2.0, -126), powf(2, -126)},
    //  - Normal floats that need (ties-to-even) rounding.
    //    For normalized numbers:
    //         bit 29 (0x0000000020000000) is the lowest-order bit which will
    //                                     fit in the float's mantissa.
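    //    Low-order bits that are an odd multiple of 0x10000000 mark an exact
    //    halfway case between two representable floats; ties-to-even rounds
    //    these to the candidate with an even (0) low mantissa bit, as the
    //    cases below show.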
    {rawbits_to_double(0x3ff0000000000000), rawbits_to_float(0x3f800000)},
    {rawbits_to_double(0x3ff0000000000001), rawbits_to_float(0x3f800000)},
    {rawbits_to_double(0x3ff0000010000000), rawbits_to_float(0x3f800000)},
    {rawbits_to_double(0x3ff0000010000001), rawbits_to_float(0x3f800001)},
    {rawbits_to_double(0x3ff0000020000000), rawbits_to_float(0x3f800001)},
    {rawbits_to_double(0x3ff0000020000001), rawbits_to_float(0x3f800001)},
    {rawbits_to_double(0x3ff0000030000000), rawbits_to_float(0x3f800002)},
    {rawbits_to_double(0x3ff0000030000001), rawbits_to_float(0x3f800002)},
    {rawbits_to_double(0x3ff0000040000000), rawbits_to_float(0x3f800002)},
    {rawbits_to_double(0x3ff0000040000001), rawbits_to_float(0x3f800002)},
    {rawbits_to_double(0x3ff0000050000000), rawbits_to_float(0x3f800002)},
    {rawbits_to_double(0x3ff0000050000001), rawbits_to_float(0x3f800003)},
    {rawbits_to_double(0x3ff0000060000000), rawbits_to_float(0x3f800003)},
    //  - A mantissa that overflows into the exponent during rounding.
    {rawbits_to_double(0x3feffffff0000000), rawbits_to_float(0x3f800000)},
    //  - The largest double that rounds to a normal float.
    {rawbits_to_double(0x47efffffefffffff), rawbits_to_float(0x7f7fffff)},

    // Doubles that are too big for a float.
    {kFP64PositiveInfinity, kFP32PositiveInfinity},
    {DBL_MAX, kFP32PositiveInfinity},
    //  - The smallest exponent that's too big for a float.
    {pow(2.0, 128), kFP32PositiveInfinity},
    //  - This exponent is in range, but the value rounds to infinity.
    {rawbits_to_double(0x47effffff0000000), kFP32PositiveInfinity},

    // Doubles that are too small for a float.
    //  - The smallest positive normalized double.
    {DBL_MIN, 0.0},
    //  - The largest double which is too small for a subnormal float.
    {rawbits_to_double(0x3690000000000000), rawbits_to_float(0x00000000)},

    // Normal doubles that become subnormal floats.
    //  - The largest subnormal float.
    {rawbits_to_double(0x380fffffc0000000), rawbits_to_float(0x007fffff)},
    //  - The smallest subnormal float.
    {rawbits_to_double(0x36a0000000000000), rawbits_to_float(0x00000001)},
    //  - Subnormal floats that need (ties-to-even) rounding.
    //    For these subnormals:
    //         bit 34 (0x0000000400000000) is the lowest-order bit which will
    //                                     fit in the float's mantissa.
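    //    As above, low-order bits that are an odd multiple of 0x200000000
    //    mark a halfway case, which rounds to the candidate with an even low
    //    mantissa bit.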
    {rawbits_to_double(0x37c159e000000000), rawbits_to_float(0x00045678)},
    {rawbits_to_double(0x37c159e000000001), rawbits_to_float(0x00045678)},
    {rawbits_to_double(0x37c159e200000000), rawbits_to_float(0x00045678)},
    {rawbits_to_double(0x37c159e200000001), rawbits_to_float(0x00045679)},
    {rawbits_to_double(0x37c159e400000000), rawbits_to_float(0x00045679)},
    {rawbits_to_double(0x37c159e400000001), rawbits_to_float(0x00045679)},
    {rawbits_to_double(0x37c159e600000000), rawbits_to_float(0x0004567a)},
    {rawbits_to_double(0x37c159e600000001), rawbits_to_float(0x0004567a)},
    {rawbits_to_double(0x37c159e800000000), rawbits_to_float(0x0004567a)},
    {rawbits_to_double(0x37c159e800000001), rawbits_to_float(0x0004567a)},
    {rawbits_to_double(0x37c159ea00000000), rawbits_to_float(0x0004567a)},
    {rawbits_to_double(0x37c159ea00000001), rawbits_to_float(0x0004567b)},
    {rawbits_to_double(0x37c159ec00000000), rawbits_to_float(0x0004567b)},
    //  - The smallest double which rounds up to become a subnormal float.
    {rawbits_to_double(0x3690000000000001), rawbits_to_float(0x00000001)},

    // Check NaN payload preservation.
    {rawbits_to_double(0x7ff82468a0000000), rawbits_to_float(0x7fc12345)},
    {rawbits_to_double(0x7ff82468bfffffff), rawbits_to_float(0x7fc12345)},
    //  - Signalling NaNs become quiet NaNs.
    {rawbits_to_double(0x7ff02468a0000000), rawbits_to_float(0x7fc12345)},
    {rawbits_to_double(0x7ff02468bfffffff), rawbits_to_float(0x7fc12345)},
    {rawbits_to_double(0x7ff000001fffffff), rawbits_to_float(0x7fc00000)},
  };
  int count = sizeof(test) / sizeof(test[0]);

  for (int i = 0; i < count; i++) {
    double in = test[i].in;
    float expected = test[i].expected;

    // We only expect positive input.
    ASSERT(std::signbit(in) == 0);
    ASSERT(std::signbit(expected) == 0);

    SETUP();
    START();

    __ Fmov(d10, in);
    __ Fcvt(s20, d10);

    __ Fmov(d11, -in);
    __ Fcvt(s21, d11);

    END();
    RUN();
    ASSERT_EQUAL_FP32(expected, s20);
    ASSERT_EQUAL_FP32(-expected, s21);
    TEARDOWN();
  }
}


TEST(fcvtas) {
  INIT_V8();
  SETUP();

  START();
  __ Fmov(s0, 1.0);
  __ Fmov(s1, 1.1);
  __ Fmov(s2, 2.5);
  __ Fmov(s3, -2.5);
  __ Fmov(s4, kFP32PositiveInfinity);
  __ Fmov(s5, kFP32NegativeInfinity);
  __ Fmov(s6, 0x7fffff80);  // Largest float < INT32_MAX.
  __ Fneg(s7, s6);          // Smallest float > INT32_MIN.
  __ Fmov(d8, 1.0);
  __ Fmov(d9, 1.1);
  __ Fmov(d10, 2.5);
  __ Fmov(d11, -2.5);
  __ Fmov(d12, kFP64PositiveInfinity);
  __ Fmov(d13, kFP64NegativeInfinity);
  __ Fmov(d14, kWMaxInt - 1);
  __ Fmov(d15, kWMinInt + 1);
  __ Fmov(s17, 1.1);
  __ Fmov(s18, 2.5);
  __ Fmov(s19, -2.5);
  __ Fmov(s20, kFP32PositiveInfinity);
  __ Fmov(s21, kFP32NegativeInfinity);
  __ Fmov(s22, 0x7fffff8000000000UL);   // Largest float < INT64_MAX.
  __ Fneg(s23, s22);                    // Smallest float > INT64_MIN.
  __ Fmov(d24, 1.1);
  __ Fmov(d25, 2.5);
  __ Fmov(d26, -2.5);
  __ Fmov(d27, kFP64PositiveInfinity);
  __ Fmov(d28, kFP64NegativeInfinity);
  __ Fmov(d29, 0x7ffffffffffffc00UL);   // Largest double < INT64_MAX.
  __ Fneg(d30, d29);                    // Smallest double > INT64_MIN.

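  // Fcvtas converts to a signed integer, rounding to nearest with ties away
  // from zero (2.5 -> 3, -2.5 -> -3); out-of-range inputs saturate.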
  __ Fcvtas(w0, s0);
  __ Fcvtas(w1, s1);
  __ Fcvtas(w2, s2);
  __ Fcvtas(w3, s3);
  __ Fcvtas(w4, s4);
  __ Fcvtas(w5, s5);
  __ Fcvtas(w6, s6);
  __ Fcvtas(w7, s7);
  __ Fcvtas(w8, d8);
  __ Fcvtas(w9, d9);
  __ Fcvtas(w10, d10);
  __ Fcvtas(w11, d11);
  __ Fcvtas(w12, d12);
  __ Fcvtas(w13, d13);
  __ Fcvtas(w14, d14);
  __ Fcvtas(w15, d15);
  __ Fcvtas(x17, s17);
  __ Fcvtas(x18, s18);
  __ Fcvtas(x19, s19);
  __ Fcvtas(x20, s20);
  __ Fcvtas(x21, s21);
  __ Fcvtas(x22, s22);
  __ Fcvtas(x23, s23);
  __ Fcvtas(x24, d24);
  __ Fcvtas(x25, d25);
  __ Fcvtas(x26, d26);
  __ Fcvtas(x27, d27);
  __ Fcvtas(x28, d28);
  __ Fcvtas(x29, d29);
  __ Fcvtas(x30, d30);
  END();

  RUN();

  ASSERT_EQUAL_64(1, x0);
  ASSERT_EQUAL_64(1, x1);
  ASSERT_EQUAL_64(3, x2);
  ASSERT_EQUAL_64(0xfffffffd, x3);
  ASSERT_EQUAL_64(0x7fffffff, x4);
  ASSERT_EQUAL_64(0x80000000, x5);
  ASSERT_EQUAL_64(0x7fffff80, x6);
  ASSERT_EQUAL_64(0x80000080, x7);
  ASSERT_EQUAL_64(1, x8);
  ASSERT_EQUAL_64(1, x9);
  ASSERT_EQUAL_64(3, x10);
  ASSERT_EQUAL_64(0xfffffffd, x11);
  ASSERT_EQUAL_64(0x7fffffff, x12);
  ASSERT_EQUAL_64(0x80000000, x13);
  ASSERT_EQUAL_64(0x7ffffffe, x14);
  ASSERT_EQUAL_64(0x80000001, x15);
  ASSERT_EQUAL_64(1, x17);
  ASSERT_EQUAL_64(3, x18);
  ASSERT_EQUAL_64(0xfffffffffffffffdUL, x19);
  ASSERT_EQUAL_64(0x7fffffffffffffffUL, x20);
  ASSERT_EQUAL_64(0x8000000000000000UL, x21);
  ASSERT_EQUAL_64(0x7fffff8000000000UL, x22);
  ASSERT_EQUAL_64(0x8000008000000000UL, x23);
  ASSERT_EQUAL_64(1, x24);
  ASSERT_EQUAL_64(3, x25);
  ASSERT_EQUAL_64(0xfffffffffffffffdUL, x26);
  ASSERT_EQUAL_64(0x7fffffffffffffffUL, x27);
  ASSERT_EQUAL_64(0x8000000000000000UL, x28);
  ASSERT_EQUAL_64(0x7ffffffffffffc00UL, x29);
  ASSERT_EQUAL_64(0x8000000000000400UL, x30);

  TEARDOWN();
}


TEST(fcvtau) {
  INIT_V8();
  SETUP();

  START();
  __ Fmov(s0, 1.0);
  __ Fmov(s1, 1.1);
  __ Fmov(s2, 2.5);
  __ Fmov(s3, -2.5);
  __ Fmov(s4, kFP32PositiveInfinity);
  __ Fmov(s5, kFP32NegativeInfinity);
  __ Fmov(s6, 0xffffff00);  // Largest float < UINT32_MAX.
  __ Fmov(d8, 1.0);
  __ Fmov(d9, 1.1);
  __ Fmov(d10, 2.5);
  __ Fmov(d11, -2.5);
  __ Fmov(d12, kFP64PositiveInfinity);
  __ Fmov(d13, kFP64NegativeInfinity);
  __ Fmov(d14, 0xfffffffe);
  __ Fmov(s16, 1.0);
  __ Fmov(s17, 1.1);
  __ Fmov(s18, 2.5);
  __ Fmov(s19, -2.5);
  __ Fmov(s20, kFP32PositiveInfinity);
  __ Fmov(s21, kFP32NegativeInfinity);
  __ Fmov(s22, 0xffffff0000000000UL);  // Largest float < UINT64_MAX.
  __ Fmov(d24, 1.1);
  __ Fmov(d25, 2.5);
  __ Fmov(d26, -2.5);
  __ Fmov(d27, kFP64PositiveInfinity);
  __ Fmov(d28, kFP64NegativeInfinity);
  __ Fmov(d29, 0xfffffffffffff800UL);  // Largest double < UINT64_MAX.
  __ Fmov(s30, 0x100000000UL);

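  // Fcvtau converts to an unsigned integer, rounding to nearest with ties
  // away from zero; results that would be negative saturate to zero.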
  __ Fcvtau(w0, s0);
  __ Fcvtau(w1, s1);
  __ Fcvtau(w2, s2);
  __ Fcvtau(w3, s3);
  __ Fcvtau(w4, s4);
  __ Fcvtau(w5, s5);
  __ Fcvtau(w6, s6);
  __ Fcvtau(w8, d8);
  __ Fcvtau(w9, d9);
  __ Fcvtau(w10, d10);
  __ Fcvtau(w11, d11);
  __ Fcvtau(w12, d12);
  __ Fcvtau(w13, d13);
  __ Fcvtau(w14, d14);
  __ Fcvtau(x16, s16);
  __ Fcvtau(x17, s17);
  __ Fcvtau(x18, s18);
  __ Fcvtau(x19, s19);
  __ Fcvtau(x20, s20);
  __ Fcvtau(x21, s21);
  __ Fcvtau(x22, s22);
  __ Fcvtau(x24, d24);
  __ Fcvtau(x25, d25);
  __ Fcvtau(x26, d26);
  __ Fcvtau(x27, d27);
  __ Fcvtau(x28, d28);
  __ Fcvtau(x29, d29);
  __ Fcvtau(w30, s30);
  END();

  RUN();

  ASSERT_EQUAL_64(1, x0);
  ASSERT_EQUAL_64(1, x1);
  ASSERT_EQUAL_64(3, x2);
  ASSERT_EQUAL_64(0, x3);
  ASSERT_EQUAL_64(0xffffffff, x4);
  ASSERT_EQUAL_64(0, x5);
  ASSERT_EQUAL_64(0xffffff00, x6);
  ASSERT_EQUAL_64(1, x8);
  ASSERT_EQUAL_64(1, x9);
  ASSERT_EQUAL_64(3, x10);
  ASSERT_EQUAL_64(0, x11);
  ASSERT_EQUAL_64(0xffffffff, x12);
  ASSERT_EQUAL_64(0, x13);
  ASSERT_EQUAL_64(0xfffffffe, x14);
  ASSERT_EQUAL_64(1, x16);
  ASSERT_EQUAL_64(1, x17);
  ASSERT_EQUAL_64(3, x18);
  ASSERT_EQUAL_64(0, x19);
  ASSERT_EQUAL_64(0xffffffffffffffffUL, x20);
  ASSERT_EQUAL_64(0, x21);
  ASSERT_EQUAL_64(0xffffff0000000000UL, x22);
  ASSERT_EQUAL_64(1, x24);
  ASSERT_EQUAL_64(3, x25);
  ASSERT_EQUAL_64(0, x26);
  ASSERT_EQUAL_64(0xffffffffffffffffUL, x27);
  ASSERT_EQUAL_64(0, x28);
  ASSERT_EQUAL_64(0xfffffffffffff800UL, x29);
  ASSERT_EQUAL_64(0xffffffff, x30);

  TEARDOWN();
}


TEST(fcvtms) {
  INIT_V8();
  SETUP();

  START();
  __ Fmov(s0, 1.0);
  __ Fmov(s1, 1.1);
  __ Fmov(s2, 1.5);
  __ Fmov(s3, -1.5);
  __ Fmov(s4, kFP32PositiveInfinity);
  __ Fmov(s5, kFP32NegativeInfinity);
  __ Fmov(s6, 0x7fffff80);  // Largest float < INT32_MAX.
  __ Fneg(s7, s6);          // Smallest float > INT32_MIN.
  __ Fmov(d8, 1.0);
  __ Fmov(d9, 1.1);
  __ Fmov(d10, 1.5);
  __ Fmov(d11, -1.5);
  __ Fmov(d12, kFP64PositiveInfinity);
  __ Fmov(d13, kFP64NegativeInfinity);
  __ Fmov(d14, kWMaxInt - 1);
  __ Fmov(d15, kWMinInt + 1);
  __ Fmov(s17, 1.1);
  __ Fmov(s18, 1.5);
  __ Fmov(s19, -1.5);
  __ Fmov(s20, kFP32PositiveInfinity);
  __ Fmov(s21, kFP32NegativeInfinity);
  __ Fmov(s22, 0x7fffff8000000000UL);   // Largest float < INT64_MAX.
  __ Fneg(s23, s22);                    // Smallest float > INT64_MIN.
  __ Fmov(d24, 1.1);
  __ Fmov(d25, 1.5);
  __ Fmov(d26, -1.5);
  __ Fmov(d27, kFP64PositiveInfinity);
  __ Fmov(d28, kFP64NegativeInfinity);
  __ Fmov(d29, 0x7ffffffffffffc00UL);   // Largest double < INT64_MAX.
  __ Fneg(d30, d29);                    // Smallest double > INT64_MIN.

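  // Fcvtms converts to a signed integer, rounding towards minus infinity
  // (1.5 -> 1, -1.5 -> -2); out-of-range inputs saturate.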
  __ Fcvtms(w0, s0);
  __ Fcvtms(w1, s1);
  __ Fcvtms(w2, s2);
  __ Fcvtms(w3, s3);
  __ Fcvtms(w4, s4);
  __ Fcvtms(w5, s5);
  __ Fcvtms(w6, s6);
  __ Fcvtms(w7, s7);
  __ Fcvtms(w8, d8);
  __ Fcvtms(w9, d9);
  __ Fcvtms(w10, d10);
  __ Fcvtms(w11, d11);
  __ Fcvtms(w12, d12);
  __ Fcvtms(w13, d13);
  __ Fcvtms(w14, d14);
  __ Fcvtms(w15, d15);
  __ Fcvtms(x17, s17);
  __ Fcvtms(x18, s18);
  __ Fcvtms(x19, s19);
  __ Fcvtms(x20, s20);
  __ Fcvtms(x21, s21);
  __ Fcvtms(x22, s22);
  __ Fcvtms(x23, s23);
  __ Fcvtms(x24, d24);
  __ Fcvtms(x25, d25);
  __ Fcvtms(x26, d26);
  __ Fcvtms(x27, d27);
  __ Fcvtms(x28, d28);
  __ Fcvtms(x29, d29);
  __ Fcvtms(x30, d30);
  END();

  RUN();

  ASSERT_EQUAL_64(1, x0);
  ASSERT_EQUAL_64(1, x1);
  ASSERT_EQUAL_64(1, x2);
  ASSERT_EQUAL_64(0xfffffffe, x3);
  ASSERT_EQUAL_64(0x7fffffff, x4);
  ASSERT_EQUAL_64(0x80000000, x5);
  ASSERT_EQUAL_64(0x7fffff80, x6);
  ASSERT_EQUAL_64(0x80000080, x7);
  ASSERT_EQUAL_64(1, x8);
  ASSERT_EQUAL_64(1, x9);
  ASSERT_EQUAL_64(1, x10);
  ASSERT_EQUAL_64(0xfffffffe, x11);
  ASSERT_EQUAL_64(0x7fffffff, x12);
  ASSERT_EQUAL_64(0x80000000, x13);
  ASSERT_EQUAL_64(0x7ffffffe, x14);
  ASSERT_EQUAL_64(0x80000001, x15);
  ASSERT_EQUAL_64(1, x17);
  ASSERT_EQUAL_64(1, x18);
  ASSERT_EQUAL_64(0xfffffffffffffffeUL, x19);
  ASSERT_EQUAL_64(0x7fffffffffffffffUL, x20);
  ASSERT_EQUAL_64(0x8000000000000000UL, x21);
  ASSERT_EQUAL_64(0x7fffff8000000000UL, x22);
  ASSERT_EQUAL_64(0x8000008000000000UL, x23);
  ASSERT_EQUAL_64(1, x24);
  ASSERT_EQUAL_64(1, x25);
  ASSERT_EQUAL_64(0xfffffffffffffffeUL, x26);
  ASSERT_EQUAL_64(0x7fffffffffffffffUL, x27);
  ASSERT_EQUAL_64(0x8000000000000000UL, x28);
  ASSERT_EQUAL_64(0x7ffffffffffffc00UL, x29);
  ASSERT_EQUAL_64(0x8000000000000400UL, x30);

  TEARDOWN();
}


TEST(fcvtmu) {
  INIT_V8();
  SETUP();

  START();
  __ Fmov(s0, 1.0);
  __ Fmov(s1, 1.1);
  __ Fmov(s2, 1.5);
  __ Fmov(s3, -1.5);
  __ Fmov(s4, kFP32PositiveInfinity);
  __ Fmov(s5, kFP32NegativeInfinity);
  __ Fmov(s6, 0x7fffff80);  // Largest float < INT32_MAX.
  __ Fneg(s7, s6);          // Smallest float > INT32_MIN.
  __ Fmov(d8, 1.0);
  __ Fmov(d9, 1.1);
  __ Fmov(d10, 1.5);
  __ Fmov(d11, -1.5);
  __ Fmov(d12, kFP64PositiveInfinity);
  __ Fmov(d13, kFP64NegativeInfinity);
  __ Fmov(d14, kWMaxInt - 1);
  __ Fmov(d15, kWMinInt + 1);
  __ Fmov(s17, 1.1);
  __ Fmov(s18, 1.5);
  __ Fmov(s19, -1.5);
  __ Fmov(s20, kFP32PositiveInfinity);
  __ Fmov(s21, kFP32NegativeInfinity);
  __ Fmov(s22, 0x7fffff8000000000UL);   // Largest float < INT64_MAX.
  __ Fneg(s23, s22);                    // Smallest float > INT64_MIN.
  __ Fmov(d24, 1.1);
  __ Fmov(d25, 1.5);
  __ Fmov(d26, -1.5);
  __ Fmov(d27, kFP64PositiveInfinity);
  __ Fmov(d28, kFP64NegativeInfinity);
  __ Fmov(d29, 0x7ffffffffffffc00UL);   // Largest double < INT64_MAX.
  __ Fneg(d30, d29);                    // Smallest double > INT64_MIN.

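  // Fcvtmu converts to an unsigned integer, rounding towards minus infinity;
  // results that would be negative saturate to zero.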
  __ Fcvtmu(w0, s0);
  __ Fcvtmu(w1, s1);
  __ Fcvtmu(w2, s2);
  __ Fcvtmu(w3, s3);
  __ Fcvtmu(w4, s4);
  __ Fcvtmu(w5, s5);
  __ Fcvtmu(w6, s6);
  __ Fcvtmu(w7, s7);
  __ Fcvtmu(w8, d8);
  __ Fcvtmu(w9, d9);
  __ Fcvtmu(w10, d10);
  __ Fcvtmu(w11, d11);
  __ Fcvtmu(w12, d12);
  __ Fcvtmu(w13, d13);
  __ Fcvtmu(w14, d14);
  __ Fcvtmu(x17, s17);
  __ Fcvtmu(x18, s18);
  __ Fcvtmu(x19, s19);
  __ Fcvtmu(x20, s20);
  __ Fcvtmu(x21, s21);
  __ Fcvtmu(x22, s22);
  __ Fcvtmu(x23, s23);
  __ Fcvtmu(x24, d24);
  __ Fcvtmu(x25, d25);
  __ Fcvtmu(x26, d26);
  __ Fcvtmu(x27, d27);
  __ Fcvtmu(x28, d28);
  __ Fcvtmu(x29, d29);
  __ Fcvtmu(x30, d30);
  END();

  RUN();

  ASSERT_EQUAL_64(1, x0);
  ASSERT_EQUAL_64(1, x1);
  ASSERT_EQUAL_64(1, x2);
  ASSERT_EQUAL_64(0, x3);
  ASSERT_EQUAL_64(0xffffffff, x4);
  ASSERT_EQUAL_64(0, x5);
  ASSERT_EQUAL_64(0x7fffff80, x6);
  ASSERT_EQUAL_64(0, x7);
  ASSERT_EQUAL_64(1, x8);
  ASSERT_EQUAL_64(1, x9);
  ASSERT_EQUAL_64(1, x10);
  ASSERT_EQUAL_64(0, x11);
  ASSERT_EQUAL_64(0xffffffff, x12);
  ASSERT_EQUAL_64(0, x13);
  ASSERT_EQUAL_64(0x7ffffffe, x14);
  ASSERT_EQUAL_64(1, x17);
  ASSERT_EQUAL_64(1, x18);
  ASSERT_EQUAL_64(0x0UL, x19);
  ASSERT_EQUAL_64(0xffffffffffffffffUL, x20);
  ASSERT_EQUAL_64(0x0UL, x21);
  ASSERT_EQUAL_64(0x7fffff8000000000UL, x22);
  ASSERT_EQUAL_64(0x0UL, x23);
  ASSERT_EQUAL_64(1, x24);
  ASSERT_EQUAL_64(1, x25);
  ASSERT_EQUAL_64(0x0UL, x26);
  ASSERT_EQUAL_64(0xffffffffffffffffUL, x27);
  ASSERT_EQUAL_64(0x0UL, x28);
  ASSERT_EQUAL_64(0x7ffffffffffffc00UL, x29);
  ASSERT_EQUAL_64(0x0UL, x30);

  TEARDOWN();
}


TEST(fcvtns) {
  INIT_V8();
  SETUP();

  START();
  __ Fmov(s0, 1.0);
  __ Fmov(s1, 1.1);
  __ Fmov(s2, 1.5);
  __ Fmov(s3, -1.5);
  __ Fmov(s4, kFP32PositiveInfinity);
  __ Fmov(s5, kFP32NegativeInfinity);
  __ Fmov(s6, 0x7fffff80);  // Largest float < INT32_MAX.
  __ Fneg(s7, s6);          // Smallest float > INT32_MIN.
  __ Fmov(d8, 1.0);
  __ Fmov(d9, 1.1);
  __ Fmov(d10, 1.5);
  __ Fmov(d11, -1.5);
  __ Fmov(d12, kFP64PositiveInfinity);
  __ Fmov(d13, kFP64NegativeInfinity);
  __ Fmov(d14, kWMaxInt - 1);
  __ Fmov(d15, kWMinInt + 1);
  __ Fmov(s17, 1.1);
  __ Fmov(s18, 1.5);
  __ Fmov(s19, -1.5);
  __ Fmov(s20, kFP32PositiveInfinity);
  __ Fmov(s21, kFP32NegativeInfinity);
  __ Fmov(s22, 0x7fffff8000000000UL);   // Largest float < INT64_MAX.
  __ Fneg(s23, s22);                    // Smallest float > INT64_MIN.
  __ Fmov(d24, 1.1);
  __ Fmov(d25, 1.5);
  __ Fmov(d26, -1.5);
  __ Fmov(d27, kFP64PositiveInfinity);
  __ Fmov(d28, kFP64NegativeInfinity);
  __ Fmov(d29, 0x7ffffffffffffc00UL);   // Largest double < INT64_MAX.
  __ Fneg(d30, d29);                    // Smallest double > INT64_MIN.

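  // Fcvtns converts to a signed integer, rounding to nearest with ties to
  // even (1.5 -> 2, -1.5 -> -2); out-of-range inputs saturate.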
  __ Fcvtns(w0, s0);
  __ Fcvtns(w1, s1);
  __ Fcvtns(w2, s2);
  __ Fcvtns(w3, s3);
  __ Fcvtns(w4, s4);
  __ Fcvtns(w5, s5);
  __ Fcvtns(w6, s6);
  __ Fcvtns(w7, s7);
  __ Fcvtns(w8, d8);
  __ Fcvtns(w9, d9);
  __ Fcvtns(w10, d10);
  __ Fcvtns(w11, d11);
  __ Fcvtns(w12, d12);
  __ Fcvtns(w13, d13);
  __ Fcvtns(w14, d14);
  __ Fcvtns(w15, d15);
  __ Fcvtns(x17, s17);
  __ Fcvtns(x18, s18);
  __ Fcvtns(x19, s19);
  __ Fcvtns(x20, s20);
  __ Fcvtns(x21, s21);
  __ Fcvtns(x22, s22);
  __ Fcvtns(x23, s23);
  __ Fcvtns(x24, d24);
  __ Fcvtns(x25, d25);
  __ Fcvtns(x26, d26);
  __ Fcvtns(x27, d27);
//  __ Fcvtns(x28, d28);
  __ Fcvtns(x29, d29);
  __ Fcvtns(x30, d30);
  END();

  RUN();

  ASSERT_EQUAL_64(1, x0);
  ASSERT_EQUAL_64(1, x1);
  ASSERT_EQUAL_64(2, x2);
  ASSERT_EQUAL_64(0xfffffffe, x3);
  ASSERT_EQUAL_64(0x7fffffff, x4);
  ASSERT_EQUAL_64(0x80000000, x5);
  ASSERT_EQUAL_64(0x7fffff80, x6);
  ASSERT_EQUAL_64(0x80000080, x7);
  ASSERT_EQUAL_64(1, x8);
  ASSERT_EQUAL_64(1, x9);
  ASSERT_EQUAL_64(2, x10);
  ASSERT_EQUAL_64(0xfffffffe, x11);
  ASSERT_EQUAL_64(0x7fffffff, x12);
  ASSERT_EQUAL_64(0x80000000, x13);
  ASSERT_EQUAL_64(0x7ffffffe, x14);
  ASSERT_EQUAL_64(0x80000001, x15);
  ASSERT_EQUAL_64(1, x17);
  ASSERT_EQUAL_64(2, x18);
  ASSERT_EQUAL_64(0xfffffffffffffffeUL, x19);
  ASSERT_EQUAL_64(0x7fffffffffffffffUL, x20);
  ASSERT_EQUAL_64(0x8000000000000000UL, x21);
  ASSERT_EQUAL_64(0x7fffff8000000000UL, x22);
  ASSERT_EQUAL_64(0x8000008000000000UL, x23);
  ASSERT_EQUAL_64(1, x24);
  ASSERT_EQUAL_64(2, x25);
  ASSERT_EQUAL_64(0xfffffffffffffffeUL, x26);
  ASSERT_EQUAL_64(0x7fffffffffffffffUL, x27);
//  ASSERT_EQUAL_64(0x8000000000000000UL, x28);
  ASSERT_EQUAL_64(0x7ffffffffffffc00UL, x29);
  ASSERT_EQUAL_64(0x8000000000000400UL, x30);

  TEARDOWN();
}


TEST(fcvtnu) {
  INIT_V8();
  SETUP();

  START();
  __ Fmov(s0, 1.0);
  __ Fmov(s1, 1.1);
  __ Fmov(s2, 1.5);
  __ Fmov(s3, -1.5);
  __ Fmov(s4, kFP32PositiveInfinity);
  __ Fmov(s5, kFP32NegativeInfinity);
  __ Fmov(s6, 0xffffff00);  // Largest float < UINT32_MAX.
  __ Fmov(d8, 1.0);
  __ Fmov(d9, 1.1);
  __ Fmov(d10, 1.5);
  __ Fmov(d11, -1.5);
  __ Fmov(d12, kFP64PositiveInfinity);
  __ Fmov(d13, kFP64NegativeInfinity);
  __ Fmov(d14, 0xfffffffe);
  __ Fmov(s16, 1.0);
  __ Fmov(s17, 1.1);
  __ Fmov(s18, 1.5);
  __ Fmov(s19, -1.5);
  __ Fmov(s20, kFP32PositiveInfinity);
  __ Fmov(s21, kFP32NegativeInfinity);
  __ Fmov(s22, 0xffffff0000000000UL);   // Largest float < UINT64_MAX.
  __ Fmov(d24, 1.1);
  __ Fmov(d25, 1.5);
  __ Fmov(d26, -1.5);
  __ Fmov(d27, kFP64PositiveInfinity);
  __ Fmov(d28, kFP64NegativeInfinity);
  __ Fmov(d29, 0xfffffffffffff800UL);   // Largest double < UINT64_MAX.
  __ Fmov(s30, 0x100000000UL);

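  // Fcvtnu converts to an unsigned integer, rounding to nearest with ties to
  // even; results that would be negative saturate to zero.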
  __ Fcvtnu(w0, s0);
  __ Fcvtnu(w1, s1);
  __ Fcvtnu(w2, s2);
  __ Fcvtnu(w3, s3);
  __ Fcvtnu(w4, s4);
  __ Fcvtnu(w5, s5);
  __ Fcvtnu(w6, s6);
  __ Fcvtnu(w8, d8);
  __ Fcvtnu(w9, d9);
  __ Fcvtnu(w10, d10);
  __ Fcvtnu(w11, d11);
  __ Fcvtnu(w12, d12);
  __ Fcvtnu(w13, d13);
  __ Fcvtnu(w14, d14);
  __ Fcvtnu(x16, s16);
  __ Fcvtnu(x17, s17);
  __ Fcvtnu(x18, s18);
  __ Fcvtnu(x19, s19);
  __ Fcvtnu(x20, s20);
  __ Fcvtnu(x21, s21);
  __ Fcvtnu(x22, s22);
  __ Fcvtnu(x24, d24);
  __ Fcvtnu(x25, d25);
  __ Fcvtnu(x26, d26);
  __ Fcvtnu(x27, d27);
//  __ Fcvtnu(x28, d28);
  __ Fcvtnu(x29, d29);
  __ Fcvtnu(w30, s30);
  END();

  RUN();

  ASSERT_EQUAL_64(1, x0);
  ASSERT_EQUAL_64(1, x1);
  ASSERT_EQUAL_64(2, x2);
  ASSERT_EQUAL_64(0, x3);
  ASSERT_EQUAL_64(0xffffffff, x4);
  ASSERT_EQUAL_64(0, x5);
  ASSERT_EQUAL_64(0xffffff00, x6);
  ASSERT_EQUAL_64(1, x8);
  ASSERT_EQUAL_64(1, x9);
  ASSERT_EQUAL_64(2, x10);
  ASSERT_EQUAL_64(0, x11);
  ASSERT_EQUAL_64(0xffffffff, x12);
  ASSERT_EQUAL_64(0, x13);
  ASSERT_EQUAL_64(0xfffffffe, x14);
  ASSERT_EQUAL_64(1, x16);
  ASSERT_EQUAL_64(1, x17);
  ASSERT_EQUAL_64(2, x18);
  ASSERT_EQUAL_64(0, x19);
  ASSERT_EQUAL_64(0xffffffffffffffffUL, x20);
  ASSERT_EQUAL_64(0, x21);
  ASSERT_EQUAL_64(0xffffff0000000000UL, x22);
  ASSERT_EQUAL_64(1, x24);
  ASSERT_EQUAL_64(2, x25);
  ASSERT_EQUAL_64(0, x26);
  ASSERT_EQUAL_64(0xffffffffffffffffUL, x27);
//  ASSERT_EQUAL_64(0, x28);
  ASSERT_EQUAL_64(0xfffffffffffff800UL, x29);
  ASSERT_EQUAL_64(0xffffffff, x30);

  TEARDOWN();
}


TEST(fcvtzs) {
  INIT_V8();
  SETUP();

  START();
  __ Fmov(s0, 1.0);
  __ Fmov(s1, 1.1);
  __ Fmov(s2, 1.5);
  __ Fmov(s3, -1.5);
  __ Fmov(s4, kFP32PositiveInfinity);
  __ Fmov(s5, kFP32NegativeInfinity);
  __ Fmov(s6, 0x7fffff80);  // Largest float < INT32_MAX.
  __ Fneg(s7, s6);          // Smallest float > INT32_MIN.
  __ Fmov(d8, 1.0);
  __ Fmov(d9, 1.1);
  __ Fmov(d10, 1.5);
  __ Fmov(d11, -1.5);
  __ Fmov(d12, kFP64PositiveInfinity);
  __ Fmov(d13, kFP64NegativeInfinity);
  __ Fmov(d14, kWMaxInt - 1);
  __ Fmov(d15, kWMinInt + 1);
  __ Fmov(s17, 1.1);
  __ Fmov(s18, 1.5);
  __ Fmov(s19, -1.5);
  __ Fmov(s20, kFP32PositiveInfinity);
  __ Fmov(s21, kFP32NegativeInfinity);
  __ Fmov(s22, 0x7fffff8000000000UL);   // Largest float < INT64_MAX.
  __ Fneg(s23, s22);                    // Smallest float > INT64_MIN.
  __ Fmov(d24, 1.1);
  __ Fmov(d25, 1.5);
  __ Fmov(d26, -1.5);
  __ Fmov(d27, kFP64PositiveInfinity);
  __ Fmov(d28, kFP64NegativeInfinity);
  __ Fmov(d29, 0x7ffffffffffffc00UL);   // Largest double < INT64_MAX.
  __ Fneg(d30, d29);                    // Smallest double > INT64_MIN.

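  // Fcvtzs converts to a signed integer, rounding towards zero
  // (1.5 -> 1, -1.5 -> -1); out-of-range inputs saturate.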
  __ Fcvtzs(w0, s0);
  __ Fcvtzs(w1, s1);
  __ Fcvtzs(w2, s2);
  __ Fcvtzs(w3, s3);
  __ Fcvtzs(w4, s4);
  __ Fcvtzs(w5, s5);
  __ Fcvtzs(w6, s6);
  __ Fcvtzs(w7, s7);
  __ Fcvtzs(w8, d8);
  __ Fcvtzs(w9, d9);
  __ Fcvtzs(w10, d10);
  __ Fcvtzs(w11, d11);
  __ Fcvtzs(w12, d12);
  __ Fcvtzs(w13, d13);
  __ Fcvtzs(w14, d14);
  __ Fcvtzs(w15, d15);
  __ Fcvtzs(x17, s17);
  __ Fcvtzs(x18, s18);
  __ Fcvtzs(x19, s19);
  __ Fcvtzs(x20, s20);
  __ Fcvtzs(x21, s21);
  __ Fcvtzs(x22, s22);
  __ Fcvtzs(x23, s23);
  __ Fcvtzs(x24, d24);
  __ Fcvtzs(x25, d25);
  __ Fcvtzs(x26, d26);
  __ Fcvtzs(x27, d27);
  __ Fcvtzs(x28, d28);
  __ Fcvtzs(x29, d29);
  __ Fcvtzs(x30, d30);
  END();

  RUN();

  ASSERT_EQUAL_64(1, x0);
  ASSERT_EQUAL_64(1, x1);
  ASSERT_EQUAL_64(1, x2);
  ASSERT_EQUAL_64(0xffffffff, x3);
  ASSERT_EQUAL_64(0x7fffffff, x4);
  ASSERT_EQUAL_64(0x80000000, x5);
  ASSERT_EQUAL_64(0x7fffff80, x6);
  ASSERT_EQUAL_64(0x80000080, x7);
  ASSERT_EQUAL_64(1, x8);
  ASSERT_EQUAL_64(1, x9);
  ASSERT_EQUAL_64(1, x10);
  ASSERT_EQUAL_64(0xffffffff, x11);
  ASSERT_EQUAL_64(0x7fffffff, x12);
  ASSERT_EQUAL_64(0x80000000, x13);
  ASSERT_EQUAL_64(0x7ffffffe, x14);
  ASSERT_EQUAL_64(0x80000001, x15);
  ASSERT_EQUAL_64(1, x17);
  ASSERT_EQUAL_64(1, x18);
  ASSERT_EQUAL_64(0xffffffffffffffffUL, x19);
  ASSERT_EQUAL_64(0x7fffffffffffffffUL, x20);
  ASSERT_EQUAL_64(0x8000000000000000UL, x21);
  ASSERT_EQUAL_64(0x7fffff8000000000UL, x22);
  ASSERT_EQUAL_64(0x8000008000000000UL, x23);
  ASSERT_EQUAL_64(1, x24);
  ASSERT_EQUAL_64(1, x25);
  ASSERT_EQUAL_64(0xffffffffffffffffUL, x26);
  ASSERT_EQUAL_64(0x7fffffffffffffffUL, x27);
  ASSERT_EQUAL_64(0x8000000000000000UL, x28);
  ASSERT_EQUAL_64(0x7ffffffffffffc00UL, x29);
  ASSERT_EQUAL_64(0x8000000000000400UL, x30);

  TEARDOWN();
}


TEST(fcvtzu) {
  INIT_V8();
  SETUP();

  START();
  __ Fmov(s0, 1.0);
  __ Fmov(s1, 1.1);
  __ Fmov(s2, 1.5);
  __ Fmov(s3, -1.5);
  __ Fmov(s4, kFP32PositiveInfinity);
  __ Fmov(s5, kFP32NegativeInfinity);
  __ Fmov(s6, 0x7fffff80);  // Largest float < INT32_MAX.
  __ Fneg(s7, s6);          // Smallest float > INT32_MIN.
  __ Fmov(d8, 1.0);
  __ Fmov(d9, 1.1);
  __ Fmov(d10, 1.5);
  __ Fmov(d11, -1.5);
  __ Fmov(d12, kFP64PositiveInfinity);
  __ Fmov(d13, kFP64NegativeInfinity);
  __ Fmov(d14, kWMaxInt - 1);
  __ Fmov(d15, kWMinInt + 1);
  __ Fmov(s17, 1.1);
  __ Fmov(s18, 1.5);
  __ Fmov(s19, -1.5);
  __ Fmov(s20, kFP32PositiveInfinity);
  __ Fmov(s21, kFP32NegativeInfinity);
  __ Fmov(s22, 0x7fffff8000000000UL);   // Largest float < INT64_MAX.
  __ Fneg(s23, s22);                    // Smallest float > INT64_MIN.
  __ Fmov(d24, 1.1);
  __ Fmov(d25, 1.5);
  __ Fmov(d26, -1.5);
  __ Fmov(d27, kFP64PositiveInfinity);
  __ Fmov(d28, kFP64NegativeInfinity);
  __ Fmov(d29, 0x7ffffffffffffc00UL);   // Largest double < INT64_MAX.
  __ Fneg(d30, d29);                    // Smallest double > INT64_MIN.

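  // Fcvtzu converts to an unsigned integer, rounding towards zero; results
  // that would be negative saturate to zero.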
  __ Fcvtzu(w0, s0);
  __ Fcvtzu(w1, s1);
  __ Fcvtzu(w2, s2);
  __ Fcvtzu(w3, s3);
  __ Fcvtzu(w4, s4);
  __ Fcvtzu(w5, s5);
  __ Fcvtzu(w6, s6);
  __ Fcvtzu(w7, s7);
  __ Fcvtzu(w8, d8);
  __ Fcvtzu(w9, d9);
  __ Fcvtzu(w10, d10);
  __ Fcvtzu(w11, d11);
  __ Fcvtzu(w12, d12);
  __ Fcvtzu(w13, d13);
  __ Fcvtzu(w14, d14);
  __ Fcvtzu(x17, s17);
  __ Fcvtzu(x18, s18);
  __ Fcvtzu(x19, s19);
  __ Fcvtzu(x20, s20);
  __ Fcvtzu(x21, s21);
  __ Fcvtzu(x22, s22);
  __ Fcvtzu(x23, s23);
  __ Fcvtzu(x24, d24);
  __ Fcvtzu(x25, d25);
  __ Fcvtzu(x26, d26);
  __ Fcvtzu(x27, d27);
  __ Fcvtzu(x28, d28);
  __ Fcvtzu(x29, d29);
  __ Fcvtzu(x30, d30);
  END();

  RUN();

  ASSERT_EQUAL_64(1, x0);
  ASSERT_EQUAL_64(1, x1);
  ASSERT_EQUAL_64(1, x2);
  ASSERT_EQUAL_64(0, x3);
  ASSERT_EQUAL_64(0xffffffff, x4);
  ASSERT_EQUAL_64(0, x5);
  ASSERT_EQUAL_64(0x7fffff80, x6);
  ASSERT_EQUAL_64(0, x7);
  ASSERT_EQUAL_64(1, x8);
  ASSERT_EQUAL_64(1, x9);
  ASSERT_EQUAL_64(1, x10);
  ASSERT_EQUAL_64(0, x11);
  ASSERT_EQUAL_64(0xffffffff, x12);
  ASSERT_EQUAL_64(0, x13);
  ASSERT_EQUAL_64(0x7ffffffe, x14);
  ASSERT_EQUAL_64(1, x17);
  ASSERT_EQUAL_64(1, x18);
  ASSERT_EQUAL_64(0x0UL, x19);
  ASSERT_EQUAL_64(0xffffffffffffffffUL, x20);
  ASSERT_EQUAL_64(0x0UL, x21);
  ASSERT_EQUAL_64(0x7fffff8000000000UL, x22);
  ASSERT_EQUAL_64(0x0UL, x23);
  ASSERT_EQUAL_64(1, x24);
  ASSERT_EQUAL_64(1, x25);
  ASSERT_EQUAL_64(0x0UL, x26);
  ASSERT_EQUAL_64(0xffffffffffffffffUL, x27);
  ASSERT_EQUAL_64(0x0UL, x28);
  ASSERT_EQUAL_64(0x7ffffffffffffc00UL, x29);
  ASSERT_EQUAL_64(0x0UL, x30);

  TEARDOWN();
}


// Test that scvtf and ucvtf can convert the 64-bit input into the expected
// value. All possible values of 'fbits' are tested. The expected value is
// modified accordingly in each case.
//
// The expected value is specified as the bit encoding of the expected double
// produced by scvtf (expected_scvtf_bits) as well as ucvtf
// (expected_ucvtf_bits).
//
// Where the input value is representable by int32_t or uint32_t, conversions
// from W registers will also be tested.
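//
// With a non-zero 'fbits', the input is interpreted as a fixed-point value
// with 'fbits' fractional bits, so the conversion produces (in / 2^fbits);
// the expected values are scaled accordingly when the results are checked.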
static void TestUScvtfHelper(uint64_t in,
                             uint64_t expected_scvtf_bits,
                             uint64_t expected_ucvtf_bits) {
  uint64_t u64 = in;
  uint32_t u32 = u64 & 0xffffffff;
  int64_t s64 = static_cast<int64_t>(in);
  // Truncated copy, used below to check whether the input is representable
  // as an int32_t.
  int32_t s32 = static_cast<int32_t>(s64);
  bool cvtf_s32 = (s64 == s32);
  bool cvtf_u32 = (u64 == u32);

  double results_scvtf_x[65];
  double results_ucvtf_x[65];
  double results_scvtf_w[33];
  double results_ucvtf_w[33];

  SETUP();
  START();

  __ Mov(x0, reinterpret_cast<int64_t>(results_scvtf_x));
  __ Mov(x1, reinterpret_cast<int64_t>(results_ucvtf_x));
  __ Mov(x2, reinterpret_cast<int64_t>(results_scvtf_w));
  __ Mov(x3, reinterpret_cast<int64_t>(results_ucvtf_w));

  __ Mov(x10, s64);

  // Corrupt the top word, in case it is accidentally used during W-register
  // conversions.
  __ Mov(x11, 0x5555555555555555);
  __ Bfi(x11, x10, 0, kWRegSizeInBits);

  // Test integer conversions.
  __ Scvtf(d0, x10);
  __ Ucvtf(d1, x10);
  __ Scvtf(d2, w11);
  __ Ucvtf(d3, w11);
  __ Str(d0, MemOperand(x0));
  __ Str(d1, MemOperand(x1));
  __ Str(d2, MemOperand(x2));
  __ Str(d3, MemOperand(x3));

  // Test all possible values of fbits.
  for (int fbits = 1; fbits <= 32; fbits++) {
    __ Scvtf(d0, x10, fbits);
    __ Ucvtf(d1, x10, fbits);
    __ Scvtf(d2, w11, fbits);
    __ Ucvtf(d3, w11, fbits);
    __ Str(d0, MemOperand(x0, fbits * kDRegSize));
    __ Str(d1, MemOperand(x1, fbits * kDRegSize));
    __ Str(d2, MemOperand(x2, fbits * kDRegSize));
    __ Str(d3, MemOperand(x3, fbits * kDRegSize));
  }

  // Conversions from W registers can only handle fbits values <= 32, so just
  // test conversions from X registers for 32 < fbits <= 64.
  for (int fbits = 33; fbits <= 64; fbits++) {
    __ Scvtf(d0, x10, fbits);
    __ Ucvtf(d1, x10, fbits);
    __ Str(d0, MemOperand(x0, fbits * kDRegSize));
    __ Str(d1, MemOperand(x1, fbits * kDRegSize));
  }

  END();
  RUN();

  // Check the results.
  double expected_scvtf_base = rawbits_to_double(expected_scvtf_bits);
  double expected_ucvtf_base = rawbits_to_double(expected_ucvtf_bits);

  for (int fbits = 0; fbits <= 32; fbits++) {
    double expected_scvtf = expected_scvtf_base / pow(2.0, fbits);
    double expected_ucvtf = expected_ucvtf_base / pow(2.0, fbits);
    ASSERT_EQUAL_FP64(expected_scvtf, results_scvtf_x[fbits]);
    ASSERT_EQUAL_FP64(expected_ucvtf, results_ucvtf_x[fbits]);
    if (cvtf_s32) ASSERT_EQUAL_FP64(expected_scvtf, results_scvtf_w[fbits]);
    if (cvtf_u32) ASSERT_EQUAL_FP64(expected_ucvtf, results_ucvtf_w[fbits]);
  }
  for (int fbits = 33; fbits <= 64; fbits++) {
    double expected_scvtf = expected_scvtf_base / pow(2.0, fbits);
    double expected_ucvtf = expected_ucvtf_base / pow(2.0, fbits);
    ASSERT_EQUAL_FP64(expected_scvtf, results_scvtf_x[fbits]);
    ASSERT_EQUAL_FP64(expected_ucvtf, results_ucvtf_x[fbits]);
  }

  TEARDOWN();
}


TEST(scvtf_ucvtf_double) {
  INIT_V8();
  // Simple conversions of positive numbers which require no rounding; the
  // results should not depend on the rounding mode, and ucvtf and scvtf
  // should produce the same result.
  TestUScvtfHelper(0x0000000000000000, 0x0000000000000000, 0x0000000000000000);
  TestUScvtfHelper(0x0000000000000001, 0x3ff0000000000000, 0x3ff0000000000000);
  TestUScvtfHelper(0x0000000040000000, 0x41d0000000000000, 0x41d0000000000000);
  TestUScvtfHelper(0x0000000100000000, 0x41f0000000000000, 0x41f0000000000000);
  TestUScvtfHelper(0x4000000000000000, 0x43d0000000000000, 0x43d0000000000000);
  // Test mantissa extremities.
  TestUScvtfHelper(0x4000000000000400, 0x43d0000000000001, 0x43d0000000000001);
  // The largest int32_t that fits in a double.
  TestUScvtfHelper(0x000000007fffffff, 0x41dfffffffc00000, 0x41dfffffffc00000);
  // Values that would be negative if treated as an int32_t.
  TestUScvtfHelper(0x00000000ffffffff, 0x41efffffffe00000, 0x41efffffffe00000);
  TestUScvtfHelper(0x0000000080000000, 0x41e0000000000000, 0x41e0000000000000);
  TestUScvtfHelper(0x0000000080000001, 0x41e0000000200000, 0x41e0000000200000);
  // The largest int64_t that fits in a double.
  TestUScvtfHelper(0x7ffffffffffffc00, 0x43dfffffffffffff, 0x43dfffffffffffff);
  // Check for bit pattern reproduction.
  TestUScvtfHelper(0x0123456789abcde0, 0x43723456789abcde, 0x43723456789abcde);
  TestUScvtfHelper(0x0000000012345678, 0x41b2345678000000, 0x41b2345678000000);

  // Simple conversions of negative int64_t values. These require no rounding,
  // and the results should not depend on the rounding mode.
  TestUScvtfHelper(0xffffffffc0000000, 0xc1d0000000000000, 0x43effffffff80000);
  TestUScvtfHelper(0xffffffff00000000, 0xc1f0000000000000, 0x43efffffffe00000);
  TestUScvtfHelper(0xc000000000000000, 0xc3d0000000000000, 0x43e8000000000000);

  // Conversions which require rounding.
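  // For inputs near 2^60, bit 8 (0x100) is the lowest-order bit that fits in
  // the double's 53-bit significand, so low-order bits that are an odd
  // multiple of 0x80 mark an exact halfway case, which rounds to even.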
  TestUScvtfHelper(0x1000000000000000, 0x43b0000000000000, 0x43b0000000000000);
  TestUScvtfHelper(0x1000000000000001, 0x43b0000000000000, 0x43b0000000000000);
  TestUScvtfHelper(0x1000000000000080, 0x43b0000000000000, 0x43b0000000000000);
  TestUScvtfHelper(0x1000000000000081, 0x43b0000000000001, 0x43b0000000000001);
  TestUScvtfHelper(0x1000000000000100, 0x43b0000000000001, 0x43b0000000000001);
  TestUScvtfHelper(0x1000000000000101, 0x43b0000000000001, 0x43b0000000000001);
  TestUScvtfHelper(0x1000000000000180, 0x43b0000000000002, 0x43b0000000000002);
  TestUScvtfHelper(0x1000000000000181, 0x43b0000000000002, 0x43b0000000000002);
  TestUScvtfHelper(0x1000000000000200, 0x43b0000000000002, 0x43b0000000000002);
  TestUScvtfHelper(0x1000000000000201, 0x43b0000000000002, 0x43b0000000000002);
  TestUScvtfHelper(0x1000000000000280, 0x43b0000000000002, 0x43b0000000000002);
  TestUScvtfHelper(0x1000000000000281, 0x43b0000000000003, 0x43b0000000000003);
  TestUScvtfHelper(0x1000000000000300, 0x43b0000000000003, 0x43b0000000000003);
  // Check rounding of negative int64_t values (and large uint64_t values).
  TestUScvtfHelper(0x8000000000000000, 0xc3e0000000000000, 0x43e0000000000000);
  TestUScvtfHelper(0x8000000000000001, 0xc3e0000000000000, 0x43e0000000000000);
  TestUScvtfHelper(0x8000000000000200, 0xc3e0000000000000, 0x43e0000000000000);
  TestUScvtfHelper(0x8000000000000201, 0xc3dfffffffffffff, 0x43e0000000000000);
  TestUScvtfHelper(0x8000000000000400, 0xc3dfffffffffffff, 0x43e0000000000000);
  TestUScvtfHelper(0x8000000000000401, 0xc3dfffffffffffff, 0x43e0000000000001);
  TestUScvtfHelper(0x8000000000000600, 0xc3dffffffffffffe, 0x43e0000000000001);
  TestUScvtfHelper(0x8000000000000601, 0xc3dffffffffffffe, 0x43e0000000000001);
  TestUScvtfHelper(0x8000000000000800, 0xc3dffffffffffffe, 0x43e0000000000001);
  TestUScvtfHelper(0x8000000000000801, 0xc3dffffffffffffe, 0x43e0000000000001);
  TestUScvtfHelper(0x8000000000000a00, 0xc3dffffffffffffe, 0x43e0000000000001);
  TestUScvtfHelper(0x8000000000000a01, 0xc3dffffffffffffd, 0x43e0000000000001);
  TestUScvtfHelper(0x8000000000000c00, 0xc3dffffffffffffd, 0x43e0000000000002);
  // Round up to produce a result that's too big for the input to represent.
  TestUScvtfHelper(0x7ffffffffffffe00, 0x43e0000000000000, 0x43e0000000000000);
  TestUScvtfHelper(0x7fffffffffffffff, 0x43e0000000000000, 0x43e0000000000000);
  TestUScvtfHelper(0xfffffffffffffc00, 0xc090000000000000, 0x43f0000000000000);
  TestUScvtfHelper(0xffffffffffffffff, 0xbff0000000000000, 0x43f0000000000000);
}


// The same as TestUScvtfHelper, but convert to floats.
static void TestUScvtf32Helper(uint64_t in,
                               uint32_t expected_scvtf_bits,
                               uint32_t expected_ucvtf_bits) {
  uint64_t u64 = in;
  uint32_t u32 = u64 & 0xffffffff;
  int64_t s64 = static_cast<int64_t>(in);
  // Truncated copy, used below to check whether the input is representable
  // as an int32_t.
  int32_t s32 = static_cast<int32_t>(s64);
7621
7622  bool cvtf_s32 = (s64 == s32);
7623  bool cvtf_u32 = (u64 == u32);
7624
7625  float results_scvtf_x[65];
7626  float results_ucvtf_x[65];
7627  float results_scvtf_w[33];
7628  float results_ucvtf_w[33];
7629
7630  SETUP();
7631  START();
7632
7633  __ Mov(x0, reinterpret_cast<int64_t>(results_scvtf_x));
7634  __ Mov(x1, reinterpret_cast<int64_t>(results_ucvtf_x));
7635  __ Mov(x2, reinterpret_cast<int64_t>(results_scvtf_w));
7636  __ Mov(x3, reinterpret_cast<int64_t>(results_ucvtf_w));
7637
7638  __ Mov(x10, s64);
7639
7640  // Corrupt the top word, in case it is accidentally used during W-register
7641  // conversions.
7642  __ Mov(x11, 0x5555555555555555);
7643  __ Bfi(x11, x10, 0, kWRegSizeInBits);
7644
7645  // Test integer conversions.
7646  __ Scvtf(s0, x10);
7647  __ Ucvtf(s1, x10);
7648  __ Scvtf(s2, w11);
7649  __ Ucvtf(s3, w11);
7650  __ Str(s0, MemOperand(x0));
7651  __ Str(s1, MemOperand(x1));
7652  __ Str(s2, MemOperand(x2));
7653  __ Str(s3, MemOperand(x3));
7654
7655  // Test all possible values of fbits.
  for (int fbits = 1; fbits <= 32; fbits++) {
    __ Scvtf(s0, x10, fbits);
    __ Ucvtf(s1, x10, fbits);
    __ Scvtf(s2, w11, fbits);
    __ Ucvtf(s3, w11, fbits);
    __ Str(s0, MemOperand(x0, fbits * kSRegSize));
    __ Str(s1, MemOperand(x1, fbits * kSRegSize));
    __ Str(s2, MemOperand(x2, fbits * kSRegSize));
    __ Str(s3, MemOperand(x3, fbits * kSRegSize));
  }

  // Conversions from W registers can only handle fbits values <= 32, so just
  // test conversions from X registers for 32 < fbits <= 64.
  for (int fbits = 33; fbits <= 64; fbits++) {
    __ Scvtf(s0, x10, fbits);
    __ Ucvtf(s1, x10, fbits);
    __ Str(s0, MemOperand(x0, fbits * kSRegSize));
    __ Str(s1, MemOperand(x1, fbits * kSRegSize));
  }

  END();
  RUN();

  // Check the results.
  float expected_scvtf_base = rawbits_to_float(expected_scvtf_bits);
  float expected_ucvtf_base = rawbits_to_float(expected_ucvtf_bits);

  for (int fbits = 0; fbits <= 32; fbits++) {
    float expected_scvtf = expected_scvtf_base / powf(2, fbits);
    float expected_ucvtf = expected_ucvtf_base / powf(2, fbits);
    ASSERT_EQUAL_FP32(expected_scvtf, results_scvtf_x[fbits]);
    ASSERT_EQUAL_FP32(expected_ucvtf, results_ucvtf_x[fbits]);
    if (cvtf_s32) ASSERT_EQUAL_FP32(expected_scvtf, results_scvtf_w[fbits]);
    if (cvtf_u32) ASSERT_EQUAL_FP32(expected_ucvtf, results_ucvtf_w[fbits]);
  }
  for (int fbits = 33; fbits <= 64; fbits++) {
    float expected_scvtf = expected_scvtf_base / powf(2, fbits);
    float expected_ucvtf = expected_ucvtf_base / powf(2, fbits);
    ASSERT_EQUAL_FP32(expected_scvtf, results_scvtf_x[fbits]);
    ASSERT_EQUAL_FP32(expected_ucvtf, results_ucvtf_x[fbits]);
  }

  TEARDOWN();
}


TEST(scvtf_ucvtf_float) {
  INIT_V8();
  // Simple conversions of positive numbers which require no rounding; the
  // results should not depend on the rounding mode, and ucvtf and scvtf should
  // produce the same result.
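  // For example, an input of 1 becomes 1.0f, whose bit pattern is 0x3f800000
  // (sign 0, biased exponent 127, mantissa 0), and 0x40000000 (2^30) becomes
  // 0x4e800000 (biased exponent 157, mantissa 0).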
  TestUScvtf32Helper(0x0000000000000000, 0x00000000, 0x00000000);
  TestUScvtf32Helper(0x0000000000000001, 0x3f800000, 0x3f800000);
  TestUScvtf32Helper(0x0000000040000000, 0x4e800000, 0x4e800000);
  TestUScvtf32Helper(0x0000000100000000, 0x4f800000, 0x4f800000);
  TestUScvtf32Helper(0x4000000000000000, 0x5e800000, 0x5e800000);
  // Test mantissa extremities.
  TestUScvtf32Helper(0x0000000000800001, 0x4b000001, 0x4b000001);
  TestUScvtf32Helper(0x4000008000000000, 0x5e800001, 0x5e800001);
  // The largest int32_t that fits in a float.
  TestUScvtf32Helper(0x000000007fffff80, 0x4effffff, 0x4effffff);
  // Values that would be negative if treated as an int32_t.
  TestUScvtf32Helper(0x00000000ffffff00, 0x4f7fffff, 0x4f7fffff);
  TestUScvtf32Helper(0x0000000080000000, 0x4f000000, 0x4f000000);
  TestUScvtf32Helper(0x0000000080000100, 0x4f000001, 0x4f000001);
  // The largest int64_t that fits in a float.
  TestUScvtf32Helper(0x7fffff8000000000, 0x5effffff, 0x5effffff);
  // Check for bit pattern reproduction.
  TestUScvtf32Helper(0x0000000000876543, 0x4b076543, 0x4b076543);

  // Simple conversions of negative int64_t values. These require no rounding,
  // and the results should not depend on the rounding mode.
  TestUScvtf32Helper(0xfffffc0000000000, 0xd4800000, 0x5f7ffffc);
  TestUScvtf32Helper(0xc000000000000000, 0xde800000, 0x5f400000);

  // Conversions which require rounding.
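  // These exercise IEEE 754 round-to-nearest, ties-to-even. Near 2^47 a
  // float's 24-bit significand gives a step of 2^24, so 0x0000800000800000
  // (2^47 + 2^23) lies exactly halfway between 2^47 (0x57000000) and
  // 2^47 + 2^24 (0x57000001); the tie is broken towards the even significand,
  // giving 0x57000000.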
  TestUScvtf32Helper(0x0000800000000000, 0x57000000, 0x57000000);
  TestUScvtf32Helper(0x0000800000000001, 0x57000000, 0x57000000);
  TestUScvtf32Helper(0x0000800000800000, 0x57000000, 0x57000000);
  TestUScvtf32Helper(0x0000800000800001, 0x57000001, 0x57000001);
  TestUScvtf32Helper(0x0000800001000000, 0x57000001, 0x57000001);
  TestUScvtf32Helper(0x0000800001000001, 0x57000001, 0x57000001);
  TestUScvtf32Helper(0x0000800001800000, 0x57000002, 0x57000002);
  TestUScvtf32Helper(0x0000800001800001, 0x57000002, 0x57000002);
  TestUScvtf32Helper(0x0000800002000000, 0x57000002, 0x57000002);
  TestUScvtf32Helper(0x0000800002000001, 0x57000002, 0x57000002);
  TestUScvtf32Helper(0x0000800002800000, 0x57000002, 0x57000002);
  TestUScvtf32Helper(0x0000800002800001, 0x57000003, 0x57000003);
  TestUScvtf32Helper(0x0000800003000000, 0x57000003, 0x57000003);
  // Check rounding of negative int64_t values (and large uint64_t values).
  TestUScvtf32Helper(0x8000000000000000, 0xdf000000, 0x5f000000);
  TestUScvtf32Helper(0x8000000000000001, 0xdf000000, 0x5f000000);
  TestUScvtf32Helper(0x8000004000000000, 0xdf000000, 0x5f000000);
  TestUScvtf32Helper(0x8000004000000001, 0xdeffffff, 0x5f000000);
  TestUScvtf32Helper(0x8000008000000000, 0xdeffffff, 0x5f000000);
  TestUScvtf32Helper(0x8000008000000001, 0xdeffffff, 0x5f000001);
  TestUScvtf32Helper(0x800000c000000000, 0xdefffffe, 0x5f000001);
  TestUScvtf32Helper(0x800000c000000001, 0xdefffffe, 0x5f000001);
  TestUScvtf32Helper(0x8000010000000000, 0xdefffffe, 0x5f000001);
  TestUScvtf32Helper(0x8000010000000001, 0xdefffffe, 0x5f000001);
  TestUScvtf32Helper(0x8000014000000000, 0xdefffffe, 0x5f000001);
  TestUScvtf32Helper(0x8000014000000001, 0xdefffffd, 0x5f000001);
  TestUScvtf32Helper(0x8000018000000000, 0xdefffffd, 0x5f000002);
  // Round up to produce a result that's too big for the input to represent.
  TestUScvtf32Helper(0x000000007fffffc0, 0x4f000000, 0x4f000000);
  TestUScvtf32Helper(0x000000007fffffff, 0x4f000000, 0x4f000000);
  TestUScvtf32Helper(0x00000000ffffff80, 0x4f800000, 0x4f800000);
  TestUScvtf32Helper(0x00000000ffffffff, 0x4f800000, 0x4f800000);
  TestUScvtf32Helper(0x7fffffc000000000, 0x5f000000, 0x5f000000);
  TestUScvtf32Helper(0x7fffffffffffffff, 0x5f000000, 0x5f000000);
  TestUScvtf32Helper(0xffffff8000000000, 0xd3000000, 0x5f800000);
  TestUScvtf32Helper(0xffffffffffffffff, 0xbf800000, 0x5f800000);
}


TEST(system_mrs) {
  INIT_V8();
  SETUP();

  START();
  __ Mov(w0, 0);
  __ Mov(w1, 1);
  __ Mov(w2, 0x80000000);

  // Set the Z and C flags.
  __ Cmp(w0, w0);
  __ Mrs(x3, NZCV);
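  // Mrs copies the NZCV system register into the destination: N, Z, C and V
  // occupy bits 31, 30, 29 and 28 respectively, matching the NFlag/ZCFlag
  // constants checked below.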

  // Set the N flag.
  __ Cmp(w0, w1);
  __ Mrs(x4, NZCV);

  // Set the Z, C and V flags.
  __ Adds(w0, w2, w2);
  __ Mrs(x5, NZCV);

  // Read the default FPCR.
  __ Mrs(x6, FPCR);
  END();

  RUN();

  // NZCV
  ASSERT_EQUAL_32(ZCFlag, w3);
  ASSERT_EQUAL_32(NFlag, w4);
  ASSERT_EQUAL_32(ZCVFlag, w5);

  // FPCR
  // The default FPCR on Linux-based platforms is 0.
  ASSERT_EQUAL_32(0, w6);

  TEARDOWN();
}


TEST(system_msr) {
  INIT_V8();
  // All FPCR fields that must be implemented: AHP, DN, FZ, RMode
  const uint64_t fpcr_core = 0x07c00000;

  // All FPCR fields (including fields which may be read-as-zero):
  //  Stride, Len
  //  IDE, IXE, UFE, OFE, DZE, IOE
  const uint64_t fpcr_all = fpcr_core | 0x00379f00;
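  // In the ARMv8 FPCR layout, AHP is bit 26, DN bit 25, FZ bit 24 and RMode
  // bits 23-22, giving 0x07c00000. Stride (bits 21-20), Len (bits 18-16) and
  // the exception enables IDE (15), IXE (12), UFE (11), OFE (10), DZE (9) and
  // IOE (8) account for the remaining 0x00379f00.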

  SETUP();

  START();
  __ Mov(w0, 0);
  __ Mov(w1, 0x7fffffff);

  __ Mov(x7, 0);

  __ Mov(x10, NVFlag);
  __ Cmp(w0, w0);     // Set Z and C.
  __ Msr(NZCV, x10);  // Set N and V.
  // The Msr should have overwritten every flag set by the Cmp.
  __ Cinc(x7, x7, mi);  // N
  __ Cinc(x7, x7, ne);  // !Z
  __ Cinc(x7, x7, lo);  // !C
  __ Cinc(x7, x7, vs);  // V

  __ Mov(x10, ZCFlag);
  __ Cmn(w1, w1);     // Set N and V.
  __ Msr(NZCV, x10);  // Set Z and C.
  // The Msr should have overwritten every flag set by the Cmn.
  __ Cinc(x7, x7, pl);  // !N
  __ Cinc(x7, x7, eq);  // Z
  __ Cinc(x7, x7, hs);  // C
  __ Cinc(x7, x7, vc);  // !V

  // All core FPCR fields must be writable.
  __ Mov(x8, fpcr_core);
  __ Msr(FPCR, x8);
  __ Mrs(x8, FPCR);

  // All FPCR fields, including optional ones. This part of the test doesn't
  // achieve much other than ensuring that supported fields can be cleared by
  // the next test.
  __ Mov(x9, fpcr_all);
  __ Msr(FPCR, x9);
  __ Mrs(x9, FPCR);
  __ And(x9, x9, fpcr_core);

  // The undefined bits must ignore writes.
  // It's conceivable that a future version of the architecture could use these
  // fields (making this test fail), but in the meantime this is a useful test
  // for the simulator.
  __ Mov(x10, ~fpcr_all);
  __ Msr(FPCR, x10);
  __ Mrs(x10, FPCR);

  END();

  RUN();

  // We should have incremented x7 (from 0) exactly 8 times.
  ASSERT_EQUAL_64(8, x7);

  ASSERT_EQUAL_64(fpcr_core, x8);
  ASSERT_EQUAL_64(fpcr_core, x9);
  ASSERT_EQUAL_64(0, x10);

  TEARDOWN();
}


TEST(system_nop) {
  INIT_V8();
  SETUP();
  RegisterDump before;

  START();
  before.Dump(&masm);
  __ Nop();
  END();

  RUN();

  ASSERT_EQUAL_REGISTERS(before);
  ASSERT_EQUAL_NZCV(before.flags_nzcv());

  TEARDOWN();
}


TEST(zero_dest) {
  INIT_V8();
  SETUP();
  RegisterDump before;

  START();
  // Preserve the system stack pointer, in case we clobber it.
  __ Mov(x30, csp);
  // Initialize the other registers used in this test.
  uint64_t literal_base = 0x0100001000100101UL;
  __ Mov(x0, 0);
  __ Mov(x1, literal_base);
  for (unsigned i = 2; i < x30.code(); i++) {
    __ Add(Register::XRegFromCode(i), Register::XRegFromCode(i-1), x1);
  }
  before.Dump(&masm);

  // All of these instructions should be NOPs in these forms, but have
  // alternate forms which can write into the stack pointer.
  __ add(xzr, x0, x1);
  __ add(xzr, x1, xzr);
  __ add(xzr, xzr, x1);

  __ and_(xzr, x0, x2);
  __ and_(xzr, x2, xzr);
  __ and_(xzr, xzr, x2);

  __ bic(xzr, x0, x3);
  __ bic(xzr, x3, xzr);
  __ bic(xzr, xzr, x3);

  __ eon(xzr, x0, x4);
  __ eon(xzr, x4, xzr);
  __ eon(xzr, xzr, x4);

  __ eor(xzr, x0, x5);
  __ eor(xzr, x5, xzr);
  __ eor(xzr, xzr, x5);

  __ orr(xzr, x0, x6);
  __ orr(xzr, x6, xzr);
  __ orr(xzr, xzr, x6);

  __ sub(xzr, x0, x7);
  __ sub(xzr, x7, xzr);
  __ sub(xzr, xzr, x7);

  // Swap the saved system stack pointer with the real one. If csp was written
  // during the test, it will show up in x30. This is done because the test
  // framework assumes that csp will be valid at the end of the test.
  __ Mov(x29, x30);
  __ Mov(x30, csp);
  __ Mov(csp, x29);
  // We used x29 as a scratch register, so reset it to make sure it doesn't
  // trigger a test failure.
  __ Add(x29, x28, x1);
  END();

  RUN();

  ASSERT_EQUAL_REGISTERS(before);
  ASSERT_EQUAL_NZCV(before.flags_nzcv());

  TEARDOWN();
}


TEST(zero_dest_setflags) {
  INIT_V8();
  SETUP();
  RegisterDump before;

  START();
  // Preserve the system stack pointer, in case we clobber it.
  __ Mov(x30, csp);
  // Initialize the other registers used in this test.
  uint64_t literal_base = 0x0100001000100101UL;
  __ Mov(x0, 0);
  __ Mov(x1, literal_base);
  for (int i = 2; i < 30; i++) {
    __ Add(Register::XRegFromCode(i), Register::XRegFromCode(i-1), x1);
  }
  before.Dump(&masm);

  // All of these instructions should only write to the flags in these forms,
  // but have alternate forms which can write into the stack pointer.
  __ adds(xzr, x0, Operand(x1, UXTX));
  __ adds(xzr, x1, Operand(xzr, UXTX));
  __ adds(xzr, x1, 1234);
  __ adds(xzr, x0, x1);
  __ adds(xzr, x1, xzr);
  __ adds(xzr, xzr, x1);

  __ ands(xzr, x2, ~0xf);
  __ ands(xzr, xzr, ~0xf);
  __ ands(xzr, x0, x2);
  __ ands(xzr, x2, xzr);
  __ ands(xzr, xzr, x2);

  __ bics(xzr, x3, ~0xf);
  __ bics(xzr, xzr, ~0xf);
  __ bics(xzr, x0, x3);
  __ bics(xzr, x3, xzr);
  __ bics(xzr, xzr, x3);

  __ subs(xzr, x0, Operand(x3, UXTX));
  __ subs(xzr, x3, Operand(xzr, UXTX));
  __ subs(xzr, x3, 1234);
  __ subs(xzr, x0, x3);
  __ subs(xzr, x3, xzr);
  __ subs(xzr, xzr, x3);

  // Swap the saved system stack pointer with the real one. If csp was written
  // during the test, it will show up in x30. This is done because the test
  // framework assumes that csp will be valid at the end of the test.
  __ Mov(x29, x30);
  __ Mov(x30, csp);
  __ Mov(csp, x29);
  // We used x29 as a scratch register, so reset it to make sure it doesn't
  // trigger a test failure.
  __ Add(x29, x28, x1);
  END();

  RUN();

  ASSERT_EQUAL_REGISTERS(before);

  TEARDOWN();
}


TEST(register_bit) {
  // No code generation takes place in this test, so there is no need to set
  // up or tear down.

  // Simple tests.
  CHECK(x0.Bit() == (1UL << 0));
  CHECK(x1.Bit() == (1UL << 1));
  CHECK(x10.Bit() == (1UL << 10));

  // AAPCS64 definitions.
  CHECK(fp.Bit() == (1UL << kFramePointerRegCode));
  CHECK(lr.Bit() == (1UL << kLinkRegCode));
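  // Under AAPCS64 the frame pointer is x29 and the link register is x30, so
  // these amount to bits 29 and 30.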

  // Fixed (hardware) definitions.
  CHECK(xzr.Bit() == (1UL << kZeroRegCode));

  // Internal ABI definitions.
  CHECK(jssp.Bit() == (1UL << kJSSPCode));
  CHECK(csp.Bit() == (1UL << kSPRegInternalCode));
  CHECK(csp.Bit() != xzr.Bit());

  // xn.Bit() == wn.Bit() at all times, for the same n.
  CHECK(x0.Bit() == w0.Bit());
  CHECK(x1.Bit() == w1.Bit());
  CHECK(x10.Bit() == w10.Bit());
  CHECK(jssp.Bit() == wjssp.Bit());
  CHECK(xzr.Bit() == wzr.Bit());
  CHECK(csp.Bit() == wcsp.Bit());
}


TEST(stack_pointer_override) {
  // This test generates some stack maintenance code, but the test only checks
  // the reported state.
  INIT_V8();
  SETUP();
  START();

  // The default stack pointer in V8 is jssp, but for compatibility with the
  // native C++ test harness, the test framework sets it to csp before calling
  // the test.
  CHECK(csp.Is(__ StackPointer()));
  __ SetStackPointer(x0);
  CHECK(x0.Is(__ StackPointer()));
  __ SetStackPointer(jssp);
  CHECK(jssp.Is(__ StackPointer()));
  __ SetStackPointer(csp);
  CHECK(csp.Is(__ StackPointer()));

  END();
  RUN();
  TEARDOWN();
}


TEST(peek_poke_simple) {
  INIT_V8();
  SETUP();
  START();

  static const RegList x0_to_x3 = x0.Bit() | x1.Bit() | x2.Bit() | x3.Bit();
  static const RegList x10_to_x13 = x10.Bit() | x11.Bit() |
                                    x12.Bit() | x13.Bit();

  // The literal base is chosen to have two useful properties:
  //  * When multiplied by small values (such as a register index), this value
  //    is clearly readable in the result.
  //  * The value is not formed from repeating fixed-size smaller values, so it
  //    can be used to detect endianness-related errors.
  uint64_t literal_base = 0x0100001000100101UL;
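  // With this base, the registers hold easily recognizable multiples: x0 gets
  // 0x0100001000100101, x1 0x0200002000200202, x2 0x0300003000300303 and x3
  // 0x0400004000400404 (no byte-level carries occur for such small multiples).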

  // Initialize the registers.
  __ Mov(x0, literal_base);
  __ Add(x1, x0, x0);
  __ Add(x2, x1, x0);
  __ Add(x3, x2, x0);

  __ Claim(4);

  // Simple exchange.
  //  After this test:
  //    x0-x3 should be unchanged.
  //    w10-w13 should contain the lower words of x0-x3.
  __ Poke(x0, 0);
  __ Poke(x1, 8);
  __ Poke(x2, 16);
  __ Poke(x3, 24);
  Clobber(&masm, x0_to_x3);
  __ Peek(x0, 0);
  __ Peek(x1, 8);
  __ Peek(x2, 16);
  __ Peek(x3, 24);

  __ Poke(w0, 0);
  __ Poke(w1, 4);
  __ Poke(w2, 8);
  __ Poke(w3, 12);
  Clobber(&masm, x10_to_x13);
  __ Peek(w10, 0);
  __ Peek(w11, 4);
  __ Peek(w12, 8);
  __ Peek(w13, 12);

  __ Drop(4);

  END();
  RUN();

  ASSERT_EQUAL_64(literal_base * 1, x0);
  ASSERT_EQUAL_64(literal_base * 2, x1);
  ASSERT_EQUAL_64(literal_base * 3, x2);
  ASSERT_EQUAL_64(literal_base * 4, x3);

  ASSERT_EQUAL_64((literal_base * 1) & 0xffffffff, x10);
  ASSERT_EQUAL_64((literal_base * 2) & 0xffffffff, x11);
  ASSERT_EQUAL_64((literal_base * 3) & 0xffffffff, x12);
  ASSERT_EQUAL_64((literal_base * 4) & 0xffffffff, x13);

  TEARDOWN();
}


TEST(peek_poke_unaligned) {
  INIT_V8();
  SETUP();
  START();

  // The literal base is chosen to have two useful properties:
  //  * When multiplied by small values (such as a register index), this value
  //    is clearly readable in the result.
  //  * The value is not formed from repeating fixed-size smaller values, so it
  //    can be used to detect endianness-related errors.
  uint64_t literal_base = 0x0100001000100101UL;

  // Initialize the registers.
  __ Mov(x0, literal_base);
  __ Add(x1, x0, x0);
  __ Add(x2, x1, x0);
  __ Add(x3, x2, x0);
  __ Add(x4, x3, x0);
  __ Add(x5, x4, x0);
  __ Add(x6, x5, x0);

  __ Claim(4);

  // Unaligned exchanges.
  //  After this test:
  //    x0-x6 should be unchanged.
  //    w10-w12 should contain the lower words of x0-x2.
  __ Poke(x0, 1);
  Clobber(&masm, x0.Bit());
  __ Peek(x0, 1);
  __ Poke(x1, 2);
  Clobber(&masm, x1.Bit());
  __ Peek(x1, 2);
  __ Poke(x2, 3);
  Clobber(&masm, x2.Bit());
  __ Peek(x2, 3);
  __ Poke(x3, 4);
  Clobber(&masm, x3.Bit());
  __ Peek(x3, 4);
  __ Poke(x4, 5);
  Clobber(&masm, x4.Bit());
  __ Peek(x4, 5);
  __ Poke(x5, 6);
  Clobber(&masm, x5.Bit());
  __ Peek(x5, 6);
  __ Poke(x6, 7);
  Clobber(&masm, x6.Bit());
  __ Peek(x6, 7);

  __ Poke(w0, 1);
  Clobber(&masm, w10.Bit());
  __ Peek(w10, 1);
  __ Poke(w1, 2);
  Clobber(&masm, w11.Bit());
  __ Peek(w11, 2);
  __ Poke(w2, 3);
  Clobber(&masm, w12.Bit());
  __ Peek(w12, 3);

  __ Drop(4);

  END();
  RUN();

  ASSERT_EQUAL_64(literal_base * 1, x0);
  ASSERT_EQUAL_64(literal_base * 2, x1);
  ASSERT_EQUAL_64(literal_base * 3, x2);
  ASSERT_EQUAL_64(literal_base * 4, x3);
  ASSERT_EQUAL_64(literal_base * 5, x4);
  ASSERT_EQUAL_64(literal_base * 6, x5);
  ASSERT_EQUAL_64(literal_base * 7, x6);

  ASSERT_EQUAL_64((literal_base * 1) & 0xffffffff, x10);
  ASSERT_EQUAL_64((literal_base * 2) & 0xffffffff, x11);
  ASSERT_EQUAL_64((literal_base * 3) & 0xffffffff, x12);

  TEARDOWN();
}


TEST(peek_poke_endianness) {
  INIT_V8();
  SETUP();
  START();

  // The literal base is chosen to have two useful properties:
  //  * When multiplied by small values (such as a register index), this value
  //    is clearly readable in the result.
  //  * The value is not formed from repeating fixed-size smaller values, so it
  //    can be used to detect endianness-related errors.
  uint64_t literal_base = 0x0100001000100101UL;

  // Initialize the registers.
  __ Mov(x0, literal_base);
  __ Add(x1, x0, x0);

  __ Claim(4);

  // Endianness tests.
  //  After this section:
  //    x4 should match x0[31:0]:x0[63:32]
  //    w5 should match w1[15:0]:w1[31:16]
  __ Poke(x0, 0);
  __ Poke(x0, 8);
  __ Peek(x4, 4);

  __ Poke(w1, 0);
  __ Poke(w1, 4);
  __ Peek(w5, 2);

  __ Drop(4);

  END();
  RUN();

  uint64_t x0_expected = literal_base * 1;
  uint64_t x1_expected = literal_base * 2;
  uint64_t x4_expected = (x0_expected << 32) | (x0_expected >> 32);
  uint64_t x5_expected = ((x1_expected << 16) & 0xffff0000) |
                         ((x1_expected >> 16) & 0x0000ffff);
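  // On a little-endian target the two copies of x0 occupy 16 contiguous
  // bytes, so an eight-byte read at offset 4 picks up x0's high word (from
  // the first copy) as its low word and x0's low word (from the second copy)
  // as its high word. The W-sized case swaps halfwords in the same way.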

  ASSERT_EQUAL_64(x0_expected, x0);
  ASSERT_EQUAL_64(x1_expected, x1);
  ASSERT_EQUAL_64(x4_expected, x4);
  ASSERT_EQUAL_64(x5_expected, x5);

  TEARDOWN();
}


TEST(peek_poke_mixed) {
  INIT_V8();
  SETUP();
  START();

  // The literal base is chosen to have two useful properties:
  //  * When multiplied by small values (such as a register index), this value
  //    is clearly readable in the result.
  //  * The value is not formed from repeating fixed-size smaller values, so it
  //    can be used to detect endianness-related errors.
  uint64_t literal_base = 0x0100001000100101UL;

  // Initialize the registers.
  __ Mov(x0, literal_base);
  __ Add(x1, x0, x0);
  __ Add(x2, x1, x0);
  __ Add(x3, x2, x0);

  __ Claim(4);

  // Mix with other stack operations.
  //  After this section:
  //    x0-x3 should be unchanged.
  //    x6 should match x1[31:0]:x0[63:32]
  //    w7 should match x1[15:0]:x0[63:48]
  __ Poke(x1, 8);
  __ Poke(x0, 0);
  {
    ASSERT(__ StackPointer().Is(csp));
    __ Mov(x4, __ StackPointer());
    __ SetStackPointer(x4);

    __ Poke(wzr, 0);    // Clobber the space we're about to drop.
    __ Drop(1, kWRegSize);
    __ Peek(x6, 0);
    __ Claim(1);
    __ Peek(w7, 10);
    __ Poke(x3, 28);
    __ Poke(xzr, 0);    // Clobber the space we're about to drop.
    __ Drop(1);
    __ Poke(x2, 12);
    __ Push(w0);

    __ Mov(csp, __ StackPointer());
    __ SetStackPointer(csp);
  }

  __ Pop(x0, x1, x2, x3);

  END();
  RUN();

  uint64_t x0_expected = literal_base * 1;
  uint64_t x1_expected = literal_base * 2;
  uint64_t x2_expected = literal_base * 3;
  uint64_t x3_expected = literal_base * 4;
  uint64_t x6_expected = (x1_expected << 32) | (x0_expected >> 32);
  uint64_t x7_expected = ((x1_expected << 16) & 0xffff0000) |
                         ((x0_expected >> 48) & 0x0000ffff);

  ASSERT_EQUAL_64(x0_expected, x0);
  ASSERT_EQUAL_64(x1_expected, x1);
  ASSERT_EQUAL_64(x2_expected, x2);
  ASSERT_EQUAL_64(x3_expected, x3);
  ASSERT_EQUAL_64(x6_expected, x6);
  ASSERT_EQUAL_64(x7_expected, x7);

  TEARDOWN();
}


// This enum is used only as an argument to the push-pop test helpers.
enum PushPopMethod {
  // Push or Pop using the Push and Pop methods, with blocks of up to four
  // registers. (Smaller blocks will be used if necessary.)
  PushPopByFour,

  // Use Push<Size>RegList and Pop<Size>RegList to transfer the registers.
  PushPopRegList
};


// The maximum number of registers that can be used by the PushPopJssp* tests,
// where a reg_count field is provided.
static int const kPushPopJsspMaxRegCount = -1;
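// The value -1 is a sentinel meaning "as many registers as the helper's
// 'allowed' list permits"; the helpers resolve it with CountSetBits.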

// Test a simple push-pop pattern:
//  * Claim <claim> bytes to set the stack alignment.
//  * Push <reg_count> registers with size <reg_size>.
//  * Clobber the register contents.
//  * Pop <reg_count> registers to restore the original contents.
//  * Drop <claim> bytes to restore the original stack pointer.
//
// Different push and pop methods can be specified independently to test for
// proper word-endian behaviour.
static void PushPopJsspSimpleHelper(int reg_count,
                                    int claim,
                                    int reg_size,
                                    PushPopMethod push_method,
                                    PushPopMethod pop_method) {
  SETUP();

  START();

  // Registers in the TmpList can be used by the macro assembler for debug code
  // (for example in 'Pop'), so we can't use them here. We can't use jssp
  // because it will be the stack pointer for this test.
  static RegList const allowed = ~(masm.TmpList()->list() | jssp.Bit());
  if (reg_count == kPushPopJsspMaxRegCount) {
    reg_count = CountSetBits(allowed, kNumberOfRegisters);
  }
  // Work out which registers to use, based on reg_size.
  Register r[kNumberOfRegisters];
  Register x[kNumberOfRegisters];
  RegList list = PopulateRegisterArray(NULL, x, r, reg_size, reg_count,
                                       allowed);

  // The literal base is chosen to have two useful properties:
  //  * When multiplied by small values (such as a register index), this value
  //    is clearly readable in the result.
  //  * The value is not formed from repeating fixed-size smaller values, so it
  //    can be used to detect endianness-related errors.
  uint64_t literal_base = 0x0100001000100101UL;

  {
    ASSERT(__ StackPointer().Is(csp));
    __ Mov(jssp, __ StackPointer());
    __ SetStackPointer(jssp);

    int i;

    // Initialize the registers.
    for (i = 0; i < reg_count; i++) {
      // Always write into the X register, to ensure that the upper word is
      // properly ignored by Push when testing W registers.
      if (!x[i].IsZero()) {
        __ Mov(x[i], literal_base * i);
      }
    }

    // Claim memory first, as requested.
    __ Claim(claim, kByteSizeInBytes);

    switch (push_method) {
      case PushPopByFour:
        // Push high-numbered registers first (to the highest addresses).
        for (i = reg_count; i >= 4; i -= 4) {
          __ Push(r[i-1], r[i-2], r[i-3], r[i-4]);
        }
        // Finish off the leftovers.
        switch (i) {
          case 3:  __ Push(r[2], r[1], r[0]); break;
          case 2:  __ Push(r[1], r[0]);       break;
          case 1:  __ Push(r[0]);             break;
          default: ASSERT(i == 0);            break;
        }
        break;
      case PushPopRegList:
        __ PushSizeRegList(list, reg_size);
        break;
    }

    // Clobber all the registers, to ensure that they get repopulated by Pop.
    Clobber(&masm, list);

    switch (pop_method) {
      case PushPopByFour:
        // Pop low-numbered registers first (from the lowest addresses).
        for (i = 0; i <= (reg_count-4); i += 4) {
          __ Pop(r[i], r[i+1], r[i+2], r[i+3]);
        }
        // Finish off the leftovers.
        switch (reg_count - i) {
          case 3:  __ Pop(r[i], r[i+1], r[i+2]); break;
          case 2:  __ Pop(r[i], r[i+1]);         break;
          case 1:  __ Pop(r[i]);                 break;
          default: ASSERT(i == reg_count);       break;
        }
        break;
      case PushPopRegList:
        __ PopSizeRegList(list, reg_size);
        break;
    }

    // Drop memory to restore jssp.
    __ Drop(claim, kByteSizeInBytes);

    __ Mov(csp, __ StackPointer());
    __ SetStackPointer(csp);
  }

  END();

  RUN();

  // Check that the register contents were preserved.
  // Always use ASSERT_EQUAL_64, even when testing W registers, so we can test
  // that the upper word was properly cleared by Pop.
  literal_base &= (0xffffffffffffffffUL >> (64-reg_size));
  for (int i = 0; i < reg_count; i++) {
    if (x[i].IsZero()) {
      ASSERT_EQUAL_64(0, x[i]);
    } else {
      ASSERT_EQUAL_64(literal_base * i, x[i]);
    }
  }

  TEARDOWN();
}


TEST(push_pop_jssp_simple_32) {
  INIT_V8();
  for (int claim = 0; claim <= 8; claim++) {
    for (int count = 0; count <= 8; count++) {
      PushPopJsspSimpleHelper(count, claim, kWRegSizeInBits,
                              PushPopByFour, PushPopByFour);
      PushPopJsspSimpleHelper(count, claim, kWRegSizeInBits,
                              PushPopByFour, PushPopRegList);
      PushPopJsspSimpleHelper(count, claim, kWRegSizeInBits,
                              PushPopRegList, PushPopByFour);
      PushPopJsspSimpleHelper(count, claim, kWRegSizeInBits,
                              PushPopRegList, PushPopRegList);
    }
    // Test with the maximum number of registers.
    PushPopJsspSimpleHelper(kPushPopJsspMaxRegCount, claim, kWRegSizeInBits,
                            PushPopByFour, PushPopByFour);
    PushPopJsspSimpleHelper(kPushPopJsspMaxRegCount, claim, kWRegSizeInBits,
                            PushPopByFour, PushPopRegList);
    PushPopJsspSimpleHelper(kPushPopJsspMaxRegCount, claim, kWRegSizeInBits,
                            PushPopRegList, PushPopByFour);
    PushPopJsspSimpleHelper(kPushPopJsspMaxRegCount, claim, kWRegSizeInBits,
                            PushPopRegList, PushPopRegList);
  }
}


TEST(push_pop_jssp_simple_64) {
  INIT_V8();
  for (int claim = 0; claim <= 8; claim++) {
    for (int count = 0; count <= 8; count++) {
      PushPopJsspSimpleHelper(count, claim, kXRegSizeInBits,
                              PushPopByFour, PushPopByFour);
      PushPopJsspSimpleHelper(count, claim, kXRegSizeInBits,
                              PushPopByFour, PushPopRegList);
      PushPopJsspSimpleHelper(count, claim, kXRegSizeInBits,
                              PushPopRegList, PushPopByFour);
      PushPopJsspSimpleHelper(count, claim, kXRegSizeInBits,
                              PushPopRegList, PushPopRegList);
    }
    // Test with the maximum number of registers.
    PushPopJsspSimpleHelper(kPushPopJsspMaxRegCount, claim, kXRegSizeInBits,
                            PushPopByFour, PushPopByFour);
    PushPopJsspSimpleHelper(kPushPopJsspMaxRegCount, claim, kXRegSizeInBits,
                            PushPopByFour, PushPopRegList);
    PushPopJsspSimpleHelper(kPushPopJsspMaxRegCount, claim, kXRegSizeInBits,
                            PushPopRegList, PushPopByFour);
    PushPopJsspSimpleHelper(kPushPopJsspMaxRegCount, claim, kXRegSizeInBits,
                            PushPopRegList, PushPopRegList);
  }
}


// The maximum number of registers that can be used by the PushPopFPJssp* tests,
// where a reg_count field is provided.
static int const kPushPopFPJsspMaxRegCount = -1;

// Test a simple push-pop pattern:
//  * Claim <claim> bytes to set the stack alignment.
//  * Push <reg_count> FP registers with size <reg_size>.
//  * Clobber the register contents.
//  * Pop <reg_count> FP registers to restore the original contents.
//  * Drop <claim> bytes to restore the original stack pointer.
//
// Different push and pop methods can be specified independently to test for
// proper word-endian behaviour.
static void PushPopFPJsspSimpleHelper(int reg_count,
                                      int claim,
                                      int reg_size,
                                      PushPopMethod push_method,
                                      PushPopMethod pop_method) {
  SETUP();

  START();

  // We can use any floating-point register; unlike the integer registers,
  // none of them are reserved for debug code.
  static RegList const allowed = ~0;
  if (reg_count == kPushPopFPJsspMaxRegCount) {
    reg_count = CountSetBits(allowed, kNumberOfFPRegisters);
  }
  // Work out which registers to use, based on reg_size.
  FPRegister v[kNumberOfRegisters];
  FPRegister d[kNumberOfRegisters];
  RegList list = PopulateFPRegisterArray(NULL, d, v, reg_size, reg_count,
                                         allowed);

  // The literal base is chosen to have three useful properties:
  //  * When multiplied (using an integer) by small values (such as a register
  //    index), this value is clearly readable in the result.
  //  * The value is not formed from repeating fixed-size smaller values, so it
  //    can be used to detect endianness-related errors.
  //  * It is never a floating-point NaN, and will therefore always compare
  //    equal to itself.
  uint64_t literal_base = 0x0100001000100101UL;
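  // On the NaN point: a double is a NaN only if its exponent field (bits
  // 62-52) is all ones, and the small multiples of this base generated below
  // keep the top bits well clear of that pattern.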

  {
    ASSERT(__ StackPointer().Is(csp));
    __ Mov(jssp, __ StackPointer());
    __ SetStackPointer(jssp);

    int i;

    // Initialize the registers, using X registers to load the literal.
    __ Mov(x0, 0);
    __ Mov(x1, literal_base);
    for (i = 0; i < reg_count; i++) {
      // Always write into the D register, to ensure that the upper word is
      // properly ignored by Push when testing S registers.
      __ Fmov(d[i], x0);
      // Calculate the next literal.
      __ Add(x0, x0, x1);
    }

    // Claim memory first, as requested.
    __ Claim(claim, kByteSizeInBytes);

    switch (push_method) {
      case PushPopByFour:
        // Push high-numbered registers first (to the highest addresses).
        for (i = reg_count; i >= 4; i -= 4) {
          __ Push(v[i-1], v[i-2], v[i-3], v[i-4]);
        }
        // Finish off the leftovers.
        switch (i) {
          case 3:  __ Push(v[2], v[1], v[0]); break;
          case 2:  __ Push(v[1], v[0]);       break;
          case 1:  __ Push(v[0]);             break;
          default: ASSERT(i == 0);            break;
        }
        break;
      case PushPopRegList:
        __ PushSizeRegList(list, reg_size, CPURegister::kFPRegister);
        break;
    }

    // Clobber all the registers, to ensure that they get repopulated by Pop.
    ClobberFP(&masm, list);

    switch (pop_method) {
      case PushPopByFour:
        // Pop low-numbered registers first (from the lowest addresses).
        for (i = 0; i <= (reg_count-4); i += 4) {
          __ Pop(v[i], v[i+1], v[i+2], v[i+3]);
        }
        // Finish off the leftovers.
        switch (reg_count - i) {
          case 3:  __ Pop(v[i], v[i+1], v[i+2]); break;
          case 2:  __ Pop(v[i], v[i+1]);         break;
          case 1:  __ Pop(v[i]);                 break;
          default: ASSERT(i == reg_count);       break;
        }
        break;
      case PushPopRegList:
        __ PopSizeRegList(list, reg_size, CPURegister::kFPRegister);
        break;
    }

    // Drop memory to restore jssp.
    __ Drop(claim, kByteSizeInBytes);

    __ Mov(csp, __ StackPointer());
    __ SetStackPointer(csp);
  }

  END();

  RUN();

  // Check that the register contents were preserved.
  // Always use ASSERT_EQUAL_FP64, even when testing S registers, so we can
  // test that the upper word was properly cleared by Pop.
  literal_base &= (0xffffffffffffffffUL >> (64-reg_size));
  for (int i = 0; i < reg_count; i++) {
    uint64_t literal = literal_base * i;
    double expected;
    memcpy(&expected, &literal, sizeof(expected));
    ASSERT_EQUAL_FP64(expected, d[i]);
  }

  TEARDOWN();
}


TEST(push_pop_fp_jssp_simple_32) {
  INIT_V8();
  for (int claim = 0; claim <= 8; claim++) {
    for (int count = 0; count <= 8; count++) {
      PushPopFPJsspSimpleHelper(count, claim, kSRegSizeInBits,
                                PushPopByFour, PushPopByFour);
      PushPopFPJsspSimpleHelper(count, claim, kSRegSizeInBits,
                                PushPopByFour, PushPopRegList);
      PushPopFPJsspSimpleHelper(count, claim, kSRegSizeInBits,
                                PushPopRegList, PushPopByFour);
      PushPopFPJsspSimpleHelper(count, claim, kSRegSizeInBits,
                                PushPopRegList, PushPopRegList);
    }
    // Test with the maximum number of registers.
    PushPopFPJsspSimpleHelper(kPushPopFPJsspMaxRegCount, claim, kSRegSizeInBits,
                              PushPopByFour, PushPopByFour);
    PushPopFPJsspSimpleHelper(kPushPopFPJsspMaxRegCount, claim, kSRegSizeInBits,
                              PushPopByFour, PushPopRegList);
    PushPopFPJsspSimpleHelper(kPushPopFPJsspMaxRegCount, claim, kSRegSizeInBits,
                              PushPopRegList, PushPopByFour);
    PushPopFPJsspSimpleHelper(kPushPopFPJsspMaxRegCount, claim, kSRegSizeInBits,
                              PushPopRegList, PushPopRegList);
  }
}


TEST(push_pop_fp_jssp_simple_64) {
  INIT_V8();
  for (int claim = 0; claim <= 8; claim++) {
    for (int count = 0; count <= 8; count++) {
      PushPopFPJsspSimpleHelper(count, claim, kDRegSizeInBits,
                                PushPopByFour, PushPopByFour);
      PushPopFPJsspSimpleHelper(count, claim, kDRegSizeInBits,
                                PushPopByFour, PushPopRegList);
      PushPopFPJsspSimpleHelper(count, claim, kDRegSizeInBits,
                                PushPopRegList, PushPopByFour);
      PushPopFPJsspSimpleHelper(count, claim, kDRegSizeInBits,
                                PushPopRegList, PushPopRegList);
    }
    // Test with the maximum number of registers.
    PushPopFPJsspSimpleHelper(kPushPopFPJsspMaxRegCount, claim, kDRegSizeInBits,
                              PushPopByFour, PushPopByFour);
    PushPopFPJsspSimpleHelper(kPushPopFPJsspMaxRegCount, claim, kDRegSizeInBits,
                              PushPopByFour, PushPopRegList);
    PushPopFPJsspSimpleHelper(kPushPopFPJsspMaxRegCount, claim, kDRegSizeInBits,
                              PushPopRegList, PushPopByFour);
    PushPopFPJsspSimpleHelper(kPushPopFPJsspMaxRegCount, claim, kDRegSizeInBits,
                              PushPopRegList, PushPopRegList);
  }
}


// Push and pop data using an overlapping combination of Push/Pop and
// RegList-based methods.
static void PushPopJsspMixedMethodsHelper(int claim, int reg_size) {
  SETUP();

  // Registers x8 and x9 are used by the macro assembler for debug code (for
  // example in 'Pop'), so we can't use them here. We can't use jssp because it
  // will be the stack pointer for this test.
  static RegList const allowed =
      ~(x8.Bit() | x9.Bit() | jssp.Bit() | xzr.Bit());
  // Work out which registers to use, based on reg_size.
  Register r[10];
  Register x[10];
  PopulateRegisterArray(NULL, x, r, reg_size, 10, allowed);

  // Calculate some handy register lists.
  RegList r0_to_r3 = 0;
  for (int i = 0; i <= 3; i++) {
    r0_to_r3 |= x[i].Bit();
  }
  RegList r4_to_r5 = 0;
  for (int i = 4; i <= 5; i++) {
    r4_to_r5 |= x[i].Bit();
  }
  RegList r6_to_r9 = 0;
  for (int i = 6; i <= 9; i++) {
    r6_to_r9 |= x[i].Bit();
  }

  // The literal base is chosen to have two useful properties:
  //  * When multiplied by small values (such as a register index), this value
  //    is clearly readable in the result.
  //  * The value is not formed from repeating fixed-size smaller values, so it
  //    can be used to detect endianness-related errors.
  uint64_t literal_base = 0x0100001000100101UL;

  START();
  {
    ASSERT(__ StackPointer().Is(csp));
    __ Mov(jssp, __ StackPointer());
    __ SetStackPointer(jssp);

    // Claim memory first, as requested.
    __ Claim(claim, kByteSizeInBytes);

    __ Mov(x[3], literal_base * 3);
    __ Mov(x[2], literal_base * 2);
    __ Mov(x[1], literal_base * 1);
    __ Mov(x[0], literal_base * 0);

    __ PushSizeRegList(r0_to_r3, reg_size);
    __ Push(r[3], r[2]);

    Clobber(&masm, r0_to_r3);
    __ PopSizeRegList(r0_to_r3, reg_size);

    __ Push(r[2], r[1], r[3], r[0]);

    Clobber(&masm, r4_to_r5);
    __ Pop(r[4], r[5]);
    Clobber(&masm, r6_to_r9);
    __ Pop(r[6], r[7], r[8], r[9]);

    // Drop memory to restore jssp.
    __ Drop(claim, kByteSizeInBytes);

    __ Mov(csp, __ StackPointer());
    __ SetStackPointer(csp);
  }

  END();

  RUN();

  // Always use ASSERT_EQUAL_64, even when testing W registers, so we can test
  // that the upper word was properly cleared by Pop.
  literal_base &= (0xffffffffffffffffUL >> (64-reg_size));

  ASSERT_EQUAL_64(literal_base * 3, x[9]);
  ASSERT_EQUAL_64(literal_base * 2, x[8]);
  ASSERT_EQUAL_64(literal_base * 0, x[7]);
  ASSERT_EQUAL_64(literal_base * 3, x[6]);
  ASSERT_EQUAL_64(literal_base * 1, x[5]);
  ASSERT_EQUAL_64(literal_base * 2, x[4]);

  TEARDOWN();
}


TEST(push_pop_jssp_mixed_methods_64) {
  INIT_V8();
  for (int claim = 0; claim <= 8; claim++) {
    PushPopJsspMixedMethodsHelper(claim, kXRegSizeInBits);
  }
}


TEST(push_pop_jssp_mixed_methods_32) {
  INIT_V8();
  for (int claim = 0; claim <= 8; claim++) {
    PushPopJsspMixedMethodsHelper(claim, kWRegSizeInBits);
  }
}


// Push and pop data using overlapping X- and W-sized quantities.
static void PushPopJsspWXOverlapHelper(int reg_count, int claim) {
  // This test emits rather a lot of code.
  SETUP_SIZE(BUF_SIZE * 2);

  // Work out which registers to use, based on reg_size.
  Register tmp = x8;
  static RegList const allowed = ~(tmp.Bit() | jssp.Bit());
  if (reg_count == kPushPopJsspMaxRegCount) {
    reg_count = CountSetBits(allowed, kNumberOfRegisters);
  }
  Register w[kNumberOfRegisters];
  Register x[kNumberOfRegisters];
  RegList list = PopulateRegisterArray(w, x, NULL, 0, reg_count, allowed);

  // The number of W-sized slots we expect to pop. When we pop, we alternate
  // between W and X registers, so we need reg_count*1.5 W-sized slots.
  int const requested_w_slots = reg_count + reg_count / 2;
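  // Each W register popped consumes one W-sized slot and each X register two;
  // since the pops alternate, reg_count / 2 of the registers (rounded down)
  // are popped as X registers, giving reg_count + reg_count / 2 slots.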

  // Track what _should_ be on the stack, using W-sized slots.
  static int const kMaxWSlots = kNumberOfRegisters + kNumberOfRegisters / 2;
  uint32_t stack[kMaxWSlots];
  for (int i = 0; i < kMaxWSlots; i++) {
    stack[i] = 0xdeadbeef;
  }

  // The literal base is chosen to have two useful properties:
  //  * When multiplied by small values (such as a register index), this value
  //    is clearly readable in the result.
  //  * The value is not formed from repeating fixed-size smaller values, so it
  //    can be used to detect endianness-related errors.
  static uint64_t const literal_base = 0x0100001000100101UL;
  static uint64_t const literal_base_hi = literal_base >> 32;
  static uint64_t const literal_base_lo = literal_base & 0xffffffff;
  static uint64_t const literal_base_w = literal_base & 0xffffffff;

  START();
  {
    ASSERT(__ StackPointer().Is(csp));
    __ Mov(jssp, __ StackPointer());
    __ SetStackPointer(jssp);

    // Initialize the registers.
    for (int i = 0; i < reg_count; i++) {
      // Always write into the X register, to ensure that the upper word is
      // properly ignored by Push when testing W registers.
      if (!x[i].IsZero()) {
        __ Mov(x[i], literal_base * i);
      }
    }

    // Claim memory first, as requested.
    __ Claim(claim, kByteSizeInBytes);

    // The push-pop pattern is as follows:
    // Push:           Pop:
    //  x[0](hi)   ->   w[0]
    //  x[0](lo)   ->   x[1](hi)
    //  w[1]       ->   x[1](lo)
    //  w[1]       ->   w[2]
    //  x[2](hi)   ->   x[2](hi)
    //  x[2](lo)   ->   x[2](lo)
    //  x[2](hi)   ->   w[3]
    //  x[2](lo)   ->   x[4](hi)
    //  x[2](hi)   ->   x[4](lo)
    //  x[2](lo)   ->   w[5]
    //  w[3]       ->   x[5](hi)
    //  w[3]       ->   x[6](lo)
    //  w[3]       ->   w[7]
    //  w[3]       ->   x[8](hi)
    //  x[4](hi)   ->   x[8](lo)
    //  x[4](lo)   ->   w[9]
    // ... pattern continues ...
    //
    // That is, registers are pushed starting with the lower numbers,
    // alternating between x and w registers, and pushing i%4+1 copies of each,
    // where i is the register number.
    // Registers are popped starting with the higher numbers one-by-one,
    // alternating between x and w registers, but only popping one at a time.
    //
    // This pattern provides a wide variety of alignment effects and overlaps.

    // ---- Push ----

    int active_w_slots = 0;
    for (int i = 0; active_w_slots < requested_w_slots; i++) {
      ASSERT(i < reg_count);
      // In order to test various arguments to PushMultipleTimes, and to try to
      // exercise different alignment and overlap effects, we push each
      // register a different number of times.
      int times = i % 4 + 1;
      if (i & 1) {
        // Push odd-numbered registers as W registers.
        if (i & 2) {
          __ PushMultipleTimes(w[i], times);
        } else {
          // Use a register to specify the count.
          __ Mov(tmp.W(), times);
          __ PushMultipleTimes(w[i], tmp.W());
        }
        // Fill in the expected stack slots.
        for (int j = 0; j < times; j++) {
          if (w[i].Is(wzr)) {
            // The zero register always writes zeroes.
            stack[active_w_slots++] = 0;
          } else {
            stack[active_w_slots++] = literal_base_w * i;
          }
        }
      } else {
        // Push even-numbered registers as X registers.
        if (i & 2) {
          __ PushMultipleTimes(x[i], times);
        } else {
          // Use a register to specify the count.
          __ Mov(tmp, times);
          __ PushMultipleTimes(x[i], tmp);
        }
        // Fill in the expected stack slots.
        for (int j = 0; j < times; j++) {
          if (x[i].IsZero()) {
            // The zero register always writes zeroes.
            stack[active_w_slots++] = 0;
            stack[active_w_slots++] = 0;
          } else {
            stack[active_w_slots++] = literal_base_hi * i;
            stack[active_w_slots++] = literal_base_lo * i;
          }
        }
      }
    }
    // Because we were pushing several registers at a time, we probably pushed
    // more than we needed to.
    if (active_w_slots > requested_w_slots) {
      __ Drop(active_w_slots - requested_w_slots, kWRegSize);
      // Bump the number of active W-sized slots back to where it should be,
      // and fill the empty space with a dummy value.
      do {
        stack[--active_w_slots] = 0xdeadbeef;
      } while (active_w_slots > requested_w_slots);
    }

    // ---- Pop ----

    Clobber(&masm, list);

    // If popping an even number of registers, the first one will be X-sized.
    // Otherwise, the first one will be W-sized.
    bool next_is_64 = !(reg_count & 1);
    for (int i = reg_count-1; i >= 0; i--) {
      if (next_is_64) {
        __ Pop(x[i]);
        active_w_slots -= 2;
      } else {
        __ Pop(w[i]);
        active_w_slots -= 1;
      }
      next_is_64 = !next_is_64;
    }
    ASSERT(active_w_slots == 0);

    // Drop memory to restore jssp.
    __ Drop(claim, kByteSizeInBytes);

    __ Mov(csp, __ StackPointer());
    __ SetStackPointer(csp);
  }

  END();

  RUN();

  int slot = 0;
  for (int i = 0; i < reg_count; i++) {
    // Even-numbered registers were written as W registers.
    // Odd-numbered registers were written as X registers.
    bool expect_64 = (i & 1);
    uint64_t expected;

    if (expect_64) {
      uint64_t hi = stack[slot++];
      uint64_t lo = stack[slot++];
      expected = (hi << 32) | lo;
    } else {
      expected = stack[slot++];
    }

    // Always use ASSERT_EQUAL_64, even when testing W registers, so we can
    // test that the upper word was properly cleared by Pop.
    if (x[i].IsZero()) {
      ASSERT_EQUAL_64(0, x[i]);
    } else {
      ASSERT_EQUAL_64(expected, x[i]);
    }
  }
  ASSERT(slot == requested_w_slots);

  TEARDOWN();
}


TEST(push_pop_jssp_wx_overlap) {
  INIT_V8();
  for (int claim = 0; claim <= 8; claim++) {
    for (int count = 1; count <= 8; count++) {
      PushPopJsspWXOverlapHelper(count, claim);
    }
    // Test with the maximum number of registers.
    PushPopJsspWXOverlapHelper(kPushPopJsspMaxRegCount, claim);
  }
}


TEST(push_pop_csp) {
  INIT_V8();
  SETUP();

  START();

  ASSERT(csp.Is(__ StackPointer()));
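  // While csp is the stack pointer it must stay 16-byte aligned (the
  // architecture can trap on misaligned csp-relative accesses), so all of the
  // claims, pushes, pops and drops below move it by multiples of 16 bytes.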

  __ Mov(x3, 0x3333333333333333UL);
  __ Mov(x2, 0x2222222222222222UL);
  __ Mov(x1, 0x1111111111111111UL);
  __ Mov(x0, 0x0000000000000000UL);
  __ Claim(2);
  __ PushXRegList(x0.Bit() | x1.Bit() | x2.Bit() | x3.Bit());
  __ Push(x3, x2);
  __ PopXRegList(x0.Bit() | x1.Bit() | x2.Bit() | x3.Bit());
  __ Push(x2, x1, x3, x0);
  __ Pop(x4, x5);
  __ Pop(x6, x7, x8, x9);

  __ Claim(2);
  __ PushWRegList(w0.Bit() | w1.Bit() | w2.Bit() | w3.Bit());
  __ Push(w3, w1, w2, w0);
  __ PopWRegList(w10.Bit() | w11.Bit() | w12.Bit() | w13.Bit());
  __ Pop(w14, w15, w16, w17);

  __ Claim(2);
  __ Push(w2, w2, w1, w1);
  __ Push(x3, x3);
  __ Pop(w18, w19, w20, w21);
  __ Pop(x22, x23);

  __ Claim(2);
  __ PushXRegList(x1.Bit() | x22.Bit());
  __ PopXRegList(x24.Bit() | x26.Bit());

  __ Claim(2);
  __ PushWRegList(w1.Bit() | w2.Bit() | w4.Bit() | w22.Bit());
  __ PopWRegList(w25.Bit() | w27.Bit() | w28.Bit() | w29.Bit());

  __ Claim(2);
  __ PushXRegList(0);
  __ PopXRegList(0);
  __ PushXRegList(0xffffffff);
  __ PopXRegList(0xffffffff);
  __ Drop(12);

  END();

  RUN();

  ASSERT_EQUAL_64(0x1111111111111111UL, x3);
  ASSERT_EQUAL_64(0x0000000000000000UL, x2);
  ASSERT_EQUAL_64(0x3333333333333333UL, x1);
  ASSERT_EQUAL_64(0x2222222222222222UL, x0);
  ASSERT_EQUAL_64(0x3333333333333333UL, x9);
  ASSERT_EQUAL_64(0x2222222222222222UL, x8);
  ASSERT_EQUAL_64(0x0000000000000000UL, x7);
  ASSERT_EQUAL_64(0x3333333333333333UL, x6);
  ASSERT_EQUAL_64(0x1111111111111111UL, x5);
  ASSERT_EQUAL_64(0x2222222222222222UL, x4);

  ASSERT_EQUAL_32(0x11111111U, w13);
  ASSERT_EQUAL_32(0x33333333U, w12);
  ASSERT_EQUAL_32(0x00000000U, w11);
  ASSERT_EQUAL_32(0x22222222U, w10);
  ASSERT_EQUAL_32(0x11111111U, w17);
  ASSERT_EQUAL_32(0x00000000U, w16);
  ASSERT_EQUAL_32(0x33333333U, w15);
  ASSERT_EQUAL_32(0x22222222U, w14);

  ASSERT_EQUAL_32(0x11111111U, w18);
  ASSERT_EQUAL_32(0x11111111U, w19);
  ASSERT_EQUAL_32(0x11111111U, w20);
  ASSERT_EQUAL_32(0x11111111U, w21);
  ASSERT_EQUAL_64(0x3333333333333333UL, x22);
  ASSERT_EQUAL_64(0x0000000000000000UL, x23);

  ASSERT_EQUAL_64(0x3333333333333333UL, x24);
  ASSERT_EQUAL_64(0x3333333333333333UL, x26);

  ASSERT_EQUAL_32(0x33333333U, w25);
  ASSERT_EQUAL_32(0x00000000U, w27);
  ASSERT_EQUAL_32(0x22222222U, w28);
  ASSERT_EQUAL_32(0x33333333U, w29);
  TEARDOWN();
}


TEST(push_queued) {
  INIT_V8();
  SETUP();

  START();

  ASSERT(__ StackPointer().Is(csp));
  __ Mov(jssp, __ StackPointer());
  __ SetStackPointer(jssp);

  MacroAssembler::PushPopQueue queue(&masm);
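  // The queue only records the registers; no code is emitted until
  // PushQueued(), which lets the MacroAssembler batch the queued registers,
  // typically into paired stores and a single stack-pointer adjustment.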

  // Queue up registers.
  queue.Queue(x0);
  queue.Queue(x1);
  queue.Queue(x2);
  queue.Queue(x3);

  queue.Queue(w4);
  queue.Queue(w5);
  queue.Queue(w6);

  queue.Queue(d0);
  queue.Queue(d1);

  queue.Queue(s2);

  __ Mov(x0, 0x1234000000000000);
  __ Mov(x1, 0x1234000100010001);
  __ Mov(x2, 0x1234000200020002);
  __ Mov(x3, 0x1234000300030003);
  __ Mov(w4, 0x12340004);
  __ Mov(w5, 0x12340005);
  __ Mov(w6, 0x12340006);
  __ Fmov(d0, 123400.0);
  __ Fmov(d1, 123401.0);
  __ Fmov(s2, 123402.0);

  // Actually push them.
  queue.PushQueued();

  Clobber(&masm, CPURegList(CPURegister::kRegister, kXRegSizeInBits, 0, 6));
  Clobber(&masm, CPURegList(CPURegister::kFPRegister, kDRegSizeInBits, 0, 2));

  // Pop them conventionally.
  __ Pop(s2);
  __ Pop(d1, d0);
  __ Pop(w6, w5, w4);
  __ Pop(x3, x2, x1, x0);

  __ Mov(csp, __ StackPointer());
  __ SetStackPointer(csp);

  END();

  RUN();

  ASSERT_EQUAL_64(0x1234000000000000, x0);
  ASSERT_EQUAL_64(0x1234000100010001, x1);
  ASSERT_EQUAL_64(0x1234000200020002, x2);
  ASSERT_EQUAL_64(0x1234000300030003, x3);

  ASSERT_EQUAL_32(0x12340004, w4);
  ASSERT_EQUAL_32(0x12340005, w5);
  ASSERT_EQUAL_32(0x12340006, w6);

  ASSERT_EQUAL_FP64(123400.0, d0);
  ASSERT_EQUAL_FP64(123401.0, d1);

  ASSERT_EQUAL_FP32(123402.0, s2);

  TEARDOWN();
}


TEST(pop_queued) {
  INIT_V8();
  SETUP();

  START();

  ASSERT(__ StackPointer().Is(csp));
  __ Mov(jssp, __ StackPointer());
  __ SetStackPointer(jssp);

  MacroAssembler::PushPopQueue queue(&masm);

  __ Mov(x0, 0x1234000000000000);
  __ Mov(x1, 0x1234000100010001);
  __ Mov(x2, 0x1234000200020002);
  __ Mov(x3, 0x1234000300030003);
  __ Mov(w4, 0x12340004);
  __ Mov(w5, 0x12340005);
  __ Mov(w6, 0x12340006);
  __ Fmov(d0, 123400.0);
  __ Fmov(d1, 123401.0);
  __ Fmov(s2, 123402.0);

  // Push registers conventionally.
  __ Push(x0, x1, x2, x3);
  __ Push(w4, w5, w6);
  __ Push(d0, d1);
  __ Push(s2);

  // Queue up a pop.
  queue.Queue(s2);

  queue.Queue(d1);
  queue.Queue(d0);

  queue.Queue(w6);
  queue.Queue(w5);
  queue.Queue(w4);

  queue.Queue(x3);
  queue.Queue(x2);
  queue.Queue(x1);
  queue.Queue(x0);

  Clobber(&masm, CPURegList(CPURegister::kRegister, kXRegSizeInBits, 0, 6));
  Clobber(&masm, CPURegList(CPURegister::kFPRegister, kDRegSizeInBits, 0, 2));

  // Actually pop them.
  queue.PopQueued();

  __ Mov(csp, __ StackPointer());
  __ SetStackPointer(csp);

  END();

  RUN();

  ASSERT_EQUAL_64(0x1234000000000000, x0);
  ASSERT_EQUAL_64(0x1234000100010001, x1);
  ASSERT_EQUAL_64(0x1234000200020002, x2);
  ASSERT_EQUAL_64(0x1234000300030003, x3);

  ASSERT_EQUAL_64(0x0000000012340004, x4);
  ASSERT_EQUAL_64(0x0000000012340005, x5);
  ASSERT_EQUAL_64(0x0000000012340006, x6);

  ASSERT_EQUAL_FP64(123400.0, d0);
  ASSERT_EQUAL_FP64(123401.0, d1);

  ASSERT_EQUAL_FP32(123402.0, s2);

  TEARDOWN();
}


TEST(jump_both_smi) {
  INIT_V8();
  SETUP();

  Label cond_pass_00, cond_pass_01, cond_pass_10, cond_pass_11;
  Label cond_fail_00, cond_fail_01, cond_fail_10, cond_fail_11;
  Label return1, return2, return3, done;

  START();

  __ Mov(x0, 0x5555555500000001UL);  // A pointer.
  __ Mov(x1, 0xaaaaaaaa00000001UL);  // A pointer.
  __ Mov(x2, 0x1234567800000000UL);  // A smi.
  __ Mov(x3, 0x8765432100000000UL);  // A smi.
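  // On arm64, V8 encodes a smi as a 32-bit value in the upper half of the
  // register with the low 32 bits (including the tag bit, bit 0) clear. The
  // "pointer" values above have bit 0 set, like tagged heap object pointers,
  // so they must fail the smi checks.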
  __ Mov(x4, 0xdead);
  __ Mov(x5, 0xdead);
  __ Mov(x6, 0xdead);
  __ Mov(x7, 0xdead);

  __ JumpIfBothSmi(x0, x1, &cond_pass_00, &cond_fail_00);
  __ Bind(&return1);
  __ JumpIfBothSmi(x0, x2, &cond_pass_01, &cond_fail_01);
  __ Bind(&return2);
  __ JumpIfBothSmi(x2, x1, &cond_pass_10, &cond_fail_10);
  __ Bind(&return3);
  __ JumpIfBothSmi(x2, x3, &cond_pass_11, &cond_fail_11);

  __ Bind(&cond_fail_00);
  __ Mov(x4, 0);
  __ B(&return1);
  __ Bind(&cond_pass_00);
  __ Mov(x4, 1);
  __ B(&return1);

  __ Bind(&cond_fail_01);
  __ Mov(x5, 0);
  __ B(&return2);
  __ Bind(&cond_pass_01);
  __ Mov(x5, 1);
  __ B(&return2);

  __ Bind(&cond_fail_10);
  __ Mov(x6, 0);
  __ B(&return3);
  __ Bind(&cond_pass_10);
  __ Mov(x6, 1);
  __ B(&return3);

  __ Bind(&cond_fail_11);
  __ Mov(x7, 0);
  __ B(&done);
  __ Bind(&cond_pass_11);
  __ Mov(x7, 1);

  __ Bind(&done);

  END();

  RUN();

  ASSERT_EQUAL_64(0x5555555500000001UL, x0);
  ASSERT_EQUAL_64(0xaaaaaaaa00000001UL, x1);
  ASSERT_EQUAL_64(0x1234567800000000UL, x2);
  ASSERT_EQUAL_64(0x8765432100000000UL, x3);
  ASSERT_EQUAL_64(0, x4);
  ASSERT_EQUAL_64(0, x5);
  ASSERT_EQUAL_64(0, x6);
  ASSERT_EQUAL_64(1, x7);

  TEARDOWN();
}


TEST(jump_either_smi) {
  INIT_V8();
  SETUP();

  Label cond_pass_00, cond_pass_01, cond_pass_10, cond_pass_11;
  Label cond_fail_00, cond_fail_01, cond_fail_10, cond_fail_11;
  Label return1, return2, return3, done;

  START();

  __ Mov(x0, 0x5555555500000001UL);  // A pointer.
  __ Mov(x1, 0xaaaaaaaa00000001UL);  // A pointer.
  __ Mov(x2, 0x1234567800000000UL);  // A smi.
  __ Mov(x3, 0x8765432100000000UL);  // A smi.
  __ Mov(x4, 0xdead);
  __ Mov(x5, 0xdead);
  __ Mov(x6, 0xdead);
  __ Mov(x7, 0xdead);

  __ JumpIfEitherSmi(x0, x1, &cond_pass_00, &cond_fail_00);
  __ Bind(&return1);
  __ JumpIfEitherSmi(x0, x2, &cond_pass_01, &cond_fail_01);
  __ Bind(&return2);
  __ JumpIfEitherSmi(x2, x1, &cond_pass_10, &cond_fail_10);
  __ Bind(&return3);
  __ JumpIfEitherSmi(x2, x3, &cond_pass_11, &cond_fail_11);

  __ Bind(&cond_fail_00);
  __ Mov(x4, 0);
  __ B(&return1);
  __ Bind(&cond_pass_00);
  __ Mov(x4, 1);
  __ B(&return1);

  __ Bind(&cond_fail_01);
  __ Mov(x5, 0);
  __ B(&return2);
  __ Bind(&cond_pass_01);
  __ Mov(x5, 1);
  __ B(&return2);

  __ Bind(&cond_fail_10);
  __ Mov(x6, 0);
  __ B(&return3);
  __ Bind(&cond_pass_10);
  __ Mov(x6, 1);
  __ B(&return3);

  __ Bind(&cond_fail_11);
  __ Mov(x7, 0);
  __ B(&done);
  __ Bind(&cond_pass_11);
  __ Mov(x7, 1);

  __ Bind(&done);

  END();

  RUN();

  ASSERT_EQUAL_64(0x5555555500000001UL, x0);
  ASSERT_EQUAL_64(0xaaaaaaaa00000001UL, x1);
  ASSERT_EQUAL_64(0x1234567800000000UL, x2);
  ASSERT_EQUAL_64(0x8765432100000000UL, x3);
  ASSERT_EQUAL_64(0, x4);
  ASSERT_EQUAL_64(1, x5);
  ASSERT_EQUAL_64(1, x6);
  ASSERT_EQUAL_64(1, x7);

  TEARDOWN();
}


TEST(noreg) {
  // This test doesn't generate any code, but it verifies some invariants
  // related to NoReg.
  CHECK(NoReg.Is(NoFPReg));
  CHECK(NoFPReg.Is(NoReg));
  CHECK(NoReg.Is(NoCPUReg));
  CHECK(NoCPUReg.Is(NoReg));
  CHECK(NoFPReg.Is(NoCPUReg));
  CHECK(NoCPUReg.Is(NoFPReg));

  CHECK(NoReg.IsNone());
  CHECK(NoFPReg.IsNone());
  CHECK(NoCPUReg.IsNone());
}


TEST(isvalid) {
  // This test doesn't generate any code, but it verifies some invariants
  // related to IsValid().
  CHECK(!NoReg.IsValid());
  CHECK(!NoFPReg.IsValid());
  CHECK(!NoCPUReg.IsValid());

  CHECK(x0.IsValid());
  CHECK(w0.IsValid());
  CHECK(x30.IsValid());
  CHECK(w30.IsValid());
  CHECK(xzr.IsValid());
  CHECK(wzr.IsValid());
9471  CHECK(csp.IsValid());
9472  CHECK(wcsp.IsValid());
9473
9474  CHECK(d0.IsValid());
9475  CHECK(s0.IsValid());
9476  CHECK(d31.IsValid());
9477  CHECK(s31.IsValid());
9478
9479  CHECK(x0.IsValidRegister());
9480  CHECK(w0.IsValidRegister());
9481  CHECK(xzr.IsValidRegister());
9482  CHECK(wzr.IsValidRegister());
9483  CHECK(csp.IsValidRegister());
9484  CHECK(wcsp.IsValidRegister());
9485  CHECK(!x0.IsValidFPRegister());
9486  CHECK(!w0.IsValidFPRegister());
9487  CHECK(!xzr.IsValidFPRegister());
9488  CHECK(!wzr.IsValidFPRegister());
9489  CHECK(!csp.IsValidFPRegister());
9490  CHECK(!wcsp.IsValidFPRegister());
9491
9492  CHECK(d0.IsValidFPRegister());
9493  CHECK(s0.IsValidFPRegister());
9494  CHECK(!d0.IsValidRegister());
9495  CHECK(!s0.IsValidRegister());
9496
9497  // Test the same as before, but using CPURegister types. This shouldn't make
9498  // any difference.
9499  CHECK(static_cast<CPURegister>(x0).IsValid());
9500  CHECK(static_cast<CPURegister>(w0).IsValid());
9501  CHECK(static_cast<CPURegister>(x30).IsValid());
9502  CHECK(static_cast<CPURegister>(w30).IsValid());
9503  CHECK(static_cast<CPURegister>(xzr).IsValid());
9504  CHECK(static_cast<CPURegister>(wzr).IsValid());
9505
9506  CHECK(static_cast<CPURegister>(csp).IsValid());
9507  CHECK(static_cast<CPURegister>(wcsp).IsValid());
9508
9509  CHECK(static_cast<CPURegister>(d0).IsValid());
9510  CHECK(static_cast<CPURegister>(s0).IsValid());
9511  CHECK(static_cast<CPURegister>(d31).IsValid());
9512  CHECK(static_cast<CPURegister>(s31).IsValid());
9513
9514  CHECK(static_cast<CPURegister>(x0).IsValidRegister());
9515  CHECK(static_cast<CPURegister>(w0).IsValidRegister());
9516  CHECK(static_cast<CPURegister>(xzr).IsValidRegister());
9517  CHECK(static_cast<CPURegister>(wzr).IsValidRegister());
9518  CHECK(static_cast<CPURegister>(csp).IsValidRegister());
9519  CHECK(static_cast<CPURegister>(wcsp).IsValidRegister());
9520  CHECK(!static_cast<CPURegister>(x0).IsValidFPRegister());
9521  CHECK(!static_cast<CPURegister>(w0).IsValidFPRegister());
9522  CHECK(!static_cast<CPURegister>(xzr).IsValidFPRegister());
9523  CHECK(!static_cast<CPURegister>(wzr).IsValidFPRegister());
9524  CHECK(!static_cast<CPURegister>(csp).IsValidFPRegister());
9525  CHECK(!static_cast<CPURegister>(wcsp).IsValidFPRegister());
9526
9527  CHECK(static_cast<CPURegister>(d0).IsValidFPRegister());
9528  CHECK(static_cast<CPURegister>(s0).IsValidFPRegister());
9529  CHECK(!static_cast<CPURegister>(d0).IsValidRegister());
9530  CHECK(!static_cast<CPURegister>(s0).IsValidRegister());
9531}
9532
9533
9534TEST(cpureglist_utils_x) {
9535  // This test doesn't generate any code, but it verifies the behaviour of
9536  // the CPURegList utility methods.
9537
9538  // Test a list of X registers.
9539  CPURegList test(x0, x1, x2, x3);
9540
9541  CHECK(test.IncludesAliasOf(x0));
9542  CHECK(test.IncludesAliasOf(x1));
9543  CHECK(test.IncludesAliasOf(x2));
9544  CHECK(test.IncludesAliasOf(x3));
9545  CHECK(test.IncludesAliasOf(w0));
9546  CHECK(test.IncludesAliasOf(w1));
9547  CHECK(test.IncludesAliasOf(w2));
9548  CHECK(test.IncludesAliasOf(w3));
9549
9550  CHECK(!test.IncludesAliasOf(x4));
9551  CHECK(!test.IncludesAliasOf(x30));
9552  CHECK(!test.IncludesAliasOf(xzr));
9553  CHECK(!test.IncludesAliasOf(csp));
9554  CHECK(!test.IncludesAliasOf(w4));
9555  CHECK(!test.IncludesAliasOf(w30));
9556  CHECK(!test.IncludesAliasOf(wzr));
9557  CHECK(!test.IncludesAliasOf(wcsp));
9558
9559  CHECK(!test.IncludesAliasOf(d0));
9560  CHECK(!test.IncludesAliasOf(d1));
9561  CHECK(!test.IncludesAliasOf(d2));
9562  CHECK(!test.IncludesAliasOf(d3));
9563  CHECK(!test.IncludesAliasOf(s0));
9564  CHECK(!test.IncludesAliasOf(s1));
9565  CHECK(!test.IncludesAliasOf(s2));
9566  CHECK(!test.IncludesAliasOf(s3));
9567
9568  CHECK(!test.IsEmpty());
9569
9570  CHECK(test.type() == x0.type());
9571
9572  CHECK(test.PopHighestIndex().Is(x3));
9573  CHECK(test.PopLowestIndex().Is(x0));
9574
9575  CHECK(test.IncludesAliasOf(x1));
9576  CHECK(test.IncludesAliasOf(x2));
9577  CHECK(test.IncludesAliasOf(w1));
9578  CHECK(test.IncludesAliasOf(w2));
9579  CHECK(!test.IncludesAliasOf(x0));
9580  CHECK(!test.IncludesAliasOf(x3));
9581  CHECK(!test.IncludesAliasOf(w0));
9582  CHECK(!test.IncludesAliasOf(w3));
9583
9584  CHECK(test.PopHighestIndex().Is(x2));
9585  CHECK(test.PopLowestIndex().Is(x1));
9586
9587  CHECK(!test.IncludesAliasOf(x1));
9588  CHECK(!test.IncludesAliasOf(x2));
9589  CHECK(!test.IncludesAliasOf(w1));
9590  CHECK(!test.IncludesAliasOf(w2));
9591
9592  CHECK(test.IsEmpty());
9593}
9594
9595
9596TEST(cpureglist_utils_w) {
9597  // This test doesn't generate any code, but it verifies the behaviour of
9598  // the CPURegList utility methods.
9599
9600  // Test a list of W registers.
9601  CPURegList test(w10, w11, w12, w13);
9602
9603  CHECK(test.IncludesAliasOf(x10));
9604  CHECK(test.IncludesAliasOf(x11));
9605  CHECK(test.IncludesAliasOf(x12));
9606  CHECK(test.IncludesAliasOf(x13));
9607  CHECK(test.IncludesAliasOf(w10));
9608  CHECK(test.IncludesAliasOf(w11));
9609  CHECK(test.IncludesAliasOf(w12));
9610  CHECK(test.IncludesAliasOf(w13));
9611
9612  CHECK(!test.IncludesAliasOf(x0));
9613  CHECK(!test.IncludesAliasOf(x9));
9614  CHECK(!test.IncludesAliasOf(x14));
9615  CHECK(!test.IncludesAliasOf(x30));
9616  CHECK(!test.IncludesAliasOf(xzr));
9617  CHECK(!test.IncludesAliasOf(csp));
9618  CHECK(!test.IncludesAliasOf(w0));
9619  CHECK(!test.IncludesAliasOf(w9));
9620  CHECK(!test.IncludesAliasOf(w14));
9621  CHECK(!test.IncludesAliasOf(w30));
9622  CHECK(!test.IncludesAliasOf(wzr));
9623  CHECK(!test.IncludesAliasOf(wcsp));
9624
9625  CHECK(!test.IncludesAliasOf(d10));
9626  CHECK(!test.IncludesAliasOf(d11));
9627  CHECK(!test.IncludesAliasOf(d12));
9628  CHECK(!test.IncludesAliasOf(d13));
9629  CHECK(!test.IncludesAliasOf(s10));
9630  CHECK(!test.IncludesAliasOf(s11));
9631  CHECK(!test.IncludesAliasOf(s12));
9632  CHECK(!test.IncludesAliasOf(s13));
9633
9634  CHECK(!test.IsEmpty());
9635
9636  CHECK(test.type() == w10.type());
9637
9638  CHECK(test.PopHighestIndex().Is(w13));
9639  CHECK(test.PopLowestIndex().Is(w10));
9640
9641  CHECK(test.IncludesAliasOf(x11));
9642  CHECK(test.IncludesAliasOf(x12));
9643  CHECK(test.IncludesAliasOf(w11));
9644  CHECK(test.IncludesAliasOf(w12));
9645  CHECK(!test.IncludesAliasOf(x10));
9646  CHECK(!test.IncludesAliasOf(x13));
9647  CHECK(!test.IncludesAliasOf(w10));
9648  CHECK(!test.IncludesAliasOf(w13));
9649
9650  CHECK(test.PopHighestIndex().Is(w12));
9651  CHECK(test.PopLowestIndex().Is(w11));
9652
9653  CHECK(!test.IncludesAliasOf(x11));
9654  CHECK(!test.IncludesAliasOf(x12));
9655  CHECK(!test.IncludesAliasOf(w11));
9656  CHECK(!test.IncludesAliasOf(w12));
9657
9658  CHECK(test.IsEmpty());
9659}
9660
9661
9662TEST(cpureglist_utils_d) {
9663  // This test doesn't generate any code, but it verifies the behaviour of
9664  // the CPURegList utility methods.
9665
9666  // Test a list of D registers.
9667  CPURegList test(d20, d21, d22, d23);
9668
9669  CHECK(test.IncludesAliasOf(d20));
9670  CHECK(test.IncludesAliasOf(d21));
9671  CHECK(test.IncludesAliasOf(d22));
9672  CHECK(test.IncludesAliasOf(d23));
9673  CHECK(test.IncludesAliasOf(s20));
9674  CHECK(test.IncludesAliasOf(s21));
9675  CHECK(test.IncludesAliasOf(s22));
9676  CHECK(test.IncludesAliasOf(s23));
9677
9678  CHECK(!test.IncludesAliasOf(d0));
9679  CHECK(!test.IncludesAliasOf(d19));
9680  CHECK(!test.IncludesAliasOf(d24));
9681  CHECK(!test.IncludesAliasOf(d31));
9682  CHECK(!test.IncludesAliasOf(s0));
9683  CHECK(!test.IncludesAliasOf(s19));
9684  CHECK(!test.IncludesAliasOf(s24));
9685  CHECK(!test.IncludesAliasOf(s31));
9686
9687  CHECK(!test.IncludesAliasOf(x20));
9688  CHECK(!test.IncludesAliasOf(x21));
9689  CHECK(!test.IncludesAliasOf(x22));
9690  CHECK(!test.IncludesAliasOf(x23));
9691  CHECK(!test.IncludesAliasOf(w20));
9692  CHECK(!test.IncludesAliasOf(w21));
9693  CHECK(!test.IncludesAliasOf(w22));
9694  CHECK(!test.IncludesAliasOf(w23));
9695
9696  CHECK(!test.IncludesAliasOf(xzr));
9697  CHECK(!test.IncludesAliasOf(wzr));
9698  CHECK(!test.IncludesAliasOf(csp));
9699  CHECK(!test.IncludesAliasOf(wcsp));
9700
9701  CHECK(!test.IsEmpty());
9702
9703  CHECK(test.type() == d20.type());
9704
9705  CHECK(test.PopHighestIndex().Is(d23));
9706  CHECK(test.PopLowestIndex().Is(d20));
9707
9708  CHECK(test.IncludesAliasOf(d21));
9709  CHECK(test.IncludesAliasOf(d22));
9710  CHECK(test.IncludesAliasOf(s21));
9711  CHECK(test.IncludesAliasOf(s22));
9712  CHECK(!test.IncludesAliasOf(d20));
9713  CHECK(!test.IncludesAliasOf(d23));
9714  CHECK(!test.IncludesAliasOf(s20));
9715  CHECK(!test.IncludesAliasOf(s23));
9716
9717  CHECK(test.PopHighestIndex().Is(d22));
9718  CHECK(test.PopLowestIndex().Is(d21));
9719
9720  CHECK(!test.IncludesAliasOf(d21));
9721  CHECK(!test.IncludesAliasOf(d22));
9722  CHECK(!test.IncludesAliasOf(s21));
9723  CHECK(!test.IncludesAliasOf(s22));
9724
9725  CHECK(test.IsEmpty());
9726}
9727
9728
9729TEST(cpureglist_utils_s) {
9730  // This test doesn't generate any code, but it verifies the behaviour of
9731  // the CPURegList utility methods.
9732
9733  // Test a list of S registers.
9734  CPURegList test(s20, s21, s22, s23);
9735
9736  // The type and size mechanisms are already covered, so here we just test
9737  // that lists of S registers alias individual D registers.
9738
9739  CHECK(test.IncludesAliasOf(d20));
9740  CHECK(test.IncludesAliasOf(d21));
9741  CHECK(test.IncludesAliasOf(d22));
9742  CHECK(test.IncludesAliasOf(d23));
9743  CHECK(test.IncludesAliasOf(s20));
9744  CHECK(test.IncludesAliasOf(s21));
9745  CHECK(test.IncludesAliasOf(s22));
9746  CHECK(test.IncludesAliasOf(s23));
9747}
9748
9749
9750TEST(cpureglist_utils_empty) {
9751  // This test doesn't generate any code, but it verifies the behaviour of
9752  // the CPURegList utility methods.
9753
9754  // Test an empty list.
9755  // Empty lists can have type and size properties. Check that we can create
9756  // them, and that they are empty.
9757  CPURegList reg32(CPURegister::kRegister, kWRegSizeInBits, 0);
9758  CPURegList reg64(CPURegister::kRegister, kXRegSizeInBits, 0);
9759  CPURegList fpreg32(CPURegister::kFPRegister, kSRegSizeInBits, 0);
9760  CPURegList fpreg64(CPURegister::kFPRegister, kDRegSizeInBits, 0);
9761
9762  CHECK(reg32.IsEmpty());
9763  CHECK(reg64.IsEmpty());
9764  CHECK(fpreg32.IsEmpty());
9765  CHECK(fpreg64.IsEmpty());
9766
9767  CHECK(reg32.PopLowestIndex().IsNone());
9768  CHECK(reg64.PopLowestIndex().IsNone());
9769  CHECK(fpreg32.PopLowestIndex().IsNone());
9770  CHECK(fpreg64.PopLowestIndex().IsNone());
9771
9772  CHECK(reg32.PopHighestIndex().IsNone());
9773  CHECK(reg64.PopHighestIndex().IsNone());
9774  CHECK(fpreg32.PopHighestIndex().IsNone());
9775  CHECK(fpreg64.PopHighestIndex().IsNone());
9776
9777  CHECK(reg32.IsEmpty());
9778  CHECK(reg64.IsEmpty());
9779  CHECK(fpreg32.IsEmpty());
9780  CHECK(fpreg64.IsEmpty());
9781}
9782
9783
9784TEST(printf) {
9785  INIT_V8();
9786  SETUP_SIZE(BUF_SIZE * 2);
9787  START();
9788
9789  char const * test_plain_string = "Printf with no arguments.\n";
9790  char const * test_substring = "'This is a substring.'";
9791  RegisterDump before;
9792
9793  // Initialize x29 to the value of the stack pointer. We will use x29 as a
9794  // temporary stack pointer later, and initializing it in this way allows the
9795  // RegisterDump check to pass.
9796  __ Mov(x29, __ StackPointer());
9797
9798  // Test simple integer arguments.
9799  __ Mov(x0, 1234);
9800  __ Mov(x1, 0x1234);
9801
9802  // Test simple floating-point arguments.
9803  __ Fmov(d0, 1.234);
9804
9805  // Test pointer (string) arguments.
9806  __ Mov(x2, reinterpret_cast<uintptr_t>(test_substring));
9807
9808  // Test the maximum number of arguments, and sign extension.
9809  __ Mov(w3, 0xffffffff);
9810  __ Mov(w4, 0xffffffff);
9811  __ Mov(x5, 0xffffffffffffffff);
9812  __ Mov(x6, 0xffffffffffffffff);
9813  __ Fmov(s1, 1.234);
9814  __ Fmov(s2, 2.345);
9815  __ Fmov(d3, 3.456);
9816  __ Fmov(d4, 4.567);
9817
9818  // Test printing callee-saved registers.
9819  __ Mov(x28, 0x123456789abcdef);
9820  __ Fmov(d10, 42.0);
9821
9822  // Test with three arguments.
9823  __ Mov(x10, 3);
9824  __ Mov(x11, 40);
9825  __ Mov(x12, 500);
9826
9827  // A single character.
9828  __ Mov(w13, 'x');
9829
9830  // Check that we don't clobber any registers.
9831  before.Dump(&masm);
9832
9833  __ Printf(test_plain_string);   // NOLINT(runtime/printf)
9834  __ Printf("x0: %" PRId64 ", x1: 0x%08" PRIx64 "\n", x0, x1);
9835  __ Printf("w5: %" PRId32 ", x5: %" PRId64"\n", w5, x5);
9836  __ Printf("d0: %f\n", d0);
9837  __ Printf("Test %%s: %s\n", x2);
9838  __ Printf("w3(uint32): %" PRIu32 "\nw4(int32): %" PRId32 "\n"
9839            "x5(uint64): %" PRIu64 "\nx6(int64): %" PRId64 "\n",
9840            w3, w4, x5, x6);
9841  __ Printf("%%f: %f\n%%g: %g\n%%e: %e\n%%E: %E\n", s1, s2, d3, d4);
9842  __ Printf("0x%" PRIx32 ", 0x%" PRIx64 "\n", w28, x28);
9843  __ Printf("%g\n", d10);
9844  __ Printf("%%%%%s%%%c%%\n", x2, w13);
9845
9846  // Print the stack pointer (csp).
9847  ASSERT(csp.Is(__ StackPointer()));
9848  __ Printf("StackPointer(csp): 0x%016" PRIx64 ", 0x%08" PRIx32 "\n",
9849            __ StackPointer(), __ StackPointer().W());
9850
9851  // Test with a different stack pointer.
9852  const Register old_stack_pointer = __ StackPointer();
9853  __ Mov(x29, old_stack_pointer);
9854  __ SetStackPointer(x29);
9855  // Print the stack pointer (not csp).
9856  __ Printf("StackPointer(not csp): 0x%016" PRIx64 ", 0x%08" PRIx32 "\n",
9857            __ StackPointer(), __ StackPointer().W());
9858  __ Mov(old_stack_pointer, __ StackPointer());
9859  __ SetStackPointer(old_stack_pointer);
9860
9861  // Test with three arguments.
9862  __ Printf("3=%u, 4=%u, 5=%u\n", x10, x11, x12);
9863
9864  // Mixed argument types.
9865  __ Printf("w3: %" PRIu32 ", s1: %f, x5: %" PRIu64 ", d3: %f\n",
9866            w3, s1, x5, d3);
9867  __ Printf("s1: %f, d3: %f, w3: %" PRId32 ", x5: %" PRId64 "\n",
9868            s1, d3, w3, x5);
9869
9870  END();
9871  RUN();
9872
  // We cannot easily test the output of the Printf sequences, and because
  // Printf preserves all registers by default, we can't look at the number of
  // bytes that were printed. The printf_no_preserve test checks that; here we
  // just verify that no registers were clobbered.
9877  ASSERT_EQUAL_REGISTERS(before);
9878
9879  TEARDOWN();
9880}
9881
9882
9883TEST(printf_no_preserve) {
9884  INIT_V8();
9885  SETUP();
9886  START();
9887
9888  char const * test_plain_string = "Printf with no arguments.\n";
9889  char const * test_substring = "'This is a substring.'";
9890
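  // Unlike Printf, PrintfNoPreserve does not preserve registers. It leaves
  // printf's return value (the number of characters written) in x0, so each
  // call below is followed by stashing x0 in a callee-saved register.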
9891  __ PrintfNoPreserve(test_plain_string);
9892  __ Mov(x19, x0);
9893
9894  // Test simple integer arguments.
9895  __ Mov(x0, 1234);
9896  __ Mov(x1, 0x1234);
9897  __ PrintfNoPreserve("x0: %" PRId64", x1: 0x%08" PRIx64 "\n", x0, x1);
9898  __ Mov(x20, x0);
9899
9900  // Test simple floating-point arguments.
9901  __ Fmov(d0, 1.234);
9902  __ PrintfNoPreserve("d0: %f\n", d0);
9903  __ Mov(x21, x0);
9904
9905  // Test pointer (string) arguments.
9906  __ Mov(x2, reinterpret_cast<uintptr_t>(test_substring));
9907  __ PrintfNoPreserve("Test %%s: %s\n", x2);
9908  __ Mov(x22, x0);
9909
9910  // Test the maximum number of arguments, and sign extension.
9911  __ Mov(w3, 0xffffffff);
9912  __ Mov(w4, 0xffffffff);
9913  __ Mov(x5, 0xffffffffffffffff);
9914  __ Mov(x6, 0xffffffffffffffff);
9915  __ PrintfNoPreserve("w3(uint32): %" PRIu32 "\nw4(int32): %" PRId32 "\n"
9916                      "x5(uint64): %" PRIu64 "\nx6(int64): %" PRId64 "\n",
9917                      w3, w4, x5, x6);
9918  __ Mov(x23, x0);
9919
9920  __ Fmov(s1, 1.234);
9921  __ Fmov(s2, 2.345);
9922  __ Fmov(d3, 3.456);
9923  __ Fmov(d4, 4.567);
9924  __ PrintfNoPreserve("%%f: %f\n%%g: %g\n%%e: %e\n%%E: %E\n", s1, s2, d3, d4);
9925  __ Mov(x24, x0);
9926
9927  // Test printing callee-saved registers.
9928  __ Mov(x28, 0x123456789abcdef);
9929  __ PrintfNoPreserve("0x%" PRIx32 ", 0x%" PRIx64 "\n", w28, x28);
9930  __ Mov(x25, x0);
9931
9932  __ Fmov(d10, 42.0);
9933  __ PrintfNoPreserve("%g\n", d10);
9934  __ Mov(x26, x0);
9935
9936  // Test with a different stack pointer.
9937  const Register old_stack_pointer = __ StackPointer();
9938  __ Mov(x29, old_stack_pointer);
9939  __ SetStackPointer(x29);
9940  // Print the stack pointer (not csp).
9941  __ PrintfNoPreserve(
9942      "StackPointer(not csp): 0x%016" PRIx64 ", 0x%08" PRIx32 "\n",
9943      __ StackPointer(), __ StackPointer().W());
9944  __ Mov(x27, x0);
9945  __ Mov(old_stack_pointer, __ StackPointer());
9946  __ SetStackPointer(old_stack_pointer);
9947
9948  // Test with three arguments.
9949  __ Mov(x3, 3);
9950  __ Mov(x4, 40);
9951  __ Mov(x5, 500);
9952  __ PrintfNoPreserve("3=%u, 4=%u, 5=%u\n", x3, x4, x5);
9953  __ Mov(x28, x0);
9954
9955  // Mixed argument types.
9956  __ Mov(w3, 0xffffffff);
9957  __ Fmov(s1, 1.234);
9958  __ Mov(x5, 0xffffffffffffffff);
9959  __ Fmov(d3, 3.456);
9960  __ PrintfNoPreserve("w3: %" PRIu32 ", s1: %f, x5: %" PRIu64 ", d3: %f\n",
9961                      w3, s1, x5, d3);
9962  __ Mov(x29, x0);
9963
9964  END();
9965  RUN();
9966
9967  // We cannot easily test the exact output of the Printf sequences, but we can
9968  // use the return code to check that the string length was correct.
9969
9970  // Printf with no arguments.
9971  ASSERT_EQUAL_64(strlen(test_plain_string), x19);
9972  // x0: 1234, x1: 0x00001234
9973  ASSERT_EQUAL_64(25, x20);
9974  // d0: 1.234000
9975  ASSERT_EQUAL_64(13, x21);
9976  // Test %s: 'This is a substring.'
9977  ASSERT_EQUAL_64(32, x22);
9978  // w3(uint32): 4294967295
9979  // w4(int32): -1
9980  // x5(uint64): 18446744073709551615
9981  // x6(int64): -1
9982  ASSERT_EQUAL_64(23 + 14 + 33 + 14, x23);
9983  // %f: 1.234000
9984  // %g: 2.345
9985  // %e: 3.456000e+00
9986  // %E: 4.567000E+00
9987  ASSERT_EQUAL_64(13 + 10 + 17 + 17, x24);
9988  // 0x89abcdef, 0x123456789abcdef
9989  ASSERT_EQUAL_64(30, x25);
9990  // 42
9991  ASSERT_EQUAL_64(3, x26);
9992  // StackPointer(not csp): 0x00007fb037ae2370, 0x37ae2370
9993  // Note: This is an example value, but the field width is fixed here so the
9994  // string length is still predictable.
9995  ASSERT_EQUAL_64(54, x27);
9996  // 3=3, 4=40, 5=500
9997  ASSERT_EQUAL_64(17, x28);
9998  // w3: 4294967295, s1: 1.234000, x5: 18446744073709551615, d3: 3.456000
9999  ASSERT_EQUAL_64(69, x29);
10000
10001  TEARDOWN();
10002}
10003
10004
10005// This is a V8-specific test.
10006static void CopyFieldsHelper(CPURegList temps) {
10007  static const uint64_t kLiteralBase = 0x0100001000100101UL;
10008  static const uint64_t src[] = {kLiteralBase * 1,
10009                                 kLiteralBase * 2,
10010                                 kLiteralBase * 3,
10011                                 kLiteralBase * 4,
10012                                 kLiteralBase * 5,
10013                                 kLiteralBase * 6,
10014                                 kLiteralBase * 7,
10015                                 kLiteralBase * 8,
10016                                 kLiteralBase * 9,
10017                                 kLiteralBase * 10,
10018                                 kLiteralBase * 11};
10019  static const uint64_t src_tagged =
10020      reinterpret_cast<uint64_t>(src) + kHeapObjectTag;
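  // CopyFields works on tagged heap object pointers, so the raw buffer
  // addresses are biased by kHeapObjectTag before being handed to it.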
10021
10022  static const unsigned kTestCount = sizeof(src) / sizeof(src[0]) + 1;
10023  uint64_t* dst[kTestCount];
10024  uint64_t dst_tagged[kTestCount];
10025
10026  // The first test will be to copy 0 fields. The destination (and source)
10027  // should not be accessed in any way.
10028  dst[0] = NULL;
10029  dst_tagged[0] = kHeapObjectTag;
10030
10031  // Allocate memory for each other test. Each test <n> will have <n> fields.
10032  // This is intended to exercise as many paths in CopyFields as possible.
10033  for (unsigned i = 1; i < kTestCount; i++) {
10034    dst[i] = new uint64_t[i];
10035    memset(dst[i], 0, i * sizeof(kLiteralBase));
10036    dst_tagged[i] = reinterpret_cast<uint64_t>(dst[i]) + kHeapObjectTag;
10037  }
10038
10039  SETUP();
10040  START();
10041
10042  __ Mov(x0, dst_tagged[0]);
10043  __ Mov(x1, 0);
10044  __ CopyFields(x0, x1, temps, 0);
10045  for (unsigned i = 1; i < kTestCount; i++) {
10046    __ Mov(x0, dst_tagged[i]);
10047    __ Mov(x1, src_tagged);
10048    __ CopyFields(x0, x1, temps, i);
10049  }
10050
10051  END();
10052  RUN();
10053  TEARDOWN();
10054
10055  for (unsigned i = 1; i < kTestCount; i++) {
10056    for (unsigned j = 0; j < i; j++) {
10057      CHECK(src[j] == dst[i][j]);
10058    }
10059    delete [] dst[i];
10060  }
10061}
10062
10063
10064// This is a V8-specific test.
10065TEST(copyfields) {
10066  INIT_V8();
10067  CopyFieldsHelper(CPURegList(x10));
10068  CopyFieldsHelper(CPURegList(x10, x11));
10069  CopyFieldsHelper(CPURegList(x10, x11, x12));
10070  CopyFieldsHelper(CPURegList(x10, x11, x12, x13));
10071}
10072
10073
10074static void DoSmiAbsTest(int32_t value, bool must_fail = false) {
10075  SETUP();
10076
10077  START();
10078  Label end, slow;
10079  __ Mov(x2, 0xc001c0de);
10080  __ Mov(x1, value);
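  // SmiTag shifts the value into the upper 32 bits to form a smi.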
10081  __ SmiTag(x1);
10082  __ SmiAbs(x1, &slow);
10083  __ SmiUntag(x1);
10084  __ B(&end);
10085
10086  __ Bind(&slow);
10087  __ Mov(x2, 0xbad);
10088
10089  __ Bind(&end);
10090  END();
10091
10092  RUN();
10093
  if (must_fail) {
    // We tested an invalid conversion. The code must have jumped to the slow
    // path.
    ASSERT_EQUAL_64(0xbad, x2);
  } else {
    // The conversion is valid; check the result.
    int32_t result = (value >= 0) ? value : -value;
    ASSERT_EQUAL_64(result, x1);

    // Check that we didn't jump to the slow path.
    ASSERT_EQUAL_64(0xc001c0de, x2);
10104  }
10105
10106  TEARDOWN();
10107}
10108
10109
10110TEST(smi_abs) {
10111  INIT_V8();
10112  // Simple and edge cases.
10113  DoSmiAbsTest(0);
10114  DoSmiAbsTest(0x12345);
10115  DoSmiAbsTest(0x40000000);
10116  DoSmiAbsTest(0x7fffffff);
10117  DoSmiAbsTest(-1);
10118  DoSmiAbsTest(-12345);
10119  DoSmiAbsTest(0x80000001);
10120
  // Check that the most negative SMI is detected: its absolute value,
  // 0x80000000, does not fit in a 32-bit smi, so SmiAbs must take the slow
  // path.
10122  DoSmiAbsTest(0x80000000, true);
10123}
10124
10125
10126TEST(blr_lr) {
  // A simple test to check that the simulator correctly handles "blr lr": the
  // branch target must be read from lr before the return address is written
  // back into lr.
10128  INIT_V8();
10129  SETUP();
10130
10131  START();
10132  Label target;
10133  Label end;
10134
10135  __ Mov(x0, 0x0);
10136  __ Adr(lr, &target);
10137
10138  __ Blr(lr);
10139  __ Mov(x0, 0xdeadbeef);
10140  __ B(&end);
10141
10142  __ Bind(&target);
10143  __ Mov(x0, 0xc001c0de);
10144
10145  __ Bind(&end);
10146  END();
10147
10148  RUN();
10149
10150  ASSERT_EQUAL_64(0xc001c0de, x0);
10151
10152  TEARDOWN();
10153}
10154
10155
10156TEST(barriers) {
  // Generate all supported barriers. This is just a smoke test: it checks
  // that the instructions can be generated and executed without error.
10158  INIT_V8();
10159  SETUP();
10160
10161  START();
10162
10163  // DMB
10164  __ Dmb(FullSystem, BarrierAll);
10165  __ Dmb(FullSystem, BarrierReads);
10166  __ Dmb(FullSystem, BarrierWrites);
10167  __ Dmb(FullSystem, BarrierOther);
10168
10169  __ Dmb(InnerShareable, BarrierAll);
10170  __ Dmb(InnerShareable, BarrierReads);
10171  __ Dmb(InnerShareable, BarrierWrites);
10172  __ Dmb(InnerShareable, BarrierOther);
10173
10174  __ Dmb(NonShareable, BarrierAll);
10175  __ Dmb(NonShareable, BarrierReads);
10176  __ Dmb(NonShareable, BarrierWrites);
10177  __ Dmb(NonShareable, BarrierOther);
10178
10179  __ Dmb(OuterShareable, BarrierAll);
10180  __ Dmb(OuterShareable, BarrierReads);
10181  __ Dmb(OuterShareable, BarrierWrites);
10182  __ Dmb(OuterShareable, BarrierOther);
10183
10184  // DSB
10185  __ Dsb(FullSystem, BarrierAll);
10186  __ Dsb(FullSystem, BarrierReads);
10187  __ Dsb(FullSystem, BarrierWrites);
10188  __ Dsb(FullSystem, BarrierOther);
10189
10190  __ Dsb(InnerShareable, BarrierAll);
10191  __ Dsb(InnerShareable, BarrierReads);
10192  __ Dsb(InnerShareable, BarrierWrites);
10193  __ Dsb(InnerShareable, BarrierOther);
10194
10195  __ Dsb(NonShareable, BarrierAll);
10196  __ Dsb(NonShareable, BarrierReads);
10197  __ Dsb(NonShareable, BarrierWrites);
10198  __ Dsb(NonShareable, BarrierOther);
10199
10200  __ Dsb(OuterShareable, BarrierAll);
10201  __ Dsb(OuterShareable, BarrierReads);
10202  __ Dsb(OuterShareable, BarrierWrites);
10203  __ Dsb(OuterShareable, BarrierOther);
10204
10205  // ISB
10206  __ Isb();
10207
10208  END();
10209
10210  RUN();
10211
10212  TEARDOWN();
10213}
10214
10215
10216TEST(process_nan_double) {
10217  INIT_V8();
10218  // Make sure that NaN propagation works correctly.
10219  double sn = rawbits_to_double(0x7ff5555511111111);
10220  double qn = rawbits_to_double(0x7ffaaaaa11111111);
10221  ASSERT(IsSignallingNaN(sn));
10222  ASSERT(IsQuietNaN(qn));
10223
10224  // The input NaNs after passing through ProcessNaN.
10225  double sn_proc = rawbits_to_double(0x7ffd555511111111);
10226  double qn_proc = qn;
10227  ASSERT(IsQuietNaN(sn_proc));
10228  ASSERT(IsQuietNaN(qn_proc));
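  // Quieting a signalling NaN sets the top bit of the fraction:
  // 0x7ff5555511111111 | 0x0008000000000000 == 0x7ffd555511111111. Quiet NaNs
  // already have that bit set, so they pass through ProcessNaN unchanged.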
10229
10230  SETUP();
10231  START();
10232
10233  // Execute a number of instructions which all use ProcessNaN, and check that
10234  // they all handle the NaN correctly.
10235  __ Fmov(d0, sn);
10236  __ Fmov(d10, qn);
10237
10238  // Operations that always propagate NaNs unchanged, even signalling NaNs.
10239  //   - Signalling NaN
10240  __ Fmov(d1, d0);
10241  __ Fabs(d2, d0);
10242  __ Fneg(d3, d0);
10243  //   - Quiet NaN
10244  __ Fmov(d11, d10);
10245  __ Fabs(d12, d10);
10246  __ Fneg(d13, d10);
10247
10248  // Operations that use ProcessNaN.
10249  //   - Signalling NaN
10250  __ Fsqrt(d4, d0);
10251  __ Frinta(d5, d0);
10252  __ Frintn(d6, d0);
10253  __ Frintz(d7, d0);
10254  //   - Quiet NaN
10255  __ Fsqrt(d14, d10);
10256  __ Frinta(d15, d10);
10257  __ Frintn(d16, d10);
10258  __ Frintz(d17, d10);
10259
10260  // The behaviour of fcvt is checked in TEST(fcvt_sd).
10261
10262  END();
10263  RUN();
10264
10265  uint64_t qn_raw = double_to_rawbits(qn);
10266  uint64_t sn_raw = double_to_rawbits(sn);
10267
10268  //   - Signalling NaN
10269  ASSERT_EQUAL_FP64(sn, d1);
10270  ASSERT_EQUAL_FP64(rawbits_to_double(sn_raw & ~kDSignMask), d2);
10271  ASSERT_EQUAL_FP64(rawbits_to_double(sn_raw ^ kDSignMask), d3);
10272  //   - Quiet NaN
10273  ASSERT_EQUAL_FP64(qn, d11);
10274  ASSERT_EQUAL_FP64(rawbits_to_double(qn_raw & ~kDSignMask), d12);
10275  ASSERT_EQUAL_FP64(rawbits_to_double(qn_raw ^ kDSignMask), d13);
10276
10277  //   - Signalling NaN
10278  ASSERT_EQUAL_FP64(sn_proc, d4);
10279  ASSERT_EQUAL_FP64(sn_proc, d5);
10280  ASSERT_EQUAL_FP64(sn_proc, d6);
10281  ASSERT_EQUAL_FP64(sn_proc, d7);
10282  //   - Quiet NaN
10283  ASSERT_EQUAL_FP64(qn_proc, d14);
10284  ASSERT_EQUAL_FP64(qn_proc, d15);
10285  ASSERT_EQUAL_FP64(qn_proc, d16);
10286  ASSERT_EQUAL_FP64(qn_proc, d17);
10287
10288  TEARDOWN();
10289}
10290
10291
10292TEST(process_nan_float) {
10293  INIT_V8();
10294  // Make sure that NaN propagation works correctly.
10295  float sn = rawbits_to_float(0x7f951111);
10296  float qn = rawbits_to_float(0x7fea1111);
10297  ASSERT(IsSignallingNaN(sn));
10298  ASSERT(IsQuietNaN(qn));
10299
10300  // The input NaNs after passing through ProcessNaN.
10301  float sn_proc = rawbits_to_float(0x7fd51111);
10302  float qn_proc = qn;
10303  ASSERT(IsQuietNaN(sn_proc));
10304  ASSERT(IsQuietNaN(qn_proc));
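  // As for doubles, quieting sets the top bit of the fraction:
  // 0x7f951111 | 0x00400000 == 0x7fd51111.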
10305
10306  SETUP();
10307  START();
10308
10309  // Execute a number of instructions which all use ProcessNaN, and check that
10310  // they all handle the NaN correctly.
10311  __ Fmov(s0, sn);
10312  __ Fmov(s10, qn);
10313
10314  // Operations that always propagate NaNs unchanged, even signalling NaNs.
10315  //   - Signalling NaN
10316  __ Fmov(s1, s0);
10317  __ Fabs(s2, s0);
10318  __ Fneg(s3, s0);
10319  //   - Quiet NaN
10320  __ Fmov(s11, s10);
10321  __ Fabs(s12, s10);
10322  __ Fneg(s13, s10);
10323
10324  // Operations that use ProcessNaN.
10325  //   - Signalling NaN
10326  __ Fsqrt(s4, s0);
10327  __ Frinta(s5, s0);
10328  __ Frintn(s6, s0);
10329  __ Frintz(s7, s0);
10330  //   - Quiet NaN
10331  __ Fsqrt(s14, s10);
10332  __ Frinta(s15, s10);
10333  __ Frintn(s16, s10);
10334  __ Frintz(s17, s10);
10335
10336  // The behaviour of fcvt is checked in TEST(fcvt_sd).
10337
10338  END();
10339  RUN();
10340
10341  uint32_t qn_raw = float_to_rawbits(qn);
10342  uint32_t sn_raw = float_to_rawbits(sn);
10343
10344  //   - Signalling NaN
10345  ASSERT_EQUAL_FP32(sn, s1);
10346  ASSERT_EQUAL_FP32(rawbits_to_float(sn_raw & ~kSSignMask), s2);
10347  ASSERT_EQUAL_FP32(rawbits_to_float(sn_raw ^ kSSignMask), s3);
10348  //   - Quiet NaN
10349  ASSERT_EQUAL_FP32(qn, s11);
10350  ASSERT_EQUAL_FP32(rawbits_to_float(qn_raw & ~kSSignMask), s12);
10351  ASSERT_EQUAL_FP32(rawbits_to_float(qn_raw ^ kSSignMask), s13);
10352
10353  //   - Signalling NaN
10354  ASSERT_EQUAL_FP32(sn_proc, s4);
10355  ASSERT_EQUAL_FP32(sn_proc, s5);
10356  ASSERT_EQUAL_FP32(sn_proc, s6);
10357  ASSERT_EQUAL_FP32(sn_proc, s7);
10358  //   - Quiet NaN
10359  ASSERT_EQUAL_FP32(qn_proc, s14);
10360  ASSERT_EQUAL_FP32(qn_proc, s15);
10361  ASSERT_EQUAL_FP32(qn_proc, s16);
10362  ASSERT_EQUAL_FP32(qn_proc, s17);
10363
10364  TEARDOWN();
10365}
10366
10367
10368static void ProcessNaNsHelper(double n, double m, double expected) {
10369  ASSERT(std::isnan(n) || std::isnan(m));
  ASSERT(std::isnan(expected));
10371
10372  SETUP();
10373  START();
10374
10375  // Execute a number of instructions which all use ProcessNaNs, and check that
10376  // they all propagate NaNs correctly.
10377  __ Fmov(d0, n);
10378  __ Fmov(d1, m);
10379
10380  __ Fadd(d2, d0, d1);
10381  __ Fsub(d3, d0, d1);
10382  __ Fmul(d4, d0, d1);
10383  __ Fdiv(d5, d0, d1);
10384  __ Fmax(d6, d0, d1);
10385  __ Fmin(d7, d0, d1);
10386
10387  END();
10388  RUN();
10389
10390  ASSERT_EQUAL_FP64(expected, d2);
10391  ASSERT_EQUAL_FP64(expected, d3);
10392  ASSERT_EQUAL_FP64(expected, d4);
10393  ASSERT_EQUAL_FP64(expected, d5);
10394  ASSERT_EQUAL_FP64(expected, d6);
10395  ASSERT_EQUAL_FP64(expected, d7);
10396
10397  TEARDOWN();
10398}
10399
10400
10401TEST(process_nans_double) {
10402  INIT_V8();
10403  // Make sure that NaN propagation works correctly.
10404  double sn = rawbits_to_double(0x7ff5555511111111);
10405  double sm = rawbits_to_double(0x7ff5555522222222);
10406  double qn = rawbits_to_double(0x7ffaaaaa11111111);
10407  double qm = rawbits_to_double(0x7ffaaaaa22222222);
10408  ASSERT(IsSignallingNaN(sn));
10409  ASSERT(IsSignallingNaN(sm));
10410  ASSERT(IsQuietNaN(qn));
10411  ASSERT(IsQuietNaN(qm));
10412
10413  // The input NaNs after passing through ProcessNaN.
10414  double sn_proc = rawbits_to_double(0x7ffd555511111111);
10415  double sm_proc = rawbits_to_double(0x7ffd555522222222);
10416  double qn_proc = qn;
10417  double qm_proc = qm;
10418  ASSERT(IsQuietNaN(sn_proc));
10419  ASSERT(IsQuietNaN(sm_proc));
10420  ASSERT(IsQuietNaN(qn_proc));
10421  ASSERT(IsQuietNaN(qm_proc));
10422
10423  // Quiet NaNs are propagated.
10424  ProcessNaNsHelper(qn, 0, qn_proc);
10425  ProcessNaNsHelper(0, qm, qm_proc);
10426  ProcessNaNsHelper(qn, qm, qn_proc);
10427
10428  // Signalling NaNs are propagated, and made quiet.
10429  ProcessNaNsHelper(sn, 0, sn_proc);
10430  ProcessNaNsHelper(0, sm, sm_proc);
10431  ProcessNaNsHelper(sn, sm, sn_proc);
10432
10433  // Signalling NaNs take precedence over quiet NaNs.
10434  ProcessNaNsHelper(sn, qm, sn_proc);
10435  ProcessNaNsHelper(qn, sm, sm_proc);
10436  ProcessNaNsHelper(sn, sm, sn_proc);
10437}
10438
10439
10440static void ProcessNaNsHelper(float n, float m, float expected) {
10441  ASSERT(std::isnan(n) || std::isnan(m));
  ASSERT(std::isnan(expected));
10443
10444  SETUP();
10445  START();
10446
10447  // Execute a number of instructions which all use ProcessNaNs, and check that
10448  // they all propagate NaNs correctly.
10449  __ Fmov(s0, n);
10450  __ Fmov(s1, m);
10451
10452  __ Fadd(s2, s0, s1);
10453  __ Fsub(s3, s0, s1);
10454  __ Fmul(s4, s0, s1);
10455  __ Fdiv(s5, s0, s1);
10456  __ Fmax(s6, s0, s1);
10457  __ Fmin(s7, s0, s1);
10458
10459  END();
10460  RUN();
10461
10462  ASSERT_EQUAL_FP32(expected, s2);
10463  ASSERT_EQUAL_FP32(expected, s3);
10464  ASSERT_EQUAL_FP32(expected, s4);
10465  ASSERT_EQUAL_FP32(expected, s5);
10466  ASSERT_EQUAL_FP32(expected, s6);
10467  ASSERT_EQUAL_FP32(expected, s7);
10468
10469  TEARDOWN();
10470}
10471
10472
10473TEST(process_nans_float) {
10474  INIT_V8();
10475  // Make sure that NaN propagation works correctly.
10476  float sn = rawbits_to_float(0x7f951111);
10477  float sm = rawbits_to_float(0x7f952222);
10478  float qn = rawbits_to_float(0x7fea1111);
10479  float qm = rawbits_to_float(0x7fea2222);
10480  ASSERT(IsSignallingNaN(sn));
10481  ASSERT(IsSignallingNaN(sm));
10482  ASSERT(IsQuietNaN(qn));
10483  ASSERT(IsQuietNaN(qm));
10484
10485  // The input NaNs after passing through ProcessNaN.
10486  float sn_proc = rawbits_to_float(0x7fd51111);
10487  float sm_proc = rawbits_to_float(0x7fd52222);
10488  float qn_proc = qn;
10489  float qm_proc = qm;
10490  ASSERT(IsQuietNaN(sn_proc));
10491  ASSERT(IsQuietNaN(sm_proc));
10492  ASSERT(IsQuietNaN(qn_proc));
10493  ASSERT(IsQuietNaN(qm_proc));
10494
10495  // Quiet NaNs are propagated.
10496  ProcessNaNsHelper(qn, 0, qn_proc);
10497  ProcessNaNsHelper(0, qm, qm_proc);
10498  ProcessNaNsHelper(qn, qm, qn_proc);
10499
10500  // Signalling NaNs are propagated, and made quiet.
10501  ProcessNaNsHelper(sn, 0, sn_proc);
10502  ProcessNaNsHelper(0, sm, sm_proc);
10503  ProcessNaNsHelper(sn, sm, sn_proc);
10504
10505  // Signalling NaNs take precedence over quiet NaNs.
10506  ProcessNaNsHelper(sn, qm, sn_proc);
10507  ProcessNaNsHelper(qn, sm, sm_proc);
10508  ProcessNaNsHelper(sn, sm, sn_proc);
10509}
10510
10511
10512static void DefaultNaNHelper(float n, float m, float a) {
  ASSERT(std::isnan(n) || std::isnan(m) || std::isnan(a));
10514
10515  bool test_1op = std::isnan(n);
10516  bool test_2op = std::isnan(n) || std::isnan(m);
10517
10518  SETUP();
10519  START();
10520
10521  // Enable Default-NaN mode in the FPCR.
10522  __ Mrs(x0, FPCR);
10523  __ Orr(x1, x0, DN_mask);
10524  __ Msr(FPCR, x1);
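  // With FPCR.DN set, any operation that produces a NaN result returns the
  // default NaN (kFP32DefaultNaN or kFP64DefaultNaN) instead of propagating
  // the payload of an input NaN.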
10525
10526  // Execute a number of instructions which all use ProcessNaNs, and check that
10527  // they all produce the default NaN.
10528  __ Fmov(s0, n);
10529  __ Fmov(s1, m);
10530  __ Fmov(s2, a);
10531
10532  if (test_1op) {
10533    // Operations that always propagate NaNs unchanged, even signalling NaNs.
10534    __ Fmov(s10, s0);
10535    __ Fabs(s11, s0);
10536    __ Fneg(s12, s0);
10537
10538    // Operations that use ProcessNaN.
10539    __ Fsqrt(s13, s0);
10540    __ Frinta(s14, s0);
10541    __ Frintn(s15, s0);
10542    __ Frintz(s16, s0);
10543
10544    // Fcvt usually has special NaN handling, but it respects default-NaN mode.
10545    __ Fcvt(d17, s0);
10546  }
10547
10548  if (test_2op) {
10549    __ Fadd(s18, s0, s1);
10550    __ Fsub(s19, s0, s1);
10551    __ Fmul(s20, s0, s1);
10552    __ Fdiv(s21, s0, s1);
10553    __ Fmax(s22, s0, s1);
10554    __ Fmin(s23, s0, s1);
10555  }
10556
10557  __ Fmadd(s24, s0, s1, s2);
10558  __ Fmsub(s25, s0, s1, s2);
10559  __ Fnmadd(s26, s0, s1, s2);
10560  __ Fnmsub(s27, s0, s1, s2);
10561
10562  // Restore FPCR.
10563  __ Msr(FPCR, x0);
10564
10565  END();
10566  RUN();
10567
10568  if (test_1op) {
10569    uint32_t n_raw = float_to_rawbits(n);
10570    ASSERT_EQUAL_FP32(n, s10);
10571    ASSERT_EQUAL_FP32(rawbits_to_float(n_raw & ~kSSignMask), s11);
10572    ASSERT_EQUAL_FP32(rawbits_to_float(n_raw ^ kSSignMask), s12);
10573    ASSERT_EQUAL_FP32(kFP32DefaultNaN, s13);
10574    ASSERT_EQUAL_FP32(kFP32DefaultNaN, s14);
10575    ASSERT_EQUAL_FP32(kFP32DefaultNaN, s15);
10576    ASSERT_EQUAL_FP32(kFP32DefaultNaN, s16);
10577    ASSERT_EQUAL_FP64(kFP64DefaultNaN, d17);
10578  }
10579
10580  if (test_2op) {
10581    ASSERT_EQUAL_FP32(kFP32DefaultNaN, s18);
10582    ASSERT_EQUAL_FP32(kFP32DefaultNaN, s19);
10583    ASSERT_EQUAL_FP32(kFP32DefaultNaN, s20);
10584    ASSERT_EQUAL_FP32(kFP32DefaultNaN, s21);
10585    ASSERT_EQUAL_FP32(kFP32DefaultNaN, s22);
10586    ASSERT_EQUAL_FP32(kFP32DefaultNaN, s23);
10587  }
10588
10589  ASSERT_EQUAL_FP32(kFP32DefaultNaN, s24);
10590  ASSERT_EQUAL_FP32(kFP32DefaultNaN, s25);
10591  ASSERT_EQUAL_FP32(kFP32DefaultNaN, s26);
10592  ASSERT_EQUAL_FP32(kFP32DefaultNaN, s27);
10593
10594  TEARDOWN();
10595}
10596
10597
10598TEST(default_nan_float) {
10599  INIT_V8();
10600  float sn = rawbits_to_float(0x7f951111);
10601  float sm = rawbits_to_float(0x7f952222);
10602  float sa = rawbits_to_float(0x7f95aaaa);
10603  float qn = rawbits_to_float(0x7fea1111);
10604  float qm = rawbits_to_float(0x7fea2222);
10605  float qa = rawbits_to_float(0x7feaaaaa);
10606  ASSERT(IsSignallingNaN(sn));
10607  ASSERT(IsSignallingNaN(sm));
10608  ASSERT(IsSignallingNaN(sa));
10609  ASSERT(IsQuietNaN(qn));
10610  ASSERT(IsQuietNaN(qm));
10611  ASSERT(IsQuietNaN(qa));
10612
10613  //   - Signalling NaNs
10614  DefaultNaNHelper(sn, 0.0f, 0.0f);
10615  DefaultNaNHelper(0.0f, sm, 0.0f);
10616  DefaultNaNHelper(0.0f, 0.0f, sa);
10617  DefaultNaNHelper(sn, sm, 0.0f);
10618  DefaultNaNHelper(0.0f, sm, sa);
10619  DefaultNaNHelper(sn, 0.0f, sa);
10620  DefaultNaNHelper(sn, sm, sa);
10621  //   - Quiet NaNs
10622  DefaultNaNHelper(qn, 0.0f, 0.0f);
10623  DefaultNaNHelper(0.0f, qm, 0.0f);
10624  DefaultNaNHelper(0.0f, 0.0f, qa);
10625  DefaultNaNHelper(qn, qm, 0.0f);
10626  DefaultNaNHelper(0.0f, qm, qa);
10627  DefaultNaNHelper(qn, 0.0f, qa);
10628  DefaultNaNHelper(qn, qm, qa);
10629  //   - Mixed NaNs
10630  DefaultNaNHelper(qn, sm, sa);
10631  DefaultNaNHelper(sn, qm, sa);
10632  DefaultNaNHelper(sn, sm, qa);
10633  DefaultNaNHelper(qn, qm, sa);
10634  DefaultNaNHelper(sn, qm, qa);
10635  DefaultNaNHelper(qn, sm, qa);
10636  DefaultNaNHelper(qn, qm, qa);
10637}
10638
10639
10640static void DefaultNaNHelper(double n, double m, double a) {
  ASSERT(std::isnan(n) || std::isnan(m) || std::isnan(a));
10642
10643  bool test_1op = std::isnan(n);
10644  bool test_2op = std::isnan(n) || std::isnan(m);
10645
10646  SETUP();
10647  START();
10648
10649  // Enable Default-NaN mode in the FPCR.
10650  __ Mrs(x0, FPCR);
10651  __ Orr(x1, x0, DN_mask);
10652  __ Msr(FPCR, x1);
10653
10654  // Execute a number of instructions which all use ProcessNaNs, and check that
10655  // they all produce the default NaN.
10656  __ Fmov(d0, n);
10657  __ Fmov(d1, m);
10658  __ Fmov(d2, a);
10659
10660  if (test_1op) {
10661    // Operations that always propagate NaNs unchanged, even signalling NaNs.
10662    __ Fmov(d10, d0);
10663    __ Fabs(d11, d0);
10664    __ Fneg(d12, d0);
10665
10666    // Operations that use ProcessNaN.
10667    __ Fsqrt(d13, d0);
10668    __ Frinta(d14, d0);
10669    __ Frintn(d15, d0);
10670    __ Frintz(d16, d0);
10671
10672    // Fcvt usually has special NaN handling, but it respects default-NaN mode.
10673    __ Fcvt(s17, d0);
10674  }
10675
10676  if (test_2op) {
10677    __ Fadd(d18, d0, d1);
10678    __ Fsub(d19, d0, d1);
10679    __ Fmul(d20, d0, d1);
10680    __ Fdiv(d21, d0, d1);
10681    __ Fmax(d22, d0, d1);
10682    __ Fmin(d23, d0, d1);
10683  }
10684
10685  __ Fmadd(d24, d0, d1, d2);
10686  __ Fmsub(d25, d0, d1, d2);
10687  __ Fnmadd(d26, d0, d1, d2);
10688  __ Fnmsub(d27, d0, d1, d2);
10689
10690  // Restore FPCR.
10691  __ Msr(FPCR, x0);
10692
10693  END();
10694  RUN();
10695
10696  if (test_1op) {
10697    uint64_t n_raw = double_to_rawbits(n);
10698    ASSERT_EQUAL_FP64(n, d10);
10699    ASSERT_EQUAL_FP64(rawbits_to_double(n_raw & ~kDSignMask), d11);
10700    ASSERT_EQUAL_FP64(rawbits_to_double(n_raw ^ kDSignMask), d12);
10701    ASSERT_EQUAL_FP64(kFP64DefaultNaN, d13);
10702    ASSERT_EQUAL_FP64(kFP64DefaultNaN, d14);
10703    ASSERT_EQUAL_FP64(kFP64DefaultNaN, d15);
10704    ASSERT_EQUAL_FP64(kFP64DefaultNaN, d16);
10705    ASSERT_EQUAL_FP32(kFP32DefaultNaN, s17);
10706  }
10707
10708  if (test_2op) {
10709    ASSERT_EQUAL_FP64(kFP64DefaultNaN, d18);
10710    ASSERT_EQUAL_FP64(kFP64DefaultNaN, d19);
10711    ASSERT_EQUAL_FP64(kFP64DefaultNaN, d20);
10712    ASSERT_EQUAL_FP64(kFP64DefaultNaN, d21);
10713    ASSERT_EQUAL_FP64(kFP64DefaultNaN, d22);
10714    ASSERT_EQUAL_FP64(kFP64DefaultNaN, d23);
10715  }
10716
10717  ASSERT_EQUAL_FP64(kFP64DefaultNaN, d24);
10718  ASSERT_EQUAL_FP64(kFP64DefaultNaN, d25);
10719  ASSERT_EQUAL_FP64(kFP64DefaultNaN, d26);
10720  ASSERT_EQUAL_FP64(kFP64DefaultNaN, d27);
10721
10722  TEARDOWN();
10723}
10724
10725
10726TEST(default_nan_double) {
10727  INIT_V8();
10728  double sn = rawbits_to_double(0x7ff5555511111111);
10729  double sm = rawbits_to_double(0x7ff5555522222222);
10730  double sa = rawbits_to_double(0x7ff55555aaaaaaaa);
10731  double qn = rawbits_to_double(0x7ffaaaaa11111111);
10732  double qm = rawbits_to_double(0x7ffaaaaa22222222);
10733  double qa = rawbits_to_double(0x7ffaaaaaaaaaaaaa);
10734  ASSERT(IsSignallingNaN(sn));
10735  ASSERT(IsSignallingNaN(sm));
10736  ASSERT(IsSignallingNaN(sa));
10737  ASSERT(IsQuietNaN(qn));
10738  ASSERT(IsQuietNaN(qm));
10739  ASSERT(IsQuietNaN(qa));
10740
10741  //   - Signalling NaNs
10742  DefaultNaNHelper(sn, 0.0, 0.0);
10743  DefaultNaNHelper(0.0, sm, 0.0);
10744  DefaultNaNHelper(0.0, 0.0, sa);
10745  DefaultNaNHelper(sn, sm, 0.0);
10746  DefaultNaNHelper(0.0, sm, sa);
10747  DefaultNaNHelper(sn, 0.0, sa);
10748  DefaultNaNHelper(sn, sm, sa);
10749  //   - Quiet NaNs
10750  DefaultNaNHelper(qn, 0.0, 0.0);
10751  DefaultNaNHelper(0.0, qm, 0.0);
10752  DefaultNaNHelper(0.0, 0.0, qa);
10753  DefaultNaNHelper(qn, qm, 0.0);
10754  DefaultNaNHelper(0.0, qm, qa);
10755  DefaultNaNHelper(qn, 0.0, qa);
10756  DefaultNaNHelper(qn, qm, qa);
10757  //   - Mixed NaNs
10758  DefaultNaNHelper(qn, sm, sa);
10759  DefaultNaNHelper(sn, qm, sa);
10760  DefaultNaNHelper(sn, sm, qa);
10761  DefaultNaNHelper(qn, qm, sa);
10762  DefaultNaNHelper(sn, qm, qa);
10763  DefaultNaNHelper(qn, sm, qa);
10764  DefaultNaNHelper(qn, qm, qa);
10765}
10766
10767
10768TEST(call_no_relocation) {
10769  Address call_start;
10770  Address return_address;
10771
10772  INIT_V8();
10773  SETUP();
10774
10775  START();
10776
10777  Label function;
10778  Label test;
10779
10780  __ B(&test);
10781
10782  __ Bind(&function);
10783  __ Mov(x0, 0x1);
10784  __ Ret();
10785
10786  __ Bind(&test);
10787  __ Mov(x0, 0x0);
10788  __ Push(lr, xzr);
10789  {
10790    Assembler::BlockConstPoolScope scope(&masm);
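    // Blocking the constant pool keeps the call sequence contiguous, so the
    // pc_offset() values taken on either side of the Call bracket exactly
    // the instructions that it generates.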
10791    call_start = buf + __ pc_offset();
10792    __ Call(buf + function.pos(), RelocInfo::NONE64);
10793    return_address = buf + __ pc_offset();
10794  }
10795  __ Pop(xzr, lr);
10796  END();
10797
10798  RUN();
10799
10800  ASSERT_EQUAL_64(1, x0);
10801
  // The return_address_from_call_start function is not otherwise exercised by
  // non-relocatable call sequences, so we check it here to make sure it works.
10804  // TODO(jbramley): Once Crankshaft is complete, decide if we need to support
10805  // non-relocatable calls at all.
10806  CHECK(return_address ==
10807        Assembler::return_address_from_call_start(call_start));
10808
10809  TEARDOWN();
10810}
10811
10812
10813static void AbsHelperX(int64_t value) {
10814  int64_t expected;
10815
10816  SETUP();
10817  START();
10818
10819  Label fail;
10820  Label done;
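  // Abs takes two optional labels: the first is taken if the result is not
  // representable (the input was the most negative value), the second if it
  // is representable. Passing NULL omits the corresponding branch.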
10821
10822  __ Mov(x0, 0);
10823  __ Mov(x1, value);
10824
10825  if (value != kXMinInt) {
10826    expected = labs(value);
10827
10828    Label next;
10829    // The result is representable.
10830    __ Abs(x10, x1);
10831    __ Abs(x11, x1, &fail);
10832    __ Abs(x12, x1, &fail, &next);
10833    __ Bind(&next);
10834    __ Abs(x13, x1, NULL, &done);
10835  } else {
    // labs is undefined for kXMinInt, but our implementation in the
    // MacroAssembler will return kXMinInt in such a case.
10838    expected = kXMinInt;
10839
10840    Label next;
10841    // The result is not representable.
10842    __ Abs(x10, x1);
10843    __ Abs(x11, x1, NULL, &fail);
10844    __ Abs(x12, x1, &next, &fail);
10845    __ Bind(&next);
10846    __ Abs(x13, x1, &done);
10847  }
10848
10849  __ Bind(&fail);
10850  __ Mov(x0, -1);
10851
10852  __ Bind(&done);
10853
10854  END();
10855  RUN();
10856
10857  ASSERT_EQUAL_64(0, x0);
10858  ASSERT_EQUAL_64(value, x1);
10859  ASSERT_EQUAL_64(expected, x10);
10860  ASSERT_EQUAL_64(expected, x11);
10861  ASSERT_EQUAL_64(expected, x12);
10862  ASSERT_EQUAL_64(expected, x13);
10863
10864  TEARDOWN();
10865}
10866
10867
10868static void AbsHelperW(int32_t value) {
10869  int32_t expected;
10870
10871  SETUP();
10872  START();
10873
10874  Label fail;
10875  Label done;
10876
10877  __ Mov(w0, 0);
10878  // TODO(jbramley): The cast is needed to avoid a sign-extension bug in VIXL.
10879  // Once it is fixed, we should remove the cast.
10880  __ Mov(w1, static_cast<uint32_t>(value));
10881
10882  if (value != kWMinInt) {
10883    expected = abs(value);
10884
10885    Label next;
10886    // The result is representable.
10887    __ Abs(w10, w1);
10888    __ Abs(w11, w1, &fail);
10889    __ Abs(w12, w1, &fail, &next);
10890    __ Bind(&next);
10891    __ Abs(w13, w1, NULL, &done);
10892  } else {
    // abs is undefined for kWMinInt, but our implementation in the
    // MacroAssembler will return kWMinInt in such a case.
10895    expected = kWMinInt;
10896
10897    Label next;
10898    // The result is not representable.
10899    __ Abs(w10, w1);
10900    __ Abs(w11, w1, NULL, &fail);
10901    __ Abs(w12, w1, &next, &fail);
10902    __ Bind(&next);
10903    __ Abs(w13, w1, &done);
10904  }
10905
10906  __ Bind(&fail);
10907  __ Mov(w0, -1);
10908
10909  __ Bind(&done);
10910
10911  END();
10912  RUN();
10913
10914  ASSERT_EQUAL_32(0, w0);
10915  ASSERT_EQUAL_32(value, w1);
10916  ASSERT_EQUAL_32(expected, w10);
10917  ASSERT_EQUAL_32(expected, w11);
10918  ASSERT_EQUAL_32(expected, w12);
10919  ASSERT_EQUAL_32(expected, w13);
10920
10921  TEARDOWN();
10922}
10923
10924
10925TEST(abs) {
10926  INIT_V8();
10927  AbsHelperX(0);
10928  AbsHelperX(42);
10929  AbsHelperX(-42);
10930  AbsHelperX(kXMinInt);
10931  AbsHelperX(kXMaxInt);
10932
10933  AbsHelperW(0);
10934  AbsHelperW(42);
10935  AbsHelperW(-42);
10936  AbsHelperW(kWMinInt);
10937  AbsHelperW(kWMaxInt);
10938}
10939
10940
10941TEST(pool_size) {
10942  INIT_V8();
10943  SETUP();
10944
10945  // This test does not execute any code. It only tests that the size of the
10946  // pools is read correctly from the RelocInfo.
10947
10948  Label exit;
10949  __ b(&exit);
10950
10951  const unsigned constant_pool_size = 312;
10952  const unsigned veneer_pool_size = 184;
10953
10954  __ RecordConstPool(constant_pool_size);
10955  for (unsigned i = 0; i < constant_pool_size / 4; ++i) {
10956    __ dc32(0);
10957  }
10958
10959  __ RecordVeneerPool(masm.pc_offset(), veneer_pool_size);
10960  for (unsigned i = 0; i < veneer_pool_size / kInstructionSize; ++i) {
10961    __ nop();
10962  }
10963
10964  __ bind(&exit);
10965
10966  HandleScope handle_scope(isolate);
10967  CodeDesc desc;
10968  masm.GetCode(&desc);
10969  Handle<Code> code = isolate->factory()->NewCode(desc, 0, masm.CodeObject());
10970
10971  unsigned pool_count = 0;
10972  int pool_mask = RelocInfo::ModeMask(RelocInfo::CONST_POOL) |
10973                  RelocInfo::ModeMask(RelocInfo::VENEER_POOL);
10974  for (RelocIterator it(*code, pool_mask); !it.done(); it.next()) {
10975    RelocInfo* info = it.rinfo();
10976    if (RelocInfo::IsConstPool(info->rmode())) {
10977      ASSERT(info->data() == constant_pool_size);
10978      ++pool_count;
10979    }
10980    if (RelocInfo::IsVeneerPool(info->rmode())) {
10981      ASSERT(info->data() == veneer_pool_size);
10982      ++pool_count;
10983    }
10984  }
10985
10986  ASSERT(pool_count == 2);
10987
10988  TEARDOWN();
10989}
10990