1// Copyright 2012 the V8 project authors. All rights reserved.
2// Redistribution and use in source and binary forms, with or without
3// modification, are permitted provided that the following conditions are
4// met:
5//
6//     * Redistributions of source code must retain the above copyright
7//       notice, this list of conditions and the following disclaimer.
8//     * Redistributions in binary form must reproduce the above
9//       copyright notice, this list of conditions and the following
10//       disclaimer in the documentation and/or other materials provided
11//       with the distribution.
12//     * Neither the name of Google Inc. nor the names of its
13//       contributors may be used to endorse or promote products derived
14//       from this software without specific prior written permission.
15//
16// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
28#include "src/v8.h"
29
30#include "src/disassembler.h"
31#include "src/factory.h"
32#include "src/macro-assembler.h"
33#include "src/mips64/macro-assembler-mips64.h"
34#include "src/mips64/simulator-mips64.h"
35
36#include "test/cctest/cctest.h"
37
38using namespace v8::internal;
39
40
// Define these function prototypes to match JSEntryFunction in execution.cc.
// Each test casts its generated Code entry point to one of these and calls it
// through CALL_GENERATED_CODE; the Object* return carries the raw result bits.
typedef Object* (*F1)(int x, int p1, int p2, int p3, int p4);
typedef Object* (*F2)(int x, int y, int p2, int p3, int p4);
typedef Object* (*F3)(void* p, int p1, int p2, int p3, int p4);
45
46
// Shorthand so the generated-code sequences below read like assembly listings.
#define __ assm.
48
49
TEST(MIPS0) {
  // Smoke test: generate a single 32-bit add, call it, check the result.
  CcTest::InitializeVM();
  Isolate* isolate = CcTest::i_isolate();
  HandleScope scope(isolate);

  MacroAssembler assm(isolate, NULL, 0);

  // Addition: v0 = a0 + a1 (first two integer args; v0 is the return reg).
  __ addu(v0, a0, a1);
  __ jr(ra);
  __ nop();  // Branch delay slot.

  CodeDesc desc;
  assm.GetCode(&desc);
  Handle<Code> code = isolate->factory()->NewCode(
      desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
  F2 f = FUNCTION_CAST<F2>(code->entry());
  // 0xab0 + 0xc == 0xabc.
  int64_t res =
      reinterpret_cast<int64_t>(CALL_GENERATED_CODE(f, 0xab0, 0xc, 0, 0, 0));
  ::printf("f() = %ld\n", res);
  CHECK_EQ(0xabcL, res);
}
72
73
TEST(MIPS1) {
  // Count-down loop: sums 50 + 49 + ... + 1 using a backward branch.
  CcTest::InitializeVM();
  Isolate* isolate = CcTest::i_isolate();
  HandleScope scope(isolate);

  MacroAssembler assm(isolate, NULL, 0);
  Label L, C;

  __ mov(a1, a0);  // a1 = loop counter (first argument).
  __ li(v0, 0);    // v0 = accumulator.
  __ b(&C);        // Enter at the loop condition.
  __ nop();        // Branch delay slot.

  __ bind(&L);
  __ addu(v0, v0, a1);   // Accumulate counter into result.
  __ addiu(a1, a1, -1);  // Decrement counter.

  __ bind(&C);
  __ xori(v1, a1, 0);  // v1 = a1 (xor with zero; just copies for the test).
  __ Branch(&L, ne, v1, Operand((int64_t)0));  // Loop while counter != 0.
  __ nop();

  __ jr(ra);
  __ nop();

  CodeDesc desc;
  assm.GetCode(&desc);
  Handle<Code> code = isolate->factory()->NewCode(
      desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
  F1 f = FUNCTION_CAST<F1>(code->entry());
  int64_t res =
     reinterpret_cast<int64_t>(CALL_GENERATED_CODE(f, 50, 0, 0, 0, 0));
  ::printf("f() = %ld\n", res);
  // 50 * 51 / 2 == 1275.
  CHECK_EQ(1275L, res);
}
109
110
TEST(MIPS2) {
  // Exercise a broad mix of integer ALU, shift, logic, compare and
  // bit-twiddling instructions. Any mismatch branches to 'error' which
  // returns 666; full success returns the magic value 0x31415926.
  CcTest::InitializeVM();
  Isolate* isolate = CcTest::i_isolate();
  HandleScope scope(isolate);

  MacroAssembler assm(isolate, NULL, 0);

  Label exit, error;

  // ----- Test all instructions.

  // Test lui, ori, and addiu, used in the li pseudo-instruction.
  // This way we can then safely load registers with chosen values.

  __ ori(a4, zero_reg, 0);
  __ lui(a4, 0x1234);
  __ ori(a4, a4, 0);
  __ ori(a4, a4, 0x0f0f);
  __ ori(a4, a4, 0xf0f0);
  __ addiu(a5, a4, 1);
  __ addiu(a6, a5, -0x10);

  // Load values in temporary registers.
  __ li(a4, 0x00000004);
  __ li(a5, 0x00001234);
  __ li(a6, 0x12345678);
  __ li(a7, 0x7fffffff);
  __ li(t0, 0xfffffffc);
  __ li(t1, 0xffffedcc);
  __ li(t2, 0xedcba988);
  __ li(t3, 0x80000000);

  // SPECIAL class.
  __ srl(v0, a6, 8);    // 0x00123456
  __ sll(v0, v0, 11);   // 0x91a2b000
  __ sra(v0, v0, 3);    // 0xf2345600
  __ srav(v0, v0, a4);  // 0xff234560
  __ sllv(v0, v0, a4);  // 0xf2345600
  __ srlv(v0, v0, a4);  // 0x0f234560
  __ Branch(&error, ne, v0, Operand(0x0f234560));
  __ nop();

  __ addu(v0, a4, a5);  // 0x00001238
  __ subu(v0, v0, a4);  // 0x00001234
  __ Branch(&error, ne, v0, Operand(0x00001234));
  __ nop();
  __ addu(v1, a7, a4);  // 32bit addu result is sign-extended into 64bit reg.
  __ Branch(&error, ne, v1, Operand(0xffffffff80000003));
  __ nop();
  __ subu(v1, t3, a4);  // 0x7ffffffc
  __ Branch(&error, ne, v1, Operand(0x7ffffffc));
  __ nop();

  __ and_(v0, a5, a6);  // 0x0000000000001230
  __ or_(v0, v0, a5);   // 0x0000000000001234
  __ xor_(v0, v0, a6);  // 0x000000001234444c
  __ nor(v0, v0, a6);   // 0xffffffffedcba983
  __ Branch(&error, ne, v0, Operand(0xffffffffedcba983));
  __ nop();

  // Shift both 32bit number to left, to preserve meaning of next comparison.
  __ dsll32(a7, a7, 0);
  __ dsll32(t3, t3, 0);

  __ slt(v0, t3, a7);  // Signed: t3 (negative) < a7 (positive) -> 1.
  __ Branch(&error, ne, v0, Operand(0x1));
  __ nop();
  __ sltu(v0, t3, a7);  // Unsigned: t3 > a7 -> 0.
  __ Branch(&error, ne, v0, Operand(zero_reg));
  __ nop();

  // Restore original values in registers.
  __ dsrl32(a7, a7, 0);
  __ dsrl32(t3, t3, 0);
  // End of SPECIAL class.

  __ addiu(v0, zero_reg, 0x7421);  // 0x00007421
  __ addiu(v0, v0, -0x1);          // 0x00007420
  __ addiu(v0, v0, -0x20);         // 0x00007400
  __ Branch(&error, ne, v0, Operand(0x00007400));
  __ nop();
  __ addiu(v1, a7, 0x1);  // 0x80000000 - result is sign-extended.
  __ Branch(&error, ne, v1, Operand(0xffffffff80000000));
  __ nop();

  __ slti(v0, a5, 0x00002000);  // 0x1
  __ slti(v0, v0, 0xffff8000);  // 0x0
  __ Branch(&error, ne, v0, Operand(zero_reg));
  __ nop();
  __ sltiu(v0, a5, 0x00002000);  // 0x1
  __ sltiu(v0, v0, 0x00008000);  // 0x1
  __ Branch(&error, ne, v0, Operand(0x1));
  __ nop();

  __ andi(v0, a5, 0xf0f0);  // 0x00001030
  __ ori(v0, v0, 0x8a00);   // 0x00009a30
  __ xori(v0, v0, 0x83cc);  // 0x000019fc
  __ Branch(&error, ne, v0, Operand(0x000019fc));
  __ nop();
  __ lui(v1, 0x8123);  // Result is sign-extended into 64bit register.
  __ Branch(&error, ne, v1, Operand(0xffffffff81230000));
  __ nop();

  // Bit twiddling instructions & conditional moves.
  // Uses a4-t3 as set above.
  __ Clz(v0, a4);       // 29
  __ Clz(v1, a5);       // 19
  __ addu(v0, v0, v1);  // 48
  __ Clz(v1, a6);       // 3
  __ addu(v0, v0, v1);  // 51
  __ Clz(v1, t3);       // 0
  __ addu(v0, v0, v1);  // 51
  __ Branch(&error, ne, v0, Operand(51));
  __ Movn(a0, a7, a4);  // Move a0<-a7 (a4 is NOT 0).
  __ Ins(a0, a5, 12, 8);  // 0x7ff34fff
  __ Branch(&error, ne, a0, Operand(0x7ff34fff));
  __ Movz(a0, t2, t3);    // a0 not updated (t3 is NOT 0).
  __ Ext(a1, a0, 8, 12);  // 0x34f
  __ Branch(&error, ne, a1, Operand(0x34f));
  __ Movz(a0, t2, v1);    // a0<-t2; v1 is 0 (Clz of t3, 8 instr back).
  __ Branch(&error, ne, a0, Operand(t2));

  // Everything was correctly executed. Load the expected result.
  __ li(v0, 0x31415926);
  __ b(&exit);
  __ nop();

  __ bind(&error);
  // Got an error. Return a wrong result.
  __ li(v0, 666);

  __ bind(&exit);
  __ jr(ra);
  __ nop();

  CodeDesc desc;
  assm.GetCode(&desc);
  Handle<Code> code = isolate->factory()->NewCode(
      desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
  F2 f = FUNCTION_CAST<F2>(code->entry());
  int64_t res =
      reinterpret_cast<int64_t>(CALL_GENERATED_CODE(f, 0xab0, 0xc, 0, 0, 0));
  ::printf("f() = %ld\n", res);

  CHECK_EQ(0x31415926L, res);
}
257
258
TEST(MIPS3) {
  // Test floating point instructions.
  CcTest::InitializeVM();
  Isolate* isolate = CcTest::i_isolate();
  HandleScope scope(isolate);

  // In/out parameter block passed to the generated code via a0.
  typedef struct {
    double a;
    double b;
    double c;
    double d;
    double e;
    double f;
    double g;
    double h;
    double i;
  } T;
  T t;

  // Create a function that accepts &t, and loads, manipulates, and stores
  // the doubles t.a ... t.f.
  MacroAssembler assm(isolate, NULL, 0);
  Label L, C;

  __ ldc1(f4, MemOperand(a0, OFFSET_OF(T, a)) );
  __ ldc1(f6, MemOperand(a0, OFFSET_OF(T, b)) );
  __ add_d(f8, f4, f6);
  __ sdc1(f8, MemOperand(a0, OFFSET_OF(T, c)) );  // c = a + b.

  __ mov_d(f10, f8);  // c
  __ neg_d(f12, f6);  // -b
  __ sub_d(f10, f10, f12);
  __ sdc1(f10, MemOperand(a0, OFFSET_OF(T, d)) );  // d = c - (-b).

  __ sdc1(f4, MemOperand(a0, OFFSET_OF(T, b)) );   // b = a.

  // Materialize the integer 120 as a double in f14.
  __ li(a4, 120);
  __ mtc1(a4, f14);
  __ cvt_d_w(f14, f14);   // f14 = 120.0.
  __ mul_d(f10, f10, f14);
  __ sdc1(f10, MemOperand(a0, OFFSET_OF(T, e)) );  // e = d * 120 = 1.8066e16.

  __ div_d(f12, f10, f4);
  __ sdc1(f12, MemOperand(a0, OFFSET_OF(T, f)) );  // f = e / a = 120.44.

  __ sqrt_d(f14, f12);
  __ sdc1(f14, MemOperand(a0, OFFSET_OF(T, g)) );
  // g = sqrt(f) = 10.97451593465515908537

  if (kArchVariant == kMips64r2) {
    // Fused multiply-add (r2 only): h = i * a' + i = 2.75 * 1.5 + 2.75.
    __ ldc1(f4, MemOperand(a0, OFFSET_OF(T, h)) );
    __ ldc1(f6, MemOperand(a0, OFFSET_OF(T, i)) );
    __ madd_d(f14, f6, f4, f6);
    __ sdc1(f14, MemOperand(a0, OFFSET_OF(T, h)) );
  }

  __ jr(ra);
  __ nop();

  CodeDesc desc;
  assm.GetCode(&desc);
  Handle<Code> code = isolate->factory()->NewCode(
      desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
  F3 f = FUNCTION_CAST<F3>(code->entry());
  t.a = 1.5e14;
  t.b = 2.75e11;
  t.c = 0.0;
  t.d = 0.0;
  t.e = 0.0;
  t.f = 0.0;
  t.h = 1.5;
  t.i = 2.75;
  Object* dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0);
  USE(dummy);
  CHECK_EQ(1.5e14, t.a);
  CHECK_EQ(1.5e14, t.b);
  CHECK_EQ(1.50275e14, t.c);
  CHECK_EQ(1.50550e14, t.d);
  CHECK_EQ(1.8066e16, t.e);
  CHECK_EQ(120.44, t.f);
  CHECK_EQ(10.97451593465515908537, t.g);
  if (kArchVariant == kMips64r2) {
    // madd_d: 1.5 * 2.75 + 2.75 = 6.875.
    CHECK_EQ(6.875, t.h);
  }
}
344
345
TEST(MIPS4) {
  // Test moves between floating point and integer registers.
  CcTest::InitializeVM();
  Isolate* isolate = CcTest::i_isolate();
  HandleScope scope(isolate);

  // In/out parameter block passed to the generated code via a0.
  typedef struct {
    double a;
    double b;
    double c;
    double d;
    int64_t high;
    int64_t low;
  } T;
  T t;

  Assembler assm(isolate, NULL, 0);
  Label L, C;

  __ ldc1(f4, MemOperand(a0, OFFSET_OF(T, a)));
  __ ldc1(f5, MemOperand(a0, OFFSET_OF(T, b)));

  // Swap f4 and f5, by using 3 integer registers, a4-a6,
  // both two 32-bit chunks, and one 64-bit chunk.
  // mXhc1 is mips32/64-r2 only, not r1,
  // but we will not support r1 in practice.
  __ mfc1(a4, f4);   // Low 32 bits of f4.
  __ mfhc1(a5, f4);  // High 32 bits of f4.
  __ dmfc1(a6, f5);  // All 64 bits of f5.

  __ mtc1(a4, f5);
  __ mthc1(a5, f5);
  __ dmtc1(a6, f4);

  // Store the swapped f4 and f5 back to memory.
  __ sdc1(f4, MemOperand(a0, OFFSET_OF(T, a)));
  __ sdc1(f5, MemOperand(a0, OFFSET_OF(T, c)));

  // Test sign extension of move operations from coprocessor.
  __ ldc1(f4, MemOperand(a0, OFFSET_OF(T, d)));
  __ mfhc1(a4, f4);
  __ mfc1(a5, f4);

  __ sd(a4, MemOperand(a0, OFFSET_OF(T, high)));
  __ sd(a5, MemOperand(a0, OFFSET_OF(T, low)));

  __ jr(ra);
  __ nop();

  CodeDesc desc;
  assm.GetCode(&desc);
  Handle<Code> code = isolate->factory()->NewCode(
      desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
  F3 f = FUNCTION_CAST<F3>(code->entry());
  t.a = 1.5e22;
  t.b = 2.75e11;
  t.c = 17.17;
  t.d = -2.75e11;
  Object* dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0);
  USE(dummy);

  CHECK_EQ(2.75e11, t.a);
  CHECK_EQ(2.75e11, t.b);
  CHECK_EQ(1.5e22, t.c);
  // Both halves of -2.75e11's bit pattern must be sign-extended to 64 bits.
  CHECK_EQ(0xffffffffc25001d1L, t.high);
  CHECK_EQ(0xffffffffbf800000L, t.low);
}
413
414
TEST(MIPS5) {
  // Test conversions between doubles and integers.
  CcTest::InitializeVM();
  Isolate* isolate = CcTest::i_isolate();
  HandleScope scope(isolate);

  // In/out parameter block passed to the generated code via a0.
  typedef struct {
    double a;
    double b;
    int i;
    int j;
  } T;
  T t;

  Assembler assm(isolate, NULL, 0);
  Label L, C;

  // Load all structure elements to registers.
  __ ldc1(f4, MemOperand(a0, OFFSET_OF(T, a)) );
  __ ldc1(f6, MemOperand(a0, OFFSET_OF(T, b)) );
  __ lw(a4, MemOperand(a0, OFFSET_OF(T, i)) );
  __ lw(a5, MemOperand(a0, OFFSET_OF(T, j)) );

  // Convert double in f4 to int in element i.
  __ cvt_w_d(f8, f4);
  __ mfc1(a6, f8);
  __ sw(a6, MemOperand(a0, OFFSET_OF(T, i)) );

  // Convert double in f6 to int in element j.
  __ cvt_w_d(f10, f6);
  __ mfc1(a7, f10);
  __ sw(a7, MemOperand(a0, OFFSET_OF(T, j)) );

  // Convert int in original i (a4) to double in a.
  __ mtc1(a4, f12);
  __ cvt_d_w(f0, f12);
  __ sdc1(f0, MemOperand(a0, OFFSET_OF(T, a)) );

  // Convert int in original j (a5) to double in b.
  __ mtc1(a5, f14);
  __ cvt_d_w(f2, f14);
  __ sdc1(f2, MemOperand(a0, OFFSET_OF(T, b)) );

  __ jr(ra);
  __ nop();

  CodeDesc desc;
  assm.GetCode(&desc);
  Handle<Code> code = isolate->factory()->NewCode(
      desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
  F3 f = FUNCTION_CAST<F3>(code->entry());
  t.a = 1.5e4;
  t.b = 2.75e8;
  t.i = 12345678;
  t.j = -100000;
  Object* dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0);
  USE(dummy);

  // Fields were swapped through the int<->double round trips above.
  CHECK_EQ(12345678.0, t.a);
  CHECK_EQ(-100000.0, t.b);
  CHECK_EQ(15000, t.i);
  CHECK_EQ(275000000, t.j);
}
478
479
TEST(MIPS6) {
  // Test simple memory loads and stores.
  CcTest::InitializeVM();
  Isolate* isolate = CcTest::i_isolate();
  HandleScope scope(isolate);

  // In/out parameter block passed to the generated code via a0.
  // r1..r6 receive the results of the individual load/store variants.
  typedef struct {
    uint32_t ui;
    int32_t si;
    int32_t r1;
    int32_t r2;
    int32_t r3;
    int32_t r4;
    int32_t r5;
    int32_t r6;
  } T;
  T t;

  Assembler assm(isolate, NULL, 0);
  Label L, C;

  // Basic word load/store.
  __ lw(a4, MemOperand(a0, OFFSET_OF(T, ui)) );
  __ sw(a4, MemOperand(a0, OFFSET_OF(T, r1)) );

  // lh with positive data.
  __ lh(a5, MemOperand(a0, OFFSET_OF(T, ui)) );
  __ sw(a5, MemOperand(a0, OFFSET_OF(T, r2)) );

  // lh with negative data.
  __ lh(a6, MemOperand(a0, OFFSET_OF(T, si)) );
  __ sw(a6, MemOperand(a0, OFFSET_OF(T, r3)) );

  // lhu with negative data.
  __ lhu(a7, MemOperand(a0, OFFSET_OF(T, si)) );
  __ sw(a7, MemOperand(a0, OFFSET_OF(T, r4)) );

  // lb with negative data.
  __ lb(t0, MemOperand(a0, OFFSET_OF(T, si)) );
  __ sw(t0, MemOperand(a0, OFFSET_OF(T, r5)) );

  // sh writes only 1/2 of word.
  __ lui(t1, 0x3333);
  __ ori(t1, t1, 0x3333);
  __ sw(t1, MemOperand(a0, OFFSET_OF(T, r6)) );
  __ lhu(t1, MemOperand(a0, OFFSET_OF(T, si)) );
  __ sh(t1, MemOperand(a0, OFFSET_OF(T, r6)) );

  __ jr(ra);
  __ nop();

  CodeDesc desc;
  assm.GetCode(&desc);
  Handle<Code> code = isolate->factory()->NewCode(
      desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
  F3 f = FUNCTION_CAST<F3>(code->entry());
  t.ui = 0x11223344;
  t.si = 0x99aabbcc;
  Object* dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0);
  USE(dummy);

  // NOTE(review): expected values assume a little-endian target, where the
  // low halfword/byte sits at the field's base address — matches the
  // supported MIPS64el configuration.
  CHECK_EQ(0x11223344, t.r1);
  CHECK_EQ(0x3344, t.r2);
  CHECK_EQ(0xffffbbcc, t.r3);
  CHECK_EQ(0x0000bbcc, t.r4);
  CHECK_EQ(0xffffffcc, t.r5);
  CHECK_EQ(0x3333bbcc, t.r6);
}
548
549
TEST(MIPS7) {
  // Test floating point compare and branch instructions.
  CcTest::InitializeVM();
  Isolate* isolate = CcTest::i_isolate();
  HandleScope scope(isolate);

  // In/out parameter block passed to the generated code via a0.
  typedef struct {
    double a;
    double b;
    double c;
    double d;
    double e;
    double f;
    int32_t result;
  } T;
  T t;

  // Create a function that accepts &t, and loads, manipulates, and stores
  // the doubles t.a ... t.f.
  MacroAssembler assm(isolate, NULL, 0);
  Label neither_is_nan, less_than, outa_here;

  __ ldc1(f4, MemOperand(a0, OFFSET_OF(T, a)) );
  __ ldc1(f6, MemOperand(a0, OFFSET_OF(T, b)) );
  if (kArchVariant != kMips64r6) {
    // Pre-r6: c.cond.fmt sets the FCSR condition bit, tested by bc1f/bc1t.
    __ c(UN, D, f4, f6);
    __ bc1f(&neither_is_nan);
  } else {
    // r6: cmp.cond.fmt writes a mask into an FP register, tested by bc1eqz.
    // NOTE(review): the 'L' format field here is what the assembler expects
    // for the double-precision CMP encoding — verify against the SecondaryField
    // enum in the mips64 assembler if this is ever touched.
    __ cmp(UN, L, f2, f4, f6);
    __ bc1eqz(&neither_is_nan, f2);
  }
  __ nop();
  __ sw(zero_reg, MemOperand(a0, OFFSET_OF(T, result)) );  // NaN seen: result = 0.
  __ Branch(&outa_here);

  __ bind(&neither_is_nan);

  if (kArchVariant == kMips64r6) {
    __ cmp(OLT, L, f2, f6, f4);
    __ bc1nez(&less_than, f2);
  } else {
    __ c(OLT, D, f6, f4, 2);
    __ bc1t(&less_than, 2);
  }

  __ nop();
  __ sw(zero_reg, MemOperand(a0, OFFSET_OF(T, result)) );  // b >= a: result = 0.
  __ Branch(&outa_here);

  __ bind(&less_than);
  __ Addu(a4, zero_reg, Operand(1));
  __ sw(a4, MemOperand(a0, OFFSET_OF(T, result)) );  // Set true.


  // This test-case should have additional tests.

  __ bind(&outa_here);

  __ jr(ra);
  __ nop();

  CodeDesc desc;
  assm.GetCode(&desc);
  Handle<Code> code = isolate->factory()->NewCode(
      desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
  F3 f = FUNCTION_CAST<F3>(code->entry());
  t.a = 1.5e14;
  t.b = 2.75e11;
  t.c = 2.0;
  t.d = -4.0;
  t.e = 0.0;
  t.f = 0.0;
  t.result = 0;
  Object* dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0);
  USE(dummy);
  CHECK_EQ(1.5e14, t.a);
  CHECK_EQ(2.75e11, t.b);
  CHECK_EQ(1, t.result);  // b < a, neither is NaN.
}
629
630
TEST(MIPS8) {
  // Test ROTR and ROTRV instructions.
  CcTest::InitializeVM();
  Isolate* isolate = CcTest::i_isolate();
  HandleScope scope(isolate);

  // Result slots for each rotate amount, immediate (rotr) and
  // register-specified (rotrv) variants.
  typedef struct {
    int32_t input;
    int32_t result_rotr_4;
    int32_t result_rotr_8;
    int32_t result_rotr_12;
    int32_t result_rotr_16;
    int32_t result_rotr_20;
    int32_t result_rotr_24;
    int32_t result_rotr_28;
    int32_t result_rotrv_4;
    int32_t result_rotrv_8;
    int32_t result_rotrv_12;
    int32_t result_rotrv_16;
    int32_t result_rotrv_20;
    int32_t result_rotrv_24;
    int32_t result_rotrv_28;
  } T;
  T t;

  MacroAssembler assm(isolate, NULL, 0);

  // Basic word load.
  __ lw(a4, MemOperand(a0, OFFSET_OF(T, input)) );

  // ROTR instruction (called through the Ror macro).
  __ Ror(a5, a4, 0x0004);
  __ Ror(a6, a4, 0x0008);
  __ Ror(a7, a4, 0x000c);
  __ Ror(t0, a4, 0x0010);
  __ Ror(t1, a4, 0x0014);
  __ Ror(t2, a4, 0x0018);
  __ Ror(t3, a4, 0x001c);

  // Basic word store.
  __ sw(a5, MemOperand(a0, OFFSET_OF(T, result_rotr_4)) );
  __ sw(a6, MemOperand(a0, OFFSET_OF(T, result_rotr_8)) );
  __ sw(a7, MemOperand(a0, OFFSET_OF(T, result_rotr_12)) );
  __ sw(t0, MemOperand(a0, OFFSET_OF(T, result_rotr_16)) );
  __ sw(t1, MemOperand(a0, OFFSET_OF(T, result_rotr_20)) );
  __ sw(t2, MemOperand(a0, OFFSET_OF(T, result_rotr_24)) );
  __ sw(t3, MemOperand(a0, OFFSET_OF(T, result_rotr_28)) );

  // ROTRV instruction (called through the Ror macro).
  __ li(t3, 0x0004);
  __ Ror(a5, a4, t3);
  __ li(t3, 0x0008);
  __ Ror(a6, a4, t3);
  __ li(t3, 0x000C);
  __ Ror(a7, a4, t3);
  __ li(t3, 0x0010);
  __ Ror(t0, a4, t3);
  __ li(t3, 0x0014);
  __ Ror(t1, a4, t3);
  __ li(t3, 0x0018);
  __ Ror(t2, a4, t3);
  __ li(t3, 0x001C);
  __ Ror(t3, a4, t3);  // t3 doubles as shift amount and destination here.

  // Basic word store.
  __ sw(a5, MemOperand(a0, OFFSET_OF(T, result_rotrv_4)) );
  __ sw(a6, MemOperand(a0, OFFSET_OF(T, result_rotrv_8)) );
  __ sw(a7, MemOperand(a0, OFFSET_OF(T, result_rotrv_12)) );
  __ sw(t0, MemOperand(a0, OFFSET_OF(T, result_rotrv_16)) );
  __ sw(t1, MemOperand(a0, OFFSET_OF(T, result_rotrv_20)) );
  __ sw(t2, MemOperand(a0, OFFSET_OF(T, result_rotrv_24)) );
  __ sw(t3, MemOperand(a0, OFFSET_OF(T, result_rotrv_28)) );

  __ jr(ra);
  __ nop();

  CodeDesc desc;
  assm.GetCode(&desc);
  Handle<Code> code = isolate->factory()->NewCode(
      desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
  F3 f = FUNCTION_CAST<F3>(code->entry());
  t.input = 0x12345678;
  Object* dummy = CALL_GENERATED_CODE(f, &t, 0x0, 0, 0, 0);
  USE(dummy);
  // Each rotr by 4*k moves k nibbles from the low end to the high end.
  CHECK_EQ(0x81234567, t.result_rotr_4);
  CHECK_EQ(0x78123456, t.result_rotr_8);
  CHECK_EQ(0x67812345, t.result_rotr_12);
  CHECK_EQ(0x56781234, t.result_rotr_16);
  CHECK_EQ(0x45678123, t.result_rotr_20);
  CHECK_EQ(0x34567812, t.result_rotr_24);
  CHECK_EQ(0x23456781, t.result_rotr_28);

  CHECK_EQ(0x81234567, t.result_rotrv_4);
  CHECK_EQ(0x78123456, t.result_rotrv_8);
  CHECK_EQ(0x67812345, t.result_rotrv_12);
  CHECK_EQ(0x56781234, t.result_rotrv_16);
  CHECK_EQ(0x45678123, t.result_rotrv_20);
  CHECK_EQ(0x34567812, t.result_rotrv_24);
  CHECK_EQ(0x23456781, t.result_rotrv_28);
}
731
732
TEST(MIPS9) {
  // Test BRANCH improvements.
  // Assemble-only test: the code is never executed; it just checks that the
  // Branch macro assembles with compare operands of increasing magnitude
  // (register, 16-bit-range immediate, larger immediate).
  CcTest::InitializeVM();
  Isolate* isolate = CcTest::i_isolate();
  HandleScope scope(isolate);

  MacroAssembler assm(isolate, NULL, 0);
  Label exit, exit2, exit3;

  __ Branch(&exit, ge, a0, Operand(zero_reg));
  __ Branch(&exit2, ge, a0, Operand(0x00001FFF));
  __ Branch(&exit3, ge, a0, Operand(0x0001FFFF));

  __ bind(&exit);
  __ bind(&exit2);
  __ bind(&exit3);
  __ jr(ra);
  __ nop();

  CodeDesc desc;
  assm.GetCode(&desc);
  // NewCode validates the generated code object; the result is discarded.
  isolate->factory()->NewCode(
      desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
}
757
758
TEST(MIPS10) {
  // Test conversions between doubles and long integers.
  // Test how the long ints map to FP regs pairs.
  CcTest::InitializeVM();
  Isolate* isolate = CcTest::i_isolate();
  HandleScope scope(isolate);

  // In/out parameter block passed to the generated code via a0.
  typedef struct {
    double a;
    double a_converted;
    double b;
    int32_t dbl_mant;
    int32_t dbl_exp;
    int32_t long_hi;
    int32_t long_lo;
    int64_t long_as_int64;
    int32_t b_long_hi;
    int32_t b_long_lo;
    int64_t b_long_as_int64;
  } T;
  T t;

  Assembler assm(isolate, NULL, 0);
  Label L, C;

  if (kArchVariant == kMips64r2) {
    // Rewritten for FR=1 FPU mode:
    //  -  32 FP regs of 64-bits each, no odd/even pairs.
    //  -  Note that cvt_l_d/cvt_d_l ARE legal in FR=1 mode.
    // Load all structure elements to registers.
    __ ldc1(f0, MemOperand(a0, OFFSET_OF(T, a)));

    // Save the raw bits of the double.
    __ mfc1(a4, f0);
    __ mfhc1(a5, f0);
    __ sw(a4, MemOperand(a0, OFFSET_OF(T, dbl_mant)));
    __ sw(a5, MemOperand(a0, OFFSET_OF(T, dbl_exp)));

    // Convert double in f0 to long, save hi/lo parts.
    __ cvt_l_d(f0, f0);
    __ mfc1(a4, f0);  // f0 LS 32 bits of long.
    __ mfhc1(a5, f0);  // f0 MS 32 bits of long.
    __ sw(a4, MemOperand(a0, OFFSET_OF(T, long_lo)));
    __ sw(a5, MemOperand(a0, OFFSET_OF(T, long_hi)));

    // Combine the high/low ints, convert back to double.
    __ dsll32(a6, a5, 0);  // Move a5 to high bits of a6.
    __ or_(a6, a6, a4);
    __ dmtc1(a6, f1);
    __ cvt_d_l(f1, f1);
    __ sdc1(f1, MemOperand(a0, OFFSET_OF(T, a_converted)));


    // Convert the b long integers to double b.
    __ lw(a4, MemOperand(a0, OFFSET_OF(T, b_long_lo)));
    __ lw(a5, MemOperand(a0, OFFSET_OF(T, b_long_hi)));
    __ mtc1(a4, f8);  // f8 LS 32-bits.
    __ mthc1(a5, f8);  // f8 MS 32-bits.
    __ cvt_d_l(f10, f8);
    __ sdc1(f10, MemOperand(a0, OFFSET_OF(T, b)));

    // Convert double b back to long-int.
    __ ldc1(f31, MemOperand(a0, OFFSET_OF(T, b)));
    __ cvt_l_d(f31, f31);
    __ dmfc1(a7, f31);
    __ sd(a7, MemOperand(a0, OFFSET_OF(T, b_long_as_int64)));


    __ jr(ra);
    __ nop();

    CodeDesc desc;
    assm.GetCode(&desc);
    Handle<Code> code = isolate->factory()->NewCode(
        desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
    F3 f = FUNCTION_CAST<F3>(code->entry());
    t.a = 2.147483647e9;       // 0x7fffffff -> 0x41DFFFFFFFC00000 as double.
    t.b_long_hi = 0x000000ff;  // 0xFF00FF00FF -> 0x426FE01FE01FE000 as double.
    t.b_long_lo = 0x00ff00ff;
    Object* dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0);
    USE(dummy);

    CHECK_EQ(0x41DFFFFF, t.dbl_exp);
    CHECK_EQ(0xFFC00000, t.dbl_mant);
    CHECK_EQ(0, t.long_hi);
    CHECK_EQ(0x7fffffff, t.long_lo);
    CHECK_EQ(2.147483647e9, t.a_converted);

    // 0xFF00FF00FF -> 1.095233372415e12.
    CHECK_EQ(1.095233372415e12, t.b);
    CHECK_EQ(0xFF00FF00FF, t.b_long_as_int64);
  }
}
852
853
TEST(MIPS11) {
  // Do not run test on MIPS64r6, as these instructions are removed.
  if (kArchVariant != kMips64r6) {
    // Test LWL, LWR, SWL and SWR instructions.
    // Each unaligned-access instruction is tried at all four byte offsets
    // within a word; results are captured in the per-offset fields below.
    CcTest::InitializeVM();
    Isolate* isolate = CcTest::i_isolate();
    HandleScope scope(isolate);

    typedef struct {
      int32_t reg_init;
      int32_t mem_init;
      int32_t lwl_0;
      int32_t lwl_1;
      int32_t lwl_2;
      int32_t lwl_3;
      int32_t lwr_0;
      int32_t lwr_1;
      int32_t lwr_2;
      int32_t lwr_3;
      int32_t swl_0;
      int32_t swl_1;
      int32_t swl_2;
      int32_t swl_3;
      int32_t swr_0;
      int32_t swr_1;
      int32_t swr_2;
      int32_t swr_3;
    } T;
    T t;

    Assembler assm(isolate, NULL, 0);

    // Test all combinations of LWL and vAddr.
    __ lw(a4, MemOperand(a0, OFFSET_OF(T, reg_init)));
    __ lwl(a4, MemOperand(a0, OFFSET_OF(T, mem_init)));
    __ sw(a4, MemOperand(a0, OFFSET_OF(T, lwl_0)));

    __ lw(a5, MemOperand(a0, OFFSET_OF(T, reg_init)));
    __ lwl(a5, MemOperand(a0, OFFSET_OF(T, mem_init) + 1));
    __ sw(a5, MemOperand(a0, OFFSET_OF(T, lwl_1)));

    __ lw(a6, MemOperand(a0, OFFSET_OF(T, reg_init)));
    __ lwl(a6, MemOperand(a0, OFFSET_OF(T, mem_init) + 2));
    __ sw(a6, MemOperand(a0, OFFSET_OF(T, lwl_2)));

    __ lw(a7, MemOperand(a0, OFFSET_OF(T, reg_init)));
    __ lwl(a7, MemOperand(a0, OFFSET_OF(T, mem_init) + 3));
    __ sw(a7, MemOperand(a0, OFFSET_OF(T, lwl_3)));

    // Test all combinations of LWR and vAddr.
    __ lw(a4, MemOperand(a0, OFFSET_OF(T, reg_init)));
    __ lwr(a4, MemOperand(a0, OFFSET_OF(T, mem_init)));
    __ sw(a4, MemOperand(a0, OFFSET_OF(T, lwr_0)));

    __ lw(a5, MemOperand(a0, OFFSET_OF(T, reg_init)));
    __ lwr(a5, MemOperand(a0, OFFSET_OF(T, mem_init) + 1));
    __ sw(a5, MemOperand(a0, OFFSET_OF(T, lwr_1)));

    __ lw(a6, MemOperand(a0, OFFSET_OF(T, reg_init)));
    __ lwr(a6, MemOperand(a0, OFFSET_OF(T, mem_init) + 2));
    __ sw(a6, MemOperand(a0, OFFSET_OF(T, lwr_2)) );

    __ lw(a7, MemOperand(a0, OFFSET_OF(T, reg_init)));
    __ lwr(a7, MemOperand(a0, OFFSET_OF(T, mem_init) + 3));
    __ sw(a7, MemOperand(a0, OFFSET_OF(T, lwr_3)) );

    // Test all combinations of SWL and vAddr.
    // Each slot is pre-filled with mem_init so the untouched bytes are known.
    __ lw(a4, MemOperand(a0, OFFSET_OF(T, mem_init)));
    __ sw(a4, MemOperand(a0, OFFSET_OF(T, swl_0)));
    __ lw(a4, MemOperand(a0, OFFSET_OF(T, reg_init)));
    __ swl(a4, MemOperand(a0, OFFSET_OF(T, swl_0)));

    __ lw(a5, MemOperand(a0, OFFSET_OF(T, mem_init)));
    __ sw(a5, MemOperand(a0, OFFSET_OF(T, swl_1)));
    __ lw(a5, MemOperand(a0, OFFSET_OF(T, reg_init)));
    __ swl(a5, MemOperand(a0, OFFSET_OF(T, swl_1) + 1));

    __ lw(a6, MemOperand(a0, OFFSET_OF(T, mem_init)));
    __ sw(a6, MemOperand(a0, OFFSET_OF(T, swl_2)));
    __ lw(a6, MemOperand(a0, OFFSET_OF(T, reg_init)));
    __ swl(a6, MemOperand(a0, OFFSET_OF(T, swl_2) + 2));

    __ lw(a7, MemOperand(a0, OFFSET_OF(T, mem_init)));
    __ sw(a7, MemOperand(a0, OFFSET_OF(T, swl_3)));
    __ lw(a7, MemOperand(a0, OFFSET_OF(T, reg_init)));
    __ swl(a7, MemOperand(a0, OFFSET_OF(T, swl_3) + 3));

    // Test all combinations of SWR and vAddr.
    __ lw(a4, MemOperand(a0, OFFSET_OF(T, mem_init)));
    __ sw(a4, MemOperand(a0, OFFSET_OF(T, swr_0)));
    __ lw(a4, MemOperand(a0, OFFSET_OF(T, reg_init)));
    __ swr(a4, MemOperand(a0, OFFSET_OF(T, swr_0)));

    __ lw(a5, MemOperand(a0, OFFSET_OF(T, mem_init)));
    __ sw(a5, MemOperand(a0, OFFSET_OF(T, swr_1)));
    __ lw(a5, MemOperand(a0, OFFSET_OF(T, reg_init)));
    __ swr(a5, MemOperand(a0, OFFSET_OF(T, swr_1) + 1));

    __ lw(a6, MemOperand(a0, OFFSET_OF(T, mem_init)));
    __ sw(a6, MemOperand(a0, OFFSET_OF(T, swr_2)));
    __ lw(a6, MemOperand(a0, OFFSET_OF(T, reg_init)));
    __ swr(a6, MemOperand(a0, OFFSET_OF(T, swr_2) + 2));

    __ lw(a7, MemOperand(a0, OFFSET_OF(T, mem_init)));
    __ sw(a7, MemOperand(a0, OFFSET_OF(T, swr_3)));
    __ lw(a7, MemOperand(a0, OFFSET_OF(T, reg_init)));
    __ swr(a7, MemOperand(a0, OFFSET_OF(T, swr_3) + 3));

    __ jr(ra);
    __ nop();

    CodeDesc desc;
    assm.GetCode(&desc);
    Handle<Code> code = isolate->factory()->NewCode(
        desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
    F3 f = FUNCTION_CAST<F3>(code->entry());
    t.reg_init = 0xaabbccdd;
    t.mem_init = 0x11223344;

    Object* dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0);
    USE(dummy);

    // NOTE(review): expected merge patterns assume a little-endian target.
    CHECK_EQ(0x44bbccdd, t.lwl_0);
    CHECK_EQ(0x3344ccdd, t.lwl_1);
    CHECK_EQ(0x223344dd, t.lwl_2);
    CHECK_EQ(0x11223344, t.lwl_3);

    CHECK_EQ(0x11223344, t.lwr_0);
    CHECK_EQ(0xaa112233, t.lwr_1);
    CHECK_EQ(0xaabb1122, t.lwr_2);
    CHECK_EQ(0xaabbcc11, t.lwr_3);

    CHECK_EQ(0x112233aa, t.swl_0);
    CHECK_EQ(0x1122aabb, t.swl_1);
    CHECK_EQ(0x11aabbcc, t.swl_2);
    CHECK_EQ(0xaabbccdd, t.swl_3);

    CHECK_EQ(0xaabbccdd, t.swr_0);
    CHECK_EQ(0xbbccdd44, t.swr_1);
    CHECK_EQ(0xccdd3344, t.swr_2);
    CHECK_EQ(0xdd223344, t.swr_3);
  }
}
997
998
TEST(MIPS12) {
  // Exercise push/pop (and peephole-removable push/pop pairs) interleaved
  // with memory loads/stores through fp. Only t.y1 is checked to be
  // untouched; the rest of the sequence must simply execute without
  // corrupting the stack.
  CcTest::InitializeVM();
  Isolate* isolate = CcTest::i_isolate();
  HandleScope scope(isolate);

  typedef struct {
      int32_t  x;
      int32_t  y;
      int32_t  y1;
      int32_t  y2;
      int32_t  y3;
      int32_t  y4;
  } T;
  T t;

  MacroAssembler assm(isolate, NULL, 0);

  __ mov(t2, fp);  // Save frame pointer.
  __ mov(fp, a0);  // Access struct T by fp.
  __ lw(a4, MemOperand(a0, OFFSET_OF(T, y)));
  __ lw(a7, MemOperand(a0, OFFSET_OF(T, y4)));

  __ addu(a5, a4, a7);
  __ subu(t0, a4, a7);
  __ nop();
  __ push(a4);  // These instructions disappear after opt.
  __ Pop();
  __ addu(a4, a4, a4);
  __ nop();
  __ Pop();     // These instructions disappear after opt.
  __ push(a7);
  __ nop();
  __ push(a7);  // These instructions disappear after opt.
  __ pop(a7);
  __ nop();
  __ push(a7);
  __ pop(t0);
  __ nop();
  __ sw(a4, MemOperand(fp, OFFSET_OF(T, y)));
  __ lw(a4, MemOperand(fp, OFFSET_OF(T, y)));
  __ nop();
  __ sw(a4, MemOperand(fp, OFFSET_OF(T, y)));
  __ lw(a5, MemOperand(fp, OFFSET_OF(T, y)));
  __ nop();
  __ push(a5);
  __ lw(a5, MemOperand(fp, OFFSET_OF(T, y)));
  __ pop(a5);
  __ nop();
  __ push(a5);
  __ lw(a6, MemOperand(fp, OFFSET_OF(T, y)));
  __ pop(a5);
  __ nop();
  __ push(a5);
  __ lw(a6, MemOperand(fp, OFFSET_OF(T, y)));
  __ pop(a6);
  __ nop();
  __ push(a6);
  __ lw(a6, MemOperand(fp, OFFSET_OF(T, y)));
  __ pop(a5);
  __ nop();
  __ push(a5);
  __ lw(a6, MemOperand(fp, OFFSET_OF(T, y)));
  __ pop(a7);
  __ nop();

  __ mov(fp, t2);  // Restore frame pointer before returning.
  __ jr(ra);
  __ nop();

  CodeDesc desc;
  assm.GetCode(&desc);
  Handle<Code> code = isolate->factory()->NewCode(
      desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
  F3 f = FUNCTION_CAST<F3>(code->entry());
  t.x = 1;
  t.y = 2;
  t.y1 = 3;
  t.y2 = 4;
  t.y3 = 0XBABA;
  t.y4 = 0xDEDA;

  Object* dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0);
  USE(dummy);

  CHECK_EQ(3, t.y1);
}
1085
1086
TEST(MIPS13) {
  // Test Cvt_d_uw and Trunc_uw_d macros.
  CcTest::InitializeVM();
  Isolate* isolate = CcTest::i_isolate();
  HandleScope scope(isolate);

  typedef struct {
    double cvt_big_out;        // Cvt_d_uw result for the "big" slot.
    double cvt_small_out;      // Cvt_d_uw result for the "small" slot.
    uint32_t trunc_big_out;    // Trunc_uw_d of cvt_big_out.
    uint32_t trunc_small_out;  // Trunc_uw_d of cvt_small_out.
    uint32_t cvt_big_in;
    uint32_t cvt_small_in;
  } T;
  T t;

  MacroAssembler assm(isolate, NULL, 0);

  // NOTE(review): this stores a4 (the 5th argument of the generated call,
  // which is 0 in this test) over cvt_small_in before converting a4 itself,
  // so the checks below compare the conversion of a4 against the stored a4
  // value rather than the 333 assigned on the C++ side — confirm intended
  // (the checks remain self-consistent either way).
  __ sw(a4, MemOperand(a0, OFFSET_OF(T, cvt_small_in)));
  __ Cvt_d_uw(f10, a4, f22);
  __ sdc1(f10, MemOperand(a0, OFFSET_OF(T, cvt_small_out)));

  // Truncate the converted double back to an unsigned word.
  __ Trunc_uw_d(f10, f10, f22);
  __ swc1(f10, MemOperand(a0, OFFSET_OF(T, trunc_small_out)));

  // Same store/convert/truncate round-trip for the "big" slot.
  __ sw(a4, MemOperand(a0, OFFSET_OF(T, cvt_big_in)));
  __ Cvt_d_uw(f8, a4, f22);
  __ sdc1(f8, MemOperand(a0, OFFSET_OF(T, cvt_big_out)));

  __ Trunc_uw_d(f8, f8, f22);
  __ swc1(f8, MemOperand(a0, OFFSET_OF(T, trunc_big_out)));

  __ jr(ra);
  __ nop();

  CodeDesc desc;
  assm.GetCode(&desc);
  Handle<Code> code = isolate->factory()->NewCode(
      desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
  F3 f = FUNCTION_CAST<F3>(code->entry());

  t.cvt_big_in = 0xFFFFFFFF;
  t.cvt_small_in  = 333;

  Object* dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0);
  USE(dummy);

  // Verify the convert and truncate round-trips are mutually consistent.
  CHECK_EQ(t.cvt_big_out, static_cast<double>(t.cvt_big_in));
  CHECK_EQ(t.cvt_small_out, static_cast<double>(t.cvt_small_in));

  CHECK_EQ(static_cast<int>(t.trunc_big_out), static_cast<int>(t.cvt_big_in));
  CHECK_EQ(static_cast<int>(t.trunc_small_out),
           static_cast<int>(t.cvt_small_in));
}
1141
1142
1143TEST(MIPS14) {
1144  // Test round, floor, ceil, trunc, cvt.
1145  CcTest::InitializeVM();
1146  Isolate* isolate = CcTest::i_isolate();
1147  HandleScope scope(isolate);
1148
1149#define ROUND_STRUCT_ELEMENT(x) \
1150  int32_t x##_up_out; \
1151  int32_t x##_down_out; \
1152  int32_t neg_##x##_up_out; \
1153  int32_t neg_##x##_down_out; \
1154  uint32_t x##_err1_out; \
1155  uint32_t x##_err2_out; \
1156  uint32_t x##_err3_out; \
1157  uint32_t x##_err4_out; \
1158  int32_t x##_invalid_result;
1159
1160  typedef struct {
1161    double round_up_in;
1162    double round_down_in;
1163    double neg_round_up_in;
1164    double neg_round_down_in;
1165    double err1_in;
1166    double err2_in;
1167    double err3_in;
1168    double err4_in;
1169
1170    ROUND_STRUCT_ELEMENT(round)
1171    ROUND_STRUCT_ELEMENT(floor)
1172    ROUND_STRUCT_ELEMENT(ceil)
1173    ROUND_STRUCT_ELEMENT(trunc)
1174    ROUND_STRUCT_ELEMENT(cvt)
1175  } T;
1176  T t;
1177
1178#undef ROUND_STRUCT_ELEMENT
1179
1180  MacroAssembler assm(isolate, NULL, 0);
1181
1182  // Save FCSR.
1183  __ cfc1(a1, FCSR);
1184  // Disable FPU exceptions.
1185  __ ctc1(zero_reg, FCSR);
1186#define RUN_ROUND_TEST(x) \
1187  __ ldc1(f0, MemOperand(a0, OFFSET_OF(T, round_up_in))); \
1188  __ x##_w_d(f0, f0); \
1189  __ swc1(f0, MemOperand(a0, OFFSET_OF(T, x##_up_out))); \
1190  \
1191  __ ldc1(f0, MemOperand(a0, OFFSET_OF(T, round_down_in))); \
1192  __ x##_w_d(f0, f0); \
1193  __ swc1(f0, MemOperand(a0, OFFSET_OF(T, x##_down_out))); \
1194  \
1195  __ ldc1(f0, MemOperand(a0, OFFSET_OF(T, neg_round_up_in))); \
1196  __ x##_w_d(f0, f0); \
1197  __ swc1(f0, MemOperand(a0, OFFSET_OF(T, neg_##x##_up_out))); \
1198  \
1199  __ ldc1(f0, MemOperand(a0, OFFSET_OF(T, neg_round_down_in))); \
1200  __ x##_w_d(f0, f0); \
1201  __ swc1(f0, MemOperand(a0, OFFSET_OF(T, neg_##x##_down_out))); \
1202  \
1203  __ ldc1(f0, MemOperand(a0, OFFSET_OF(T, err1_in))); \
1204  __ ctc1(zero_reg, FCSR); \
1205  __ x##_w_d(f0, f0); \
1206  __ cfc1(a2, FCSR); \
1207  __ sw(a2, MemOperand(a0, OFFSET_OF(T, x##_err1_out))); \
1208  \
1209  __ ldc1(f0, MemOperand(a0, OFFSET_OF(T, err2_in))); \
1210  __ ctc1(zero_reg, FCSR); \
1211  __ x##_w_d(f0, f0); \
1212  __ cfc1(a2, FCSR); \
1213  __ sw(a2, MemOperand(a0, OFFSET_OF(T, x##_err2_out))); \
1214  \
1215  __ ldc1(f0, MemOperand(a0, OFFSET_OF(T, err3_in))); \
1216  __ ctc1(zero_reg, FCSR); \
1217  __ x##_w_d(f0, f0); \
1218  __ cfc1(a2, FCSR); \
1219  __ sw(a2, MemOperand(a0, OFFSET_OF(T, x##_err3_out))); \
1220  \
1221  __ ldc1(f0, MemOperand(a0, OFFSET_OF(T, err4_in))); \
1222  __ ctc1(zero_reg, FCSR); \
1223  __ x##_w_d(f0, f0); \
1224  __ cfc1(a2, FCSR); \
1225  __ sw(a2, MemOperand(a0, OFFSET_OF(T, x##_err4_out))); \
1226  __ swc1(f0, MemOperand(a0, OFFSET_OF(T, x##_invalid_result)));
1227
1228  RUN_ROUND_TEST(round)
1229  RUN_ROUND_TEST(floor)
1230  RUN_ROUND_TEST(ceil)
1231  RUN_ROUND_TEST(trunc)
1232  RUN_ROUND_TEST(cvt)
1233
1234  // Restore FCSR.
1235  __ ctc1(a1, FCSR);
1236
1237  __ jr(ra);
1238  __ nop();
1239
1240  CodeDesc desc;
1241  assm.GetCode(&desc);
1242  Handle<Code> code = isolate->factory()->NewCode(
1243      desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
1244  F3 f = FUNCTION_CAST<F3>(code->entry());
1245
1246  t.round_up_in = 123.51;
1247  t.round_down_in = 123.49;
1248  t.neg_round_up_in = -123.5;
1249  t.neg_round_down_in = -123.49;
1250  t.err1_in = 123.51;
1251  t.err2_in = 1;
1252  t.err3_in = static_cast<double>(1) + 0xFFFFFFFF;
1253  t.err4_in = NAN;
1254
1255  Object* dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0);
1256  USE(dummy);
1257
1258#define GET_FPU_ERR(x) (static_cast<int>(x & kFCSRFlagMask))
1259#define CHECK_ROUND_RESULT(type) \
1260  CHECK(GET_FPU_ERR(t.type##_err1_out) & kFCSRInexactFlagMask); \
1261  CHECK_EQ(0, GET_FPU_ERR(t.type##_err2_out)); \
1262  CHECK(GET_FPU_ERR(t.type##_err3_out) & kFCSRInvalidOpFlagMask); \
1263  CHECK(GET_FPU_ERR(t.type##_err4_out) & kFCSRInvalidOpFlagMask); \
1264  CHECK_EQ(static_cast<int32_t>(kFPUInvalidResult), t.type##_invalid_result);
1265
1266  CHECK_ROUND_RESULT(round);
1267  CHECK_ROUND_RESULT(floor);
1268  CHECK_ROUND_RESULT(ceil);
1269  CHECK_ROUND_RESULT(cvt);
1270}
1271
1272
TEST(MIPS15) {
  // Test chaining of label usages within instructions (issue 1644).
  // Two branches reference the same label while it is still unbound; binding
  // the label must patch both uses. Assembly-only regression test: the code
  // is never executed, the test passes if no assembler check fires.
  CcTest::InitializeVM();
  Isolate* isolate = CcTest::i_isolate();
  HandleScope scope(isolate);
  Assembler assm(isolate, NULL, 0);

  Label target;
  __ beq(v0, v1, &target);   // First use of the unbound label.
  __ nop();
  __ bne(v0, v1, &target);   // Second use chains onto the first.
  __ nop();
  __ bind(&target);          // Binding resolves the whole use chain.
  __ nop();
}
1288
1289
1290// ----- mips64 tests -----------------------------------------------
1291
TEST(MIPS16) {
  // Test 64-bit memory loads and stores, and that the 32-bit/16-bit/8-bit
  // load variants sign- or zero-extend into the full 64-bit register.
  CcTest::InitializeVM();
  Isolate* isolate = CcTest::i_isolate();
  HandleScope scope(isolate);

  typedef struct {
    int64_t r1;
    int64_t r2;
    int64_t r3;
    int64_t r4;
    int64_t r5;
    int64_t r6;
    uint32_t ui;  // Input with positive (unsigned-looking) bit pattern.
    int32_t si;   // Input with negative (sign-bit set) bit pattern.
  } T;
  T t;

  Assembler assm(isolate, NULL, 0);
  Label L, C;

  // Basic 32-bit word load/store, with unsigned data.
  __ lw(a4, MemOperand(a0, OFFSET_OF(T, ui)));
  __ sw(a4, MemOperand(a0, OFFSET_OF(T, r1)));

  // Check that the data got zero-extended into 64-bit a4.
  __ sd(a4, MemOperand(a0, OFFSET_OF(T, r2)));

  // Basic 32-bit word load/store, with SIGNED data.
  __ lw(a5, MemOperand(a0, OFFSET_OF(T, si)));
  __ sw(a5, MemOperand(a0, OFFSET_OF(T, r3)));

  // Check that the data got sign-extended into 64-bit a5.
  __ sd(a5, MemOperand(a0, OFFSET_OF(T, r4)));

  // 32-bit UNSIGNED word load/store, with SIGNED data.
  __ lwu(a6, MemOperand(a0, OFFSET_OF(T, si)));
  __ sw(a6, MemOperand(a0, OFFSET_OF(T, r5)));

  // Check that the data got zero-extended into 64-bit a6.
  __ sd(a6, MemOperand(a0, OFFSET_OF(T, r6)));

  // lh with positive data (overwrites low word of r2).
  __ lh(a5, MemOperand(a0, OFFSET_OF(T, ui)));
  __ sw(a5, MemOperand(a0, OFFSET_OF(T, r2)));

  // lh with negative data (overwrites low word of r3).
  __ lh(a6, MemOperand(a0, OFFSET_OF(T, si)));
  __ sw(a6, MemOperand(a0, OFFSET_OF(T, r3)));

  // lhu with negative data (overwrites low word of r4).
  __ lhu(a7, MemOperand(a0, OFFSET_OF(T, si)));
  __ sw(a7, MemOperand(a0, OFFSET_OF(T, r4)));

  // lb with negative data (overwrites low word of r5).
  __ lb(t0, MemOperand(a0, OFFSET_OF(T, si)));
  __ sw(t0, MemOperand(a0, OFFSET_OF(T, r5)));

  // sh writes only 1/2 of the word: fill r6's low word with 0x33333333,
  // then sh overwrites only its low halfword.
  __ lui(t1, 0x3333);
  __ ori(t1, t1, 0x3333);
  __ sw(t1, MemOperand(a0, OFFSET_OF(T, r6)));
  __ lhu(t1, MemOperand(a0, OFFSET_OF(T, si)));
  __ sh(t1, MemOperand(a0, OFFSET_OF(T, r6)));

  __ jr(ra);
  __ nop();

  CodeDesc desc;
  assm.GetCode(&desc);
  Handle<Code> code = isolate->factory()->NewCode(
      desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
  F3 f = FUNCTION_CAST<F3>(code->entry());
  t.ui = 0x44332211;
  t.si = 0x99aabbcc;
  // Sentinel patterns so partial stores are visible in the checks.
  t.r1 = 0x1111111111111111;
  t.r2 = 0x2222222222222222;
  t.r3 = 0x3333333333333333;
  t.r4 = 0x4444444444444444;
  t.r5 = 0x5555555555555555;
  t.r6 = 0x6666666666666666;
  Object* dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0);
  USE(dummy);

  // Unsigned data, 32 & 64.
  CHECK_EQ(0x1111111144332211L, t.r1);
  CHECK_EQ(0x0000000000002211L, t.r2);

  // Signed data, 32 & 64.
  CHECK_EQ(0x33333333ffffbbccL, t.r3);
  CHECK_EQ(0xffffffff0000bbccL, t.r4);

  // Signed data, 32 & 64.
  CHECK_EQ(0x55555555ffffffccL, t.r5);
  CHECK_EQ(0x000000003333bbccL, t.r6);
}
1388
1389#undef __
1390