/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <ctype.h>
#include <dirent.h>
#include <errno.h>
#include <fstream>
#include <map>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>
#include <unistd.h>

#include "gtest/gtest.h"
#include "utils/arm/assembler_thumb2.h"
#include "base/hex_dump.h"
#include "common_runtime_test.h"

namespace art {
namespace arm {

// Include the expected results file (generated manually).
#include "assembler_thumb_test_expected.cc.inc"

#ifndef __ANDROID__
// This controls whether the results are printed to the screen or compared
// against the expected output. To generate new expected output, set this to
// true and copy the output into the .cc.inc file in the same format as the
// existing results.
//
// When this is false, the results are not printed; they are compared against
// the expected results in the .cc.inc file.
static constexpr bool kPrintResults = false;
#endif

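// Point ANDROID_DATA at a writable location if the environment does not
// already set it; the runtime test infrastructure expects it to be defined.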
void SetAndroidData() {
  const char* data = getenv("ANDROID_DATA");
  if (data == nullptr) {
    setenv("ANDROID_DATA", "/tmp", 1);
  }
}

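// Compare two strings while ignoring whitespace, i.e. compare just their
// sequences of non-whitespace characters. Returns 0 if they match.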
int CompareIgnoringSpace(const char* s1, const char* s2) {
  while (*s1 != '\0') {
    while (isspace(*s1)) ++s1;
    while (isspace(*s2)) ++s2;
    if (*s1 == '\0' || *s1 != *s2) {
      break;
    }
    ++s1;
    ++s2;
  }
  return *s1 - *s2;
}

void InitResults() {
  if (test_results.empty()) {
    setup_results();
  }
}

std::string GetToolsDir() {
#ifndef __ANDROID__
  // This will only work on the host. There is no as, objcopy or objdump on the device.
  static std::string toolsdir;

  if (toolsdir.empty()) {
    setup_results();
    toolsdir = CommonRuntimeTest::GetAndroidTargetToolsDir(kThumb2);
    SetAndroidData();
  }

  return toolsdir;
#else
  return std::string();
#endif
}

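// Write |code| to an assembly file, run it through the target toolchain's
// as/objcopy/objdump, and either print the disassembly (kPrintResults) or
// compare it line by line against the expected |results|.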
void DumpAndCheck(std::vector<uint8_t>& code, const char* testname, const char* const* results) {
#ifndef __ANDROID__
  static std::string toolsdir = GetToolsDir();

  ScratchFile file;

  const char* filename = file.GetFilename().c_str();

  std::ofstream out(filename);
  if (out) {
    out << ".section \".text\"\n";
    out << ".syntax unified\n";
    out << ".arch armv7-a\n";
    out << ".thumb\n";
    out << ".thumb_func\n";
    out << ".type " << testname << ", #function\n";
    out << ".global " << testname << "\n";
    out << testname << ":\n";
    out << ".fnstart\n";

    for (uint32_t i = 0; i < code.size(); ++i) {
      out << ".byte " << (static_cast<int>(code[i]) & 0xff) << "\n";
    }
    out << ".fnend\n";
    out << ".size " << testname << ", .-" << testname << "\n";
  }
  out.close();

  char cmd[1024];

  // Assemble the .S file.
  snprintf(cmd, sizeof(cmd), "%sas %s -o %s.o", toolsdir.c_str(), filename, filename);
  int cmd_result = system(cmd);
  ASSERT_EQ(cmd_result, 0) << strerror(errno);

  // Remove the $d mapping symbols to prevent the disassembler from dumping
  // the instructions as .word directives.
  snprintf(cmd, sizeof(cmd), "%sobjcopy -N '$d' %s.o %s.oo", toolsdir.c_str(), filename, filename);
  int cmd_result2 = system(cmd);
  ASSERT_EQ(cmd_result2, 0) << strerror(errno);

  // Disassemble.
  snprintf(cmd, sizeof(cmd), "%sobjdump -d %s.oo | grep '^  *[0-9a-f][0-9a-f]*:'",
    toolsdir.c_str(), filename);
  if (kPrintResults) {
    // Print the results only, don't check. This is used to generate new output for inserting
    // into the .inc file.
    int cmd_result3 = system(cmd);
    ASSERT_EQ(cmd_result3, 0) << strerror(errno);
  } else {
    // Check that the results match the expected results in the .inc file.
    FILE* fp = popen(cmd, "r");
    ASSERT_TRUE(fp != nullptr);

    uint32_t lineindex = 0;

    char testline[256];
    while (fgets(testline, sizeof(testline), fp) != nullptr) {
      ASSERT_TRUE(results[lineindex] != nullptr) << "More output than expected results";
      if (CompareIgnoringSpace(results[lineindex], testline) != 0) {
        LOG(FATAL) << "Output is not as expected at line " << lineindex << ": "
            << results[lineindex] << " / " << testline;
      }
      ++lineindex;
    }
    // Check that we consumed all of the expected results.
    ASSERT_TRUE(results[lineindex] == nullptr);
    pclose(fp);  // A popen() stream must be closed with pclose(), not fclose().
  }

  char buf[FILENAME_MAX];
  snprintf(buf, sizeof(buf), "%s.o", filename);
  unlink(buf);

  snprintf(buf, sizeof(buf), "%s.oo", filename);
  unlink(buf);
#endif
}

#define __ assembler->

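// Finalize the code generated by |assembler|, copy it into a buffer, and
// run it through DumpAndCheck against the expected |results|.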
void EmitAndCheck(arm::Thumb2Assembler* assembler, const char* testname,
                  const char* const* results) {
  __ FinalizeCode();
  size_t cs = __ CodeSize();
  std::vector<uint8_t> managed_code(cs);
  MemoryRegion code(&managed_code[0], managed_code.size());
  __ FinalizeInstructions(code);

  DumpAndCheck(managed_code, testname, results);
}

void EmitAndCheck(arm::Thumb2Assembler* assembler, const char* testname) {
  InitResults();
  std::map<std::string, const char* const*>::iterator results = test_results.find(testname);
  ASSERT_NE(results, test_results.end());

  EmitAndCheck(assembler, testname, results->second);
}

#undef __

#define __ assembler.

TEST(Thumb2AssemblerTest, SimpleMov) {
  arm::Thumb2Assembler assembler;

  __ movs(R0, ShifterOperand(R1));
  __ mov(R0, ShifterOperand(R1));
  __ mov(R8, ShifterOperand(R9));

  __ mov(R0, ShifterOperand(1));
  __ mov(R8, ShifterOperand(9));

  EmitAndCheck(&assembler, "SimpleMov");
}

TEST(Thumb2AssemblerTest, SimpleMov32) {
  arm::Thumb2Assembler assembler;
  __ Force32Bit();

  __ mov(R0, ShifterOperand(R1));
  __ mov(R8, ShifterOperand(R9));

  EmitAndCheck(&assembler, "SimpleMov32");
}

TEST(Thumb2AssemblerTest, SimpleMovAdd) {
  arm::Thumb2Assembler assembler;

  __ mov(R0, ShifterOperand(R1));
  __ adds(R0, R1, ShifterOperand(R2));
  __ add(R0, R1, ShifterOperand(0));

  EmitAndCheck(&assembler, "SimpleMovAdd");
}

TEST(Thumb2AssemblerTest, DataProcessingRegister) {
  arm::Thumb2Assembler assembler;

  // 32-bit variants using low registers.
  __ mvn(R0, ShifterOperand(R1), AL, kCcKeep);
  __ add(R0, R1, ShifterOperand(R2), AL, kCcKeep);
  __ sub(R0, R1, ShifterOperand(R2), AL, kCcKeep);
  __ and_(R0, R1, ShifterOperand(R2), AL, kCcKeep);
  __ orr(R0, R1, ShifterOperand(R2), AL, kCcKeep);
  __ orn(R0, R1, ShifterOperand(R2), AL, kCcKeep);
  __ eor(R0, R1, ShifterOperand(R2), AL, kCcKeep);
  __ bic(R0, R1, ShifterOperand(R2), AL, kCcKeep);
  __ adc(R0, R1, ShifterOperand(R2), AL, kCcKeep);
  __ sbc(R0, R1, ShifterOperand(R2), AL, kCcKeep);
  __ rsb(R0, R1, ShifterOperand(R2), AL, kCcKeep);
  __ teq(R0, ShifterOperand(R1));

  // 16-bit variants using low registers.
  __ movs(R0, ShifterOperand(R1));
  __ mov(R0, ShifterOperand(R1), AL, kCcKeep);
  __ mvns(R0, ShifterOperand(R1));
  __ add(R0, R0, ShifterOperand(R1), AL, kCcKeep);
  __ adds(R0, R1, ShifterOperand(R2));
  __ subs(R0, R1, ShifterOperand(R2));
  __ adcs(R0, R0, ShifterOperand(R1));
  __ sbcs(R0, R0, ShifterOperand(R1));
  __ ands(R0, R0, ShifterOperand(R1));
  __ orrs(R0, R0, ShifterOperand(R1));
  __ eors(R0, R0, ShifterOperand(R1));
  __ bics(R0, R0, ShifterOperand(R1));
  __ tst(R0, ShifterOperand(R1));
  __ cmp(R0, ShifterOperand(R1));
  __ cmn(R0, ShifterOperand(R1));

  // 16-bit variants using high registers.
  __ mov(R1, ShifterOperand(R8), AL, kCcKeep);
  __ mov(R9, ShifterOperand(R0), AL, kCcKeep);
  __ mov(R8, ShifterOperand(R9), AL, kCcKeep);
  __ add(R1, R1, ShifterOperand(R8), AL, kCcKeep);
  __ add(R9, R9, ShifterOperand(R0), AL, kCcKeep);
  __ add(R8, R8, ShifterOperand(R9), AL, kCcKeep);
  __ cmp(R0, ShifterOperand(R9));
  __ cmp(R8, ShifterOperand(R1));
  __ cmp(R9, ShifterOperand(R8));

  // The 16-bit RSBS Rd, Rn, #0 (also known as NEGS Rd, Rn) is specified with
  // an immediate (0) but encoded without one, so we test it here.
  __ rsbs(R0, R1, ShifterOperand(0));
  __ rsbs(R0, R0, ShifterOperand(0));  // Check the Rd == Rn code path.

  // 32-bit variants using high registers that would be 16-bit if using low registers.
  __ movs(R0, ShifterOperand(R8));
  __ mvns(R0, ShifterOperand(R8));
  __ add(R0, R1, ShifterOperand(R8), AL, kCcKeep);
  __ adds(R0, R1, ShifterOperand(R8));
  __ subs(R0, R1, ShifterOperand(R8));
  __ adcs(R0, R0, ShifterOperand(R8));
  __ sbcs(R0, R0, ShifterOperand(R8));
  __ ands(R0, R0, ShifterOperand(R8));
  __ orrs(R0, R0, ShifterOperand(R8));
  __ eors(R0, R0, ShifterOperand(R8));
  __ bics(R0, R0, ShifterOperand(R8));
  __ tst(R0, ShifterOperand(R8));
  __ cmn(R0, ShifterOperand(R8));
  __ rsbs(R0, R8, ShifterOperand(0));  // Check that this is not emitted as 16-bit.
  __ rsbs(R8, R8, ShifterOperand(0));  // Check that this is not emitted as 16-bit (Rd == Rn).

  // 32-bit variants of instructions that would be 16-bit outside an IT block.
  __ it(arm::EQ);
  __ mvns(R0, ShifterOperand(R1), arm::EQ);
  __ it(arm::EQ);
  __ adds(R0, R1, ShifterOperand(R2), arm::EQ);
  __ it(arm::EQ);
  __ subs(R0, R1, ShifterOperand(R2), arm::EQ);
  __ it(arm::EQ);
  __ adcs(R0, R0, ShifterOperand(R1), arm::EQ);
  __ it(arm::EQ);
  __ sbcs(R0, R0, ShifterOperand(R1), arm::EQ);
  __ it(arm::EQ);
  __ ands(R0, R0, ShifterOperand(R1), arm::EQ);
  __ it(arm::EQ);
  __ orrs(R0, R0, ShifterOperand(R1), arm::EQ);
  __ it(arm::EQ);
  __ eors(R0, R0, ShifterOperand(R1), arm::EQ);
  __ it(arm::EQ);
  __ bics(R0, R0, ShifterOperand(R1), arm::EQ);

  // 16-bit variants of instructions that would be 32-bit outside an IT block.
  __ it(arm::EQ);
  __ mvn(R0, ShifterOperand(R1), arm::EQ, kCcKeep);
  __ it(arm::EQ);
  __ add(R0, R1, ShifterOperand(R2), arm::EQ, kCcKeep);
  __ it(arm::EQ);
  __ sub(R0, R1, ShifterOperand(R2), arm::EQ, kCcKeep);
  __ it(arm::EQ);
  __ adc(R0, R0, ShifterOperand(R1), arm::EQ, kCcKeep);
  __ it(arm::EQ);
  __ sbc(R0, R0, ShifterOperand(R1), arm::EQ, kCcKeep);
  __ it(arm::EQ);
  __ and_(R0, R0, ShifterOperand(R1), arm::EQ, kCcKeep);
  __ it(arm::EQ);
  __ orr(R0, R0, ShifterOperand(R1), arm::EQ, kCcKeep);
  __ it(arm::EQ);
  __ eor(R0, R0, ShifterOperand(R1), arm::EQ, kCcKeep);
  __ it(arm::EQ);
  __ bic(R0, R0, ShifterOperand(R1), arm::EQ, kCcKeep);

  // 16-bit variants selected for the default kCcDontCare.
  __ mov(R0, ShifterOperand(R1));
  __ mvn(R0, ShifterOperand(R1));
  __ add(R0, R0, ShifterOperand(R1));
  __ add(R0, R1, ShifterOperand(R2));
  __ sub(R0, R1, ShifterOperand(R2));
  __ adc(R0, R0, ShifterOperand(R1));
  __ sbc(R0, R0, ShifterOperand(R1));
  __ and_(R0, R0, ShifterOperand(R1));
  __ orr(R0, R0, ShifterOperand(R1));
  __ eor(R0, R0, ShifterOperand(R1));
  __ bic(R0, R0, ShifterOperand(R1));
  __ mov(R1, ShifterOperand(R8));
  __ mov(R9, ShifterOperand(R0));
  __ mov(R8, ShifterOperand(R9));
  __ add(R1, R1, ShifterOperand(R8));
  __ add(R9, R9, ShifterOperand(R0));
  __ add(R8, R8, ShifterOperand(R9));
  __ rsb(R0, R1, ShifterOperand(0));
  __ rsb(R0, R0, ShifterOperand(0));

  // And an arbitrary 32-bit instruction using IP.
  __ add(R12, R1, ShifterOperand(R0), AL, kCcKeep);

  EmitAndCheck(&assembler, "DataProcessingRegister");
}

TEST(Thumb2AssemblerTest, DataProcessingImmediate) {
  arm::Thumb2Assembler assembler;

  __ mov(R0, ShifterOperand(0x55));
  __ mvn(R0, ShifterOperand(0x55));
  __ add(R0, R1, ShifterOperand(0x55));
  __ sub(R0, R1, ShifterOperand(0x55));
  __ and_(R0, R1, ShifterOperand(0x55));
  __ orr(R0, R1, ShifterOperand(0x55));
  __ orn(R0, R1, ShifterOperand(0x55));
  __ eor(R0, R1, ShifterOperand(0x55));
  __ bic(R0, R1, ShifterOperand(0x55));
  __ adc(R0, R1, ShifterOperand(0x55));
  __ sbc(R0, R1, ShifterOperand(0x55));
  __ rsb(R0, R1, ShifterOperand(0x55));

  __ tst(R0, ShifterOperand(0x55));
  __ teq(R0, ShifterOperand(0x55));
  __ cmp(R0, ShifterOperand(0x55));
  __ cmn(R0, ShifterOperand(0x55));

  __ add(R0, R1, ShifterOperand(5));
  __ sub(R0, R1, ShifterOperand(5));

  __ movs(R0, ShifterOperand(0x55));
  __ mvns(R0, ShifterOperand(0x55));

  __ adds(R0, R1, ShifterOperand(5));
  __ subs(R0, R1, ShifterOperand(5));

  EmitAndCheck(&assembler, "DataProcessingImmediate");
}

TEST(Thumb2AssemblerTest, DataProcessingModifiedImmediate) {
  arm::Thumb2Assembler assembler;

  __ mov(R0, ShifterOperand(0x550055));
  __ mvn(R0, ShifterOperand(0x550055));
  __ add(R0, R1, ShifterOperand(0x550055));
  __ sub(R0, R1, ShifterOperand(0x550055));
  __ and_(R0, R1, ShifterOperand(0x550055));
  __ orr(R0, R1, ShifterOperand(0x550055));
  __ orn(R0, R1, ShifterOperand(0x550055));
  __ eor(R0, R1, ShifterOperand(0x550055));
  __ bic(R0, R1, ShifterOperand(0x550055));
  __ adc(R0, R1, ShifterOperand(0x550055));
  __ sbc(R0, R1, ShifterOperand(0x550055));
  __ rsb(R0, R1, ShifterOperand(0x550055));

  __ tst(R0, ShifterOperand(0x550055));
  __ teq(R0, ShifterOperand(0x550055));
  __ cmp(R0, ShifterOperand(0x550055));
  __ cmn(R0, ShifterOperand(0x550055));

  EmitAndCheck(&assembler, "DataProcessingModifiedImmediate");
}


TEST(Thumb2AssemblerTest, DataProcessingModifiedImmediates) {
  arm::Thumb2Assembler assembler;

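  // A Thumb2 "modified immediate" is one of the replicated byte patterns
  // 0x000000XY, 0x00XY00XY, 0xXY00XY00 or 0xXYXYXYXY, or an 8-bit value with
  // its top bit set rotated right by 8 to 31 bits. For example, 0xd5000000
  // is 0xd5 rotated right by 8, and 0x1a8 is 0xd4 rotated right by 31.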
  __ mov(R0, ShifterOperand(0x550055));
  __ mov(R0, ShifterOperand(0x55005500));
  __ mov(R0, ShifterOperand(0x55555555));
  __ mov(R0, ShifterOperand(0xd5000000));       // Rotated to the first position.
  __ mov(R0, ShifterOperand(0x6a000000));       // Rotated to the second position.
  __ mov(R0, ShifterOperand(0x350));            // Rotated to the second-to-last position.
  __ mov(R0, ShifterOperand(0x1a8));            // Rotated to the last position.

  EmitAndCheck(&assembler, "DataProcessingModifiedImmediates");
}

TEST(Thumb2AssemblerTest, DataProcessingShiftedRegister) {
  arm::Thumb2Assembler assembler;

  // 16-bit variants.
  __ movs(R3, ShifterOperand(R4, LSL, 4));
  __ movs(R3, ShifterOperand(R4, LSR, 5));
  __ movs(R3, ShifterOperand(R4, ASR, 6));

  // 32-bit ROR because ROR immediate doesn't have the same 16-bit version as other shifts.
  __ movs(R3, ShifterOperand(R4, ROR, 7));

  // 32-bit RRX because RRX has no 16-bit version.
  __ movs(R3, ShifterOperand(R4, RRX));

  // 32-bit variants (not setting condition codes).
  __ mov(R3, ShifterOperand(R4, LSL, 4), AL, kCcKeep);
  __ mov(R3, ShifterOperand(R4, LSR, 5), AL, kCcKeep);
  __ mov(R3, ShifterOperand(R4, ASR, 6), AL, kCcKeep);
  __ mov(R3, ShifterOperand(R4, ROR, 7), AL, kCcKeep);
  __ mov(R3, ShifterOperand(R4, RRX), AL, kCcKeep);

  // 32-bit variants (high registers).
  __ movs(R8, ShifterOperand(R4, LSL, 4));
  __ movs(R8, ShifterOperand(R4, LSR, 5));
  __ movs(R8, ShifterOperand(R4, ASR, 6));
  __ movs(R8, ShifterOperand(R4, ROR, 7));
  __ movs(R8, ShifterOperand(R4, RRX));

  EmitAndCheck(&assembler, "DataProcessingShiftedRegister");
}


TEST(Thumb2AssemblerTest, BasicLoad) {
  arm::Thumb2Assembler assembler;

  __ ldr(R3, Address(R4, 24));
  __ ldrb(R3, Address(R4, 24));
  __ ldrh(R3, Address(R4, 24));
  __ ldrsb(R3, Address(R4, 24));
  __ ldrsh(R3, Address(R4, 24));

  __ ldr(R3, Address(SP, 24));

  // 32-bit variants.
  __ ldr(R8, Address(R4, 24));
  __ ldrb(R8, Address(R4, 24));
  __ ldrh(R8, Address(R4, 24));
  __ ldrsb(R8, Address(R4, 24));
  __ ldrsh(R8, Address(R4, 24));

  EmitAndCheck(&assembler, "BasicLoad");
}


TEST(Thumb2AssemblerTest, BasicStore) {
  arm::Thumb2Assembler assembler;

  __ str(R3, Address(R4, 24));
  __ strb(R3, Address(R4, 24));
  __ strh(R3, Address(R4, 24));

  __ str(R3, Address(SP, 24));

  // 32-bit variants.
  __ str(R8, Address(R4, 24));
  __ strb(R8, Address(R4, 24));
  __ strh(R8, Address(R4, 24));

  EmitAndCheck(&assembler, "BasicStore");
}

TEST(Thumb2AssemblerTest, ComplexLoad) {
  arm::Thumb2Assembler assembler;

  __ ldr(R3, Address(R4, 24, Address::Mode::Offset));
  __ ldr(R3, Address(R4, 24, Address::Mode::PreIndex));
  __ ldr(R3, Address(R4, 24, Address::Mode::PostIndex));
  __ ldr(R3, Address(R4, 24, Address::Mode::NegOffset));
  __ ldr(R3, Address(R4, 24, Address::Mode::NegPreIndex));
  __ ldr(R3, Address(R4, 24, Address::Mode::NegPostIndex));

  __ ldrb(R3, Address(R4, 24, Address::Mode::Offset));
  __ ldrb(R3, Address(R4, 24, Address::Mode::PreIndex));
  __ ldrb(R3, Address(R4, 24, Address::Mode::PostIndex));
  __ ldrb(R3, Address(R4, 24, Address::Mode::NegOffset));
  __ ldrb(R3, Address(R4, 24, Address::Mode::NegPreIndex));
  __ ldrb(R3, Address(R4, 24, Address::Mode::NegPostIndex));

  __ ldrh(R3, Address(R4, 24, Address::Mode::Offset));
  __ ldrh(R3, Address(R4, 24, Address::Mode::PreIndex));
  __ ldrh(R3, Address(R4, 24, Address::Mode::PostIndex));
  __ ldrh(R3, Address(R4, 24, Address::Mode::NegOffset));
  __ ldrh(R3, Address(R4, 24, Address::Mode::NegPreIndex));
  __ ldrh(R3, Address(R4, 24, Address::Mode::NegPostIndex));

  __ ldrsb(R3, Address(R4, 24, Address::Mode::Offset));
  __ ldrsb(R3, Address(R4, 24, Address::Mode::PreIndex));
  __ ldrsb(R3, Address(R4, 24, Address::Mode::PostIndex));
  __ ldrsb(R3, Address(R4, 24, Address::Mode::NegOffset));
  __ ldrsb(R3, Address(R4, 24, Address::Mode::NegPreIndex));
  __ ldrsb(R3, Address(R4, 24, Address::Mode::NegPostIndex));

  __ ldrsh(R3, Address(R4, 24, Address::Mode::Offset));
  __ ldrsh(R3, Address(R4, 24, Address::Mode::PreIndex));
  __ ldrsh(R3, Address(R4, 24, Address::Mode::PostIndex));
  __ ldrsh(R3, Address(R4, 24, Address::Mode::NegOffset));
  __ ldrsh(R3, Address(R4, 24, Address::Mode::NegPreIndex));
  __ ldrsh(R3, Address(R4, 24, Address::Mode::NegPostIndex));

  EmitAndCheck(&assembler, "ComplexLoad");
}


TEST(Thumb2AssemblerTest, ComplexStore) {
  arm::Thumb2Assembler assembler;

  __ str(R3, Address(R4, 24, Address::Mode::Offset));
  __ str(R3, Address(R4, 24, Address::Mode::PreIndex));
  __ str(R3, Address(R4, 24, Address::Mode::PostIndex));
  __ str(R3, Address(R4, 24, Address::Mode::NegOffset));
  __ str(R3, Address(R4, 24, Address::Mode::NegPreIndex));
  __ str(R3, Address(R4, 24, Address::Mode::NegPostIndex));

  __ strb(R3, Address(R4, 24, Address::Mode::Offset));
  __ strb(R3, Address(R4, 24, Address::Mode::PreIndex));
  __ strb(R3, Address(R4, 24, Address::Mode::PostIndex));
  __ strb(R3, Address(R4, 24, Address::Mode::NegOffset));
  __ strb(R3, Address(R4, 24, Address::Mode::NegPreIndex));
  __ strb(R3, Address(R4, 24, Address::Mode::NegPostIndex));

  __ strh(R3, Address(R4, 24, Address::Mode::Offset));
  __ strh(R3, Address(R4, 24, Address::Mode::PreIndex));
  __ strh(R3, Address(R4, 24, Address::Mode::PostIndex));
  __ strh(R3, Address(R4, 24, Address::Mode::NegOffset));
  __ strh(R3, Address(R4, 24, Address::Mode::NegPreIndex));
  __ strh(R3, Address(R4, 24, Address::Mode::NegPostIndex));

  EmitAndCheck(&assembler, "ComplexStore");
}

TEST(Thumb2AssemblerTest, NegativeLoadStore) {
  arm::Thumb2Assembler assembler;

  __ ldr(R3, Address(R4, -24, Address::Mode::Offset));
  __ ldr(R3, Address(R4, -24, Address::Mode::PreIndex));
  __ ldr(R3, Address(R4, -24, Address::Mode::PostIndex));
  __ ldr(R3, Address(R4, -24, Address::Mode::NegOffset));
  __ ldr(R3, Address(R4, -24, Address::Mode::NegPreIndex));
  __ ldr(R3, Address(R4, -24, Address::Mode::NegPostIndex));

  __ ldrb(R3, Address(R4, -24, Address::Mode::Offset));
  __ ldrb(R3, Address(R4, -24, Address::Mode::PreIndex));
  __ ldrb(R3, Address(R4, -24, Address::Mode::PostIndex));
  __ ldrb(R3, Address(R4, -24, Address::Mode::NegOffset));
  __ ldrb(R3, Address(R4, -24, Address::Mode::NegPreIndex));
  __ ldrb(R3, Address(R4, -24, Address::Mode::NegPostIndex));

  __ ldrh(R3, Address(R4, -24, Address::Mode::Offset));
  __ ldrh(R3, Address(R4, -24, Address::Mode::PreIndex));
  __ ldrh(R3, Address(R4, -24, Address::Mode::PostIndex));
  __ ldrh(R3, Address(R4, -24, Address::Mode::NegOffset));
  __ ldrh(R3, Address(R4, -24, Address::Mode::NegPreIndex));
  __ ldrh(R3, Address(R4, -24, Address::Mode::NegPostIndex));

  __ ldrsb(R3, Address(R4, -24, Address::Mode::Offset));
  __ ldrsb(R3, Address(R4, -24, Address::Mode::PreIndex));
  __ ldrsb(R3, Address(R4, -24, Address::Mode::PostIndex));
  __ ldrsb(R3, Address(R4, -24, Address::Mode::NegOffset));
  __ ldrsb(R3, Address(R4, -24, Address::Mode::NegPreIndex));
  __ ldrsb(R3, Address(R4, -24, Address::Mode::NegPostIndex));

  __ ldrsh(R3, Address(R4, -24, Address::Mode::Offset));
  __ ldrsh(R3, Address(R4, -24, Address::Mode::PreIndex));
  __ ldrsh(R3, Address(R4, -24, Address::Mode::PostIndex));
  __ ldrsh(R3, Address(R4, -24, Address::Mode::NegOffset));
  __ ldrsh(R3, Address(R4, -24, Address::Mode::NegPreIndex));
  __ ldrsh(R3, Address(R4, -24, Address::Mode::NegPostIndex));

  __ str(R3, Address(R4, -24, Address::Mode::Offset));
  __ str(R3, Address(R4, -24, Address::Mode::PreIndex));
  __ str(R3, Address(R4, -24, Address::Mode::PostIndex));
  __ str(R3, Address(R4, -24, Address::Mode::NegOffset));
  __ str(R3, Address(R4, -24, Address::Mode::NegPreIndex));
  __ str(R3, Address(R4, -24, Address::Mode::NegPostIndex));

  __ strb(R3, Address(R4, -24, Address::Mode::Offset));
  __ strb(R3, Address(R4, -24, Address::Mode::PreIndex));
  __ strb(R3, Address(R4, -24, Address::Mode::PostIndex));
  __ strb(R3, Address(R4, -24, Address::Mode::NegOffset));
  __ strb(R3, Address(R4, -24, Address::Mode::NegPreIndex));
  __ strb(R3, Address(R4, -24, Address::Mode::NegPostIndex));

  __ strh(R3, Address(R4, -24, Address::Mode::Offset));
  __ strh(R3, Address(R4, -24, Address::Mode::PreIndex));
  __ strh(R3, Address(R4, -24, Address::Mode::PostIndex));
  __ strh(R3, Address(R4, -24, Address::Mode::NegOffset));
  __ strh(R3, Address(R4, -24, Address::Mode::NegPreIndex));
  __ strh(R3, Address(R4, -24, Address::Mode::NegPostIndex));

  EmitAndCheck(&assembler, "NegativeLoadStore");
}

TEST(Thumb2AssemblerTest, SimpleLoadStoreDual) {
  arm::Thumb2Assembler assembler;

  __ strd(R2, Address(R0, 24, Address::Mode::Offset));
  __ ldrd(R2, Address(R0, 24, Address::Mode::Offset));

  EmitAndCheck(&assembler, "SimpleLoadStoreDual");
}

TEST(Thumb2AssemblerTest, ComplexLoadStoreDual) {
  arm::Thumb2Assembler assembler;

  __ strd(R2, Address(R0, 24, Address::Mode::Offset));
  __ strd(R2, Address(R0, 24, Address::Mode::PreIndex));
  __ strd(R2, Address(R0, 24, Address::Mode::PostIndex));
  __ strd(R2, Address(R0, 24, Address::Mode::NegOffset));
  __ strd(R2, Address(R0, 24, Address::Mode::NegPreIndex));
  __ strd(R2, Address(R0, 24, Address::Mode::NegPostIndex));

  __ ldrd(R2, Address(R0, 24, Address::Mode::Offset));
  __ ldrd(R2, Address(R0, 24, Address::Mode::PreIndex));
  __ ldrd(R2, Address(R0, 24, Address::Mode::PostIndex));
  __ ldrd(R2, Address(R0, 24, Address::Mode::NegOffset));
  __ ldrd(R2, Address(R0, 24, Address::Mode::NegPreIndex));
  __ ldrd(R2, Address(R0, 24, Address::Mode::NegPostIndex));

  EmitAndCheck(&assembler, "ComplexLoadStoreDual");
}

TEST(Thumb2AssemblerTest, NegativeLoadStoreDual) {
  arm::Thumb2Assembler assembler;

  __ strd(R2, Address(R0, -24, Address::Mode::Offset));
  __ strd(R2, Address(R0, -24, Address::Mode::PreIndex));
  __ strd(R2, Address(R0, -24, Address::Mode::PostIndex));
  __ strd(R2, Address(R0, -24, Address::Mode::NegOffset));
  __ strd(R2, Address(R0, -24, Address::Mode::NegPreIndex));
  __ strd(R2, Address(R0, -24, Address::Mode::NegPostIndex));

  __ ldrd(R2, Address(R0, -24, Address::Mode::Offset));
  __ ldrd(R2, Address(R0, -24, Address::Mode::PreIndex));
  __ ldrd(R2, Address(R0, -24, Address::Mode::PostIndex));
  __ ldrd(R2, Address(R0, -24, Address::Mode::NegOffset));
  __ ldrd(R2, Address(R0, -24, Address::Mode::NegPreIndex));
  __ ldrd(R2, Address(R0, -24, Address::Mode::NegPostIndex));

  EmitAndCheck(&assembler, "NegativeLoadStoreDual");
}

TEST(Thumb2AssemblerTest, SimpleBranch) {
  arm::Thumb2Assembler assembler;

  Label l1;
  __ mov(R0, ShifterOperand(2));
  __ Bind(&l1);
  __ mov(R1, ShifterOperand(1));
  __ b(&l1);
  Label l2;
  __ b(&l2);
  __ mov(R1, ShifterOperand(2));
  __ Bind(&l2);
  __ mov(R0, ShifterOperand(3));

  Label l3;
  __ mov(R0, ShifterOperand(2));
  __ Bind(&l3);
  __ mov(R1, ShifterOperand(1));
  __ b(&l3, EQ);

  Label l4;
  __ b(&l4, EQ);
  __ mov(R1, ShifterOperand(2));
  __ Bind(&l4);
  __ mov(R0, ShifterOperand(3));

  // 2 linked labels.
  Label l5;
  __ b(&l5);
  __ mov(R1, ShifterOperand(4));
  __ b(&l5);
  __ mov(R1, ShifterOperand(5));
  __ Bind(&l5);
  __ mov(R0, ShifterOperand(6));

  EmitAndCheck(&assembler, "SimpleBranch");
}

TEST(Thumb2AssemblerTest, LongBranch) {
  arm::Thumb2Assembler assembler;
  __ Force32Bit();
  // 32-bit branches.
  Label l1;
  __ mov(R0, ShifterOperand(2));
  __ Bind(&l1);
  __ mov(R1, ShifterOperand(1));
  __ b(&l1);

  Label l2;
  __ b(&l2);
  __ mov(R1, ShifterOperand(2));
  __ Bind(&l2);
  __ mov(R0, ShifterOperand(3));

  Label l3;
  __ mov(R0, ShifterOperand(2));
  __ Bind(&l3);
  __ mov(R1, ShifterOperand(1));
  __ b(&l3, EQ);

  Label l4;
  __ b(&l4, EQ);
  __ mov(R1, ShifterOperand(2));
  __ Bind(&l4);
  __ mov(R0, ShifterOperand(3));

  // 2 linked labels.
  Label l5;
  __ b(&l5);
  __ mov(R1, ShifterOperand(4));
  __ b(&l5);
  __ mov(R1, ShifterOperand(5));
  __ Bind(&l5);
  __ mov(R0, ShifterOperand(6));

  EmitAndCheck(&assembler, "LongBranch");
}

TEST(Thumb2AssemblerTest, LoadMultiple) {
  arm::Thumb2Assembler assembler;

  // 16-bit.
  __ ldm(DB_W, R4, (1 << R0 | 1 << R3));

  // 32-bit.
  __ ldm(DB_W, R4, (1 << LR | 1 << R11));
  __ ldm(DB, R4, (1 << LR | 1 << R11));

  // A single register is converted to ldr.
  __ ldm(DB_W, R4, (1 << R5));

  EmitAndCheck(&assembler, "LoadMultiple");
}

TEST(Thumb2AssemblerTest, StoreMultiple) {
  arm::Thumb2Assembler assembler;

  // 16-bit.
  __ stm(IA_W, R4, (1 << R0 | 1 << R3));

  // 32-bit.
  __ stm(IA_W, R4, (1 << LR | 1 << R11));
  __ stm(IA, R4, (1 << LR | 1 << R11));

  // A single register is converted to str.
  __ stm(IA_W, R4, (1 << R5));
  __ stm(IA, R4, (1 << R5));

  EmitAndCheck(&assembler, "StoreMultiple");
}

TEST(Thumb2AssemblerTest, MovWMovT) {
  arm::Thumb2Assembler assembler;

  __ movw(R4, 0);         // 16-bit.
  __ movw(R4, 0x34);      // 16-bit.
  __ movw(R9, 0x34);      // 32-bit due to high register.
  __ movw(R3, 0x1234);    // 32-bit due to large value.
  __ movw(R9, 0xffff);    // 32-bit due to large value and high register.

  // Always 32-bit.
  __ movt(R0, 0);
  __ movt(R0, 0x1234);
  __ movt(R1, 0xffff);

  EmitAndCheck(&assembler, "MovWMovT");
}

TEST(Thumb2AssemblerTest, SpecialAddSub) {
  arm::Thumb2Assembler assembler;

  __ add(R2, SP, ShifterOperand(0x50));   // 16-bit.
  __ add(SP, SP, ShifterOperand(0x50));   // 16-bit.
  __ add(R8, SP, ShifterOperand(0x50));   // 32-bit.

  __ add(R2, SP, ShifterOperand(0xf00));  // 32-bit due to imm size.
  __ add(SP, SP, ShifterOperand(0xf00));  // 32-bit due to imm size.
  __ add(SP, SP, ShifterOperand(0xffc));  // 32-bit due to imm size; encoding T4.

  __ sub(SP, SP, ShifterOperand(0x50));   // 16-bit.
  __ sub(R0, SP, ShifterOperand(0x50));   // 32-bit.
  __ sub(R8, SP, ShifterOperand(0x50));   // 32-bit.

  __ sub(SP, SP, ShifterOperand(0xf00));  // 32-bit due to imm size.
  __ sub(SP, SP, ShifterOperand(0xffc));  // 32-bit due to imm size; encoding T4.

  EmitAndCheck(&assembler, "SpecialAddSub");
}

TEST(Thumb2AssemblerTest, LoadFromOffset) {
  arm::Thumb2Assembler assembler;

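  // Offsets that do not fit the immediate field of the load should force the
  // assembler to first materialize the offset (or part of it) in a register.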
  __ LoadFromOffset(kLoadWord, R2, R4, 12);
  __ LoadFromOffset(kLoadWord, R2, R4, 0xfff);
  __ LoadFromOffset(kLoadWord, R2, R4, 0x1000);
  __ LoadFromOffset(kLoadWord, R2, R4, 0x1000a4);
  __ LoadFromOffset(kLoadWord, R2, R4, 0x101000);
  __ LoadFromOffset(kLoadWord, R4, R4, 0x101000);
  __ LoadFromOffset(kLoadUnsignedHalfword, R2, R4, 12);
  __ LoadFromOffset(kLoadUnsignedHalfword, R2, R4, 0xfff);
  __ LoadFromOffset(kLoadUnsignedHalfword, R2, R4, 0x1000);
  __ LoadFromOffset(kLoadUnsignedHalfword, R2, R4, 0x1000a4);
  __ LoadFromOffset(kLoadUnsignedHalfword, R2, R4, 0x101000);
  __ LoadFromOffset(kLoadUnsignedHalfword, R4, R4, 0x101000);
  __ LoadFromOffset(kLoadWordPair, R2, R4, 12);
  __ LoadFromOffset(kLoadWordPair, R2, R4, 0x3fc);
  __ LoadFromOffset(kLoadWordPair, R2, R4, 0x400);
  __ LoadFromOffset(kLoadWordPair, R2, R4, 0x400a4);
  __ LoadFromOffset(kLoadWordPair, R2, R4, 0x40400);
  __ LoadFromOffset(kLoadWordPair, R4, R4, 0x40400);

  __ LoadFromOffset(kLoadWord, R0, R12, 12);  // 32-bit because of R12.
  __ LoadFromOffset(kLoadWord, R2, R4, 0xa4 - 0x100000);

  __ LoadFromOffset(kLoadSignedByte, R2, R4, 12);
  __ LoadFromOffset(kLoadUnsignedByte, R2, R4, 12);
  __ LoadFromOffset(kLoadSignedHalfword, R2, R4, 12);

  EmitAndCheck(&assembler, "LoadFromOffset");
}

TEST(Thumb2AssemblerTest, StoreToOffset) {
  arm::Thumb2Assembler assembler;

  __ StoreToOffset(kStoreWord, R2, R4, 12);
  __ StoreToOffset(kStoreWord, R2, R4, 0xfff);
  __ StoreToOffset(kStoreWord, R2, R4, 0x1000);
  __ StoreToOffset(kStoreWord, R2, R4, 0x1000a4);
  __ StoreToOffset(kStoreWord, R2, R4, 0x101000);
  __ StoreToOffset(kStoreWord, R4, R4, 0x101000);
  __ StoreToOffset(kStoreHalfword, R2, R4, 12);
  __ StoreToOffset(kStoreHalfword, R2, R4, 0xfff);
  __ StoreToOffset(kStoreHalfword, R2, R4, 0x1000);
  __ StoreToOffset(kStoreHalfword, R2, R4, 0x1000a4);
  __ StoreToOffset(kStoreHalfword, R2, R4, 0x101000);
  __ StoreToOffset(kStoreHalfword, R4, R4, 0x101000);
  __ StoreToOffset(kStoreWordPair, R2, R4, 12);
  __ StoreToOffset(kStoreWordPair, R2, R4, 0x3fc);
  __ StoreToOffset(kStoreWordPair, R2, R4, 0x400);
  __ StoreToOffset(kStoreWordPair, R2, R4, 0x400a4);
  __ StoreToOffset(kStoreWordPair, R2, R4, 0x40400);
  __ StoreToOffset(kStoreWordPair, R4, R4, 0x40400);

  __ StoreToOffset(kStoreWord, R0, R12, 12);  // 32-bit because of R12.
  __ StoreToOffset(kStoreWord, R2, R4, 0xa4 - 0x100000);

  __ StoreToOffset(kStoreByte, R2, R4, 12);

  EmitAndCheck(&assembler, "StoreToOffset");
}

TEST(Thumb2AssemblerTest, IfThen) {
  arm::Thumb2Assembler assembler;

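  // An IT instruction makes up to four following instructions conditional.
  // The trailing kItThen/kItElse arguments fill the T (same condition) and
  // E (inverted condition) slots, e.g. it(EQ, kItThen, kItElse) encodes ITTE EQ.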
  __ it(EQ);
  __ mov(R1, ShifterOperand(1), EQ);

  __ it(EQ, kItThen);
  __ mov(R1, ShifterOperand(1), EQ);
  __ mov(R2, ShifterOperand(2), EQ);

  __ it(EQ, kItElse);
  __ mov(R1, ShifterOperand(1), EQ);
  __ mov(R2, ShifterOperand(2), NE);

  __ it(EQ, kItThen, kItElse);
  __ mov(R1, ShifterOperand(1), EQ);
  __ mov(R2, ShifterOperand(2), EQ);
  __ mov(R3, ShifterOperand(3), NE);

  __ it(EQ, kItElse, kItElse);
  __ mov(R1, ShifterOperand(1), EQ);
  __ mov(R2, ShifterOperand(2), NE);
  __ mov(R3, ShifterOperand(3), NE);

  __ it(EQ, kItThen, kItThen, kItElse);
  __ mov(R1, ShifterOperand(1), EQ);
  __ mov(R2, ShifterOperand(2), EQ);
  __ mov(R3, ShifterOperand(3), EQ);
  __ mov(R4, ShifterOperand(4), NE);

  EmitAndCheck(&assembler, "IfThen");
}

TEST(Thumb2AssemblerTest, CbzCbnz) {
  arm::Thumb2Assembler assembler;

  Label l1;
  __ cbz(R2, &l1);
  __ mov(R1, ShifterOperand(3));
  __ mov(R2, ShifterOperand(3));
  __ Bind(&l1);
  __ mov(R2, ShifterOperand(4));

  Label l2;
  __ cbnz(R2, &l2);
  __ mov(R8, ShifterOperand(3));
  __ mov(R2, ShifterOperand(3));
  __ Bind(&l2);
  __ mov(R2, ShifterOperand(4));

  EmitAndCheck(&assembler, "CbzCbnz");
}

TEST(Thumb2AssemblerTest, Multiply) {
  arm::Thumb2Assembler assembler;

  __ mul(R0, R1, R0);
  __ mul(R0, R1, R2);
  __ mul(R8, R9, R8);
  __ mul(R8, R9, R10);

  __ mla(R0, R1, R2, R3);
  __ mla(R8, R9, R8, R9);

  __ mls(R0, R1, R2, R3);
  __ mls(R8, R9, R8, R9);

  __ umull(R0, R1, R2, R3);
  __ umull(R8, R9, R10, R11);

  EmitAndCheck(&assembler, "Multiply");
}

TEST(Thumb2AssemblerTest, Divide) {
  arm::Thumb2Assembler assembler;

  __ sdiv(R0, R1, R2);
  __ sdiv(R8, R9, R10);

  __ udiv(R0, R1, R2);
  __ udiv(R8, R9, R10);

  EmitAndCheck(&assembler, "Divide");
}

TEST(Thumb2AssemblerTest, VMov) {
  arm::Thumb2Assembler assembler;

  __ vmovs(S1, 1.0);
  __ vmovd(D1, 1.0);

  __ vmovs(S1, S2);
  __ vmovd(D1, D2);

  EmitAndCheck(&assembler, "VMov");
}


TEST(Thumb2AssemblerTest, BasicFloatingPoint) {
  arm::Thumb2Assembler assembler;

  __ vadds(S0, S1, S2);
  __ vsubs(S0, S1, S2);
  __ vmuls(S0, S1, S2);
  __ vmlas(S0, S1, S2);
  __ vmlss(S0, S1, S2);
  __ vdivs(S0, S1, S2);
  __ vabss(S0, S1);
  __ vnegs(S0, S1);
  __ vsqrts(S0, S1);

  __ vaddd(D0, D1, D2);
  __ vsubd(D0, D1, D2);
  __ vmuld(D0, D1, D2);
  __ vmlad(D0, D1, D2);
  __ vmlsd(D0, D1, D2);
  __ vdivd(D0, D1, D2);
  __ vabsd(D0, D1);
  __ vnegd(D0, D1);
  __ vsqrtd(D0, D1);

  EmitAndCheck(&assembler, "BasicFloatingPoint");
}

TEST(Thumb2AssemblerTest, FloatingPointConversions) {
  arm::Thumb2Assembler assembler;

  __ vcvtsd(S2, D2);
  __ vcvtds(D2, S2);

  __ vcvtis(S1, S2);
  __ vcvtsi(S1, S2);

  __ vcvtid(S1, D2);
  __ vcvtdi(D1, S2);

  __ vcvtus(S1, S2);
  __ vcvtsu(S1, S2);

  __ vcvtud(S1, D2);
  __ vcvtdu(D1, S2);

  EmitAndCheck(&assembler, "FloatingPointConversions");
}

TEST(Thumb2AssemblerTest, FloatingPointComparisons) {
  arm::Thumb2Assembler assembler;

  __ vcmps(S0, S1);
  __ vcmpd(D0, D1);

  __ vcmpsz(S2);
  __ vcmpdz(D2);

  EmitAndCheck(&assembler, "FloatingPointComparisons");
}

TEST(Thumb2AssemblerTest, Calls) {
  arm::Thumb2Assembler assembler;

  __ blx(LR);
  __ bx(LR);

  EmitAndCheck(&assembler, "Calls");
}

TEST(Thumb2AssemblerTest, Breakpoint) {
  arm::Thumb2Assembler assembler;

  __ bkpt(0);

  EmitAndCheck(&assembler, "Breakpoint");
}

TEST(Thumb2AssemblerTest, StrR1) {
  arm::Thumb2Assembler assembler;

  __ str(R1, Address(SP, 68));
  __ str(R1, Address(SP, 1068));

  EmitAndCheck(&assembler, "StrR1");
}

TEST(Thumb2AssemblerTest, VPushPop) {
  arm::Thumb2Assembler assembler;

  __ vpushs(S2, 4);
  __ vpushd(D2, 4);

  __ vpops(S2, 4);
  __ vpopd(D2, 4);

  EmitAndCheck(&assembler, "VPushPop");
}

TEST(Thumb2AssemblerTest, Max16BitBranch) {
  arm::Thumb2Assembler assembler;

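  // A 16-bit unconditional branch (encoding T2) has an 11-bit halfword
  // offset, reaching roughly 2KiB. The 1 << 11 bytes of 16-bit movs below
  // should put the target right at the edge of that range.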
  Label l1;
  __ b(&l1);
  for (int i = 0; i < (1 << 11); i += 2) {
    __ mov(R3, ShifterOperand(i & 0xff));
  }
  __ Bind(&l1);
  __ mov(R1, ShifterOperand(R2));

  EmitAndCheck(&assembler, "Max16BitBranch");
}

TEST(Thumb2AssemblerTest, Branch32) {
  arm::Thumb2Assembler assembler;

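  // Two bytes further than Max16BitBranch: the target no longer fits the
  // 16-bit encoding, so the branch has to be relocated to 32 bits.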
  Label l1;
  __ b(&l1);
  for (int i = 0; i < (1 << 11) + 2; i += 2) {
    __ mov(R3, ShifterOperand(i & 0xff));
  }
  __ Bind(&l1);
  __ mov(R1, ShifterOperand(R2));

  EmitAndCheck(&assembler, "Branch32");
}

TEST(Thumb2AssemblerTest, CompareAndBranchMax) {
  arm::Thumb2Assembler assembler;

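  // CBZ encodes a 6-bit halfword offset (forward only), reaching at most
  // 126 bytes. The 1 << 7 bytes of movs below sit right at that limit.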
  Label l1;
  __ cbz(R4, &l1);
  for (int i = 0; i < (1 << 7); i += 2) {
    __ mov(R3, ShifterOperand(i & 0xff));
  }
  __ Bind(&l1);
  __ mov(R1, ShifterOperand(R2));

  EmitAndCheck(&assembler, "CompareAndBranchMax");
}

TEST(Thumb2AssemblerTest, CompareAndBranchRelocation16) {
  arm::Thumb2Assembler assembler;

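  // Just past the CBZ range, so the assembler presumably rewrites the CBZ as
  // a compare plus a conditional branch, still 16-bit at this distance.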
  Label l1;
  __ cbz(R4, &l1);
  for (int i = 0; i < (1 << 7) + 2; i += 2) {
    __ mov(R3, ShifterOperand(i & 0xff));
  }
  __ Bind(&l1);
  __ mov(R1, ShifterOperand(R2));

  EmitAndCheck(&assembler, "CompareAndBranchRelocation16");
}

TEST(Thumb2AssemblerTest, CompareAndBranchRelocation32) {
  arm::Thumb2Assembler assembler;

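  // Far beyond the range of both CBZ and a 16-bit conditional branch,
  // presumably forcing a compare plus a 32-bit conditional branch.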
  Label l1;
  __ cbz(R4, &l1);
  for (int i = 0; i < (1 << 11) + 2; i += 2) {
    __ mov(R3, ShifterOperand(i & 0xff));
  }
  __ Bind(&l1);
  __ mov(R1, ShifterOperand(R2));

  EmitAndCheck(&assembler, "CompareAndBranchRelocation32");
}

TEST(Thumb2AssemblerTest, MixedBranch32) {
  arm::Thumb2Assembler assembler;

  Label l1;
  Label l2;
  __ b(&l1);      // Forwards.
  __ Bind(&l2);

  // Space to force relocation.
  for (int i = 0; i < (1 << 11) + 2; i += 2) {
    __ mov(R3, ShifterOperand(i & 0xff));
  }
  __ b(&l2);      // Backwards.
  __ Bind(&l1);
  __ mov(R1, ShifterOperand(R2));

  EmitAndCheck(&assembler, "MixedBranch32");
}

TEST(Thumb2AssemblerTest, Shifts) {
  arm::Thumb2Assembler assembler;

  // 16-bit variants selected for the default kCcDontCare.
  __ Lsl(R0, R1, 5);
  __ Lsr(R0, R1, 5);
  __ Asr(R0, R1, 5);

  __ Lsl(R0, R0, R1);
  __ Lsr(R0, R0, R1);
  __ Asr(R0, R0, R1);
  __ Ror(R0, R0, R1);

  // 16-bit with kCcSet.
  __ Lsls(R0, R1, 5);
  __ Lsrs(R0, R1, 5);
  __ Asrs(R0, R1, 5);

  __ Lsls(R0, R0, R1);
  __ Lsrs(R0, R0, R1);
  __ Asrs(R0, R0, R1);
  __ Rors(R0, R0, R1);

  // 32-bit with kCcKeep.
  __ Lsl(R0, R1, 5, AL, kCcKeep);
  __ Lsr(R0, R1, 5, AL, kCcKeep);
  __ Asr(R0, R1, 5, AL, kCcKeep);

  __ Lsl(R0, R0, R1, AL, kCcKeep);
  __ Lsr(R0, R0, R1, AL, kCcKeep);
  __ Asr(R0, R0, R1, AL, kCcKeep);
  __ Ror(R0, R0, R1, AL, kCcKeep);

  // 32-bit because ROR immediate doesn't have a 16-bit version like the other shifts.
  __ Ror(R0, R1, 5);
  __ Rors(R0, R1, 5);
  __ Ror(R0, R1, 5, AL, kCcKeep);

  // 32-bit due to high registers.
  __ Lsl(R8, R1, 5);
  __ Lsr(R0, R8, 5);
  __ Asr(R8, R1, 5);
  __ Ror(R0, R8, 5);

  // 32-bit due to different Rd and Rn.
  __ Lsl(R0, R1, R2);
  __ Lsr(R0, R1, R2);
  __ Asr(R0, R1, R2);
  __ Ror(R0, R1, R2);

  // 32-bit due to use of high registers.
  __ Lsl(R8, R1, R2);
  __ Lsr(R0, R8, R2);
  __ Asr(R0, R1, R8);

  // S bit (all 32-bit).

  // 32-bit due to high registers.
  __ Lsls(R8, R1, 5);
  __ Lsrs(R0, R8, 5);
  __ Asrs(R8, R1, 5);
  __ Rors(R0, R8, 5);

  // 32-bit due to different Rd and Rn.
  __ Lsls(R0, R1, R2);
  __ Lsrs(R0, R1, R2);
  __ Asrs(R0, R1, R2);
  __ Rors(R0, R1, R2);

  // 32-bit due to use of high registers.
  __ Lsls(R8, R1, R2);
  __ Lsrs(R0, R8, R2);
  __ Asrs(R0, R1, R8);

  EmitAndCheck(&assembler, "Shifts");
}

TEST(Thumb2AssemblerTest, LoadStoreRegOffset) {
  arm::Thumb2Assembler assembler;

  // 16-bit.
  __ ldr(R0, Address(R1, R2));
  __ str(R0, Address(R1, R2));

  // 32-bit due to shift.
  __ ldr(R0, Address(R1, R2, LSL, 1));
  __ str(R0, Address(R1, R2, LSL, 1));

  __ ldr(R0, Address(R1, R2, LSL, 3));
  __ str(R0, Address(R1, R2, LSL, 3));

  // 32-bit due to high register use.
  __ ldr(R8, Address(R1, R2));
  __ str(R8, Address(R1, R2));

  __ ldr(R1, Address(R8, R2));
  __ str(R2, Address(R8, R2));

  __ ldr(R0, Address(R1, R8));
  __ str(R0, Address(R1, R8));

  EmitAndCheck(&assembler, "LoadStoreRegOffset");
}

TEST(Thumb2AssemblerTest, LoadStoreLiteral) {
  arm::Thumb2Assembler assembler;

  __ ldr(R0, Address(4));
  __ str(R0, Address(4));

  __ ldr(R0, Address(-8));
  __ str(R0, Address(-8));

  // Limits.
  __ ldr(R0, Address(0x3ff));       // 10 bits (16-bit).
  __ ldr(R0, Address(0x7ff));       // 11 bits (32-bit).
  __ str(R0, Address(0x3ff));       // 32-bit (there is no 16-bit str literal).
  __ str(R0, Address(0x7ff));       // 11 bits (32-bit).

  EmitAndCheck(&assembler, "LoadStoreLiteral");
}

TEST(Thumb2AssemblerTest, LoadStoreLimits) {
  arm::Thumb2Assembler assembler;

  __ ldr(R0, Address(R4, 124));     // 16-bit.
  __ ldr(R0, Address(R4, 128));     // 32-bit.

  __ ldrb(R0, Address(R4, 31));     // 16-bit.
  __ ldrb(R0, Address(R4, 32));     // 32-bit.

  __ ldrh(R0, Address(R4, 62));     // 16-bit.
  __ ldrh(R0, Address(R4, 64));     // 32-bit.

  __ ldrsb(R0, Address(R4, 31));     // 32-bit.
  __ ldrsb(R0, Address(R4, 32));     // 32-bit.

  __ ldrsh(R0, Address(R4, 62));     // 32-bit.
  __ ldrsh(R0, Address(R4, 64));     // 32-bit.

  __ str(R0, Address(R4, 124));     // 16-bit.
  __ str(R0, Address(R4, 128));     // 32-bit.

  __ strb(R0, Address(R4, 31));     // 16-bit.
  __ strb(R0, Address(R4, 32));     // 32-bit.

  __ strh(R0, Address(R4, 62));     // 16-bit.
  __ strh(R0, Address(R4, 64));     // 32-bit.

  EmitAndCheck(&assembler, "LoadStoreLimits");
}

TEST(Thumb2AssemblerTest, CompareAndBranch) {
  arm::Thumb2Assembler assembler;

  Label label;
  __ CompareAndBranchIfZero(arm::R0, &label);
  __ CompareAndBranchIfZero(arm::R11, &label);
  __ CompareAndBranchIfNonZero(arm::R0, &label);
  __ CompareAndBranchIfNonZero(arm::R11, &label);
  __ Bind(&label);

  EmitAndCheck(&assembler, "CompareAndBranch");
}

#undef __
}  // namespace arm
}  // namespace art