mir_analysis.cc revision e4fcc5ba2284c201c022b52d27f7a1201d696324
1/*
2 * Copyright (C) 2013 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 *      http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#include <algorithm>
18#include <memory>
19
20#include "base/logging.h"
21#include "dataflow_iterator-inl.h"
22#include "compiler_ir.h"
23#include "dex_flags.h"
24#include "dex_instruction-inl.h"
25#include "dex/mir_field_info.h"
26#include "dex/verified_method.h"
27#include "dex/quick/dex_file_method_inliner.h"
28#include "dex/quick/dex_file_to_method_inliner_map.h"
29#include "driver/compiler_driver.h"
30#include "driver/compiler_options.h"
31#include "driver/dex_compilation_unit.h"
32#include "utils/scoped_arena_containers.h"
33
34namespace art {
35
// Bit positions for the per-instruction analysis attributes. Each enumerator
// names the shift used to build the corresponding mask in
// InstructionAnalysisAttributeMasks below; the values themselves are never
// stored, only the derived masks are.
enum InstructionAnalysisAttributeOps : uint8_t {
  kUninterestingOp = 0,  // Contributes nothing to the method statistics.
  kArithmeticOp,         // Any math operation (int/long/fp).
  kFpOp,                 // Floating-point operation.
  kSingleOp,             // Single-precision (32-bit) float involvement.
  kDoubleOp,             // Double-precision (64-bit) float involvement.
  kIntOp,                // 32-bit integer involvement.
  kLongOp,               // 64-bit integer involvement.
  kBranchOp,             // Ends a basic block (branch/return/throw).
  kInvokeOp,             // Method invocation.
  kArrayOp,              // Array access or array-length.
  kHeavyweightOp,        // Expensive runtime operation (allocation, invoke, throw).
  kSimpleConstOp,        // Simple constant load.
  kMoveOp,               // Register-to-register move.
  kSwitch                // PACKED_SWITCH / SPARSE_SWITCH.
};
52
// Bit masks derived from InstructionAnalysisAttributeOps. Entries of
// kAnalysisAttributes[] below are ORs of these flags.
enum InstructionAnalysisAttributeMasks : uint16_t {
  kAnNone = 1 << kUninterestingOp,
  kAnMath = 1 << kArithmeticOp,
  kAnFp = 1 << kFpOp,
  kAnLong = 1 << kLongOp,
  kAnInt = 1 << kIntOp,
  kAnSingle = 1 << kSingleOp,
  kAnDouble = 1 << kDoubleOp,
  kAnFloatMath = 1 << kFpOp,  // NOTE: deliberate alias of kAnFp (same bit).
  kAnBranch = 1 << kBranchOp,
  kAnInvoke = 1 << kInvokeOp,
  kAnArrayOp = 1 << kArrayOp,
  kAnHeavyWeight = 1 << kHeavyweightOp,
  kAnSimpleConst = 1 << kSimpleConstOp,
  kAnMove = 1 << kMoveOp,
  kAnSwitch = 1 << kSwitch,
  // An instruction counts as "computational" if it is math, an array op,
  // a move, or a simple constant load; used to detect computational loops.
  kAnComputational = kAnMath | kAnArrayOp | kAnMove | kAnSimpleConst,
};
71
// Instruction characteristics used to statically identify computation-intensive methods.
// Indexed by (extended) MIR opcode: Dex opcodes 0x00-0xFF first, then the
// extended MIR pseudo-opcodes starting at 0x100. Each entry is a bitwise OR of
// InstructionAnalysisAttributeMasks flags. The table order must match the
// opcode numbering exactly — see the hex opcode in each entry's comment.
static const uint16_t kAnalysisAttributes[kMirOpLast] = {
  // 00 NOP
  kAnNone,

  // 01 MOVE vA, vB
  kAnMove,

  // 02 MOVE_FROM16 vAA, vBBBB
  kAnMove,

  // 03 MOVE_16 vAAAA, vBBBB
  kAnMove,

  // 04 MOVE_WIDE vA, vB
  kAnMove,

  // 05 MOVE_WIDE_FROM16 vAA, vBBBB
  kAnMove,

  // 06 MOVE_WIDE_16 vAAAA, vBBBB
  kAnMove,

  // 07 MOVE_OBJECT vA, vB
  kAnMove,

  // 08 MOVE_OBJECT_FROM16 vAA, vBBBB
  kAnMove,

  // 09 MOVE_OBJECT_16 vAAAA, vBBBB
  kAnMove,

  // 0A MOVE_RESULT vAA
  kAnMove,

  // 0B MOVE_RESULT_WIDE vAA
  kAnMove,

  // 0C MOVE_RESULT_OBJECT vAA
  kAnMove,

  // 0D MOVE_EXCEPTION vAA
  kAnMove,

  // 0E RETURN_VOID
  kAnBranch,

  // 0F RETURN vAA
  kAnBranch,

  // 10 RETURN_WIDE vAA
  kAnBranch,

  // 11 RETURN_OBJECT vAA
  kAnBranch,

  // 12 CONST_4 vA, #+B
  kAnSimpleConst,

  // 13 CONST_16 vAA, #+BBBB
  kAnSimpleConst,

  // 14 CONST vAA, #+BBBBBBBB
  kAnSimpleConst,

  // 15 CONST_HIGH16 VAA, #+BBBB0000
  kAnSimpleConst,

  // 16 CONST_WIDE_16 vAA, #+BBBB
  kAnSimpleConst,

  // 17 CONST_WIDE_32 vAA, #+BBBBBBBB
  kAnSimpleConst,

  // 18 CONST_WIDE vAA, #+BBBBBBBBBBBBBBBB
  kAnSimpleConst,

  // 19 CONST_WIDE_HIGH16 vAA, #+BBBB000000000000
  kAnSimpleConst,

  // 1A CONST_STRING vAA, string@BBBB
  kAnNone,

  // 1B CONST_STRING_JUMBO vAA, string@BBBBBBBB
  kAnNone,

  // 1C CONST_CLASS vAA, type@BBBB
  kAnNone,

  // 1D MONITOR_ENTER vAA
  kAnNone,

  // 1E MONITOR_EXIT vAA
  kAnNone,

  // 1F CHK_CAST vAA, type@BBBB
  kAnNone,

  // 20 INSTANCE_OF vA, vB, type@CCCC
  kAnNone,

  // 21 ARRAY_LENGTH vA, vB
  kAnArrayOp,

  // 22 NEW_INSTANCE vAA, type@BBBB
  kAnHeavyWeight,

  // 23 NEW_ARRAY vA, vB, type@CCCC
  kAnHeavyWeight,

  // 24 FILLED_NEW_ARRAY {vD, vE, vF, vG, vA}
  kAnHeavyWeight,

  // 25 FILLED_NEW_ARRAY_RANGE {vCCCC .. vNNNN}, type@BBBB
  kAnHeavyWeight,

  // 26 FILL_ARRAY_DATA vAA, +BBBBBBBB
  kAnNone,

  // 27 THROW vAA
  kAnHeavyWeight | kAnBranch,

  // 28 GOTO
  kAnBranch,

  // 29 GOTO_16
  kAnBranch,

  // 2A GOTO_32
  kAnBranch,

  // 2B PACKED_SWITCH vAA, +BBBBBBBB
  kAnSwitch,

  // 2C SPARSE_SWITCH vAA, +BBBBBBBB
  kAnSwitch,

  // 2D CMPL_FLOAT vAA, vBB, vCC
  kAnMath | kAnFp | kAnSingle,

  // 2E CMPG_FLOAT vAA, vBB, vCC
  kAnMath | kAnFp | kAnSingle,

  // 2F CMPL_DOUBLE vAA, vBB, vCC
  kAnMath | kAnFp | kAnDouble,

  // 30 CMPG_DOUBLE vAA, vBB, vCC
  kAnMath | kAnFp | kAnDouble,

  // 31 CMP_LONG vAA, vBB, vCC
  kAnMath | kAnLong,

  // 32 IF_EQ vA, vB, +CCCC
  kAnMath | kAnBranch | kAnInt,

  // 33 IF_NE vA, vB, +CCCC
  kAnMath | kAnBranch | kAnInt,

  // 34 IF_LT vA, vB, +CCCC
  kAnMath | kAnBranch | kAnInt,

  // 35 IF_GE vA, vB, +CCCC
  kAnMath | kAnBranch | kAnInt,

  // 36 IF_GT vA, vB, +CCCC
  kAnMath | kAnBranch | kAnInt,

  // 37 IF_LE vA, vB, +CCCC
  kAnMath | kAnBranch | kAnInt,

  // 38 IF_EQZ vAA, +BBBB
  kAnMath | kAnBranch | kAnInt,

  // 39 IF_NEZ vAA, +BBBB
  kAnMath | kAnBranch | kAnInt,

  // 3A IF_LTZ vAA, +BBBB
  kAnMath | kAnBranch | kAnInt,

  // 3B IF_GEZ vAA, +BBBB
  kAnMath | kAnBranch | kAnInt,

  // 3C IF_GTZ vAA, +BBBB
  kAnMath | kAnBranch | kAnInt,

  // 3D IF_LEZ vAA, +BBBB
  kAnMath | kAnBranch | kAnInt,

  // 3E UNUSED_3E
  kAnNone,

  // 3F UNUSED_3F
  kAnNone,

  // 40 UNUSED_40
  kAnNone,

  // 41 UNUSED_41
  kAnNone,

  // 42 UNUSED_42
  kAnNone,

  // 43 UNUSED_43
  kAnNone,

  // 44 AGET vAA, vBB, vCC
  kAnArrayOp,

  // 45 AGET_WIDE vAA, vBB, vCC
  kAnArrayOp,

  // 46 AGET_OBJECT vAA, vBB, vCC
  kAnArrayOp,

  // 47 AGET_BOOLEAN vAA, vBB, vCC
  kAnArrayOp,

  // 48 AGET_BYTE vAA, vBB, vCC
  kAnArrayOp,

  // 49 AGET_CHAR vAA, vBB, vCC
  kAnArrayOp,

  // 4A AGET_SHORT vAA, vBB, vCC
  kAnArrayOp,

  // 4B APUT vAA, vBB, vCC
  kAnArrayOp,

  // 4C APUT_WIDE vAA, vBB, vCC
  kAnArrayOp,

  // 4D APUT_OBJECT vAA, vBB, vCC
  kAnArrayOp,

  // 4E APUT_BOOLEAN vAA, vBB, vCC
  kAnArrayOp,

  // 4F APUT_BYTE vAA, vBB, vCC
  kAnArrayOp,

  // 50 APUT_CHAR vAA, vBB, vCC
  kAnArrayOp,

  // 51 APUT_SHORT vAA, vBB, vCC
  kAnArrayOp,

  // 52 IGET vA, vB, field@CCCC
  kAnNone,

  // 53 IGET_WIDE vA, vB, field@CCCC
  kAnNone,

  // 54 IGET_OBJECT vA, vB, field@CCCC
  kAnNone,

  // 55 IGET_BOOLEAN vA, vB, field@CCCC
  kAnNone,

  // 56 IGET_BYTE vA, vB, field@CCCC
  kAnNone,

  // 57 IGET_CHAR vA, vB, field@CCCC
  kAnNone,

  // 58 IGET_SHORT vA, vB, field@CCCC
  kAnNone,

  // 59 IPUT vA, vB, field@CCCC
  kAnNone,

  // 5A IPUT_WIDE vA, vB, field@CCCC
  kAnNone,

  // 5B IPUT_OBJECT vA, vB, field@CCCC
  kAnNone,

  // 5C IPUT_BOOLEAN vA, vB, field@CCCC
  kAnNone,

  // 5D IPUT_BYTE vA, vB, field@CCCC
  kAnNone,

  // 5E IPUT_CHAR vA, vB, field@CCCC
  kAnNone,

  // 5F IPUT_SHORT vA, vB, field@CCCC
  kAnNone,

  // 60 SGET vAA, field@BBBB
  kAnNone,

  // 61 SGET_WIDE vAA, field@BBBB
  kAnNone,

  // 62 SGET_OBJECT vAA, field@BBBB
  kAnNone,

  // 63 SGET_BOOLEAN vAA, field@BBBB
  kAnNone,

  // 64 SGET_BYTE vAA, field@BBBB
  kAnNone,

  // 65 SGET_CHAR vAA, field@BBBB
  kAnNone,

  // 66 SGET_SHORT vAA, field@BBBB
  kAnNone,

  // 67 SPUT vAA, field@BBBB
  kAnNone,

  // 68 SPUT_WIDE vAA, field@BBBB
  kAnNone,

  // 69 SPUT_OBJECT vAA, field@BBBB
  kAnNone,

  // 6A SPUT_BOOLEAN vAA, field@BBBB
  kAnNone,

  // 6B SPUT_BYTE vAA, field@BBBB
  kAnNone,

  // 6C SPUT_CHAR vAA, field@BBBB
  kAnNone,

  // 6D SPUT_SHORT vAA, field@BBBB
  kAnNone,

  // 6E INVOKE_VIRTUAL {vD, vE, vF, vG, vA}
  kAnInvoke | kAnHeavyWeight,

  // 6F INVOKE_SUPER {vD, vE, vF, vG, vA}
  kAnInvoke | kAnHeavyWeight,

  // 70 INVOKE_DIRECT {vD, vE, vF, vG, vA}
  kAnInvoke | kAnHeavyWeight,

  // 71 INVOKE_STATIC {vD, vE, vF, vG, vA}
  kAnInvoke | kAnHeavyWeight,

  // 72 INVOKE_INTERFACE {vD, vE, vF, vG, vA}
  kAnInvoke | kAnHeavyWeight,

  // 73 UNUSED_73
  kAnNone,

  // 74 INVOKE_VIRTUAL_RANGE {vCCCC .. vNNNN}
  kAnInvoke | kAnHeavyWeight,

  // 75 INVOKE_SUPER_RANGE {vCCCC .. vNNNN}
  kAnInvoke | kAnHeavyWeight,

  // 76 INVOKE_DIRECT_RANGE {vCCCC .. vNNNN}
  kAnInvoke | kAnHeavyWeight,

  // 77 INVOKE_STATIC_RANGE {vCCCC .. vNNNN}
  kAnInvoke | kAnHeavyWeight,

  // 78 INVOKE_INTERFACE_RANGE {vCCCC .. vNNNN}
  kAnInvoke | kAnHeavyWeight,

  // 79 UNUSED_79
  kAnNone,

  // 7A UNUSED_7A
  kAnNone,

  // 7B NEG_INT vA, vB
  kAnMath | kAnInt,

  // 7C NOT_INT vA, vB
  kAnMath | kAnInt,

  // 7D NEG_LONG vA, vB
  kAnMath | kAnLong,

  // 7E NOT_LONG vA, vB
  kAnMath | kAnLong,

  // 7F NEG_FLOAT vA, vB
  kAnMath | kAnFp | kAnSingle,

  // 80 NEG_DOUBLE vA, vB
  kAnMath | kAnFp | kAnDouble,

  // 81 INT_TO_LONG vA, vB
  kAnMath | kAnInt | kAnLong,

  // 82 INT_TO_FLOAT vA, vB
  kAnMath | kAnFp | kAnInt | kAnSingle,

  // 83 INT_TO_DOUBLE vA, vB
  kAnMath | kAnFp | kAnInt | kAnDouble,

  // 84 LONG_TO_INT vA, vB
  kAnMath | kAnInt | kAnLong,

  // 85 LONG_TO_FLOAT vA, vB
  kAnMath | kAnFp | kAnLong | kAnSingle,

  // 86 LONG_TO_DOUBLE vA, vB
  kAnMath | kAnFp | kAnLong | kAnDouble,

  // 87 FLOAT_TO_INT vA, vB
  kAnMath | kAnFp | kAnInt | kAnSingle,

  // 88 FLOAT_TO_LONG vA, vB
  kAnMath | kAnFp | kAnLong | kAnSingle,

  // 89 FLOAT_TO_DOUBLE vA, vB
  kAnMath | kAnFp | kAnSingle | kAnDouble,

  // 8A DOUBLE_TO_INT vA, vB
  kAnMath | kAnFp | kAnInt | kAnDouble,

  // 8B DOUBLE_TO_LONG vA, vB
  kAnMath | kAnFp | kAnLong | kAnDouble,

  // 8C DOUBLE_TO_FLOAT vA, vB
  kAnMath | kAnFp | kAnSingle | kAnDouble,

  // 8D INT_TO_BYTE vA, vB
  kAnMath | kAnInt,

  // 8E INT_TO_CHAR vA, vB
  kAnMath | kAnInt,

  // 8F INT_TO_SHORT vA, vB
  kAnMath | kAnInt,

  // 90 ADD_INT vAA, vBB, vCC
  kAnMath | kAnInt,

  // 91 SUB_INT vAA, vBB, vCC
  kAnMath | kAnInt,

  // 92 MUL_INT vAA, vBB, vCC
  kAnMath | kAnInt,

  // 93 DIV_INT vAA, vBB, vCC
  kAnMath | kAnInt,

  // 94 REM_INT vAA, vBB, vCC
  kAnMath | kAnInt,

  // 95 AND_INT vAA, vBB, vCC
  kAnMath | kAnInt,

  // 96 OR_INT vAA, vBB, vCC
  kAnMath | kAnInt,

  // 97 XOR_INT vAA, vBB, vCC
  kAnMath | kAnInt,

  // 98 SHL_INT vAA, vBB, vCC
  kAnMath | kAnInt,

  // 99 SHR_INT vAA, vBB, vCC
  kAnMath | kAnInt,

  // 9A USHR_INT vAA, vBB, vCC
  kAnMath | kAnInt,

  // 9B ADD_LONG vAA, vBB, vCC
  kAnMath | kAnLong,

  // 9C SUB_LONG vAA, vBB, vCC
  kAnMath | kAnLong,

  // 9D MUL_LONG vAA, vBB, vCC
  kAnMath | kAnLong,

  // 9E DIV_LONG vAA, vBB, vCC
  kAnMath | kAnLong,

  // 9F REM_LONG vAA, vBB, vCC
  kAnMath | kAnLong,

  // A0 AND_LONG vAA, vBB, vCC
  kAnMath | kAnLong,

  // A1 OR_LONG vAA, vBB, vCC
  kAnMath | kAnLong,

  // A2 XOR_LONG vAA, vBB, vCC
  kAnMath | kAnLong,

  // A3 SHL_LONG vAA, vBB, vCC
  kAnMath | kAnLong,

  // A4 SHR_LONG vAA, vBB, vCC
  kAnMath | kAnLong,

  // A5 USHR_LONG vAA, vBB, vCC
  kAnMath | kAnLong,

  // A6 ADD_FLOAT vAA, vBB, vCC
  kAnMath | kAnFp | kAnSingle,

  // A7 SUB_FLOAT vAA, vBB, vCC
  kAnMath | kAnFp | kAnSingle,

  // A8 MUL_FLOAT vAA, vBB, vCC
  kAnMath | kAnFp | kAnSingle,

  // A9 DIV_FLOAT vAA, vBB, vCC
  kAnMath | kAnFp | kAnSingle,

  // AA REM_FLOAT vAA, vBB, vCC
  kAnMath | kAnFp | kAnSingle,

  // AB ADD_DOUBLE vAA, vBB, vCC
  kAnMath | kAnFp | kAnDouble,

  // AC SUB_DOUBLE vAA, vBB, vCC
  kAnMath | kAnFp | kAnDouble,

  // AD MUL_DOUBLE vAA, vBB, vCC
  kAnMath | kAnFp | kAnDouble,

  // AE DIV_DOUBLE vAA, vBB, vCC
  kAnMath | kAnFp | kAnDouble,

  // AF REM_DOUBLE vAA, vBB, vCC
  kAnMath | kAnFp | kAnDouble,

  // B0 ADD_INT_2ADDR vA, vB
  kAnMath | kAnInt,

  // B1 SUB_INT_2ADDR vA, vB
  kAnMath | kAnInt,

  // B2 MUL_INT_2ADDR vA, vB
  kAnMath | kAnInt,

  // B3 DIV_INT_2ADDR vA, vB
  kAnMath | kAnInt,

  // B4 REM_INT_2ADDR vA, vB
  kAnMath | kAnInt,

  // B5 AND_INT_2ADDR vA, vB
  kAnMath | kAnInt,

  // B6 OR_INT_2ADDR vA, vB
  kAnMath | kAnInt,

  // B7 XOR_INT_2ADDR vA, vB
  kAnMath | kAnInt,

  // B8 SHL_INT_2ADDR vA, vB
  kAnMath | kAnInt,

  // B9 SHR_INT_2ADDR vA, vB
  kAnMath | kAnInt,

  // BA USHR_INT_2ADDR vA, vB
  kAnMath | kAnInt,

  // BB ADD_LONG_2ADDR vA, vB
  kAnMath | kAnLong,

  // BC SUB_LONG_2ADDR vA, vB
  kAnMath | kAnLong,

  // BD MUL_LONG_2ADDR vA, vB
  kAnMath | kAnLong,

  // BE DIV_LONG_2ADDR vA, vB
  kAnMath | kAnLong,

  // BF REM_LONG_2ADDR vA, vB
  kAnMath | kAnLong,

  // C0 AND_LONG_2ADDR vA, vB
  kAnMath | kAnLong,

  // C1 OR_LONG_2ADDR vA, vB
  kAnMath | kAnLong,

  // C2 XOR_LONG_2ADDR vA, vB
  kAnMath | kAnLong,

  // C3 SHL_LONG_2ADDR vA, vB
  kAnMath | kAnLong,

  // C4 SHR_LONG_2ADDR vA, vB
  kAnMath | kAnLong,

  // C5 USHR_LONG_2ADDR vA, vB
  kAnMath | kAnLong,

  // C6 ADD_FLOAT_2ADDR vA, vB
  kAnMath | kAnFp | kAnSingle,

  // C7 SUB_FLOAT_2ADDR vA, vB
  kAnMath | kAnFp | kAnSingle,

  // C8 MUL_FLOAT_2ADDR vA, vB
  kAnMath | kAnFp | kAnSingle,

  // C9 DIV_FLOAT_2ADDR vA, vB
  kAnMath | kAnFp | kAnSingle,

  // CA REM_FLOAT_2ADDR vA, vB
  kAnMath | kAnFp | kAnSingle,

  // CB ADD_DOUBLE_2ADDR vA, vB
  kAnMath | kAnFp | kAnDouble,

  // CC SUB_DOUBLE_2ADDR vA, vB
  kAnMath | kAnFp | kAnDouble,

  // CD MUL_DOUBLE_2ADDR vA, vB
  kAnMath | kAnFp | kAnDouble,

  // CE DIV_DOUBLE_2ADDR vA, vB
  kAnMath | kAnFp | kAnDouble,

  // CF REM_DOUBLE_2ADDR vA, vB
  kAnMath | kAnFp | kAnDouble,

  // D0 ADD_INT_LIT16 vA, vB, #+CCCC
  kAnMath | kAnInt,

  // D1 RSUB_INT vA, vB, #+CCCC
  kAnMath | kAnInt,

  // D2 MUL_INT_LIT16 vA, vB, #+CCCC
  kAnMath | kAnInt,

  // D3 DIV_INT_LIT16 vA, vB, #+CCCC
  kAnMath | kAnInt,

  // D4 REM_INT_LIT16 vA, vB, #+CCCC
  kAnMath | kAnInt,

  // D5 AND_INT_LIT16 vA, vB, #+CCCC
  kAnMath | kAnInt,

  // D6 OR_INT_LIT16 vA, vB, #+CCCC
  kAnMath | kAnInt,

  // D7 XOR_INT_LIT16 vA, vB, #+CCCC
  kAnMath | kAnInt,

  // D8 ADD_INT_LIT8 vAA, vBB, #+CC
  kAnMath | kAnInt,

  // D9 RSUB_INT_LIT8 vAA, vBB, #+CC
  kAnMath | kAnInt,

  // DA MUL_INT_LIT8 vAA, vBB, #+CC
  kAnMath | kAnInt,

  // DB DIV_INT_LIT8 vAA, vBB, #+CC
  kAnMath | kAnInt,

  // DC REM_INT_LIT8 vAA, vBB, #+CC
  kAnMath | kAnInt,

  // DD AND_INT_LIT8 vAA, vBB, #+CC
  kAnMath | kAnInt,

  // DE OR_INT_LIT8 vAA, vBB, #+CC
  kAnMath | kAnInt,

  // DF XOR_INT_LIT8 vAA, vBB, #+CC
  kAnMath | kAnInt,

  // E0 SHL_INT_LIT8 vAA, vBB, #+CC
  kAnMath | kAnInt,

  // E1 SHR_INT_LIT8 vAA, vBB, #+CC
  kAnMath | kAnInt,

  // E2 USHR_INT_LIT8 vAA, vBB, #+CC
  kAnMath | kAnInt,

  // E3 IGET_VOLATILE
  kAnNone,

  // E4 IPUT_VOLATILE
  kAnNone,

  // E5 SGET_VOLATILE
  kAnNone,

  // E6 SPUT_VOLATILE
  kAnNone,

  // E7 IGET_OBJECT_VOLATILE
  kAnNone,

  // E8 IGET_WIDE_VOLATILE
  kAnNone,

  // E9 IPUT_WIDE_VOLATILE
  kAnNone,

  // EA SGET_WIDE_VOLATILE
  kAnNone,

  // EB SPUT_WIDE_VOLATILE
  kAnNone,

  // EC BREAKPOINT
  kAnNone,

  // ED THROW_VERIFICATION_ERROR
  kAnHeavyWeight | kAnBranch,

  // EE EXECUTE_INLINE
  kAnNone,

  // EF EXECUTE_INLINE_RANGE
  kAnNone,

  // F0 INVOKE_OBJECT_INIT_RANGE
  kAnInvoke | kAnHeavyWeight,

  // F1 RETURN_VOID_BARRIER
  kAnBranch,

  // F2 IGET_QUICK
  kAnNone,

  // F3 IGET_WIDE_QUICK
  kAnNone,

  // F4 IGET_OBJECT_QUICK
  kAnNone,

  // F5 IPUT_QUICK
  kAnNone,

  // F6 IPUT_WIDE_QUICK
  kAnNone,

  // F7 IPUT_OBJECT_QUICK
  kAnNone,

  // F8 INVOKE_VIRTUAL_QUICK
  kAnInvoke | kAnHeavyWeight,

  // F9 INVOKE_VIRTUAL_QUICK_RANGE
  kAnInvoke | kAnHeavyWeight,

  // FA INVOKE_SUPER_QUICK
  kAnInvoke | kAnHeavyWeight,

  // FB INVOKE_SUPER_QUICK_RANGE
  kAnInvoke | kAnHeavyWeight,

  // FC IPUT_OBJECT_VOLATILE
  kAnNone,

  // FD SGET_OBJECT_VOLATILE
  kAnNone,

  // FE SPUT_OBJECT_VOLATILE
  kAnNone,

  // FF UNUSED_FF
  kAnNone,

  // Beginning of extended MIR opcodes
  // 100 MIR_PHI
  kAnNone,

  // 101 MIR_COPY
  kAnNone,

  // 102 MIR_FUSED_CMPL_FLOAT
  kAnNone,

  // 103 MIR_FUSED_CMPG_FLOAT
  kAnNone,

  // 104 MIR_FUSED_CMPL_DOUBLE
  kAnNone,

  // 105 MIR_FUSED_CMPG_DOUBLE
  kAnNone,

  // 106 MIR_FUSED_CMP_LONG
  kAnNone,

  // 107 MIR_NOP
  kAnNone,

  // 108 MIR_NULL_CHECK
  kAnNone,

  // 109 MIR_RANGE_CHECK
  kAnNone,

  // 10A MIR_DIV_ZERO_CHECK
  kAnNone,

  // 10B MIR_CHECK
  kAnNone,

  // 10C MIR_CHECKPART2
  kAnNone,

  // 10D MIR_SELECT
  kAnNone,

  // 10E MirOpConstVector
  kAnNone,

  // 10F MirOpMoveVector
  kAnNone,

  // 110 MirOpPackedMultiply
  kAnNone,

  // 111 MirOpPackedAddition
  kAnNone,

  // 112 MirOpPackedSubtract
  kAnNone,

  // 113 MirOpPackedShiftLeft
  kAnNone,

  // 114 MirOpPackedSignedShiftRight
  kAnNone,

  // 115 MirOpPackedUnsignedShiftRight
  kAnNone,

  // 116 MirOpPackedAnd
  kAnNone,

  // 117 MirOpPackedOr
  kAnNone,

  // 118 MirOpPackedXor
  kAnNone,

  // 119 MirOpPackedAddReduce
  kAnNone,

  // 11A MirOpPackedReduce
  kAnNone,

  // 11B MirOpPackedSet
  kAnNone,

  // 11C MirOpReserveVectorRegisters
  kAnNone,

  // 11D MirOpReturnVectorRegisters
  kAnNone,

  // 11E MirOpMemBarrier
  kAnNone,

  // 11F MirOpPackedArrayGet
  kAnArrayOp,

  // 120 MirOpPackedArrayPut
  kAnArrayOp,
};
942
// Per-method statistics accumulated by AnalyzeBlock() and consumed by
// ComputeSkipCompilation(). Counters are weighted by a per-block loop scale
// factor, so they approximate dynamic rather than static frequency.
// Callers zero-initialize this struct (see SkipCompilation's memset).
struct MethodStats {
  int dex_instructions;          // Weighted count of analyzed dex instructions.
  int math_ops;                  // Weighted count of kAnMath instructions.
  int fp_ops;                    // Weighted count of kAnFp instructions.
  int array_ops;                 // Weighted count of kAnArrayOp instructions.
  int branch_ops;                // Weighted count of kAnBranch instructions.
  int heavyweight_ops;           // Weighted count of kAnHeavyWeight instructions.
  bool has_computational_loop;   // Saw a purely-computational loop containing math.
  bool has_switch;               // Saw a PACKED_SWITCH / SPARSE_SWITCH.
  // Ratios of the counters above to dex_instructions, filled in by
  // ComputeSkipCompilation().
  float math_ratio;
  float fp_ratio;
  float array_ratio;
  float branch_ratio;
  float heavyweight_ratio;
};
958
959void MIRGraph::AnalyzeBlock(BasicBlock* bb, MethodStats* stats) {
960  if (bb->visited || (bb->block_type != kDalvikByteCode)) {
961    return;
962  }
963  bool computational_block = true;
964  bool has_math = false;
965  /*
966   * For the purposes of this scan, we want to treat the set of basic blocks broken
967   * by an exception edge as a single basic block.  We'll scan forward along the fallthrough
968   * edges until we reach an explicit branch or return.
969   */
970  BasicBlock* ending_bb = bb;
971  if (ending_bb->last_mir_insn != NULL) {
972    uint32_t ending_flags = kAnalysisAttributes[ending_bb->last_mir_insn->dalvikInsn.opcode];
973    while ((ending_flags & kAnBranch) == 0) {
974      ending_bb = GetBasicBlock(ending_bb->fall_through);
975      ending_flags = kAnalysisAttributes[ending_bb->last_mir_insn->dalvikInsn.opcode];
976    }
977  }
978  /*
979   * Ideally, we'd weight the operations by loop nesting level, but to do so we'd
980   * first need to do some expensive loop detection - and the point of this is to make
981   * an informed guess before investing in computation.  However, we can cheaply detect
982   * many simple loop forms without having to do full dataflow analysis.
983   */
984  int loop_scale_factor = 1;
985  // Simple for and while loops
986  if ((ending_bb->taken != NullBasicBlockId) && (ending_bb->fall_through == NullBasicBlockId)) {
987    if ((GetBasicBlock(ending_bb->taken)->taken == bb->id) ||
988        (GetBasicBlock(ending_bb->taken)->fall_through == bb->id)) {
989      loop_scale_factor = 25;
990    }
991  }
992  // Simple do-while loop
993  if ((ending_bb->taken != NullBasicBlockId) && (ending_bb->taken == bb->id)) {
994    loop_scale_factor = 25;
995  }
996
997  BasicBlock* tbb = bb;
998  bool done = false;
999  while (!done) {
1000    tbb->visited = true;
1001    for (MIR* mir = tbb->first_mir_insn; mir != NULL; mir = mir->next) {
1002      if (MIR::DecodedInstruction::IsPseudoMirOp(mir->dalvikInsn.opcode)) {
1003        // Skip any MIR pseudo-op.
1004        continue;
1005      }
1006      uint16_t flags = kAnalysisAttributes[mir->dalvikInsn.opcode];
1007      stats->dex_instructions += loop_scale_factor;
1008      if ((flags & kAnBranch) == 0) {
1009        computational_block &= ((flags & kAnComputational) != 0);
1010      } else {
1011        stats->branch_ops += loop_scale_factor;
1012      }
1013      if ((flags & kAnMath) != 0) {
1014        stats->math_ops += loop_scale_factor;
1015        has_math = true;
1016      }
1017      if ((flags & kAnFp) != 0) {
1018        stats->fp_ops += loop_scale_factor;
1019      }
1020      if ((flags & kAnArrayOp) != 0) {
1021        stats->array_ops += loop_scale_factor;
1022      }
1023      if ((flags & kAnHeavyWeight) != 0) {
1024        stats->heavyweight_ops += loop_scale_factor;
1025      }
1026      if ((flags & kAnSwitch) != 0) {
1027        stats->has_switch = true;
1028      }
1029    }
1030    if (tbb == ending_bb) {
1031      done = true;
1032    } else {
1033      tbb = GetBasicBlock(tbb->fall_through);
1034    }
1035  }
1036  if (has_math && computational_block && (loop_scale_factor > 1)) {
1037    stats->has_computational_loop = true;
1038  }
1039}
1040
1041bool MIRGraph::ComputeSkipCompilation(MethodStats* stats, bool skip_default,
1042                                      std::string* skip_message) {
1043  float count = stats->dex_instructions;
1044  stats->math_ratio = stats->math_ops / count;
1045  stats->fp_ratio = stats->fp_ops / count;
1046  stats->branch_ratio = stats->branch_ops / count;
1047  stats->array_ratio = stats->array_ops / count;
1048  stats->heavyweight_ratio = stats->heavyweight_ops / count;
1049
1050  if (cu_->enable_debug & (1 << kDebugShowFilterStats)) {
1051    LOG(INFO) << "STATS " << stats->dex_instructions << ", math:"
1052              << stats->math_ratio << ", fp:"
1053              << stats->fp_ratio << ", br:"
1054              << stats->branch_ratio << ", hw:"
1055              << stats->heavyweight_ratio << ", arr:"
1056              << stats->array_ratio << ", hot:"
1057              << stats->has_computational_loop << ", "
1058              << PrettyMethod(cu_->method_idx, *cu_->dex_file);
1059  }
1060
1061  // Computation intensive?
1062  if (stats->has_computational_loop && (stats->heavyweight_ratio < 0.04)) {
1063    return false;
1064  }
1065
1066  // Complex, logic-intensive?
1067  if (cu_->compiler_driver->GetCompilerOptions().IsSmallMethod(GetNumDalvikInsns()) &&
1068      stats->branch_ratio > 0.3) {
1069    return false;
1070  }
1071
1072  // Significant floating point?
1073  if (stats->fp_ratio > 0.05) {
1074    return false;
1075  }
1076
1077  // Significant generic math?
1078  if (stats->math_ratio > 0.3) {
1079    return false;
1080  }
1081
1082  // If array-intensive, compiling is probably worthwhile.
1083  if (stats->array_ratio > 0.1) {
1084    return false;
1085  }
1086
1087  // Switch operations benefit greatly from compilation, so go ahead and spend the cycles.
1088  if (stats->has_switch) {
1089    return false;
1090  }
1091
1092  // If significant in size and high proportion of expensive operations, skip.
1093  if (cu_->compiler_driver->GetCompilerOptions().IsSmallMethod(GetNumDalvikInsns()) &&
1094      (stats->heavyweight_ratio > 0.3)) {
1095    *skip_message = "Is a small method with heavyweight ratio " +
1096                    std::to_string(stats->heavyweight_ratio);
1097    return true;
1098  }
1099
1100  return skip_default;
1101}
1102
 /*
  * Top-level compile/skip decision for the current method, based on the
  * compiler filter, method size thresholds, and the statistics gathered by
  * AnalyzeBlock(). Returns true to skip compilation, setting *skip_message
  * with the reason. Filters are applied in order; earlier ones win.
  *
  * Will eventually want this to be a bit more sophisticated and happen at verification time.
  */
bool MIRGraph::SkipCompilation(std::string* skip_message) {
  const CompilerOptions& compiler_options = cu_->compiler_driver->GetCompilerOptions();
  CompilerOptions::CompilerFilter compiler_filter = compiler_options.GetCompilerFilter();
  // kEverything compiles unconditionally — no filtering.
  if (compiler_filter == CompilerOptions::kEverything) {
    return false;
  }

  // Contains a pattern we don't want to compile?
  if (PuntToInterpreter()) {
    *skip_message = "Punt to interpreter set";
    return true;
  }

  DCHECK(compiler_options.IsCompilationEnabled());

  // Set up compilation cutoffs based on current filter mode.
  size_t small_cutoff;
  size_t default_cutoff;
  switch (compiler_filter) {
    case CompilerOptions::kBalanced:
      small_cutoff = compiler_options.GetSmallMethodThreshold();
      default_cutoff = compiler_options.GetLargeMethodThreshold();
      break;
    case CompilerOptions::kSpace:
      // Space-constrained: tighter cutoffs.
      small_cutoff = compiler_options.GetTinyMethodThreshold();
      default_cutoff = compiler_options.GetSmallMethodThreshold();
      break;
    case CompilerOptions::kSpeed:
    case CompilerOptions::kTime:
      // Speed/time: compile nearly everything short of huge.
      small_cutoff = compiler_options.GetHugeMethodThreshold();
      default_cutoff = compiler_options.GetHugeMethodThreshold();
      break;
    default:
      LOG(FATAL) << "Unexpected compiler_filter_: " << compiler_filter;
      UNREACHABLE();
  }

  // If size < cutoff, assume we'll compile - but allow removal.
  bool skip_compilation = (GetNumDalvikInsns() >= default_cutoff);
  if (skip_compilation) {
    *skip_message = "#Insns >= default_cutoff: " + std::to_string(GetNumDalvikInsns());
  }

  /*
   * Filter 1: Huge methods are likely to be machine generated, but some aren't.
   * If huge, assume we won't compile, but allow further analysis to turn it back on.
   */
  if (compiler_options.IsHugeMethod(GetNumDalvikInsns())) {
    skip_compilation = true;
    *skip_message = "Huge method: " + std::to_string(GetNumDalvikInsns());
    // If we've got a huge number of basic blocks, don't bother with further analysis.
    if (static_cast<size_t>(GetNumBlocks()) > (compiler_options.GetHugeMethodThreshold() / 2)) {
      return true;
    }
  } else if (compiler_options.IsLargeMethod(GetNumDalvikInsns()) &&
    /* If it's large and contains no branches, it's likely to be machine generated initialization */
      (GetBranchCount() == 0)) {
    *skip_message = "Large method with no branches";
    return true;
  } else if (compiler_filter == CompilerOptions::kSpeed) {
    // If not huge, compile.
    return false;
  }

  // Filter 2: Skip class initializers.
  if (((cu_->access_flags & kAccConstructor) != 0) && ((cu_->access_flags & kAccStatic) != 0)) {
    *skip_message = "Class initializer";
    return true;
  }

  // Filter 3: if this method is a special pattern, go ahead and emit the canned pattern.
  if (cu_->compiler_driver->GetMethodInlinerMap() != nullptr &&
      cu_->compiler_driver->GetMethodInlinerMap()->GetMethodInliner(cu_->dex_file)
          ->IsSpecial(cu_->method_idx)) {
    return false;
  }

  // Filter 4: if small, just compile.
  if (GetNumDalvikInsns() < small_cutoff) {
    return false;
  }

  // Analyze graph for:
  //  o floating point computation
  //  o basic blocks contained in loop with heavy arithmetic.
  //  o proportion of conditional branches.

  MethodStats stats;
  memset(&stats, 0, sizeof(stats));

  // Visit flags gate AnalyzeBlock's extended-block merging; reset them first.
  ClearAllVisitedFlags();
  AllNodesIterator iter(this);
  for (BasicBlock* bb = iter.Next(); bb != NULL; bb = iter.Next()) {
    AnalyzeBlock(bb, &stats);
  }

  return ComputeSkipCompilation(&stats, skip_compilation, skip_message);
}
1204
/*
 * Collect the distinct instance (IGET/IPUT) and static (SGET/SPUT) field
 * references in the method, assign each MIR a compact lowering-info index,
 * and resolve the field infos.
 *
 * Instance and static field entries share one scratch array: instance fields
 * grow from index 0 upward (ifield_pos), static fields grow from max_refs
 * downward (sfield_pos). Because the two regions grow toward each other,
 * ifield_pos <= sfield_pos is an invariant (checked below), and max_refs —
 * the maximum possible number of field refs given the code size — bounds the
 * total without a second pass.
 */
void MIRGraph::DoCacheFieldLoweringInfo() {
  // All IGET/IPUT/SGET/SPUT instructions take 2 code units and there must also be a RETURN.
  const uint32_t max_refs = (GetNumDalvikInsns() - 1u) / 2u;
  ScopedArenaAllocator allocator(&cu_->arena_stack);
  uint16_t* field_idxs = allocator.AllocArray<uint16_t>(max_refs, kArenaAllocMisc);
  DexMemAccessType* field_types = allocator.AllocArray<DexMemAccessType>(max_refs, kArenaAllocMisc);

  // Find IGET/IPUT/SGET/SPUT insns, store IGET/IPUT fields at the beginning, SGET/SPUT at the end.
  size_t ifield_pos = 0u;
  size_t sfield_pos = max_refs;
  AllNodesIterator iter(this);
  for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
    if (bb->block_type != kDalvikByteCode) {
      continue;
    }
    for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
      // Get field index and try to find it among existing indexes. If found, it's usually among
      // the last few added, so we'll start the search from ifield_pos/sfield_pos. Though this
      // is a linear search, it actually performs much better than map based approach.
      if (IsInstructionIGetOrIPut(mir->dalvikInsn.opcode)) {
        // Instance field: vC holds the field index.
        uint16_t field_idx = mir->dalvikInsn.vC;
        size_t i = ifield_pos;
        // Scan backward from the most recently added entry.
        while (i != 0u && field_idxs[i - 1] != field_idx) {
          --i;
        }
        if (i != 0u) {
          // Found at i-1: reuse its lowering-info slot.
          mir->meta.ifield_lowering_info = i - 1;
          DCHECK_EQ(field_types[i - 1], IGetOrIPutMemAccessType(mir->dalvikInsn.opcode));
        } else {
          // New instance field: append to the low end.
          mir->meta.ifield_lowering_info = ifield_pos;
          field_idxs[ifield_pos] = field_idx;
          field_types[ifield_pos] = IGetOrIPutMemAccessType(mir->dalvikInsn.opcode);
          ++ifield_pos;
        }
      } else if (IsInstructionSGetOrSPut(mir->dalvikInsn.opcode)) {
        // Static field: vB holds the field index.
        uint16_t field_idx = mir->dalvikInsn.vB;
        size_t i = sfield_pos;
        // Scan forward through the high-end region (most recent entry is at sfield_pos).
        while (i != max_refs && field_idxs[i] != field_idx) {
          ++i;
        }
        if (i != max_refs) {
          // Found: translate the array position to the lowering-info index.
          // Static entries are stored high-to-low, so index = max_refs - i - 1.
          mir->meta.sfield_lowering_info = max_refs - i - 1u;
          DCHECK_EQ(field_types[i], SGetOrSPutMemAccessType(mir->dalvikInsn.opcode));
        } else {
          // New static field: prepend at the shrinking high end.
          mir->meta.sfield_lowering_info = max_refs - sfield_pos;
          --sfield_pos;
          field_idxs[sfield_pos] = field_idx;
          field_types[sfield_pos] = SGetOrSPutMemAccessType(mir->dalvikInsn.opcode);
        }
      }
      // The two regions must never overlap.
      DCHECK_LE(ifield_pos, sfield_pos);
    }
  }

  if (ifield_pos != 0u) {
    // Resolve instance field infos.
    DCHECK_EQ(ifield_lowering_infos_.size(), 0u);
    ifield_lowering_infos_.reserve(ifield_pos);
    for (size_t pos = 0u; pos != ifield_pos; ++pos) {
      ifield_lowering_infos_.push_back(MirIFieldLoweringInfo(field_idxs[pos], field_types[pos]));
    }
    MirIFieldLoweringInfo::Resolve(cu_->compiler_driver, GetCurrentDexCompilationUnit(),
                                   ifield_lowering_infos_.data(), ifield_pos);
  }

  if (sfield_pos != max_refs) {
    // Resolve static field infos. Iterate high-to-low so the vector order
    // matches the lowering-info indices assigned above.
    DCHECK_EQ(sfield_lowering_infos_.size(), 0u);
    sfield_lowering_infos_.reserve(max_refs - sfield_pos);
    for (size_t pos = max_refs; pos != sfield_pos;) {
      --pos;
      sfield_lowering_infos_.push_back(MirSFieldLoweringInfo(field_idxs[pos], field_types[pos]));
    }
    MirSFieldLoweringInfo::Resolve(cu_->compiler_driver, GetCurrentDexCompilationUnit(),
                                   sfield_lowering_infos_.data(), max_refs - sfield_pos);
  }
}
1282
1283void MIRGraph::DoCacheMethodLoweringInfo() {
1284  static constexpr uint16_t invoke_types[] = { kVirtual, kSuper, kDirect, kStatic, kInterface };
1285
1286  // Embed the map value in the entry to avoid extra padding in 64-bit builds.
1287  struct MapEntry {
1288    // Map key: target_method_idx, invoke_type, devirt_target. Ordered to avoid padding.
1289    const MethodReference* devirt_target;
1290    uint16_t target_method_idx;
1291    uint16_t invoke_type;
1292    // Map value.
1293    uint32_t lowering_info_index;
1294  };
1295
1296  // Sort INVOKEs by method index, then by opcode, then by devirtualization target.
1297  struct MapEntryComparator {
1298    bool operator()(const MapEntry& lhs, const MapEntry& rhs) const {
1299      if (lhs.target_method_idx != rhs.target_method_idx) {
1300        return lhs.target_method_idx < rhs.target_method_idx;
1301      }
1302      if (lhs.invoke_type != rhs.invoke_type) {
1303        return lhs.invoke_type < rhs.invoke_type;
1304      }
1305      if (lhs.devirt_target != rhs.devirt_target) {
1306        if (lhs.devirt_target == nullptr) {
1307          return true;
1308        }
1309        if (rhs.devirt_target == nullptr) {
1310          return false;
1311        }
1312        return devirt_cmp(*lhs.devirt_target, *rhs.devirt_target);
1313      }
1314      return false;
1315    }
1316    MethodReferenceComparator devirt_cmp;
1317  };
1318
1319  ScopedArenaAllocator allocator(&cu_->arena_stack);
1320
1321  // All INVOKE instructions take 3 code units and there must also be a RETURN.
1322  uint32_t max_refs = (GetNumDalvikInsns() - 1u) / 3u;
1323
1324  // Map invoke key (see MapEntry) to lowering info index and vice versa.
1325  // The invoke_map and sequential entries are essentially equivalent to Boost.MultiIndex's
1326  // multi_index_container with one ordered index and one sequential index.
1327  ScopedArenaSet<MapEntry, MapEntryComparator> invoke_map(MapEntryComparator(),
1328                                                          allocator.Adapter());
1329  const MapEntry** sequential_entries =
1330      allocator.AllocArray<const MapEntry*>(max_refs, kArenaAllocMisc);
1331
1332  // Find INVOKE insns and their devirtualization targets.
1333  AllNodesIterator iter(this);
1334  for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
1335    if (bb->block_type != kDalvikByteCode) {
1336      continue;
1337    }
1338    for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
1339      if (IsInstructionInvoke(mir->dalvikInsn.opcode)) {
1340        // Decode target method index and invoke type.
1341        uint16_t target_method_idx = mir->dalvikInsn.vB;
1342        DexInvokeType invoke_type_idx = InvokeInstructionType(mir->dalvikInsn.opcode);
1343
1344        // Find devirtualization target.
1345        // TODO: The devirt map is ordered by the dex pc here. Is there a way to get INVOKEs
1346        // ordered by dex pc as well? That would allow us to keep an iterator to devirt targets
1347        // and increment it as needed instead of making O(log n) lookups.
1348        const VerifiedMethod* verified_method = GetCurrentDexCompilationUnit()->GetVerifiedMethod();
1349        const MethodReference* devirt_target = verified_method->GetDevirtTarget(mir->offset);
1350
1351        // Try to insert a new entry. If the insertion fails, we will have found an old one.
1352        MapEntry entry = {
1353            devirt_target,
1354            target_method_idx,
1355            invoke_types[invoke_type_idx],
1356            static_cast<uint32_t>(invoke_map.size())
1357        };
1358        auto it = invoke_map.insert(entry).first;  // Iterator to either the old or the new entry.
1359        mir->meta.method_lowering_info = it->lowering_info_index;
1360        // If we didn't actually insert, this will just overwrite an existing value with the same.
1361        sequential_entries[it->lowering_info_index] = &*it;
1362      }
1363    }
1364  }
1365
1366  if (invoke_map.empty()) {
1367    return;
1368  }
1369
1370  // Prepare unique method infos, set method info indexes for their MIRs.
1371  DCHECK_EQ(method_lowering_infos_.size(), 0u);
1372  const size_t count = invoke_map.size();
1373  method_lowering_infos_.reserve(count);
1374  for (size_t pos = 0u; pos != count; ++pos) {
1375    const MapEntry* entry = sequential_entries[pos];
1376    MirMethodLoweringInfo method_info(entry->target_method_idx,
1377                                      static_cast<InvokeType>(entry->invoke_type));
1378    if (entry->devirt_target != nullptr) {
1379      method_info.SetDevirtualizationTarget(*entry->devirt_target);
1380    }
1381    method_lowering_infos_.push_back(method_info);
1382  }
1383  MirMethodLoweringInfo::Resolve(cu_->compiler_driver, GetCurrentDexCompilationUnit(),
1384                                 method_lowering_infos_.data(), count);
1385}
1386
1387bool MIRGraph::SkipCompilationByName(const std::string& methodname) {
1388  return cu_->compiler_driver->SkipCompilation(methodname);
1389}
1390
1391}  // namespace art
1392