mir_analysis.cc revision 2ab40eb3c23559205ac7b9b039bd749458e8a761
/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <algorithm>
#include <memory>

#include "compiler_internals.h"
#include "dataflow_iterator-inl.h"
#include "dex_instruction.h"
#include "dex_instruction-inl.h"
#include "dex/verified_method.h"
#include "dex/quick/dex_file_method_inliner.h"
#include "dex/quick/dex_file_to_method_inliner_map.h"
#include "driver/compiler_options.h"
#include "utils/scoped_arena_containers.h"

namespace art {

// Instruction characteristics used to statically identify computation-intensive methods.
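// Each entry ORs together the AN_* attribute flags for one opcode; for example, ADD_FLOAT
// below is AN_MATH | AN_FP | AN_SINGLE, so it counts toward both the math and floating-point
// ratios computed in ComputeSkipCompilation().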
const uint32_t MIRGraph::analysis_attributes_[kMirOpLast] = {
  // 00 NOP
  AN_NONE,

  // 01 MOVE vA, vB
  AN_MOVE,

  // 02 MOVE_FROM16 vAA, vBBBB
  AN_MOVE,

  // 03 MOVE_16 vAAAA, vBBBB
  AN_MOVE,

  // 04 MOVE_WIDE vA, vB
  AN_MOVE,

  // 05 MOVE_WIDE_FROM16 vAA, vBBBB
  AN_MOVE,

  // 06 MOVE_WIDE_16 vAAAA, vBBBB
  AN_MOVE,

  // 07 MOVE_OBJECT vA, vB
  AN_MOVE,

  // 08 MOVE_OBJECT_FROM16 vAA, vBBBB
  AN_MOVE,

  // 09 MOVE_OBJECT_16 vAAAA, vBBBB
  AN_MOVE,

  // 0A MOVE_RESULT vAA
  AN_MOVE,

  // 0B MOVE_RESULT_WIDE vAA
  AN_MOVE,

  // 0C MOVE_RESULT_OBJECT vAA
  AN_MOVE,

  // 0D MOVE_EXCEPTION vAA
  AN_MOVE,

  // 0E RETURN_VOID
  AN_BRANCH,

  // 0F RETURN vAA
  AN_BRANCH,

  // 10 RETURN_WIDE vAA
  AN_BRANCH,

  // 11 RETURN_OBJECT vAA
  AN_BRANCH,

  // 12 CONST_4 vA, #+B
  AN_SIMPLECONST,

  // 13 CONST_16 vAA, #+BBBB
  AN_SIMPLECONST,

  // 14 CONST vAA, #+BBBBBBBB
  AN_SIMPLECONST,

  // 15 CONST_HIGH16 vAA, #+BBBB0000
  AN_SIMPLECONST,

  // 16 CONST_WIDE_16 vAA, #+BBBB
  AN_SIMPLECONST,

  // 17 CONST_WIDE_32 vAA, #+BBBBBBBB
  AN_SIMPLECONST,

  // 18 CONST_WIDE vAA, #+BBBBBBBBBBBBBBBB
  AN_SIMPLECONST,

  // 19 CONST_WIDE_HIGH16 vAA, #+BBBB000000000000
  AN_SIMPLECONST,

  // 1A CONST_STRING vAA, string@BBBB
  AN_NONE,

  // 1B CONST_STRING_JUMBO vAA, string@BBBBBBBB
  AN_NONE,

  // 1C CONST_CLASS vAA, type@BBBB
  AN_NONE,

  // 1D MONITOR_ENTER vAA
  AN_NONE,

  // 1E MONITOR_EXIT vAA
  AN_NONE,

  // 1F CHECK_CAST vAA, type@BBBB
  AN_NONE,

  // 20 INSTANCE_OF vA, vB, type@CCCC
  AN_NONE,

  // 21 ARRAY_LENGTH vA, vB
  AN_ARRAYOP,

  // 22 NEW_INSTANCE vAA, type@BBBB
  AN_HEAVYWEIGHT,

  // 23 NEW_ARRAY vA, vB, type@CCCC
  AN_HEAVYWEIGHT,

  // 24 FILLED_NEW_ARRAY {vD, vE, vF, vG, vA}
  AN_HEAVYWEIGHT,

  // 25 FILLED_NEW_ARRAY_RANGE {vCCCC .. vNNNN}, type@BBBB
  AN_HEAVYWEIGHT,

  // 26 FILL_ARRAY_DATA vAA, +BBBBBBBB
  AN_NONE,

  // 27 THROW vAA
  AN_HEAVYWEIGHT | AN_BRANCH,

  // 28 GOTO
  AN_BRANCH,

  // 29 GOTO_16
  AN_BRANCH,

  // 2A GOTO_32
  AN_BRANCH,

  // 2B PACKED_SWITCH vAA, +BBBBBBBB
  AN_SWITCH,

  // 2C SPARSE_SWITCH vAA, +BBBBBBBB
  AN_SWITCH,

  // 2D CMPL_FLOAT vAA, vBB, vCC
  AN_MATH | AN_FP | AN_SINGLE,

  // 2E CMPG_FLOAT vAA, vBB, vCC
  AN_MATH | AN_FP | AN_SINGLE,

  // 2F CMPL_DOUBLE vAA, vBB, vCC
  AN_MATH | AN_FP | AN_DOUBLE,

  // 30 CMPG_DOUBLE vAA, vBB, vCC
  AN_MATH | AN_FP | AN_DOUBLE,

  // 31 CMP_LONG vAA, vBB, vCC
  AN_MATH | AN_LONG,

  // 32 IF_EQ vA, vB, +CCCC
  AN_MATH | AN_BRANCH | AN_INT,

  // 33 IF_NE vA, vB, +CCCC
  AN_MATH | AN_BRANCH | AN_INT,

  // 34 IF_LT vA, vB, +CCCC
  AN_MATH | AN_BRANCH | AN_INT,

  // 35 IF_GE vA, vB, +CCCC
  AN_MATH | AN_BRANCH | AN_INT,

  // 36 IF_GT vA, vB, +CCCC
  AN_MATH | AN_BRANCH | AN_INT,

  // 37 IF_LE vA, vB, +CCCC
  AN_MATH | AN_BRANCH | AN_INT,

  // 38 IF_EQZ vAA, +BBBB
  AN_MATH | AN_BRANCH | AN_INT,

  // 39 IF_NEZ vAA, +BBBB
  AN_MATH | AN_BRANCH | AN_INT,

  // 3A IF_LTZ vAA, +BBBB
  AN_MATH | AN_BRANCH | AN_INT,

  // 3B IF_GEZ vAA, +BBBB
  AN_MATH | AN_BRANCH | AN_INT,

  // 3C IF_GTZ vAA, +BBBB
  AN_MATH | AN_BRANCH | AN_INT,

  // 3D IF_LEZ vAA, +BBBB
  AN_MATH | AN_BRANCH | AN_INT,

  // 3E UNUSED_3E
  AN_NONE,

  // 3F UNUSED_3F
  AN_NONE,

  // 40 UNUSED_40
  AN_NONE,

  // 41 UNUSED_41
  AN_NONE,

  // 42 UNUSED_42
  AN_NONE,

  // 43 UNUSED_43
  AN_NONE,

  // 44 AGET vAA, vBB, vCC
  AN_ARRAYOP,

  // 45 AGET_WIDE vAA, vBB, vCC
  AN_ARRAYOP,

  // 46 AGET_OBJECT vAA, vBB, vCC
  AN_ARRAYOP,

  // 47 AGET_BOOLEAN vAA, vBB, vCC
  AN_ARRAYOP,

  // 48 AGET_BYTE vAA, vBB, vCC
  AN_ARRAYOP,

  // 49 AGET_CHAR vAA, vBB, vCC
  AN_ARRAYOP,

  // 4A AGET_SHORT vAA, vBB, vCC
  AN_ARRAYOP,

  // 4B APUT vAA, vBB, vCC
  AN_ARRAYOP,

  // 4C APUT_WIDE vAA, vBB, vCC
  AN_ARRAYOP,

  // 4D APUT_OBJECT vAA, vBB, vCC
  AN_ARRAYOP,

  // 4E APUT_BOOLEAN vAA, vBB, vCC
  AN_ARRAYOP,

  // 4F APUT_BYTE vAA, vBB, vCC
  AN_ARRAYOP,

  // 50 APUT_CHAR vAA, vBB, vCC
  AN_ARRAYOP,

  // 51 APUT_SHORT vAA, vBB, vCC
  AN_ARRAYOP,

  // 52 IGET vA, vB, field@CCCC
  AN_NONE,

  // 53 IGET_WIDE vA, vB, field@CCCC
  AN_NONE,

  // 54 IGET_OBJECT vA, vB, field@CCCC
  AN_NONE,

  // 55 IGET_BOOLEAN vA, vB, field@CCCC
  AN_NONE,

  // 56 IGET_BYTE vA, vB, field@CCCC
  AN_NONE,

  // 57 IGET_CHAR vA, vB, field@CCCC
  AN_NONE,

  // 58 IGET_SHORT vA, vB, field@CCCC
  AN_NONE,

  // 59 IPUT vA, vB, field@CCCC
  AN_NONE,

  // 5A IPUT_WIDE vA, vB, field@CCCC
  AN_NONE,

  // 5B IPUT_OBJECT vA, vB, field@CCCC
  AN_NONE,

  // 5C IPUT_BOOLEAN vA, vB, field@CCCC
  AN_NONE,

  // 5D IPUT_BYTE vA, vB, field@CCCC
  AN_NONE,

  // 5E IPUT_CHAR vA, vB, field@CCCC
  AN_NONE,

  // 5F IPUT_SHORT vA, vB, field@CCCC
  AN_NONE,

  // 60 SGET vAA, field@BBBB
  AN_NONE,

  // 61 SGET_WIDE vAA, field@BBBB
  AN_NONE,

  // 62 SGET_OBJECT vAA, field@BBBB
  AN_NONE,

  // 63 SGET_BOOLEAN vAA, field@BBBB
  AN_NONE,

  // 64 SGET_BYTE vAA, field@BBBB
  AN_NONE,

  // 65 SGET_CHAR vAA, field@BBBB
  AN_NONE,

  // 66 SGET_SHORT vAA, field@BBBB
  AN_NONE,

  // 67 SPUT vAA, field@BBBB
  AN_NONE,

  // 68 SPUT_WIDE vAA, field@BBBB
  AN_NONE,

  // 69 SPUT_OBJECT vAA, field@BBBB
  AN_NONE,

  // 6A SPUT_BOOLEAN vAA, field@BBBB
  AN_NONE,

  // 6B SPUT_BYTE vAA, field@BBBB
  AN_NONE,

  // 6C SPUT_CHAR vAA, field@BBBB
  AN_NONE,

  // 6D SPUT_SHORT vAA, field@BBBB
  AN_NONE,

  // 6E INVOKE_VIRTUAL {vD, vE, vF, vG, vA}
  AN_INVOKE | AN_HEAVYWEIGHT,

  // 6F INVOKE_SUPER {vD, vE, vF, vG, vA}
  AN_INVOKE | AN_HEAVYWEIGHT,

  // 70 INVOKE_DIRECT {vD, vE, vF, vG, vA}
  AN_INVOKE | AN_HEAVYWEIGHT,

  // 71 INVOKE_STATIC {vD, vE, vF, vG, vA}
  AN_INVOKE | AN_HEAVYWEIGHT,

  // 72 INVOKE_INTERFACE {vD, vE, vF, vG, vA}
  AN_INVOKE | AN_HEAVYWEIGHT,

  // 73 UNUSED_73
  AN_NONE,

  // 74 INVOKE_VIRTUAL_RANGE {vCCCC .. vNNNN}
  AN_INVOKE | AN_HEAVYWEIGHT,

  // 75 INVOKE_SUPER_RANGE {vCCCC .. vNNNN}
  AN_INVOKE | AN_HEAVYWEIGHT,

  // 76 INVOKE_DIRECT_RANGE {vCCCC .. vNNNN}
  AN_INVOKE | AN_HEAVYWEIGHT,

  // 77 INVOKE_STATIC_RANGE {vCCCC .. vNNNN}
  AN_INVOKE | AN_HEAVYWEIGHT,

  // 78 INVOKE_INTERFACE_RANGE {vCCCC .. vNNNN}
  AN_INVOKE | AN_HEAVYWEIGHT,

  // 79 UNUSED_79
  AN_NONE,

  // 7A UNUSED_7A
  AN_NONE,

  // 7B NEG_INT vA, vB
  AN_MATH | AN_INT,

  // 7C NOT_INT vA, vB
  AN_MATH | AN_INT,

  // 7D NEG_LONG vA, vB
  AN_MATH | AN_LONG,

  // 7E NOT_LONG vA, vB
  AN_MATH | AN_LONG,

  // 7F NEG_FLOAT vA, vB
  AN_MATH | AN_FP | AN_SINGLE,

  // 80 NEG_DOUBLE vA, vB
  AN_MATH | AN_FP | AN_DOUBLE,

  // 81 INT_TO_LONG vA, vB
  AN_MATH | AN_INT | AN_LONG,

  // 82 INT_TO_FLOAT vA, vB
  AN_MATH | AN_FP | AN_INT | AN_SINGLE,

  // 83 INT_TO_DOUBLE vA, vB
  AN_MATH | AN_FP | AN_INT | AN_DOUBLE,

  // 84 LONG_TO_INT vA, vB
  AN_MATH | AN_INT | AN_LONG,

  // 85 LONG_TO_FLOAT vA, vB
  AN_MATH | AN_FP | AN_LONG | AN_SINGLE,

  // 86 LONG_TO_DOUBLE vA, vB
  AN_MATH | AN_FP | AN_LONG | AN_DOUBLE,

  // 87 FLOAT_TO_INT vA, vB
  AN_MATH | AN_FP | AN_INT | AN_SINGLE,

  // 88 FLOAT_TO_LONG vA, vB
  AN_MATH | AN_FP | AN_LONG | AN_SINGLE,

  // 89 FLOAT_TO_DOUBLE vA, vB
  AN_MATH | AN_FP | AN_SINGLE | AN_DOUBLE,

  // 8A DOUBLE_TO_INT vA, vB
  AN_MATH | AN_FP | AN_INT | AN_DOUBLE,

  // 8B DOUBLE_TO_LONG vA, vB
  AN_MATH | AN_FP | AN_LONG | AN_DOUBLE,

  // 8C DOUBLE_TO_FLOAT vA, vB
  AN_MATH | AN_FP | AN_SINGLE | AN_DOUBLE,

  // 8D INT_TO_BYTE vA, vB
  AN_MATH | AN_INT,

  // 8E INT_TO_CHAR vA, vB
  AN_MATH | AN_INT,

  // 8F INT_TO_SHORT vA, vB
  AN_MATH | AN_INT,

  // 90 ADD_INT vAA, vBB, vCC
  AN_MATH | AN_INT,

  // 91 SUB_INT vAA, vBB, vCC
  AN_MATH | AN_INT,

  // 92 MUL_INT vAA, vBB, vCC
  AN_MATH | AN_INT,

  // 93 DIV_INT vAA, vBB, vCC
  AN_MATH | AN_INT,

  // 94 REM_INT vAA, vBB, vCC
  AN_MATH | AN_INT,

  // 95 AND_INT vAA, vBB, vCC
  AN_MATH | AN_INT,

  // 96 OR_INT vAA, vBB, vCC
  AN_MATH | AN_INT,

  // 97 XOR_INT vAA, vBB, vCC
  AN_MATH | AN_INT,

  // 98 SHL_INT vAA, vBB, vCC
  AN_MATH | AN_INT,

  // 99 SHR_INT vAA, vBB, vCC
  AN_MATH | AN_INT,

  // 9A USHR_INT vAA, vBB, vCC
  AN_MATH | AN_INT,

  // 9B ADD_LONG vAA, vBB, vCC
  AN_MATH | AN_LONG,

  // 9C SUB_LONG vAA, vBB, vCC
  AN_MATH | AN_LONG,

  // 9D MUL_LONG vAA, vBB, vCC
  AN_MATH | AN_LONG,

  // 9E DIV_LONG vAA, vBB, vCC
  AN_MATH | AN_LONG,

  // 9F REM_LONG vAA, vBB, vCC
  AN_MATH | AN_LONG,

  // A0 AND_LONG vAA, vBB, vCC
  AN_MATH | AN_LONG,

  // A1 OR_LONG vAA, vBB, vCC
  AN_MATH | AN_LONG,

  // A2 XOR_LONG vAA, vBB, vCC
  AN_MATH | AN_LONG,

  // A3 SHL_LONG vAA, vBB, vCC
  AN_MATH | AN_LONG,

  // A4 SHR_LONG vAA, vBB, vCC
  AN_MATH | AN_LONG,

  // A5 USHR_LONG vAA, vBB, vCC
  AN_MATH | AN_LONG,

  // A6 ADD_FLOAT vAA, vBB, vCC
  AN_MATH | AN_FP | AN_SINGLE,

  // A7 SUB_FLOAT vAA, vBB, vCC
  AN_MATH | AN_FP | AN_SINGLE,

  // A8 MUL_FLOAT vAA, vBB, vCC
  AN_MATH | AN_FP | AN_SINGLE,

  // A9 DIV_FLOAT vAA, vBB, vCC
  AN_MATH | AN_FP | AN_SINGLE,

  // AA REM_FLOAT vAA, vBB, vCC
  AN_MATH | AN_FP | AN_SINGLE,

  // AB ADD_DOUBLE vAA, vBB, vCC
  AN_MATH | AN_FP | AN_DOUBLE,

  // AC SUB_DOUBLE vAA, vBB, vCC
  AN_MATH | AN_FP | AN_DOUBLE,

  // AD MUL_DOUBLE vAA, vBB, vCC
  AN_MATH | AN_FP | AN_DOUBLE,

  // AE DIV_DOUBLE vAA, vBB, vCC
  AN_MATH | AN_FP | AN_DOUBLE,

  // AF REM_DOUBLE vAA, vBB, vCC
  AN_MATH | AN_FP | AN_DOUBLE,

  // B0 ADD_INT_2ADDR vA, vB
  AN_MATH | AN_INT,

  // B1 SUB_INT_2ADDR vA, vB
  AN_MATH | AN_INT,

  // B2 MUL_INT_2ADDR vA, vB
  AN_MATH | AN_INT,

  // B3 DIV_INT_2ADDR vA, vB
  AN_MATH | AN_INT,

  // B4 REM_INT_2ADDR vA, vB
  AN_MATH | AN_INT,

  // B5 AND_INT_2ADDR vA, vB
  AN_MATH | AN_INT,

  // B6 OR_INT_2ADDR vA, vB
  AN_MATH | AN_INT,

  // B7 XOR_INT_2ADDR vA, vB
  AN_MATH | AN_INT,

  // B8 SHL_INT_2ADDR vA, vB
  AN_MATH | AN_INT,

  // B9 SHR_INT_2ADDR vA, vB
  AN_MATH | AN_INT,

  // BA USHR_INT_2ADDR vA, vB
  AN_MATH | AN_INT,

  // BB ADD_LONG_2ADDR vA, vB
  AN_MATH | AN_LONG,

  // BC SUB_LONG_2ADDR vA, vB
  AN_MATH | AN_LONG,

  // BD MUL_LONG_2ADDR vA, vB
  AN_MATH | AN_LONG,

  // BE DIV_LONG_2ADDR vA, vB
  AN_MATH | AN_LONG,

  // BF REM_LONG_2ADDR vA, vB
  AN_MATH | AN_LONG,

  // C0 AND_LONG_2ADDR vA, vB
  AN_MATH | AN_LONG,

  // C1 OR_LONG_2ADDR vA, vB
  AN_MATH | AN_LONG,

  // C2 XOR_LONG_2ADDR vA, vB
  AN_MATH | AN_LONG,

  // C3 SHL_LONG_2ADDR vA, vB
  AN_MATH | AN_LONG,

  // C4 SHR_LONG_2ADDR vA, vB
  AN_MATH | AN_LONG,

  // C5 USHR_LONG_2ADDR vA, vB
  AN_MATH | AN_LONG,

  // C6 ADD_FLOAT_2ADDR vA, vB
  AN_MATH | AN_FP | AN_SINGLE,

  // C7 SUB_FLOAT_2ADDR vA, vB
  AN_MATH | AN_FP | AN_SINGLE,

  // C8 MUL_FLOAT_2ADDR vA, vB
  AN_MATH | AN_FP | AN_SINGLE,

  // C9 DIV_FLOAT_2ADDR vA, vB
  AN_MATH | AN_FP | AN_SINGLE,

  // CA REM_FLOAT_2ADDR vA, vB
  AN_MATH | AN_FP | AN_SINGLE,

  // CB ADD_DOUBLE_2ADDR vA, vB
  AN_MATH | AN_FP | AN_DOUBLE,

  // CC SUB_DOUBLE_2ADDR vA, vB
  AN_MATH | AN_FP | AN_DOUBLE,

  // CD MUL_DOUBLE_2ADDR vA, vB
  AN_MATH | AN_FP | AN_DOUBLE,

  // CE DIV_DOUBLE_2ADDR vA, vB
  AN_MATH | AN_FP | AN_DOUBLE,

  // CF REM_DOUBLE_2ADDR vA, vB
  AN_MATH | AN_FP | AN_DOUBLE,

  // D0 ADD_INT_LIT16 vA, vB, #+CCCC
  AN_MATH | AN_INT,

  // D1 RSUB_INT vA, vB, #+CCCC
  AN_MATH | AN_INT,

  // D2 MUL_INT_LIT16 vA, vB, #+CCCC
  AN_MATH | AN_INT,

  // D3 DIV_INT_LIT16 vA, vB, #+CCCC
  AN_MATH | AN_INT,

  // D4 REM_INT_LIT16 vA, vB, #+CCCC
  AN_MATH | AN_INT,

  // D5 AND_INT_LIT16 vA, vB, #+CCCC
  AN_MATH | AN_INT,

  // D6 OR_INT_LIT16 vA, vB, #+CCCC
  AN_MATH | AN_INT,

  // D7 XOR_INT_LIT16 vA, vB, #+CCCC
  AN_MATH | AN_INT,

  // D8 ADD_INT_LIT8 vAA, vBB, #+CC
  AN_MATH | AN_INT,

  // D9 RSUB_INT_LIT8 vAA, vBB, #+CC
  AN_MATH | AN_INT,

  // DA MUL_INT_LIT8 vAA, vBB, #+CC
  AN_MATH | AN_INT,

  // DB DIV_INT_LIT8 vAA, vBB, #+CC
  AN_MATH | AN_INT,

  // DC REM_INT_LIT8 vAA, vBB, #+CC
  AN_MATH | AN_INT,

  // DD AND_INT_LIT8 vAA, vBB, #+CC
  AN_MATH | AN_INT,

  // DE OR_INT_LIT8 vAA, vBB, #+CC
  AN_MATH | AN_INT,

  // DF XOR_INT_LIT8 vAA, vBB, #+CC
  AN_MATH | AN_INT,

  // E0 SHL_INT_LIT8 vAA, vBB, #+CC
  AN_MATH | AN_INT,

  // E1 SHR_INT_LIT8 vAA, vBB, #+CC
  AN_MATH | AN_INT,

  // E2 USHR_INT_LIT8 vAA, vBB, #+CC
  AN_MATH | AN_INT,

  // E3 IGET_VOLATILE
  AN_NONE,

  // E4 IPUT_VOLATILE
  AN_NONE,

  // E5 SGET_VOLATILE
  AN_NONE,

  // E6 SPUT_VOLATILE
  AN_NONE,

  // E7 IGET_OBJECT_VOLATILE
  AN_NONE,

  // E8 IGET_WIDE_VOLATILE
  AN_NONE,

  // E9 IPUT_WIDE_VOLATILE
  AN_NONE,

  // EA SGET_WIDE_VOLATILE
  AN_NONE,

  // EB SPUT_WIDE_VOLATILE
  AN_NONE,

  // EC BREAKPOINT
  AN_NONE,

  // ED THROW_VERIFICATION_ERROR
  AN_HEAVYWEIGHT | AN_BRANCH,

  // EE EXECUTE_INLINE
  AN_NONE,

  // EF EXECUTE_INLINE_RANGE
  AN_NONE,

  // F0 INVOKE_OBJECT_INIT_RANGE
  AN_INVOKE | AN_HEAVYWEIGHT,

  // F1 RETURN_VOID_BARRIER
  AN_BRANCH,

  // F2 IGET_QUICK
  AN_NONE,

  // F3 IGET_WIDE_QUICK
  AN_NONE,

  // F4 IGET_OBJECT_QUICK
  AN_NONE,

  // F5 IPUT_QUICK
  AN_NONE,

  // F6 IPUT_WIDE_QUICK
  AN_NONE,

  // F7 IPUT_OBJECT_QUICK
  AN_NONE,

  // F8 INVOKE_VIRTUAL_QUICK
  AN_INVOKE | AN_HEAVYWEIGHT,

  // F9 INVOKE_VIRTUAL_QUICK_RANGE
  AN_INVOKE | AN_HEAVYWEIGHT,

  // FA INVOKE_SUPER_QUICK
  AN_INVOKE | AN_HEAVYWEIGHT,

  // FB INVOKE_SUPER_QUICK_RANGE
  AN_INVOKE | AN_HEAVYWEIGHT,

  // FC IPUT_OBJECT_VOLATILE
  AN_NONE,

  // FD SGET_OBJECT_VOLATILE
  AN_NONE,

  // FE SPUT_OBJECT_VOLATILE
  AN_NONE,

  // FF UNUSED_FF
  AN_NONE,

  // Beginning of extended MIR opcodes
  // 100 MIR_PHI
  AN_NONE,

  // 101 MIR_COPY
  AN_NONE,

  // 102 MIR_FUSED_CMPL_FLOAT
  AN_NONE,

  // 103 MIR_FUSED_CMPG_FLOAT
  AN_NONE,

  // 104 MIR_FUSED_CMPL_DOUBLE
  AN_NONE,

  // 105 MIR_FUSED_CMPG_DOUBLE
  AN_NONE,

  // 106 MIR_FUSED_CMP_LONG
  AN_NONE,

  // 107 MIR_NOP
  AN_NONE,

  // 108 MIR_NULL_CHECK
  AN_NONE,

  // 109 MIR_RANGE_CHECK
  AN_NONE,

  // 10A MIR_DIV_ZERO_CHECK
  AN_NONE,

  // 10B MIR_CHECK
  AN_NONE,

  // 10C MIR_CHECKPART2
  AN_NONE,

  // 10D MIR_SELECT
  AN_NONE,
};

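// Note: the *_ops fields below are loop-scaled raw counts gathered by AnalyzeBlock(); the
// *_ratio fields are filled in afterwards by ComputeSkipCompilation() as fractions of
// dex_instructions.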
struct MethodStats {
  int dex_instructions;
  int math_ops;
  int fp_ops;
  int array_ops;
  int branch_ops;
  int heavyweight_ops;
  bool has_computational_loop;
  bool has_switch;
  float math_ratio;
  float fp_ratio;
  float array_ratio;
  float branch_ratio;
  float heavyweight_ratio;
};

void MIRGraph::AnalyzeBlock(BasicBlock* bb, MethodStats* stats) {
  if (bb->visited || (bb->block_type != kDalvikByteCode)) {
    return;
  }
  bool computational_block = true;
  bool has_math = false;
  /*
   * For the purposes of this scan, we want to treat the set of basic blocks broken
   * by an exception edge as a single basic block.  We'll scan forward along the fallthrough
   * edges until we reach an explicit branch or return.
   */
  BasicBlock* ending_bb = bb;
  if (ending_bb->last_mir_insn != NULL) {
    uint32_t ending_flags = analysis_attributes_[ending_bb->last_mir_insn->dalvikInsn.opcode];
    while ((ending_flags & AN_BRANCH) == 0) {
      ending_bb = GetBasicBlock(ending_bb->fall_through);
      ending_flags = analysis_attributes_[ending_bb->last_mir_insn->dalvikInsn.opcode];
    }
  }
  /*
   * Ideally, we'd weight the operations by loop nesting level, but to do so we'd
   * first need to do some expensive loop detection - and the point of this is to make
   * an informed guess before investing in computation.  However, we can cheaply detect
   * many simple loop forms without having to do full dataflow analysis.
   */
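  // A rough sketch of the two shapes matched below, assuming typical dx output: a top-tested
  // "while"/"for" loop ends its body with an unconditional GOTO to the test block, whose
  // conditional branch leads back to the loop head (first pattern); a "do { ... } while" body
  // ends with a conditional branch whose taken edge goes straight back to the head (second
  // pattern).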
  int loop_scale_factor = 1;
  // Simple for and while loops
  if ((ending_bb->taken != NullBasicBlockId) && (ending_bb->fall_through == NullBasicBlockId)) {
    if ((GetBasicBlock(ending_bb->taken)->taken == bb->id) ||
        (GetBasicBlock(ending_bb->taken)->fall_through == bb->id)) {
      loop_scale_factor = 25;
    }
  }
  // Simple do-while loop
  if ((ending_bb->taken != NullBasicBlockId) && (ending_bb->taken == bb->id)) {
    loop_scale_factor = 25;
  }

  BasicBlock* tbb = bb;
  bool done = false;
  while (!done) {
    tbb->visited = true;
    for (MIR* mir = tbb->first_mir_insn; mir != NULL; mir = mir->next) {
      if (MIR::DecodedInstruction::IsPseudoMirOp(mir->dalvikInsn.opcode)) {
        // Skip any MIR pseudo-op.
        continue;
      }
      uint32_t flags = analysis_attributes_[mir->dalvikInsn.opcode];
      stats->dex_instructions += loop_scale_factor;
      if ((flags & AN_BRANCH) == 0) {
        computational_block &= ((flags & AN_COMPUTATIONAL) != 0);
      } else {
        stats->branch_ops += loop_scale_factor;
      }
      if ((flags & AN_MATH) != 0) {
        stats->math_ops += loop_scale_factor;
        has_math = true;
      }
      if ((flags & AN_FP) != 0) {
        stats->fp_ops += loop_scale_factor;
      }
      if ((flags & AN_ARRAYOP) != 0) {
        stats->array_ops += loop_scale_factor;
      }
      if ((flags & AN_HEAVYWEIGHT) != 0) {
        stats->heavyweight_ops += loop_scale_factor;
      }
      if ((flags & AN_SWITCH) != 0) {
        stats->has_switch = true;
      }
    }
    if (tbb == ending_bb) {
      done = true;
    } else {
      tbb = GetBasicBlock(tbb->fall_through);
    }
  }
  if (has_math && computational_block && (loop_scale_factor > 1)) {
    stats->has_computational_loop = true;
  }
}

bool MIRGraph::ComputeSkipCompilation(MethodStats* stats, bool skip_default,
                                      std::string* skip_message) {
  float count = stats->dex_instructions;
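  // Note: count is a float so the ratio computations below use floating-point, not integer,
  // division.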
  stats->math_ratio = stats->math_ops / count;
  stats->fp_ratio = stats->fp_ops / count;
  stats->branch_ratio = stats->branch_ops / count;
  stats->array_ratio = stats->array_ops / count;
  stats->heavyweight_ratio = stats->heavyweight_ops / count;

  if (cu_->enable_debug & (1 << kDebugShowFilterStats)) {
    LOG(INFO) << "STATS " << stats->dex_instructions << ", math:"
              << stats->math_ratio << ", fp:"
              << stats->fp_ratio << ", br:"
              << stats->branch_ratio << ", hw:"
              << stats->heavyweight_ratio << ", arr:"
              << stats->array_ratio << ", hot:"
              << stats->has_computational_loop << ", "
              << PrettyMethod(cu_->method_idx, *cu_->dex_file);
  }

  // Computation intensive?
  if (stats->has_computational_loop && (stats->heavyweight_ratio < 0.04)) {
    return false;
  }

  // Complex, logic-intensive?
  if (cu_->compiler_driver->GetCompilerOptions().IsSmallMethod(GetNumDalvikInsns()) &&
      stats->branch_ratio > 0.3) {
    return false;
  }

  // Significant floating point?
  if (stats->fp_ratio > 0.05) {
    return false;
  }

  // Significant generic math?
  if (stats->math_ratio > 0.3) {
    return false;
  }

  // If array-intensive, compiling is probably worthwhile.
  if (stats->array_ratio > 0.1) {
    return false;
  }

  // Switch operations benefit greatly from compilation, so go ahead and spend the cycles.
  if (stats->has_switch) {
    return false;
  }

  // If significant in size and high proportion of expensive operations, skip.
  if (cu_->compiler_driver->GetCompilerOptions().IsSmallMethod(GetNumDalvikInsns()) &&
      (stats->heavyweight_ratio > 0.3)) {
    *skip_message = "Is a small method with heavyweight ratio " +
                    std::to_string(stats->heavyweight_ratio);
    return true;
  }

  return skip_default;
}

/*
 * Will eventually want this to be a bit more sophisticated and happen at verification time.
 */
bool MIRGraph::SkipCompilation(std::string* skip_message) {
  const CompilerOptions& compiler_options = cu_->compiler_driver->GetCompilerOptions();
  CompilerOptions::CompilerFilter compiler_filter = compiler_options.GetCompilerFilter();
  if (compiler_filter == CompilerOptions::kEverything) {
    return false;
  }

  // Contains a pattern we don't want to compile?
  if (PuntToInterpreter()) {
    *skip_message = "Punt to interpreter set";
    return true;
  }

  if (!compiler_options.IsCompilationEnabled()) {
    *skip_message = "Compilation disabled";
    return true;
  }

  // Set up compilation cutoffs based on current filter mode.
  size_t small_cutoff = 0;
  size_t default_cutoff = 0;
  switch (compiler_filter) {
    case CompilerOptions::kBalanced:
      small_cutoff = compiler_options.GetSmallMethodThreshold();
      default_cutoff = compiler_options.GetLargeMethodThreshold();
      break;
    case CompilerOptions::kSpace:
      small_cutoff = compiler_options.GetTinyMethodThreshold();
      default_cutoff = compiler_options.GetSmallMethodThreshold();
      break;
    case CompilerOptions::kSpeed:
      small_cutoff = compiler_options.GetHugeMethodThreshold();
      default_cutoff = compiler_options.GetHugeMethodThreshold();
      break;
    default:
      LOG(FATAL) << "Unexpected compiler_filter_: " << compiler_filter;
  }
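
  // E.g., under kSpace, methods below the tiny-method threshold are compiled outright (Filter 4
  // below, once the earlier filters pass), while anything at or above the small-method threshold
  // is presumed skipped unless the analysis further down turns compilation back on; the concrete
  // thresholds live in CompilerOptions.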

  // If size < cutoff, assume we'll compile - but allow removal.
  bool skip_compilation = (GetNumDalvikInsns() >= default_cutoff);
  if (skip_compilation) {
    *skip_message = "#Insns >= default_cutoff: " + std::to_string(GetNumDalvikInsns());
  }

  /*
   * Filter 1: Huge methods are likely to be machine generated, but some aren't.
   * If huge, assume we won't compile, but allow further analysis to turn it back on.
   */
  if (compiler_options.IsHugeMethod(GetNumDalvikInsns())) {
    skip_compilation = true;
    *skip_message = "Huge method: " + std::to_string(GetNumDalvikInsns());
    // If we've got a huge number of basic blocks, don't bother with further analysis.
    if (static_cast<size_t>(num_blocks_) > (compiler_options.GetHugeMethodThreshold() / 2)) {
      return true;
    }
  } else if (compiler_options.IsLargeMethod(GetNumDalvikInsns()) &&
             (GetBranchCount() == 0)) {
    // If it's large and contains no branches, it's likely to be machine-generated initialization.
    *skip_message = "Large method with no branches";
    return true;
  } else if (compiler_filter == CompilerOptions::kSpeed) {
    // If not huge, compile.
    return false;
  }

  // Filter 2: Skip class initializers.
  if (((cu_->access_flags & kAccConstructor) != 0) && ((cu_->access_flags & kAccStatic) != 0)) {
    *skip_message = "Class initializer";
    return true;
  }

  // Filter 3: if this method is a special pattern, go ahead and emit the canned pattern.
  if (cu_->compiler_driver->GetMethodInlinerMap() != nullptr &&
      cu_->compiler_driver->GetMethodInlinerMap()->GetMethodInliner(cu_->dex_file)
          ->IsSpecial(cu_->method_idx)) {
    return false;
  }

  // Filter 4: if small, just compile.
  if (GetNumDalvikInsns() < small_cutoff) {
    return false;
  }

  // Analyze graph for:
  //  o floating point computation
  //  o basic blocks contained in loop with heavy arithmetic.
  //  o proportion of conditional branches.

  MethodStats stats;
  memset(&stats, 0, sizeof(stats));

  ClearAllVisitedFlags();
  AllNodesIterator iter(this);
  for (BasicBlock* bb = iter.Next(); bb != NULL; bb = iter.Next()) {
    AnalyzeBlock(bb, &stats);
  }

  return ComputeSkipCompilation(&stats, skip_compilation, skip_message);
}

void MIRGraph::DoCacheFieldLoweringInfo() {
  // All IGET/IPUT/SGET/SPUT instructions take 2 code units and there must also be a RETURN.
  const uint32_t max_refs = (current_code_item_->insns_size_in_code_units_ - 1u) / 2u;
  ScopedArenaAllocator allocator(&cu_->arena_stack);
  uint16_t* field_idxs =
      reinterpret_cast<uint16_t*>(allocator.Alloc(max_refs * sizeof(uint16_t), kArenaAllocMisc));

  // Find IGET/IPUT/SGET/SPUT insns, store IGET/IPUT fields at the beginning, SGET/SPUT at the end.
  size_t ifield_pos = 0u;
  size_t sfield_pos = max_refs;
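  // Layout sketch: instance-field indexes grow from the front of field_idxs and static-field
  // indexes grow from the back, e.g. [i0 i1 i2 | unused | s1 s0], so the two regions share one
  // allocation and can never collide (see the DCHECK_LE below).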
  AllNodesIterator iter(this);
  for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
    if (bb->block_type != kDalvikByteCode) {
      continue;
    }
    for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
      if (mir->dalvikInsn.opcode >= Instruction::IGET &&
          mir->dalvikInsn.opcode <= Instruction::SPUT_SHORT) {
        const Instruction* insn = Instruction::At(current_code_item_->insns_ + mir->offset);
        // Get the field index and try to find it among existing indexes. If found, it's usually
        // among the last few added, so we'll start the search from ifield_pos/sfield_pos. Though
        // this is a linear search, it actually performs much better than a map-based approach.
        if (mir->dalvikInsn.opcode <= Instruction::IPUT_SHORT) {
          uint16_t field_idx = insn->VRegC_22c();
          size_t i = ifield_pos;
          while (i != 0u && field_idxs[i - 1] != field_idx) {
            --i;
          }
          if (i != 0u) {
            mir->meta.ifield_lowering_info = i - 1;
          } else {
            mir->meta.ifield_lowering_info = ifield_pos;
            field_idxs[ifield_pos++] = field_idx;
          }
        } else {
          uint16_t field_idx = insn->VRegB_21c();
          size_t i = sfield_pos;
          while (i != max_refs && field_idxs[i] != field_idx) {
            ++i;
          }
          if (i != max_refs) {
            mir->meta.sfield_lowering_info = max_refs - i - 1u;
          } else {
            mir->meta.sfield_lowering_info = max_refs - sfield_pos;
            field_idxs[--sfield_pos] = field_idx;
          }
        }
        DCHECK_LE(ifield_pos, sfield_pos);
      }
    }
  }

  if (ifield_pos != 0u) {
    // Resolve instance field infos.
    DCHECK_EQ(ifield_lowering_infos_.Size(), 0u);
    ifield_lowering_infos_.Resize(ifield_pos);
    for (size_t pos = 0u; pos != ifield_pos; ++pos) {
      ifield_lowering_infos_.Insert(MirIFieldLoweringInfo(field_idxs[pos]));
    }
    MirIFieldLoweringInfo::Resolve(cu_->compiler_driver, GetCurrentDexCompilationUnit(),
                                   ifield_lowering_infos_.GetRawStorage(), ifield_pos);
  }

  if (sfield_pos != max_refs) {
    // Resolve static field infos.
    DCHECK_EQ(sfield_lowering_infos_.Size(), 0u);
    sfield_lowering_infos_.Resize(max_refs - sfield_pos);
    for (size_t pos = max_refs; pos != sfield_pos;) {
      --pos;
      sfield_lowering_infos_.Insert(MirSFieldLoweringInfo(field_idxs[pos]));
    }
    MirSFieldLoweringInfo::Resolve(cu_->compiler_driver, GetCurrentDexCompilationUnit(),
                                   sfield_lowering_infos_.GetRawStorage(), max_refs - sfield_pos);
  }
}

void MIRGraph::DoCacheMethodLoweringInfo() {
  static constexpr uint16_t invoke_types[] = { kVirtual, kSuper, kDirect, kStatic, kInterface };

  // Embed the map value in the entry to avoid extra padding in 64-bit builds.
  struct MapEntry {
    // Map key: target_method_idx, invoke_type, devirt_target. Ordered to avoid padding.
    const MethodReference* devirt_target;
    uint16_t target_method_idx;
    uint16_t invoke_type;
    // Map value.
    uint32_t lowering_info_index;
  };

  // Sort INVOKEs by method index, then by opcode, then by devirtualization target.
  struct MapEntryComparator {
    bool operator()(const MapEntry& lhs, const MapEntry& rhs) const {
      if (lhs.target_method_idx != rhs.target_method_idx) {
        return lhs.target_method_idx < rhs.target_method_idx;
      }
      if (lhs.invoke_type != rhs.invoke_type) {
        return lhs.invoke_type < rhs.invoke_type;
      }
      if (lhs.devirt_target != rhs.devirt_target) {
        if (lhs.devirt_target == nullptr) {
          return true;
        }
        if (rhs.devirt_target == nullptr) {
          return false;
        }
        return devirt_cmp(*lhs.devirt_target, *rhs.devirt_target);
      }
      return false;
    }
    MethodReferenceComparator devirt_cmp;
  };
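
  // The comparator defines a strict weak ordering (entries without a devirtualization target
  // sort before those with one); two entries compare equal exactly when method index, invoke
  // type and devirt target all match, which is what lets invoke_map deduplicate identical
  // INVOKEs below.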

  ScopedArenaAllocator allocator(&cu_->arena_stack);

  // All INVOKE instructions take 3 code units and there must also be a RETURN.
  uint32_t max_refs = (current_code_item_->insns_size_in_code_units_ - 1u) / 3u;

  // Map invoke key (see MapEntry) to lowering info index and vice versa.
  // The invoke_map and sequential entries are essentially equivalent to Boost.MultiIndex's
  // multi_index_container with one ordered index and one sequential index.
  ScopedArenaSet<MapEntry, MapEntryComparator> invoke_map(MapEntryComparator(),
                                                          allocator.Adapter());
  const MapEntry** sequential_entries = reinterpret_cast<const MapEntry**>(
      allocator.Alloc(max_refs * sizeof(sequential_entries[0]), kArenaAllocMisc));

  // Find INVOKE insns and their devirtualization targets.
  AllNodesIterator iter(this);
  for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
    if (bb->block_type != kDalvikByteCode) {
      continue;
    }
    for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
      if (mir->dalvikInsn.opcode >= Instruction::INVOKE_VIRTUAL &&
          mir->dalvikInsn.opcode <= Instruction::INVOKE_INTERFACE_RANGE &&
          mir->dalvikInsn.opcode != Instruction::RETURN_VOID_BARRIER) {
        // Decode target method index and invoke type.
        const Instruction* insn = Instruction::At(current_code_item_->insns_ + mir->offset);
        uint16_t target_method_idx;
        uint16_t invoke_type_idx;
        if (mir->dalvikInsn.opcode <= Instruction::INVOKE_INTERFACE) {
          target_method_idx = insn->VRegB_35c();
          invoke_type_idx = mir->dalvikInsn.opcode - Instruction::INVOKE_VIRTUAL;
        } else {
          target_method_idx = insn->VRegB_3rc();
          invoke_type_idx = mir->dalvikInsn.opcode - Instruction::INVOKE_VIRTUAL_RANGE;
        }

        // Find devirtualization target.
        // TODO: The devirt map is ordered by the dex pc here. Is there a way to get INVOKEs
        // ordered by dex pc as well? That would allow us to keep an iterator to devirt targets
        // and increment it as needed instead of making O(log n) lookups.
        const VerifiedMethod* verified_method = GetCurrentDexCompilationUnit()->GetVerifiedMethod();
        const MethodReference* devirt_target = verified_method->GetDevirtTarget(mir->offset);

        // Try to insert a new entry. If the insertion fails, we will have found an old one.
        MapEntry entry = {
            devirt_target,
            target_method_idx,
            invoke_types[invoke_type_idx],
            static_cast<uint32_t>(invoke_map.size())
        };
        auto it = invoke_map.insert(entry).first;  // Iterator to either the old or the new entry.
        mir->meta.method_lowering_info = it->lowering_info_index;
        // If we didn't actually insert, this will just overwrite an existing value with the same.
        sequential_entries[it->lowering_info_index] = &*it;
      }
    }
  }

  if (invoke_map.empty()) {
    return;
  }

  // Prepare unique method infos, set method info indexes for their MIRs.
  DCHECK_EQ(method_lowering_infos_.Size(), 0u);
  const size_t count = invoke_map.size();
  method_lowering_infos_.Resize(count);
  for (size_t pos = 0u; pos != count; ++pos) {
    const MapEntry* entry = sequential_entries[pos];
    MirMethodLoweringInfo method_info(entry->target_method_idx,
                                      static_cast<InvokeType>(entry->invoke_type));
    if (entry->devirt_target != nullptr) {
      method_info.SetDevirtualizationTarget(*entry->devirt_target);
    }
    method_lowering_infos_.Insert(method_info);
  }
  MirMethodLoweringInfo::Resolve(cu_->compiler_driver, GetCurrentDexCompilationUnit(),
                                 method_lowering_infos_.GetRawStorage(), count);
}

bool MIRGraph::SkipCompilationByName(const std::string& methodname) {
  return cu_->compiler_driver->SkipCompilation(methodname);
}

}  // namespace art