mir_analysis.cc revision 69f08baaa4b70ce32a258f3da43cf12f2a034696
/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <algorithm>
#include "compiler_internals.h"
#include "dataflow_iterator-inl.h"
#include "dex_instruction.h"
#include "dex_instruction-inl.h"
#include "dex/verified_method.h"
#include "dex/quick/dex_file_method_inliner.h"
#include "dex/quick/dex_file_to_method_inliner_map.h"
#include "driver/compiler_options.h"
#include "UniquePtr.h"
#include "utils/scoped_arena_containers.h"

namespace art {

// Instruction characteristics used to statically identify computation-intensive methods.
const uint32_t MIRGraph::analysis_attributes_[kMirOpLast] = {
  // 00 NOP
  AN_NONE,

  // 01 MOVE vA, vB
  AN_MOVE,

  // 02 MOVE_FROM16 vAA, vBBBB
  AN_MOVE,

  // 03 MOVE_16 vAAAA, vBBBB
  AN_MOVE,

  // 04 MOVE_WIDE vA, vB
  AN_MOVE,

  // 05 MOVE_WIDE_FROM16 vAA, vBBBB
  AN_MOVE,

  // 06 MOVE_WIDE_16 vAAAA, vBBBB
  AN_MOVE,

  // 07 MOVE_OBJECT vA, vB
  AN_MOVE,

  // 08 MOVE_OBJECT_FROM16 vAA, vBBBB
  AN_MOVE,

  // 09 MOVE_OBJECT_16 vAAAA, vBBBB
  AN_MOVE,

  // 0A MOVE_RESULT vAA
  AN_MOVE,

  // 0B MOVE_RESULT_WIDE vAA
  AN_MOVE,

  // 0C MOVE_RESULT_OBJECT vAA
  AN_MOVE,

  // 0D MOVE_EXCEPTION vAA
  AN_MOVE,

  // 0E RETURN_VOID
  AN_BRANCH,

  // 0F RETURN vAA
  AN_BRANCH,

  // 10 RETURN_WIDE vAA
  AN_BRANCH,

  // 11 RETURN_OBJECT vAA
  AN_BRANCH,

  // 12 CONST_4 vA, #+B
  AN_SIMPLECONST,

  // 13 CONST_16 vAA, #+BBBB
  AN_SIMPLECONST,

  // 14 CONST vAA, #+BBBBBBBB
  AN_SIMPLECONST,

  // 15 CONST_HIGH16 vAA, #+BBBB0000
  AN_SIMPLECONST,

  // 16 CONST_WIDE_16 vAA, #+BBBB
  AN_SIMPLECONST,

  // 17 CONST_WIDE_32 vAA, #+BBBBBBBB
  AN_SIMPLECONST,

  // 18 CONST_WIDE vAA, #+BBBBBBBBBBBBBBBB
  AN_SIMPLECONST,

  // 19 CONST_WIDE_HIGH16 vAA, #+BBBB000000000000
  AN_SIMPLECONST,

  // 1A CONST_STRING vAA, string@BBBB
  AN_NONE,

  // 1B CONST_STRING_JUMBO vAA, string@BBBBBBBB
  AN_NONE,

  // 1C CONST_CLASS vAA, type@BBBB
  AN_NONE,

  // 1D MONITOR_ENTER vAA
  AN_NONE,

  // 1E MONITOR_EXIT vAA
  AN_NONE,

  // 1F CHECK_CAST vAA, type@BBBB
  AN_NONE,

  // 20 INSTANCE_OF vA, vB, type@CCCC
  AN_NONE,

  // 21 ARRAY_LENGTH vA, vB
  AN_ARRAYOP,

  // 22 NEW_INSTANCE vAA, type@BBBB
  AN_HEAVYWEIGHT,

  // 23 NEW_ARRAY vA, vB, type@CCCC
  AN_HEAVYWEIGHT,

  // 24 FILLED_NEW_ARRAY {vD, vE, vF, vG, vA}
  AN_HEAVYWEIGHT,

  // 25 FILLED_NEW_ARRAY_RANGE {vCCCC .. vNNNN}, type@BBBB
  AN_HEAVYWEIGHT,

  // 26 FILL_ARRAY_DATA vAA, +BBBBBBBB
  AN_NONE,

  // 27 THROW vAA
  AN_HEAVYWEIGHT | AN_BRANCH,

  // 28 GOTO
  AN_BRANCH,

  // 29 GOTO_16
  AN_BRANCH,

  // 2A GOTO_32
  AN_BRANCH,

  // 2B PACKED_SWITCH vAA, +BBBBBBBB
  AN_SWITCH,

  // 2C SPARSE_SWITCH vAA, +BBBBBBBB
  AN_SWITCH,

  // 2D CMPL_FLOAT vAA, vBB, vCC
  AN_MATH | AN_FP | AN_SINGLE,

  // 2E CMPG_FLOAT vAA, vBB, vCC
  AN_MATH | AN_FP | AN_SINGLE,

  // 2F CMPL_DOUBLE vAA, vBB, vCC
  AN_MATH | AN_FP | AN_DOUBLE,

  // 30 CMPG_DOUBLE vAA, vBB, vCC
  AN_MATH | AN_FP | AN_DOUBLE,

  // 31 CMP_LONG vAA, vBB, vCC
  AN_MATH | AN_LONG,

  // 32 IF_EQ vA, vB, +CCCC
  AN_MATH | AN_BRANCH | AN_INT,

  // 33 IF_NE vA, vB, +CCCC
  AN_MATH | AN_BRANCH | AN_INT,

  // 34 IF_LT vA, vB, +CCCC
  AN_MATH | AN_BRANCH | AN_INT,

  // 35 IF_GE vA, vB, +CCCC
  AN_MATH | AN_BRANCH | AN_INT,

  // 36 IF_GT vA, vB, +CCCC
  AN_MATH | AN_BRANCH | AN_INT,

  // 37 IF_LE vA, vB, +CCCC
  AN_MATH | AN_BRANCH | AN_INT,

  // 38 IF_EQZ vAA, +BBBB
  AN_MATH | AN_BRANCH | AN_INT,

  // 39 IF_NEZ vAA, +BBBB
  AN_MATH | AN_BRANCH | AN_INT,

  // 3A IF_LTZ vAA, +BBBB
  AN_MATH | AN_BRANCH | AN_INT,

  // 3B IF_GEZ vAA, +BBBB
  AN_MATH | AN_BRANCH | AN_INT,

  // 3C IF_GTZ vAA, +BBBB
  AN_MATH | AN_BRANCH | AN_INT,

  // 3D IF_LEZ vAA, +BBBB
  AN_MATH | AN_BRANCH | AN_INT,

  // 3E UNUSED_3E
  AN_NONE,

  // 3F UNUSED_3F
  AN_NONE,

  // 40 UNUSED_40
  AN_NONE,

  // 41 UNUSED_41
  AN_NONE,

  // 42 UNUSED_42
  AN_NONE,

  // 43 UNUSED_43
  AN_NONE,

  // 44 AGET vAA, vBB, vCC
  AN_ARRAYOP,

  // 45 AGET_WIDE vAA, vBB, vCC
  AN_ARRAYOP,

  // 46 AGET_OBJECT vAA, vBB, vCC
  AN_ARRAYOP,

  // 47 AGET_BOOLEAN vAA, vBB, vCC
  AN_ARRAYOP,

  // 48 AGET_BYTE vAA, vBB, vCC
  AN_ARRAYOP,

  // 49 AGET_CHAR vAA, vBB, vCC
  AN_ARRAYOP,

  // 4A AGET_SHORT vAA, vBB, vCC
  AN_ARRAYOP,

  // 4B APUT vAA, vBB, vCC
  AN_ARRAYOP,

  // 4C APUT_WIDE vAA, vBB, vCC
  AN_ARRAYOP,

  // 4D APUT_OBJECT vAA, vBB, vCC
  AN_ARRAYOP,

  // 4E APUT_BOOLEAN vAA, vBB, vCC
  AN_ARRAYOP,

  // 4F APUT_BYTE vAA, vBB, vCC
  AN_ARRAYOP,

  // 50 APUT_CHAR vAA, vBB, vCC
  AN_ARRAYOP,

  // 51 APUT_SHORT vAA, vBB, vCC
  AN_ARRAYOP,

  // 52 IGET vA, vB, field@CCCC
  AN_NONE,

  // 53 IGET_WIDE vA, vB, field@CCCC
  AN_NONE,

  // 54 IGET_OBJECT vA, vB, field@CCCC
  AN_NONE,

  // 55 IGET_BOOLEAN vA, vB, field@CCCC
  AN_NONE,

  // 56 IGET_BYTE vA, vB, field@CCCC
  AN_NONE,

  // 57 IGET_CHAR vA, vB, field@CCCC
  AN_NONE,

  // 58 IGET_SHORT vA, vB, field@CCCC
  AN_NONE,

  // 59 IPUT vA, vB, field@CCCC
  AN_NONE,

  // 5A IPUT_WIDE vA, vB, field@CCCC
  AN_NONE,

  // 5B IPUT_OBJECT vA, vB, field@CCCC
  AN_NONE,

  // 5C IPUT_BOOLEAN vA, vB, field@CCCC
  AN_NONE,

  // 5D IPUT_BYTE vA, vB, field@CCCC
  AN_NONE,

  // 5E IPUT_CHAR vA, vB, field@CCCC
  AN_NONE,

  // 5F IPUT_SHORT vA, vB, field@CCCC
  AN_NONE,

  // 60 SGET vAA, field@BBBB
  AN_NONE,

  // 61 SGET_WIDE vAA, field@BBBB
  AN_NONE,

  // 62 SGET_OBJECT vAA, field@BBBB
  AN_NONE,

  // 63 SGET_BOOLEAN vAA, field@BBBB
  AN_NONE,

  // 64 SGET_BYTE vAA, field@BBBB
  AN_NONE,

  // 65 SGET_CHAR vAA, field@BBBB
  AN_NONE,

  // 66 SGET_SHORT vAA, field@BBBB
  AN_NONE,

  // 67 SPUT vAA, field@BBBB
  AN_NONE,

  // 68 SPUT_WIDE vAA, field@BBBB
  AN_NONE,

  // 69 SPUT_OBJECT vAA, field@BBBB
  AN_NONE,

  // 6A SPUT_BOOLEAN vAA, field@BBBB
  AN_NONE,

  // 6B SPUT_BYTE vAA, field@BBBB
  AN_NONE,

  // 6C SPUT_CHAR vAA, field@BBBB
  AN_NONE,

  // 6D SPUT_SHORT vAA, field@BBBB
  AN_NONE,

  // 6E INVOKE_VIRTUAL {vD, vE, vF, vG, vA}
  AN_INVOKE | AN_HEAVYWEIGHT,

  // 6F INVOKE_SUPER {vD, vE, vF, vG, vA}
  AN_INVOKE | AN_HEAVYWEIGHT,

  // 70 INVOKE_DIRECT {vD, vE, vF, vG, vA}
  AN_INVOKE | AN_HEAVYWEIGHT,

  // 71 INVOKE_STATIC {vD, vE, vF, vG, vA}
  AN_INVOKE | AN_HEAVYWEIGHT,

  // 72 INVOKE_INTERFACE {vD, vE, vF, vG, vA}
  AN_INVOKE | AN_HEAVYWEIGHT,

  // 73 UNUSED_73
  AN_NONE,

  // 74 INVOKE_VIRTUAL_RANGE {vCCCC .. vNNNN}
  AN_INVOKE | AN_HEAVYWEIGHT,

  // 75 INVOKE_SUPER_RANGE {vCCCC .. vNNNN}
  AN_INVOKE | AN_HEAVYWEIGHT,

  // 76 INVOKE_DIRECT_RANGE {vCCCC .. vNNNN}
  AN_INVOKE | AN_HEAVYWEIGHT,

  // 77 INVOKE_STATIC_RANGE {vCCCC .. vNNNN}
  AN_INVOKE | AN_HEAVYWEIGHT,

  // 78 INVOKE_INTERFACE_RANGE {vCCCC .. vNNNN}
  AN_INVOKE | AN_HEAVYWEIGHT,

  // 79 UNUSED_79
  AN_NONE,

  // 7A UNUSED_7A
  AN_NONE,

  // 7B NEG_INT vA, vB
  AN_MATH | AN_INT,

  // 7C NOT_INT vA, vB
  AN_MATH | AN_INT,

  // 7D NEG_LONG vA, vB
  AN_MATH | AN_LONG,

  // 7E NOT_LONG vA, vB
  AN_MATH | AN_LONG,

  // 7F NEG_FLOAT vA, vB
  AN_MATH | AN_FP | AN_SINGLE,

  // 80 NEG_DOUBLE vA, vB
  AN_MATH | AN_FP | AN_DOUBLE,

  // 81 INT_TO_LONG vA, vB
  AN_MATH | AN_INT | AN_LONG,

  // 82 INT_TO_FLOAT vA, vB
  AN_MATH | AN_FP | AN_INT | AN_SINGLE,

  // 83 INT_TO_DOUBLE vA, vB
  AN_MATH | AN_FP | AN_INT | AN_DOUBLE,

  // 84 LONG_TO_INT vA, vB
  AN_MATH | AN_INT | AN_LONG,

  // 85 LONG_TO_FLOAT vA, vB
  AN_MATH | AN_FP | AN_LONG | AN_SINGLE,

  // 86 LONG_TO_DOUBLE vA, vB
  AN_MATH | AN_FP | AN_LONG | AN_DOUBLE,

  // 87 FLOAT_TO_INT vA, vB
  AN_MATH | AN_FP | AN_INT | AN_SINGLE,

  // 88 FLOAT_TO_LONG vA, vB
  AN_MATH | AN_FP | AN_LONG | AN_SINGLE,

  // 89 FLOAT_TO_DOUBLE vA, vB
  AN_MATH | AN_FP | AN_SINGLE | AN_DOUBLE,

  // 8A DOUBLE_TO_INT vA, vB
  AN_MATH | AN_FP | AN_INT | AN_DOUBLE,

  // 8B DOUBLE_TO_LONG vA, vB
  AN_MATH | AN_FP | AN_LONG | AN_DOUBLE,

  // 8C DOUBLE_TO_FLOAT vA, vB
  AN_MATH | AN_FP | AN_SINGLE | AN_DOUBLE,

  // 8D INT_TO_BYTE vA, vB
  AN_MATH | AN_INT,

  // 8E INT_TO_CHAR vA, vB
  AN_MATH | AN_INT,

  // 8F INT_TO_SHORT vA, vB
  AN_MATH | AN_INT,

  // 90 ADD_INT vAA, vBB, vCC
  AN_MATH | AN_INT,

  // 91 SUB_INT vAA, vBB, vCC
  AN_MATH | AN_INT,

  // 92 MUL_INT vAA, vBB, vCC
  AN_MATH | AN_INT,

  // 93 DIV_INT vAA, vBB, vCC
  AN_MATH | AN_INT,

  // 94 REM_INT vAA, vBB, vCC
  AN_MATH | AN_INT,

  // 95 AND_INT vAA, vBB, vCC
  AN_MATH | AN_INT,

  // 96 OR_INT vAA, vBB, vCC
  AN_MATH | AN_INT,

  // 97 XOR_INT vAA, vBB, vCC
  AN_MATH | AN_INT,

  // 98 SHL_INT vAA, vBB, vCC
  AN_MATH | AN_INT,

  // 99 SHR_INT vAA, vBB, vCC
  AN_MATH | AN_INT,

  // 9A USHR_INT vAA, vBB, vCC
  AN_MATH | AN_INT,

  // 9B ADD_LONG vAA, vBB, vCC
  AN_MATH | AN_LONG,

  // 9C SUB_LONG vAA, vBB, vCC
  AN_MATH | AN_LONG,

  // 9D MUL_LONG vAA, vBB, vCC
  AN_MATH | AN_LONG,

  // 9E DIV_LONG vAA, vBB, vCC
  AN_MATH | AN_LONG,

  // 9F REM_LONG vAA, vBB, vCC
  AN_MATH | AN_LONG,

  // A0 AND_LONG vAA, vBB, vCC
  AN_MATH | AN_LONG,

  // A1 OR_LONG vAA, vBB, vCC
  AN_MATH | AN_LONG,

  // A2 XOR_LONG vAA, vBB, vCC
  AN_MATH | AN_LONG,

  // A3 SHL_LONG vAA, vBB, vCC
  AN_MATH | AN_LONG,

  // A4 SHR_LONG vAA, vBB, vCC
  AN_MATH | AN_LONG,

  // A5 USHR_LONG vAA, vBB, vCC
  AN_MATH | AN_LONG,

  // A6 ADD_FLOAT vAA, vBB, vCC
  AN_MATH | AN_FP | AN_SINGLE,

  // A7 SUB_FLOAT vAA, vBB, vCC
  AN_MATH | AN_FP | AN_SINGLE,

  // A8 MUL_FLOAT vAA, vBB, vCC
  AN_MATH | AN_FP | AN_SINGLE,

  // A9 DIV_FLOAT vAA, vBB, vCC
  AN_MATH | AN_FP | AN_SINGLE,

  // AA REM_FLOAT vAA, vBB, vCC
  AN_MATH | AN_FP | AN_SINGLE,

  // AB ADD_DOUBLE vAA, vBB, vCC
  AN_MATH | AN_FP | AN_DOUBLE,

  // AC SUB_DOUBLE vAA, vBB, vCC
  AN_MATH | AN_FP | AN_DOUBLE,

  // AD MUL_DOUBLE vAA, vBB, vCC
  AN_MATH | AN_FP | AN_DOUBLE,

  // AE DIV_DOUBLE vAA, vBB, vCC
  AN_MATH | AN_FP | AN_DOUBLE,

  // AF REM_DOUBLE vAA, vBB, vCC
  AN_MATH | AN_FP | AN_DOUBLE,

  // B0 ADD_INT_2ADDR vA, vB
  AN_MATH | AN_INT,

  // B1 SUB_INT_2ADDR vA, vB
  AN_MATH | AN_INT,

  // B2 MUL_INT_2ADDR vA, vB
  AN_MATH | AN_INT,

  // B3 DIV_INT_2ADDR vA, vB
  AN_MATH | AN_INT,

  // B4 REM_INT_2ADDR vA, vB
  AN_MATH | AN_INT,

  // B5 AND_INT_2ADDR vA, vB
  AN_MATH | AN_INT,

  // B6 OR_INT_2ADDR vA, vB
  AN_MATH | AN_INT,

  // B7 XOR_INT_2ADDR vA, vB
  AN_MATH | AN_INT,

  // B8 SHL_INT_2ADDR vA, vB
  AN_MATH | AN_INT,

  // B9 SHR_INT_2ADDR vA, vB
  AN_MATH | AN_INT,

  // BA USHR_INT_2ADDR vA, vB
  AN_MATH | AN_INT,

  // BB ADD_LONG_2ADDR vA, vB
  AN_MATH | AN_LONG,

  // BC SUB_LONG_2ADDR vA, vB
  AN_MATH | AN_LONG,

  // BD MUL_LONG_2ADDR vA, vB
  AN_MATH | AN_LONG,

  // BE DIV_LONG_2ADDR vA, vB
  AN_MATH | AN_LONG,

  // BF REM_LONG_2ADDR vA, vB
  AN_MATH | AN_LONG,

  // C0 AND_LONG_2ADDR vA, vB
  AN_MATH | AN_LONG,

  // C1 OR_LONG_2ADDR vA, vB
  AN_MATH | AN_LONG,

  // C2 XOR_LONG_2ADDR vA, vB
  AN_MATH | AN_LONG,

  // C3 SHL_LONG_2ADDR vA, vB
  AN_MATH | AN_LONG,

  // C4 SHR_LONG_2ADDR vA, vB
  AN_MATH | AN_LONG,

  // C5 USHR_LONG_2ADDR vA, vB
  AN_MATH | AN_LONG,

  // C6 ADD_FLOAT_2ADDR vA, vB
  AN_MATH | AN_FP | AN_SINGLE,

  // C7 SUB_FLOAT_2ADDR vA, vB
  AN_MATH | AN_FP | AN_SINGLE,

  // C8 MUL_FLOAT_2ADDR vA, vB
  AN_MATH | AN_FP | AN_SINGLE,

  // C9 DIV_FLOAT_2ADDR vA, vB
  AN_MATH | AN_FP | AN_SINGLE,

  // CA REM_FLOAT_2ADDR vA, vB
  AN_MATH | AN_FP | AN_SINGLE,

  // CB ADD_DOUBLE_2ADDR vA, vB
  AN_MATH | AN_FP | AN_DOUBLE,

  // CC SUB_DOUBLE_2ADDR vA, vB
  AN_MATH | AN_FP | AN_DOUBLE,

  // CD MUL_DOUBLE_2ADDR vA, vB
  AN_MATH | AN_FP | AN_DOUBLE,

  // CE DIV_DOUBLE_2ADDR vA, vB
  AN_MATH | AN_FP | AN_DOUBLE,

  // CF REM_DOUBLE_2ADDR vA, vB
  AN_MATH | AN_FP | AN_DOUBLE,

  // D0 ADD_INT_LIT16 vA, vB, #+CCCC
  AN_MATH | AN_INT,

  // D1 RSUB_INT vA, vB, #+CCCC
  AN_MATH | AN_INT,

  // D2 MUL_INT_LIT16 vA, vB, #+CCCC
  AN_MATH | AN_INT,

  // D3 DIV_INT_LIT16 vA, vB, #+CCCC
  AN_MATH | AN_INT,

  // D4 REM_INT_LIT16 vA, vB, #+CCCC
  AN_MATH | AN_INT,

  // D5 AND_INT_LIT16 vA, vB, #+CCCC
  AN_MATH | AN_INT,

  // D6 OR_INT_LIT16 vA, vB, #+CCCC
  AN_MATH | AN_INT,

  // D7 XOR_INT_LIT16 vA, vB, #+CCCC
  AN_MATH | AN_INT,

  // D8 ADD_INT_LIT8 vAA, vBB, #+CC
  AN_MATH | AN_INT,

  // D9 RSUB_INT_LIT8 vAA, vBB, #+CC
  AN_MATH | AN_INT,

  // DA MUL_INT_LIT8 vAA, vBB, #+CC
  AN_MATH | AN_INT,

  // DB DIV_INT_LIT8 vAA, vBB, #+CC
  AN_MATH | AN_INT,

  // DC REM_INT_LIT8 vAA, vBB, #+CC
  AN_MATH | AN_INT,

  // DD AND_INT_LIT8 vAA, vBB, #+CC
  AN_MATH | AN_INT,

  // DE OR_INT_LIT8 vAA, vBB, #+CC
  AN_MATH | AN_INT,

  // DF XOR_INT_LIT8 vAA, vBB, #+CC
  AN_MATH | AN_INT,

  // E0 SHL_INT_LIT8 vAA, vBB, #+CC
  AN_MATH | AN_INT,

  // E1 SHR_INT_LIT8 vAA, vBB, #+CC
  AN_MATH | AN_INT,

  // E2 USHR_INT_LIT8 vAA, vBB, #+CC
  AN_MATH | AN_INT,

  // E3 IGET_VOLATILE
  AN_NONE,

  // E4 IPUT_VOLATILE
  AN_NONE,

  // E5 SGET_VOLATILE
  AN_NONE,

  // E6 SPUT_VOLATILE
  AN_NONE,

  // E7 IGET_OBJECT_VOLATILE
  AN_NONE,

  // E8 IGET_WIDE_VOLATILE
  AN_NONE,

  // E9 IPUT_WIDE_VOLATILE
  AN_NONE,

  // EA SGET_WIDE_VOLATILE
  AN_NONE,

  // EB SPUT_WIDE_VOLATILE
  AN_NONE,

  // EC BREAKPOINT
  AN_NONE,

  // ED THROW_VERIFICATION_ERROR
  AN_HEAVYWEIGHT | AN_BRANCH,

  // EE EXECUTE_INLINE
  AN_NONE,

  // EF EXECUTE_INLINE_RANGE
  AN_NONE,

  // F0 INVOKE_OBJECT_INIT_RANGE
  AN_INVOKE | AN_HEAVYWEIGHT,

  // F1 RETURN_VOID_BARRIER
  AN_BRANCH,

  // F2 IGET_QUICK
  AN_NONE,

  // F3 IGET_WIDE_QUICK
  AN_NONE,

  // F4 IGET_OBJECT_QUICK
  AN_NONE,

  // F5 IPUT_QUICK
  AN_NONE,

  // F6 IPUT_WIDE_QUICK
  AN_NONE,

  // F7 IPUT_OBJECT_QUICK
  AN_NONE,

  // F8 INVOKE_VIRTUAL_QUICK
  AN_INVOKE | AN_HEAVYWEIGHT,

  // F9 INVOKE_VIRTUAL_QUICK_RANGE
  AN_INVOKE | AN_HEAVYWEIGHT,

  // FA INVOKE_SUPER_QUICK
  AN_INVOKE | AN_HEAVYWEIGHT,

  // FB INVOKE_SUPER_QUICK_RANGE
  AN_INVOKE | AN_HEAVYWEIGHT,

  // FC IPUT_OBJECT_VOLATILE
  AN_NONE,

  // FD SGET_OBJECT_VOLATILE
  AN_NONE,

  // FE SPUT_OBJECT_VOLATILE
  AN_NONE,

  // FF UNUSED_FF
  AN_NONE,

  // Beginning of extended MIR opcodes
  // 100 MIR_PHI
  AN_NONE,

  // 101 MIR_COPY
  AN_NONE,

  // 102 MIR_FUSED_CMPL_FLOAT
  AN_NONE,

  // 103 MIR_FUSED_CMPG_FLOAT
  AN_NONE,

  // 104 MIR_FUSED_CMPL_DOUBLE
  AN_NONE,

  // 105 MIR_FUSED_CMPG_DOUBLE
  AN_NONE,

  // 106 MIR_FUSED_CMP_LONG
  AN_NONE,

  // 107 MIR_NOP
  AN_NONE,

  // 108 MIR_NULL_CHECK
  AN_NONE,

  // 109 MIR_RANGE_CHECK
  AN_NONE,

  // 10A MIR_DIV_ZERO_CHECK
  AN_NONE,

  // 10B MIR_CHECK
  AN_NONE,

  // 10C MIR_CHECKPART2
  AN_NONE,

  // 10D MIR_SELECT
  AN_NONE,
};

struct MethodStats {
  int dex_instructions;
  int math_ops;
  int fp_ops;
  int array_ops;
  int branch_ops;
  int heavyweight_ops;
  bool has_computational_loop;
  bool has_switch;
  float math_ratio;
  float fp_ratio;
  float array_ratio;
  float branch_ratio;
  float heavyweight_ratio;
};

void MIRGraph::AnalyzeBlock(BasicBlock* bb, MethodStats* stats) {
  if (bb->visited || (bb->block_type != kDalvikByteCode)) {
    return;
  }
  bool computational_block = true;
  bool has_math = false;
  /*
   * For the purposes of this scan, we want to treat the set of basic blocks broken
   * by an exception edge as a single basic block.  We'll scan forward along the fallthrough
   * edges until we reach an explicit branch or return.
   */
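  // Illustration: if B1 falls through to B2 (B1 having been split after a
  // potentially throwing instruction such as DIV_INT) and B2 ends in an IF_EQ,
  // the walk below stops at B2, so ending_bb spans the whole B1..B2 region.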
  BasicBlock* ending_bb = bb;
  if (ending_bb->last_mir_insn != NULL) {
    uint32_t ending_flags = analysis_attributes_[ending_bb->last_mir_insn->dalvikInsn.opcode];
    while ((ending_flags & AN_BRANCH) == 0) {
      ending_bb = GetBasicBlock(ending_bb->fall_through);
      ending_flags = analysis_attributes_[ending_bb->last_mir_insn->dalvikInsn.opcode];
    }
  }
  /*
   * Ideally, we'd weight the operations by loop nesting level, but to do so we'd
   * first need to do some expensive loop detection - and the point of this is to make
   * an informed guess before investing in computation.  However, we can cheaply detect
   * many simple loop forms without having to do full dataflow analysis.
   */
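  // Shapes recognized below (a sketch, not full loop detection):
  //   for/while: ending_bb branches (taken) to a condition block whose taken or
  //              fall_through edge leads back to bb.
  //   do-while:  ending_bb's taken edge leads directly back to bb.
  // Instruction counts in such blocks are multiplied by loop_scale_factor.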
  int loop_scale_factor = 1;
  // Simple for and while loops
  if ((ending_bb->taken != NullBasicBlockId) && (ending_bb->fall_through == NullBasicBlockId)) {
    if ((GetBasicBlock(ending_bb->taken)->taken == bb->id) ||
        (GetBasicBlock(ending_bb->taken)->fall_through == bb->id)) {
      loop_scale_factor = 25;
    }
  }
  // Simple do-while loop
  if ((ending_bb->taken != NullBasicBlockId) && (ending_bb->taken == bb->id)) {
    loop_scale_factor = 25;
  }

  BasicBlock* tbb = bb;
  bool done = false;
  while (!done) {
    tbb->visited = true;
    for (MIR* mir = tbb->first_mir_insn; mir != NULL; mir = mir->next) {
      if (static_cast<uint32_t>(mir->dalvikInsn.opcode) >= kMirOpFirst) {
        // Skip any MIR pseudo-op.
        continue;
      }
      uint32_t flags = analysis_attributes_[mir->dalvikInsn.opcode];
      stats->dex_instructions += loop_scale_factor;
      if ((flags & AN_BRANCH) == 0) {
        computational_block &= ((flags & AN_COMPUTATIONAL) != 0);
      } else {
        stats->branch_ops += loop_scale_factor;
      }
      if ((flags & AN_MATH) != 0) {
        stats->math_ops += loop_scale_factor;
        has_math = true;
      }
      if ((flags & AN_FP) != 0) {
        stats->fp_ops += loop_scale_factor;
      }
      if ((flags & AN_ARRAYOP) != 0) {
        stats->array_ops += loop_scale_factor;
      }
      if ((flags & AN_HEAVYWEIGHT) != 0) {
        stats->heavyweight_ops += loop_scale_factor;
      }
      if ((flags & AN_SWITCH) != 0) {
        stats->has_switch = true;
      }
    }
    if (tbb == ending_bb) {
      done = true;
    } else {
      tbb = GetBasicBlock(tbb->fall_through);
    }
  }
  if (has_math && computational_block && (loop_scale_factor > 1)) {
    stats->has_computational_loop = true;
  }
}

bool MIRGraph::ComputeSkipCompilation(MethodStats* stats, bool skip_default) {
  float count = stats->dex_instructions;
  stats->math_ratio = stats->math_ops / count;
  stats->fp_ratio = stats->fp_ops / count;
  stats->branch_ratio = stats->branch_ops / count;
  stats->array_ratio = stats->array_ops / count;
  stats->heavyweight_ratio = stats->heavyweight_ops / count;
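  // Worked example: with 100 weighted instructions of which 35 are math ops and
  // 4 are FP ops, math_ratio is 0.35 and fp_ratio is 0.04; math_ratio exceeding
  // the 0.3 threshold below is by itself enough to compile the method.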

  if (cu_->enable_debug & (1 << kDebugShowFilterStats)) {
    LOG(INFO) << "STATS " << stats->dex_instructions << ", math:"
              << stats->math_ratio << ", fp:"
              << stats->fp_ratio << ", br:"
              << stats->branch_ratio << ", hw:"
              << stats->heavyweight_ratio << ", arr:"
              << stats->array_ratio << ", hot:"
              << stats->has_computational_loop << ", "
              << PrettyMethod(cu_->method_idx, *cu_->dex_file);
  }

  // Computation intensive?
  if (stats->has_computational_loop && (stats->heavyweight_ratio < 0.04)) {
    return false;
  }

  // Complex, logic-intensive?
  if (cu_->compiler_driver->GetCompilerOptions().IsSmallMethod(GetNumDalvikInsns()) &&
      stats->branch_ratio > 0.3) {
    return false;
  }

  // Significant floating point?
  if (stats->fp_ratio > 0.05) {
    return false;
  }

  // Significant generic math?
  if (stats->math_ratio > 0.3) {
    return false;
  }

  // If array-intensive, compiling is probably worthwhile.
  if (stats->array_ratio > 0.1) {
    return false;
  }

  // Switch operations benefit greatly from compilation, so go ahead and spend the cycles.
  if (stats->has_switch) {
    return false;
  }

  // If significant in size and high proportion of expensive operations, skip.
  if (cu_->compiler_driver->GetCompilerOptions().IsSmallMethod(GetNumDalvikInsns()) &&
      (stats->heavyweight_ratio > 0.3)) {
    return true;
  }

  return skip_default;
}

 /*
  * Will eventually want this to be a bit more sophisticated and happen at verification time.
  */
bool MIRGraph::SkipCompilation() {
  const CompilerOptions& compiler_options = cu_->compiler_driver->GetCompilerOptions();
  CompilerOptions::CompilerFilter compiler_filter = compiler_options.GetCompilerFilter();
  if (compiler_filter == CompilerOptions::kEverything) {
    return false;
  }

  // Contains a pattern we don't want to compile?
  if (punt_to_interpreter_) {
    return true;
  }

  if (!compiler_options.IsCompilationEnabled() || compiler_filter == CompilerOptions::kProfiled) {
    return true;
  }

  // Set up compilation cutoffs based on current filter mode.
  size_t small_cutoff = 0;
  size_t default_cutoff = 0;
  switch (compiler_filter) {
    case CompilerOptions::kBalanced:
      small_cutoff = compiler_options.GetSmallMethodThreshold();
      default_cutoff = compiler_options.GetLargeMethodThreshold();
      break;
    case CompilerOptions::kSpace:
      small_cutoff = compiler_options.GetTinyMethodThreshold();
      default_cutoff = compiler_options.GetSmallMethodThreshold();
      break;
    case CompilerOptions::kSpeed:
      small_cutoff = compiler_options.GetHugeMethodThreshold();
      default_cutoff = compiler_options.GetHugeMethodThreshold();
      break;
    default:
      LOG(FATAL) << "Unexpected compiler_filter_: " << compiler_filter;
  }

  // If size < cutoff, assume we'll compile - but allow removal.
  bool skip_compilation = (GetNumDalvikInsns() >= default_cutoff);

  /*
   * Filter 1: Huge methods are likely to be machine generated, but some aren't.
   * If huge, assume we won't compile, but allow further analysis to turn it back on.
   */
  if (compiler_options.IsHugeMethod(GetNumDalvikInsns())) {
    skip_compilation = true;
    // If we've got a huge number of basic blocks, don't bother with further analysis.
    if (static_cast<size_t>(num_blocks_) > (compiler_options.GetHugeMethodThreshold() / 2)) {
      return true;
    }
  } else if (compiler_options.IsLargeMethod(GetNumDalvikInsns()) &&
    /* If it's large and contains no branches, it's likely to be machine generated initialization */
      (GetBranchCount() == 0)) {
    return true;
  } else if (compiler_filter == CompilerOptions::kSpeed) {
    // If not huge, compile.
    return false;
  }

  // Filter 2: Skip class initializers.
  if (((cu_->access_flags & kAccConstructor) != 0) && ((cu_->access_flags & kAccStatic) != 0)) {
    return true;
  }

  // Filter 3: if this method is a special pattern, go ahead and emit the canned pattern.
  if (cu_->compiler_driver->GetMethodInlinerMap() != nullptr &&
      cu_->compiler_driver->GetMethodInlinerMap()->GetMethodInliner(cu_->dex_file)
          ->IsSpecial(cu_->method_idx)) {
    return false;
  }

  // Filter 4: if small, just compile.
  if (GetNumDalvikInsns() < small_cutoff) {
    return false;
  }

  // Analyze graph for:
  //  o floating point computation
  //  o basic blocks contained in loop with heavy arithmetic.
  //  o proportion of conditional branches.

  MethodStats stats;
  memset(&stats, 0, sizeof(stats));

  ClearAllVisitedFlags();
  AllNodesIterator iter(this);
  for (BasicBlock* bb = iter.Next(); bb != NULL; bb = iter.Next()) {
    AnalyzeBlock(bb, &stats);
  }

  return ComputeSkipCompilation(&stats, skip_compilation);
}

void MIRGraph::DoCacheFieldLoweringInfo() {
  // All IGET/IPUT/SGET/SPUT instructions take 2 code units and there must also be a RETURN.
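  // For example, a 9-code-unit method can hold one RETURN (1 code unit) plus at
  // most (9 - 1) / 2 == 4 field access instructions, giving max_refs == 4.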
  const uint32_t max_refs = (current_code_item_->insns_size_in_code_units_ - 1u) / 2u;
  ScopedArenaAllocator allocator(&cu_->arena_stack);
  uint16_t* field_idxs =
      reinterpret_cast<uint16_t*>(allocator.Alloc(max_refs * sizeof(uint16_t), kArenaAllocMisc));

  // Find IGET/IPUT/SGET/SPUT insns, store IGET/IPUT fields at the beginning, SGET/SPUT at the end.
  size_t ifield_pos = 0u;
  size_t sfield_pos = max_refs;
  AllNodesIterator iter(this);
  for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
    if (bb->block_type != kDalvikByteCode) {
      continue;
    }
    for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
      if (mir->dalvikInsn.opcode >= Instruction::IGET &&
          mir->dalvikInsn.opcode <= Instruction::SPUT_SHORT) {
        const Instruction* insn = Instruction::At(current_code_item_->insns_ + mir->offset);
        // Get the field index and try to find it among existing indexes. If found, it's usually
        // among the last few added, so we start the search from ifield_pos/sfield_pos. Though
        // this is a linear search, it actually performs much better than a map-based approach.
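        // Hypothetical snapshot of field_idxs: instance field indexes grow from
        // the front, static field indexes grow from the back, and the two
        // regions never overlap (see the DCHECK_LE below):
        //   [ i0 i1 i2 | ....unused.... | s2 s1 s0 ]
        //         ifield_pos ->    <- sfield_pos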
        if (mir->dalvikInsn.opcode <= Instruction::IPUT_SHORT) {
          uint16_t field_idx = insn->VRegC_22c();
          size_t i = ifield_pos;
          while (i != 0u && field_idxs[i - 1] != field_idx) {
            --i;
          }
          if (i != 0u) {
            mir->meta.ifield_lowering_info = i - 1;
          } else {
            mir->meta.ifield_lowering_info = ifield_pos;
            field_idxs[ifield_pos++] = field_idx;
          }
        } else {
          uint16_t field_idx = insn->VRegB_21c();
          size_t i = sfield_pos;
          while (i != max_refs && field_idxs[i] != field_idx) {
            ++i;
          }
          if (i != max_refs) {
            mir->meta.sfield_lowering_info = max_refs - i - 1u;
          } else {
            mir->meta.sfield_lowering_info = max_refs - sfield_pos;
            field_idxs[--sfield_pos] = field_idx;
          }
        }
        DCHECK_LE(ifield_pos, sfield_pos);
      }
    }
  }

  if (ifield_pos != 0u) {
    // Resolve instance field infos.
    DCHECK_EQ(ifield_lowering_infos_.Size(), 0u);
    ifield_lowering_infos_.Resize(ifield_pos);
    for (size_t pos = 0u; pos != ifield_pos; ++pos) {
      ifield_lowering_infos_.Insert(MirIFieldLoweringInfo(field_idxs[pos]));
    }
    MirIFieldLoweringInfo::Resolve(cu_->compiler_driver, GetCurrentDexCompilationUnit(),
                                ifield_lowering_infos_.GetRawStorage(), ifield_pos);
  }

  if (sfield_pos != max_refs) {
    // Resolve static field infos.
    DCHECK_EQ(sfield_lowering_infos_.Size(), 0u);
    sfield_lowering_infos_.Resize(max_refs - sfield_pos);
    for (size_t pos = max_refs; pos != sfield_pos;) {
      --pos;
      sfield_lowering_infos_.Insert(MirSFieldLoweringInfo(field_idxs[pos]));
    }
    MirSFieldLoweringInfo::Resolve(cu_->compiler_driver, GetCurrentDexCompilationUnit(),
                                sfield_lowering_infos_.GetRawStorage(), max_refs - sfield_pos);
  }
}

void MIRGraph::DoCacheMethodLoweringInfo() {
  static constexpr uint16_t invoke_types[] = { kVirtual, kSuper, kDirect, kStatic, kInterface };

  // Embed the map value in the entry to avoid extra padding in 64-bit builds.
  struct MapEntry {
    // Map key: target_method_idx, invoke_type, devirt_target. Ordered to avoid padding.
    const MethodReference* devirt_target;
    uint16_t target_method_idx;
    uint16_t invoke_type;
    // Map value.
    uint32_t lowering_info_index;
  };

  // Sort INVOKEs by method index, then by opcode, then by devirtualization target.
  struct MapEntryComparator {
    bool operator()(const MapEntry& lhs, const MapEntry& rhs) const {
      if (lhs.target_method_idx != rhs.target_method_idx) {
        return lhs.target_method_idx < rhs.target_method_idx;
      }
      if (lhs.invoke_type != rhs.invoke_type) {
        return lhs.invoke_type < rhs.invoke_type;
      }
      if (lhs.devirt_target != rhs.devirt_target) {
        if (lhs.devirt_target == nullptr) {
          return true;
        }
        if (rhs.devirt_target == nullptr) {
          return false;
        }
        return devirt_cmp(*lhs.devirt_target, *rhs.devirt_target);
      }
      return false;
    }
    MethodReferenceComparator devirt_cmp;
  };

  ScopedArenaAllocator allocator(&cu_->arena_stack);

  // All INVOKE instructions take 3 code units and there must also be a RETURN.
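  // For example, a 10-code-unit method can hold one RETURN (1 code unit) plus at
  // most (10 - 1) / 3 == 3 INVOKE instructions, giving max_refs == 3.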
  uint32_t max_refs = (current_code_item_->insns_size_in_code_units_ - 1u) / 3u;

  // Map invoke key (see MapEntry) to lowering info index and vice versa.
  // The invoke_map and sequential entries are essentially equivalent to Boost.MultiIndex's
  // multi_index_container with one ordered index and one sequential index.
  ScopedArenaSet<MapEntry, MapEntryComparator> invoke_map(MapEntryComparator(),
                                                          allocator.Adapter());
  const MapEntry** sequential_entries = reinterpret_cast<const MapEntry**>(
      allocator.Alloc(max_refs * sizeof(sequential_entries[0]), kArenaAllocMisc));
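  // How the two views cooperate: invoke_map deduplicates (method_idx, type,
  // devirt_target) keys, handing each new key the next lowering_info_index,
  // while sequential_entries[i] maps index i back to its entry in insertion
  // order, which is the order method_lowering_infos_ is built in below.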

  // Find INVOKE insns and their devirtualization targets.
  AllNodesIterator iter(this);
  for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
    if (bb->block_type != kDalvikByteCode) {
      continue;
    }
    for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
      if (mir->dalvikInsn.opcode >= Instruction::INVOKE_VIRTUAL &&
          mir->dalvikInsn.opcode <= Instruction::INVOKE_INTERFACE_RANGE &&
          mir->dalvikInsn.opcode != Instruction::RETURN_VOID_BARRIER) {
        // Decode target method index and invoke type.
        const Instruction* insn = Instruction::At(current_code_item_->insns_ + mir->offset);
        uint16_t target_method_idx;
        uint16_t invoke_type_idx;
        if (mir->dalvikInsn.opcode <= Instruction::INVOKE_INTERFACE) {
          target_method_idx = insn->VRegB_35c();
          invoke_type_idx = mir->dalvikInsn.opcode - Instruction::INVOKE_VIRTUAL;
        } else {
          target_method_idx = insn->VRegB_3rc();
          invoke_type_idx = mir->dalvikInsn.opcode - Instruction::INVOKE_VIRTUAL_RANGE;
        }

        // Find devirtualization target.
        // TODO: The devirt map is ordered by the dex pc here. Is there a way to get INVOKEs
        // ordered by dex pc as well? That would allow us to keep an iterator to devirt targets
        // and increment it as needed instead of making O(log n) lookups.
        const VerifiedMethod* verified_method = GetCurrentDexCompilationUnit()->GetVerifiedMethod();
        const MethodReference* devirt_target = verified_method->GetDevirtTarget(mir->offset);

        // Try to insert a new entry. If the insertion fails, we will have found an old one.
        MapEntry entry = {
            devirt_target,
            target_method_idx,
            invoke_types[invoke_type_idx],
            static_cast<uint32_t>(invoke_map.size())
        };
        auto it = invoke_map.insert(entry).first;  // Iterator to either the old or the new entry.
        mir->meta.method_lowering_info = it->lowering_info_index;
        // If we didn't actually insert, this just overwrites the existing slot
        // with the same value.
        sequential_entries[it->lowering_info_index] = &*it;
      }
    }
  }

  if (invoke_map.empty()) {
    return;
  }

  // Prepare unique method infos, set method info indexes for their MIRs.
  DCHECK_EQ(method_lowering_infos_.Size(), 0u);
  const size_t count = invoke_map.size();
  method_lowering_infos_.Resize(count);
  for (size_t pos = 0u; pos != count; ++pos) {
    const MapEntry* entry = sequential_entries[pos];
    MirMethodLoweringInfo method_info(entry->target_method_idx,
                                      static_cast<InvokeType>(entry->invoke_type));
    if (entry->devirt_target != nullptr) {
      method_info.SetDevirtualizationTarget(*entry->devirt_target);
    }
    method_lowering_infos_.Insert(method_info);
  }
  MirMethodLoweringInfo::Resolve(cu_->compiler_driver, GetCurrentDexCompilationUnit(),
                                 method_lowering_infos_.GetRawStorage(), count);
}

bool MIRGraph::SkipCompilation(const std::string& methodname) {
  return cu_->compiler_driver->SkipCompilation(methodname);
}

}  // namespace art