local_optimizations.cc revision 2ce745c06271d5223d57dbf08117b20d5b60694a
/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "dex/compiler_internals.h"

namespace art {

#define DEBUG_OPT(X)

/* Check RAW, WAR, and WAW dependency on the register operands */
#define CHECK_REG_DEP(use, def, check) ((def & check->use_mask) || \
                                        ((use | def) & check->def_mask))
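
/*
 * Reading the macro above: "use" and "def" are the register resource masks
 * of the instruction being moved, and "check" is an instruction it would
 * move across.  A dependency exists iff
 *   - def & check->use_mask: check reads a register this instruction
 *     defines (RAW), or
 *   - (use | def) & check->def_mask: check overwrites a register this
 *     instruction reads (WAR) or defines (WAW).
 */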

/* Scheduler heuristics */
#define MAX_HOIST_DISTANCE 20
#define LDLD_DISTANCE 4
#define LD_LATENCY 2

static bool IsDalvikRegisterClobbered(LIR* lir1, LIR* lir2) {
  int reg1Lo = DECODE_ALIAS_INFO_REG(lir1->alias_info);
  int reg1Hi = reg1Lo + DECODE_ALIAS_INFO_WIDE(lir1->alias_info);
  int reg2Lo = DECODE_ALIAS_INFO_REG(lir2->alias_info);
  int reg2Hi = reg2Lo + DECODE_ALIAS_INFO_WIDE(lir2->alias_info);

  return (reg1Lo == reg2Lo) || (reg1Lo == reg2Hi) || (reg1Hi == reg2Lo);
}

/* Convert a more expensive instruction (i.e. a load) into a move */
void Mir2Lir::ConvertMemOpIntoMove(LIR* orig_lir, int dest, int src) {
  /* Insert a move to replace the load */
  LIR* move_lir = OpRegCopyNoInsert(dest, src);
  /*
   * Insert the converted instruction after the original since the
   * optimization is scanning in the top-down order and the new instruction
   * will need to be re-checked (e.g. the new dest clobbers the src used in
   * this_lir).
   */
  InsertLIRAfter(orig_lir, move_lir);
}

/*
 * Eliminate redundant loads and stores within a superblock.  Candidates
 * are visited from the second-to-last instruction upward; for each
 * interesting load/store the instructions below it are scanned top-down.
 *
 * An earlier load can eliminate a later load iff
 *   1) They are must-aliases
 *   2) The native register is not clobbered in between
 *   3) The memory location is not written to in between
 *
 * An earlier store can eliminate a later load iff
 *   1) They are must-aliases
 *   2) The native register is not clobbered in between
 *   3) The memory location is not written to in between
 *
 * A later store can eliminate an earlier store iff
 *   1) They are must-aliases
 *   2) The memory location is not read in between
 */
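
/*
 * Illustrative example (hypothetical ARM-like LIR, not taken from a real
 * compilation) of the load-after-load case handled below:
 *
 *     ldr   r0, [sp, #16]   @ this_lir: load of Dalvik reg vA
 *     add   r2, r0, r1      @ neither r0 nor [sp, #16] is touched
 *     ldr   r3, [sp, #16]   @ check_lir: redundant reload of vA
 *
 * The reload is marked is_nop and, because the destination differs,
 * ConvertMemOpIntoMove() inserts a "mov r3, r0" right after it, so the
 * new copy is re-checked on the next scan.
 */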
void Mir2Lir::ApplyLoadStoreElimination(LIR* head_lir, LIR* tail_lir) {
  LIR* this_lir;

  if (head_lir == tail_lir) return;

  for (this_lir = PREV_LIR(tail_lir); this_lir != head_lir; this_lir = PREV_LIR(this_lir)) {

    if (is_pseudo_opcode(this_lir->opcode)) continue;

    int sink_distance = 0;

    uint64_t target_flags = GetTargetInstFlags(this_lir->opcode);

    /* Skip non-interesting instructions */
    if (this_lir->flags.is_nop ||
        (target_flags & IS_BRANCH) ||
        ((target_flags & (REG_DEF0 | REG_DEF1)) == (REG_DEF0 | REG_DEF1)) ||  // Skip wide loads.
        ((target_flags & (REG_USE0 | REG_USE1 | REG_USE2)) ==
         (REG_USE0 | REG_USE1 | REG_USE2)) ||  // Skip wide stores.
        !(target_flags & (IS_LOAD | IS_STORE))) {
      continue;
    }

    int native_reg_id;
    if (cu_->instruction_set == kX86) {
      // On x86, the operand holding the native register differs between
      // memory and register operations.
      native_reg_id = (target_flags & IS_STORE) ? this_lir->operands[2]
          : this_lir->operands[0];
    } else {
      native_reg_id = this_lir->operands[0];
    }
    bool is_this_lir_load = target_flags & IS_LOAD;
    LIR* check_lir;
    /* Use the mem mask to determine the rough memory location */
    uint64_t this_mem_mask = (this_lir->use_mask | this_lir->def_mask) & ENCODE_MEM;

    /*
     * Currently only eliminate redundant ld/st for constant and Dalvik
     * register accesses.
     */
    if (!(this_mem_mask & (ENCODE_LITERAL | ENCODE_DALVIK_REG))) continue;

    uint64_t stop_def_reg_mask = this_lir->def_mask & ~ENCODE_MEM;
    uint64_t stop_use_reg_mask;
    if (cu_->instruction_set == kX86) {
      stop_use_reg_mask = (IS_BRANCH | this_lir->use_mask) & ~ENCODE_MEM;
    } else {
      /*
       * Add pc to the resource mask to prevent this instruction
       * from sinking past branch instructions. Also take out the memory
       * region bits since stop_mask is used to check data/control
       * dependencies.
       */
      stop_use_reg_mask = (GetPCUseDefEncoding() | this_lir->use_mask) & ~ENCODE_MEM;
    }

    for (check_lir = NEXT_LIR(this_lir); check_lir != tail_lir; check_lir = NEXT_LIR(check_lir)) {

      /*
       * Skip already dead instructions (whose dataflow information is
       * outdated and misleading).
       */
      if (check_lir->flags.is_nop || is_pseudo_opcode(check_lir->opcode)) continue;

      uint64_t check_mem_mask = (check_lir->use_mask | check_lir->def_mask) & ENCODE_MEM;
      uint64_t alias_condition = this_mem_mask & check_mem_mask;
      bool stop_here = false;

      /*
       * Potential aliases seen - check the alias relations
       */
      uint64_t check_flags = GetTargetInstFlags(check_lir->opcode);
      // TUNING: Support instructions with multiple register targets.
      if ((check_flags & (REG_DEF0 | REG_DEF1)) == (REG_DEF0 | REG_DEF1)) {
        stop_here = true;
      } else if (check_mem_mask != ENCODE_MEM && alias_condition != 0) {
        bool is_check_lir_load = check_flags & IS_LOAD;
        if (alias_condition == ENCODE_LITERAL) {
          /*
           * Should only see literal loads in the instruction
           * stream.
           */
          DCHECK(!(check_flags & IS_STORE));
          /* Same value && same register type */
          if (check_lir->alias_info == this_lir->alias_info &&
              SameRegType(check_lir->operands[0], native_reg_id)) {
            /*
             * Different destination register - insert
             * a move
             */
            if (check_lir->operands[0] != native_reg_id) {
              ConvertMemOpIntoMove(check_lir, check_lir->operands[0], native_reg_id);
            }
            check_lir->flags.is_nop = true;
          }
        } else if (alias_condition == ENCODE_DALVIK_REG) {
          /* Must alias */
          if (check_lir->alias_info == this_lir->alias_info) {
            /* Only optimize compatible registers */
            bool reg_compatible = SameRegType(check_lir->operands[0], native_reg_id);
            if (is_check_lir_load) {
              /* RAR or RAW */
              if (reg_compatible) {
                /*
                 * Different destination register -
                 * insert a move
                 */
                if (check_lir->operands[0] != native_reg_id) {
                  ConvertMemOpIntoMove(check_lir, check_lir->operands[0], native_reg_id);
                }
                check_lir->flags.is_nop = true;
              } else {
                /*
                 * Destinations are of different types -
                 * something complicated going on so
                 * stop looking now.
                 */
                stop_here = true;
              }
            } else if (is_this_lir_load) {
              /* WAR - register value is killed */
              stop_here = true;
            } else {
              /* WAW - nuke the earlier store */
              this_lir->flags.is_nop = true;
              stop_here = true;
            }
          /* Partial overlap */
          } else if (IsDalvikRegisterClobbered(this_lir, check_lir)) {
            /*
             * It is actually ok to continue if check_lir
             * is a read. But it is hard to make a test
             * case for this so we just stop here to be
             * conservative.
             */
            stop_here = true;
          }
        }
        /* Memory content may be updated. Stop looking now. */
        if (stop_here) {
          break;
        /* The check_lir has been transformed - check the next one */
        } else if (check_lir->flags.is_nop) {
          continue;
        }
      }

      /*
       * this and check LIRs have no memory dependency. Now check if
       * their register operands have any RAW, WAR, and WAW
       * dependencies. If so, stop looking.
       */
      if (!stop_here) {
        stop_here = CHECK_REG_DEP(stop_use_reg_mask, stop_def_reg_mask, check_lir);
      }

      if (stop_here) {
        if (cu_->instruction_set == kX86) {
          // Prevent stores from being sunk between ops that generate ccodes and
          // ops that use them.
          uint64_t flags = GetTargetInstFlags(check_lir->opcode);
          if (sink_distance > 0 && (flags & IS_BRANCH) && (flags & USES_CCODES)) {
            check_lir = PREV_LIR(check_lir);
            sink_distance--;
          }
        }
        DEBUG_OPT(dump_dependent_insn_pair(this_lir, check_lir, "REG CLOBBERED"));
        /* Only sink store instructions */
        if (sink_distance && !is_this_lir_load) {
          LIR* new_store_lir =
              static_cast<LIR*>(arena_->NewMem(sizeof(LIR), true, ArenaAllocator::kAllocLIR));
          *new_store_lir = *this_lir;
          /*
           * Stop point found - insert *before* the check_lir
           * since the instruction list is scanned in the
           * top-down order.
           */
          InsertLIRBefore(check_lir, new_store_lir);
          this_lir->flags.is_nop = true;
        }
        break;
      } else if (!check_lir->flags.is_nop) {
        sink_distance++;
      }
    }
  }
}
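
/*
 * The store sinking performed above, on a hypothetical sequence (ARM-like
 * LIR, illustrative only): a store that cannot be eliminated is cloned
 * just above the first dependent instruction and the original is nop'd.
 *
 *     str   r0, [sp, #16]   @ this_lir
 *     add   r2, r3, r4      @ independent - sink_distance++
 *     ldr   r0, [r5, #0]    @ redefines r0 - stop point
 *
 * becomes
 *
 *     add   r2, r3, r4
 *     str   r0, [sp, #16]   @ clone inserted before the stop point
 *     ldr   r0, [r5, #0]    @ the original store is now a nop
 */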

/*
 * Walk forward from the second instruction in the superblock and, for
 * each load, scan upward (bottom-up) to try to hoist it to an earlier
 * slot.
 */
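
/*
 * For instance (hypothetical ARM-like LIR, illustrative only), given
 *
 *     add   r2, r3, r4      @ independent of the load
 *     add   r5, r6, r7      @ independent of the load
 *     ldr   r0, [sp, #16]   @ this_lir
 *
 * the load may be hoisted above both adds so that it has roughly
 * LD_LATENCY slots to complete before r0 is first consumed:
 *
 *     ldr   r0, [sp, #16]
 *     add   r2, r3, r4
 *     add   r5, r6, r7
 */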
void Mir2Lir::ApplyLoadHoisting(LIR* head_lir, LIR* tail_lir) {
  LIR* this_lir, *check_lir;
  /*
   * Store the list of independent instructions that can be hoisted past.
   * Will decide the best place to insert later.
   */
  LIR* prev_inst_list[MAX_HOIST_DISTANCE];

  /* Empty block */
  if (head_lir == tail_lir) return;

  /* Start from the second instruction */
  for (this_lir = NEXT_LIR(head_lir); this_lir != tail_lir; this_lir = NEXT_LIR(this_lir)) {

    if (is_pseudo_opcode(this_lir->opcode)) continue;

    uint64_t target_flags = GetTargetInstFlags(this_lir->opcode);
    /* Skip non-interesting instructions */
    if (this_lir->flags.is_nop ||
        ((target_flags & (REG_DEF0 | REG_DEF1)) == (REG_DEF0 | REG_DEF1)) ||
        !(target_flags & IS_LOAD)) {
      continue;
    }

    uint64_t stop_use_all_mask = this_lir->use_mask;

    if (cu_->instruction_set != kX86) {
      /*
       * Branches for null/range checks are marked with the true resource
       * bits, and loads to Dalvik registers, constant pools, and non-alias
       * locations are safe to be hoisted. So only mark the heap references
       * conservatively here.
       */
      if (stop_use_all_mask & ENCODE_HEAP_REF) {
        stop_use_all_mask |= GetPCUseDefEncoding();
      }
    }

    /* Similar to the above, but just check for pure register dependencies */
    uint64_t stop_use_reg_mask = stop_use_all_mask & ~ENCODE_MEM;
    uint64_t stop_def_reg_mask = this_lir->def_mask & ~ENCODE_MEM;

    int next_slot = 0;
    bool stop_here = false;

    /* Try to hoist the load to a good spot */
    for (check_lir = PREV_LIR(this_lir); check_lir != head_lir; check_lir = PREV_LIR(check_lir)) {

      /*
       * Skip already dead instructions (whose dataflow information is
       * outdated and misleading).
       */
      if (check_lir->flags.is_nop) continue;

      uint64_t check_mem_mask = check_lir->def_mask & ENCODE_MEM;
      uint64_t alias_condition = stop_use_all_mask & check_mem_mask;
      stop_here = false;

      /* Potential WAR alias seen - check the exact relation */
      if (check_mem_mask != ENCODE_MEM && alias_condition != 0) {
        /* We can fully disambiguate Dalvik references */
        if (alias_condition == ENCODE_DALVIK_REG) {
          /* Must alias or partially overlap */
          if ((check_lir->alias_info == this_lir->alias_info) ||
              IsDalvikRegisterClobbered(this_lir, check_lir)) {
            stop_here = true;
          }
        /* Conservatively treat all heap refs as may-alias */
        } else {
          DCHECK_EQ(alias_condition, ENCODE_HEAP_REF);
          stop_here = true;
        }
        /* Memory content may be updated. Stop looking now. */
        if (stop_here) {
          prev_inst_list[next_slot++] = check_lir;
          break;
        }
      }

      if (!stop_here) {
        stop_here = CHECK_REG_DEP(stop_use_reg_mask, stop_def_reg_mask,
                                  check_lir);
      }

      /*
       * Store the dependent or non-pseudo (independent) instruction in the
       * list.
       */
      if (stop_here || !is_pseudo_opcode(check_lir->opcode)) {
        prev_inst_list[next_slot++] = check_lir;
        if (next_slot == MAX_HOIST_DISTANCE) break;
      }

      /* Found a dependency - the load cannot be hoisted above this point */
      if (stop_here) {
        DEBUG_OPT(dump_dependent_insn_pair(check_lir, this_lir, "HOIST STOP"));
        break;
      }
    }

    /*
     * Reached the top - use head_lir as the dependent marker since all
     * labels are barriers.
     */
    if (!stop_here && next_slot < MAX_HOIST_DISTANCE) {
      prev_inst_list[next_slot++] = head_lir;
    }

    /*
     * At least one independent instruction is found. Scan in the reversed
     * direction to find a beneficial slot.
     */
    if (next_slot >= 2) {
      int first_slot = next_slot - 2;
      int slot;
      LIR* dep_lir = prev_inst_list[next_slot-1];
      /* If there is a ld-ld dependency, wait LDLD_DISTANCE cycles */
      if (!is_pseudo_opcode(dep_lir->opcode) &&
          (GetTargetInstFlags(dep_lir->opcode) & IS_LOAD)) {
        first_slot -= LDLD_DISTANCE;
      }
      /*
       * Make sure we check slot >= 0 since first_slot may be negative
       * when the loop is first entered.
       */
      for (slot = first_slot; slot >= 0; slot--) {
        LIR* cur_lir = prev_inst_list[slot];
        LIR* prev_lir = prev_inst_list[slot+1];

        /* Check the highest instruction */
        if (prev_lir->def_mask == ENCODE_ALL) {
          /*
           * If the first instruction is a load, don't hoist anything
           * above it since it is unlikely to be beneficial.
           */
          if (GetTargetInstFlags(cur_lir->opcode) & IS_LOAD) continue;
          /*
           * If the remaining number of slots is less than LD_LATENCY,
           * insert the hoisted load here.
           */
          if (slot < LD_LATENCY) break;
        }

        // Don't look across a barrier label
        if ((prev_lir->opcode == kPseudoTargetLabel) ||
            (prev_lir->opcode == kPseudoSafepointPC) ||
            (prev_lir->opcode == kPseudoBarrier)) {
          break;
        }

        /*
         * Try to find two instructions with load/use dependency until
         * the remaining instructions are less than LD_LATENCY.
         */
        bool prev_is_load = is_pseudo_opcode(prev_lir->opcode) ? false :
            (GetTargetInstFlags(prev_lir->opcode) & IS_LOAD);
        if (((cur_lir->use_mask & prev_lir->def_mask) && prev_is_load) || (slot < LD_LATENCY)) {
          break;
        }
      }

      /* Found a slot to hoist to */
      if (slot >= 0) {
        LIR* cur_lir = prev_inst_list[slot];
        LIR* new_load_lir =
            static_cast<LIR*>(arena_->NewMem(sizeof(LIR), true, ArenaAllocator::kAllocLIR));
        *new_load_lir = *this_lir;
        /*
         * Insertion is guaranteed to succeed since cur_lir
         * is never the first LIR on the list
         */
        InsertLIRBefore(cur_lir, new_load_lir);
        this_lir->flags.is_nop = true;
      }
    }
  }
}

void Mir2Lir::ApplyLocalOptimizations(LIR* head_lir, LIR* tail_lir) {
  if (!(cu_->disable_opt & (1 << kLoadStoreElimination))) {
    ApplyLoadStoreElimination(head_lir, tail_lir);
  }
  if (!(cu_->disable_opt & (1 << kLoadHoisting))) {
    ApplyLoadHoisting(head_lir, tail_lir);
  }
}

/*
 * Nop any unconditional branches that go to the next instruction.
 * Note: new redundant branches may be inserted later, and we'll
 * use a check in final instruction assembly to nop those out.
 */
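/*
 * For instance (illustrative), an unconditional branch whose target is
 * reachable by falling through nothing but pseudo instructions:
 *
 *     b     L_next          @ nop'd by the pass below
 *     (pseudo label, debug marker, ...)
 *   L_next:
 */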
void Mir2Lir::RemoveRedundantBranches() {
  LIR* this_lir;

  for (this_lir = first_lir_insn_; this_lir != last_lir_insn_; this_lir = NEXT_LIR(this_lir)) {

    /* Branch to the next instruction */
    if (IsUnconditionalBranch(this_lir)) {
      LIR* next_lir = this_lir;

      while (true) {
        next_lir = NEXT_LIR(next_lir);

        /*
         * Is the branch target the next instruction?
         */
        if (next_lir == this_lir->target) {
          this_lir->flags.is_nop = true;
          break;
        }

        /*
         * Found real useful stuff between the branch and the target.
         * Need to explicitly check the last_lir_insn_ here because it
         * might be the last real instruction.
         */
        if (!is_pseudo_opcode(next_lir->opcode) ||
            (next_lir == last_lir_insn_))
          break;
      }
    }
  }
}

}  // namespace art