// utility_mips.cc — revision 2689fbad6b5ec1ae8f8c8791a80c6fd3cf24144d
/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "codegen_mips.h"
#include "dex/quick/mir_to_lir-inl.h"
#include "mips_lir.h"

namespace art {

/* This file contains codegen for the MIPS32 ISA. */

// Emit a register-to-register copy where at least one side is an FP register.
// Chooses fmov.d for double<->double, fmov.s for single<->single, and
// mtc1/mfc1 for core<->FP moves.  The resulting LIR is flagged as a nop when
// source and destination are the same register (unless safe optimizations
// are disabled via cu_->disable_opt).
LIR* MipsMir2Lir::OpFpRegCopy(RegStorage r_dest, RegStorage r_src) {
  int opcode;
  /* must be both DOUBLE or both not DOUBLE */
  DCHECK_EQ(r_dest.IsDouble(), r_src.IsDouble());
  if (r_dest.IsDouble()) {
    opcode = kMipsFmovd;
  } else {
    if (r_dest.IsSingle()) {
      if (r_src.IsSingle()) {
        opcode = kMipsFmovs;
      } else {
        /* note the operands are swapped for the mtc1 instr */
        RegStorage t_opnd = r_src;
        r_src = r_dest;
        r_dest = t_opnd;
        opcode = kMipsMtc1;
      }
    } else {
      DCHECK(r_src.IsSingle());
      opcode = kMipsMfc1;
    }
  }
  LIR* res = RawLIR(current_dalvik_offset_, opcode, r_src.GetReg(), r_dest.GetReg());
  if (!(cu_->disable_opt & (1 << kSafeOptimizations)) && r_dest == r_src) {
    res->flags.is_nop = true;
  }
  return res;
}

// An int constant is "inexpensive" when it can be materialized in a single
// instruction (see LoadConstantNoClobber): zero, an unsigned 16-bit value
// (ori), or a small negative value in [-32768, 0) (addiu).
bool MipsMir2Lir::InexpensiveConstantInt(int32_t value) {
  return ((value == 0) || IsUint(16, value) || ((value < 0) && (value >= -32768)));
}

// Float constants are always routed through the literal pool for now.
bool MipsMir2Lir::InexpensiveConstantFloat(int32_t value) {
  return false;  // TUNING
}

// Long constants are always treated as expensive for now.
bool MipsMir2Lir::InexpensiveConstantLong(int64_t value) {
  return false;  // TUNING
}

// Double constants are always treated as expensive for now.
bool MipsMir2Lir::InexpensiveConstantDouble(int64_t value) {
  return false;  // TUNING
}

/*
 * Load a immediate using a shortcut if possible; otherwise
 * grab from the per-translation literal pool.  If target is
 * a high register, build constant into a low register and copy.
 *
 * No additional register clobbering operation performed. Use this version when
 * 1) r_dest is freshly returned from AllocTemp or
 * 2) The codegen is under fixed register usage
 */
LIR* MipsMir2Lir::LoadConstantNoClobber(RegStorage r_dest, int value) {
  LIR *res;

  // FP destinations are built in a core temp and then moved over with mtc1.
  RegStorage r_dest_save = r_dest;
  int is_fp_reg = r_dest.IsFloat();
  if (is_fp_reg) {
    DCHECK(r_dest.IsSingle());
    r_dest = AllocTemp();
  }

  /* See if the value can be constructed cheaply */
  if (value == 0) {
    res = NewLIR2(kMipsMove, r_dest.GetReg(), rZERO);
  } else if ((value > 0) && (value <= 65535)) {
    // Fits in an unsigned 16-bit immediate: single ori from $zero.
    res = NewLIR3(kMipsOri, r_dest.GetReg(), rZERO, value);
  } else if ((value < 0) && (value >= -32768)) {
    // Fits in a signed 16-bit immediate: single addiu from $zero.
    res = NewLIR3(kMipsAddiu, r_dest.GetReg(), rZERO, value);
  } else {
    // General case: lui for the high half, ori for the (non-zero) low half.
    // The full value is passed to the ori; presumably the assembler truncates
    // it to the low 16 bits — TODO confirm against the LIR assembler.
    res = NewLIR2(kMipsLui, r_dest.GetReg(), value >> 16);
    if (value & 0xffff)
      NewLIR3(kMipsOri, r_dest.GetReg(), r_dest.GetReg(), value);
  }

  if (is_fp_reg) {
    NewLIR2(kMipsMtc1, r_dest.GetReg(), r_dest_save.GetReg());
    FreeTemp(r_dest);
  }

  return res;
}

// Emit an unconditional branch to |target|; the offset is resolved later
// during assembly.
LIR* MipsMir2Lir::OpUnconditionalBranch(LIR* target) {
  LIR* res = NewLIR1(kMipsB, 0 /* offset to be patched during assembly*/);
  res->target = target;
  return res;
}

// Single-register operations: kOpBx emits jr; kOpBlx emits jalr linking
// through rRA.  Any other OpKind aborts.
LIR* MipsMir2Lir::OpReg(OpKind op, RegStorage r_dest_src) {
  MipsOpCode opcode = kMipsNop;
  switch (op) {
    case kOpBlx:
      opcode = kMipsJalr;
      break;
    case kOpBx:
      return NewLIR1(kMipsJr, r_dest_src.GetReg());
      break;  // NOTE(review): unreachable after the return above.
    default:
      LOG(FATAL) << "Bad case in OpReg";
  }
  return NewLIR2(opcode, rRA, r_dest_src.GetReg());
}

// Register-immediate operation.  Only kOpAdd and kOpSub are supported; both
// delegate to OpRegRegImm with r_dest_src1 as both dest and source.
// NOTE(review): everything after the switch (short_form/r_scratch/kOpCmp
// handling) is unreachable — both supported cases return early and the
// default case hits LOG(FATAL), assuming LOG(FATAL) aborts.  Candidate for
// cleanup.
LIR* MipsMir2Lir::OpRegImm(OpKind op, RegStorage r_dest_src1, int value) {
  LIR *res;
  bool neg = (value < 0);
  int abs_value = (neg) ? -value : value;
  bool short_form = (abs_value & 0xff) == abs_value;
  MipsOpCode opcode = kMipsNop;
  switch (op) {
    case kOpAdd:
      return OpRegRegImm(op, r_dest_src1, r_dest_src1, value);
      break;
    case kOpSub:
      return OpRegRegImm(op, r_dest_src1, r_dest_src1, value);
      break;
    default:
      LOG(FATAL) << "Bad case in OpRegImm";
      break;
  }
  if (short_form) {
    res = NewLIR2(opcode, r_dest_src1.GetReg(), abs_value);
  } else {
    RegStorage r_scratch = AllocTemp();
    res = LoadConstant(r_scratch, value);
    if (op == kOpCmp)
      NewLIR2(opcode, r_dest_src1.GetReg(), r_scratch.GetReg());
    else
      NewLIR3(opcode, r_dest_src1.GetReg(), r_dest_src1.GetReg(), r_scratch.GetReg());
  }
  return res;
}

// Three-register ALU operation.  Maps the generic OpKind onto the
// corresponding MIPS32 R-format opcode; carry-based ops abort since MIPS
// has no carry bit.
LIR* MipsMir2Lir::OpRegRegReg(OpKind op, RegStorage r_dest, RegStorage r_src1, RegStorage r_src2) {
  MipsOpCode opcode = kMipsNop;
  switch (op) {
    case kOpAdd:
      opcode = kMipsAddu;
      break;
    case kOpSub:
      opcode = kMipsSubu;
      break;
    case kOpAnd:
      opcode = kMipsAnd;
      break;
    case kOpMul:
      opcode = kMipsMul;
      break;
    case kOpOr:
      opcode = kMipsOr;
      break;
    case kOpXor:
      opcode = kMipsXor;
      break;
    case kOpLsl:
      opcode = kMipsSllv;
      break;
    case kOpLsr:
      opcode = kMipsSrlv;
      break;
    case kOpAsr:
      opcode = kMipsSrav;
      break;
    case kOpAdc:
    case kOpSbc:
      LOG(FATAL) << "No carry bit on MIPS";
      break;
    default:
      LOG(FATAL) << "bad case in OpRegRegReg";
      break;
  }
  return NewLIR3(opcode, r_dest.GetReg(), r_src1.GetReg(), r_src2.GetReg());
}

// Two-register + immediate ALU operation.  Uses the I-format opcode when the
// immediate fits its encoding (signed 16-bit for add/sub, unsigned 16-bit
// for the logical ops); otherwise materializes the constant into a register
// and falls back to the R-format opcode.  Note kOpSub is rewritten as
// addiu with a negated immediate when possible.
LIR* MipsMir2Lir::OpRegRegImm(OpKind op, RegStorage r_dest, RegStorage r_src1, int value) {
  LIR *res;
  MipsOpCode opcode = kMipsNop;
  bool short_form = true;

  switch (op) {
    case kOpAdd:
      if (IS_SIMM16(value)) {
        opcode = kMipsAddiu;
      } else {
        short_form = false;
        opcode = kMipsAddu;
      }
      break;
    case kOpSub:
      if (IS_SIMM16((-value))) {
        value = -value;
        opcode = kMipsAddiu;
      } else {
        short_form = false;
        opcode = kMipsSubu;
      }
      break;
    case kOpLsl:
      DCHECK(value >= 0 && value <= 31);
      opcode = kMipsSll;
      break;
    case kOpLsr:
      DCHECK(value >= 0 && value <= 31);
      opcode = kMipsSrl;
      break;
    case kOpAsr:
      DCHECK(value >= 0 && value <= 31);
      opcode = kMipsSra;
      break;
    case kOpAnd:
      if (IS_UIMM16((value))) {
        opcode = kMipsAndi;
      } else {
        short_form = false;
        opcode = kMipsAnd;
      }
      break;
    case kOpOr:
      if (IS_UIMM16((value))) {
        opcode = kMipsOri;
      } else {
        short_form = false;
        opcode = kMipsOr;
      }
      break;
    case kOpXor:
      if (IS_UIMM16((value))) {
        opcode = kMipsXori;
      } else {
        short_form = false;
        opcode = kMipsXor;
      }
      break;
    case kOpMul:
      // mul has no immediate form; always go through a register.
      short_form = false;
      opcode = kMipsMul;
      break;
    default:
      LOG(FATAL) << "Bad case in OpRegRegImm";
      break;
  }

  if (short_form) {
    res = NewLIR3(opcode, r_dest.GetReg(), r_src1.GetReg(), value);
  } else {
    if (r_dest != r_src1) {
      // Build the constant directly in r_dest to avoid a temp.
      res = LoadConstant(r_dest, value);
      NewLIR3(opcode, r_dest.GetReg(), r_src1.GetReg(), r_dest.GetReg());
    } else {
      RegStorage r_scratch = AllocTemp();
      res = LoadConstant(r_scratch, value);
      NewLIR3(opcode, r_dest.GetReg(), r_src1.GetReg(), r_scratch.GetReg());
    }
  }
  return res;
}

// Two-register operation.  Unary/copy forms are emitted directly; binary
// ALU forms delegate to OpRegRegReg with r_dest_src1 as both dest and first
// source.  Sign-extension (kOp2Byte/kOp2Short) uses seb/seh on MIPS32r2+
// and a shift-left/shift-right pair otherwise.
LIR* MipsMir2Lir::OpRegReg(OpKind op, RegStorage r_dest_src1, RegStorage r_src2) {
  MipsOpCode opcode = kMipsNop;
  LIR *res;
  switch (op) {
    case kOpMov:
      opcode = kMipsMove;
      break;
    case kOpMvn:
      // not(x) == nor(x, $zero).
      return NewLIR3(kMipsNor, r_dest_src1.GetReg(), r_src2.GetReg(), rZERO);
    case kOpNeg:
      // neg(x) == subu($zero, x).
      return NewLIR3(kMipsSubu, r_dest_src1.GetReg(), rZERO, r_src2.GetReg());
    case kOpAdd:
    case kOpAnd:
    case kOpMul:
    case kOpOr:
    case kOpSub:
    case kOpXor:
      return OpRegRegReg(op, r_dest_src1, r_dest_src1, r_src2);
    case kOp2Byte:
#if __mips_isa_rev >= 2
      res = NewLIR2(kMipsSeb, r_dest_src1.GetReg(), r_src2.GetReg());
#else
      res = OpRegRegImm(kOpLsl, r_dest_src1, r_src2, 24);
      OpRegRegImm(kOpAsr, r_dest_src1, r_dest_src1, 24);
#endif
      return res;
    case kOp2Short:
#if __mips_isa_rev >= 2
      res = NewLIR2(kMipsSeh, r_dest_src1.GetReg(), r_src2.GetReg());
#else
      res = OpRegRegImm(kOpLsl, r_dest_src1, r_src2, 16);
      OpRegRegImm(kOpAsr, r_dest_src1, r_dest_src1, 16);
#endif
      return res;
    case kOp2Char:
      // Zero-extend 16 bits.
      return NewLIR3(kMipsAndi, r_dest_src1.GetReg(), r_src2.GetReg(), 0xFFFF);
    default:
      LOG(FATAL) << "Bad case in OpRegReg";
      break;
  }
  return NewLIR2(opcode, r_dest_src1.GetReg(), r_src2.GetReg());
}

// Reg<-mem move by MoveType: not implemented for MIPS.
LIR* MipsMir2Lir::OpMovRegMem(RegStorage r_dest, RegStorage r_base, int offset,
                              MoveType move_type) {
  UNIMPLEMENTED(FATAL);
  return nullptr;
}

// Mem<-reg move by MoveType: not implemented for MIPS.
LIR* MipsMir2Lir::OpMovMemReg(RegStorage r_base, int offset, RegStorage r_src, MoveType move_type) {
  UNIMPLEMENTED(FATAL);
  return nullptr;
}

// Conditional reg-reg move: not used on MIPS; aborts if called.
LIR* MipsMir2Lir::OpCondRegReg(OpKind op, ConditionCode cc, RegStorage r_dest, RegStorage r_src) {
  LOG(FATAL) << "Unexpected use of OpCondRegReg for MIPS";
  return NULL;
}

// Materialize a 64-bit constant into a register pair, low word first.
// Returns the LIR for the low-word load.
LIR* MipsMir2Lir::LoadConstantWide(RegStorage r_dest, int64_t value) {
  LIR *res;
  res = LoadConstantNoClobber(r_dest.GetLow(), Low32Bits(value));
  LoadConstantNoClobber(r_dest.GetHigh(), High32Bits(value));
  return res;
}

/* Load value from base + scaled index. */
// Computes base + (index << scale) into a temp, then loads through it with
// the size-appropriate opcode (lwc1 for single-precision FP destinations).
LIR* MipsMir2Lir::LoadBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_dest,
                                  int scale, OpSize size) {
  LIR *first = NULL;
  LIR *res;
  MipsOpCode opcode = kMipsNop;
  RegStorage t_reg = AllocTemp();

  if (r_dest.IsFloat()) {
    DCHECK(r_dest.IsSingle());
    DCHECK((size == k32) || (size == kSingle) || (size == kReference));
    size = kSingle;
  } else {
    if (size == kSingle)
      size = k32;
  }

  if (!scale) {
    first = NewLIR3(kMipsAddu, t_reg.GetReg(), r_base.GetReg(), r_index.GetReg());
  } else {
    first = OpRegRegImm(kOpLsl, t_reg, r_index, scale);
    NewLIR3(kMipsAddu, t_reg.GetReg(), r_base.GetReg(), t_reg.GetReg());
  }

  switch (size) {
    case kSingle:
      opcode = kMipsFlwc1;
      break;
    case k32:
    case kReference:
      opcode = kMipsLw;
      break;
    case kUnsignedHalf:
      opcode = kMipsLhu;
      break;
    case kSignedHalf:
      opcode = kMipsLh;
      break;
    case kUnsignedByte:
      opcode = kMipsLbu;
      break;
    case kSignedByte:
      opcode = kMipsLb;
      break;
    default:
      LOG(FATAL) << "Bad case in LoadBaseIndexed";
  }

  res = NewLIR3(opcode, r_dest.GetReg(), 0, t_reg.GetReg());
  FreeTemp(t_reg);
  // NOTE(review): |first| is assigned on both address-forming paths above,
  // so this conditional always returns |first|.
  return (first) ? first : res;
}

/* store value base base + scaled index. */
// Mirror of LoadBaseIndexed for stores.  Half and byte stores ignore
// signedness (sh/sb store the low bits either way).
// NOTE(review): unlike LoadBaseIndexed, t_reg is never FreeTemp()'d here —
// verify whether the temp is reclaimed elsewhere or this leaks a temp.
LIR* MipsMir2Lir::StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src,
                                   int scale, OpSize size) {
  LIR *first = NULL;
  MipsOpCode opcode = kMipsNop;
  RegStorage t_reg = AllocTemp();

  if (r_src.IsFloat()) {
    DCHECK(r_src.IsSingle());
    DCHECK((size == k32) || (size == kSingle) || (size == kReference));
    size = kSingle;
  } else {
    if (size == kSingle)
      size = k32;
  }

  if (!scale) {
    first = NewLIR3(kMipsAddu, t_reg.GetReg(), r_base.GetReg(), r_index.GetReg());
  } else {
    first = OpRegRegImm(kOpLsl, t_reg, r_index, scale);
    NewLIR3(kMipsAddu, t_reg.GetReg(), r_base.GetReg(), t_reg.GetReg());
  }

  switch (size) {
    case kSingle:
      opcode = kMipsFswc1;
      break;
    case k32:
    case kReference:
      opcode = kMipsSw;
      break;
    case kUnsignedHalf:
    case kSignedHalf:
      opcode = kMipsSh;
      break;
    case kUnsignedByte:
    case kSignedByte:
      opcode = kMipsSb;
      break;
    default:
      LOG(FATAL) << "Bad case in StoreBaseIndexed";
  }
  NewLIR3(opcode, r_src.GetReg(), 0, t_reg.GetReg());
  return first;
}

// FIXME: don't split r_dest into 2 containers.
LIR* MipsMir2Lir::LoadBaseDispBody(RegStorage r_base, int displacement, RegStorage r_dest,
                                   RegStorage r_dest_hi, OpSize size) {
/*
 * Load value from base + displacement.  Optionally perform null check
 * on base (which must have an associated s_reg and MIR).  If not
 * performing null check, incoming MIR can be null.  IMPORTANT: this
 * code must not allocate any new temps.  If a new register is needed
 * and base and dest are the same, spill some other register to
 * rlp and then restore.
 */
  LIR *res;
  LIR *load = NULL;
  LIR *load2 = NULL;
  MipsOpCode opcode = kMipsNop;
  bool short_form = IS_SIMM16(displacement);
  bool pair = false;

  switch (size) {
    case k64:
    case kDouble:
      // Wide loads are emitted as two 32-bit loads (lw or lwc1).
      pair = true;
      opcode = kMipsLw;
      if (r_dest.IsFloat()) {
        opcode = kMipsFlwc1;
        if (r_dest.IsDouble()) {
          // Split the double into its two overlapping single-precision regs.
          // NOTE(review): k64BitSolo is constructed here with pair-style
          // (low, high) arguments, whereas StoreBaseDispBody builds a
          // k64BitPair for the same situation — confirm which is intended.
          int reg_num = (r_dest.GetRegNum() << 1) | RegStorage::kFloatingPoint;
          r_dest = RegStorage(RegStorage::k64BitSolo, reg_num, reg_num + 1);
        } else {
          DCHECK(r_dest_hi.IsFloat());
          DCHECK_EQ(r_dest.GetReg(), r_dest_hi.GetReg() - 1);
          r_dest_hi.SetReg(r_dest.GetReg() + 1);
        }
      }
      // Both displacement and displacement + 4 must fit in 16 bits.
      short_form = IS_SIMM16_2WORD(displacement);
      DCHECK_EQ((displacement & 0x3), 0);
      break;
    case k32:
    case kSingle:
    case kReference:
      opcode = kMipsLw;
      if (r_dest.IsFloat()) {
        opcode = kMipsFlwc1;
        DCHECK(r_dest.IsSingle());
      }
      DCHECK_EQ((displacement & 0x3), 0);
      break;
    case kUnsignedHalf:
      opcode = kMipsLhu;
      DCHECK_EQ((displacement & 0x1), 0);
      break;
    case kSignedHalf:
      opcode = kMipsLh;
      DCHECK_EQ((displacement & 0x1), 0);
      break;
    case kUnsignedByte:
      opcode = kMipsLbu;
      break;
    case kSignedByte:
      opcode = kMipsLb;
      break;
    default:
      LOG(FATAL) << "Bad case in LoadBaseIndexedBody";
  }

  if (short_form) {
    if (!pair) {
      load = res = NewLIR3(opcode, r_dest.GetReg(), displacement, r_base.GetReg());
    } else {
      load = res = NewLIR3(opcode, r_dest.GetReg(), displacement + LOWORD_OFFSET, r_base.GetReg());
      load2 = NewLIR3(opcode, r_dest_hi.GetReg(), displacement + HIWORD_OFFSET, r_base.GetReg());
    }
  } else {
    // Displacement does not fit the 16-bit offset field: form the address
    // in a register first.
    if (pair) {
      RegStorage r_tmp = AllocTemp();
      res = OpRegRegImm(kOpAdd, r_tmp, r_base, displacement);
      load = NewLIR3(opcode, r_dest.GetReg(), LOWORD_OFFSET, r_tmp.GetReg());
      load2 = NewLIR3(opcode, r_dest_hi.GetReg(), HIWORD_OFFSET, r_tmp.GetReg());
      FreeTemp(r_tmp);
    } else {
      // Reuse r_dest as the address temp when it doesn't alias r_base.
      RegStorage r_tmp = (r_base == r_dest) ? AllocTemp() : r_dest;
      res = OpRegRegImm(kOpAdd, r_tmp, r_base, displacement);
      load = NewLIR3(opcode, r_dest.GetReg(), 0, r_tmp.GetReg());
      if (r_tmp != r_dest)
        FreeTemp(r_tmp);
    }
  }

  if (mem_ref_type_ == ResourceMask::kDalvikReg) {
    // Dalvik-register accesses must be relative to the frame pointer.
    DCHECK(r_base == rs_rMIPS_SP);
    AnnotateDalvikRegAccess(load, (displacement + (pair ? LOWORD_OFFSET : 0)) >> 2,
                            true /* is_load */, pair /* is64bit */);
    if (pair) {
      AnnotateDalvikRegAccess(load2, (displacement + HIWORD_OFFSET) >> 2,
                              true /* is_load */, pair /* is64bit */);
    }
  }
  return load;
}

// Public entry point for displacement loads.  Splits 64-bit destinations
// into low/high halves for LoadBaseDispBody and emits the conservative
// LoadLoad + LoadStore barriers after a volatile load.
LIR* MipsMir2Lir::LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest,
                               OpSize size, VolatileKind is_volatile) {
  if (is_volatile == kVolatile) {
    // Volatile 64-bit accesses are not supported here.
    DCHECK(size != k64 && size != kDouble);
  }

  // TODO: base this on target.
  if (size == kWord) {
    size = k32;
  }
  LIR* load;
  if (size == k64 || size == kDouble) {
    load = LoadBaseDispBody(r_base, displacement, r_dest.GetLow(), r_dest.GetHigh(), size);
  } else {
    load = LoadBaseDispBody(r_base, displacement, r_dest, RegStorage::InvalidReg(), size);
  }

  if (UNLIKELY(is_volatile == kVolatile)) {
    // Without context sensitive analysis, we must issue the most conservative barriers.
    // In this case, either a load or store may follow so we issue both barriers.
    GenMemBarrier(kLoadLoad);
    GenMemBarrier(kLoadStore);
  }

  return load;
}

// FIXME: don't split r_dest into 2 containers.
// Store a value to base + displacement; mirror of LoadBaseDispBody.
// Wide stores are emitted as two 32-bit stores (sw or swc1).
// NOTE(review): |pair| is latched from r_src.IsPair() *before* a solo
// double is rewritten into a register pair below, so such a value would
// take the single-word store path — confirm callers never pass a solo
// double, or that this is handled upstream.
LIR* MipsMir2Lir::StoreBaseDispBody(RegStorage r_base, int displacement,
                                    RegStorage r_src, RegStorage r_src_hi, OpSize size) {
  LIR *res;
  LIR *store = NULL;
  LIR *store2 = NULL;
  MipsOpCode opcode = kMipsNop;
  bool short_form = IS_SIMM16(displacement);
  bool pair = r_src.IsPair();

  switch (size) {
    case k64:
    case kDouble:
      opcode = kMipsSw;
      if (r_src.IsFloat()) {
        opcode = kMipsFswc1;
        if (r_src.IsDouble()) {
          // Split the double into its two overlapping single-precision regs.
          int reg_num = (r_src.GetRegNum() << 1) | RegStorage::kFloatingPoint;
          r_src = RegStorage(RegStorage::k64BitPair, reg_num, reg_num + 1);
        } else {
          DCHECK(r_src_hi.IsFloat());
          DCHECK_EQ(r_src.GetReg(), (r_src_hi.GetReg() - 1));
          r_src_hi.SetReg(r_src.GetReg() + 1);
        }
      }
      // Both displacement and displacement + 4 must fit in 16 bits.
      short_form = IS_SIMM16_2WORD(displacement);
      DCHECK_EQ((displacement & 0x3), 0);
      break;
    case k32:
    case kSingle:
    case kReference:
      opcode = kMipsSw;
      if (r_src.IsFloat()) {
        opcode = kMipsFswc1;
        DCHECK(r_src.IsSingle());
      }
      DCHECK_EQ((displacement & 0x3), 0);
      break;
    case kUnsignedHalf:
    case kSignedHalf:
      // Signedness is irrelevant for stores: sh writes the low 16 bits.
      opcode = kMipsSh;
      DCHECK_EQ((displacement & 0x1), 0);
      break;
    case kUnsignedByte:
    case kSignedByte:
      opcode = kMipsSb;
      break;
    default:
      LOG(FATAL) << "Bad case in StoreBaseDispBody";
  }

  if (short_form) {
    if (!pair) {
      store = res = NewLIR3(opcode, r_src.GetReg(), displacement, r_base.GetReg());
    } else {
      store = res = NewLIR3(opcode, r_src.GetReg(), displacement + LOWORD_OFFSET, r_base.GetReg());
      store2 = NewLIR3(opcode, r_src_hi.GetReg(), displacement + HIWORD_OFFSET, r_base.GetReg());
    }
  } else {
    // Displacement does not fit the 16-bit offset field: form the address
    // in a scratch register first.
    RegStorage r_scratch = AllocTemp();
    res = OpRegRegImm(kOpAdd, r_scratch, r_base, displacement);
    if (!pair) {
      store = NewLIR3(opcode, r_src.GetReg(), 0, r_scratch.GetReg());
    } else {
      store = NewLIR3(opcode, r_src.GetReg(), LOWORD_OFFSET, r_scratch.GetReg());
      store2 = NewLIR3(opcode, r_src_hi.GetReg(), HIWORD_OFFSET, r_scratch.GetReg());
    }
    FreeTemp(r_scratch);
  }

  if (mem_ref_type_ == ResourceMask::kDalvikReg) {
    // Dalvik-register accesses must be relative to the frame pointer.
    DCHECK(r_base == rs_rMIPS_SP);
    AnnotateDalvikRegAccess(store, (displacement + (pair ? LOWORD_OFFSET : 0)) >> 2,
                            false /* is_load */, pair /* is64bit */);
    if (pair) {
      AnnotateDalvikRegAccess(store2, (displacement + HIWORD_OFFSET) >> 2,
                              false /* is_load */, pair /* is64bit */);
    }
  }

  return res;
}

// Public entry point for displacement stores.  Splits 64-bit sources into
// low/high halves and emits the volatile-store barrier protocol: a
// StoreStore barrier before the store and a StoreLoad barrier after it.
LIR* MipsMir2Lir::StoreBaseDisp(RegStorage r_base, int displacement, RegStorage r_src,
                                OpSize size, VolatileKind is_volatile) {
  if (is_volatile == kVolatile) {
    // Volatile 64-bit accesses are not supported here.
    DCHECK(size != k64 && size != kDouble);
    // There might have been a store before this volatile one so insert StoreStore barrier.
    GenMemBarrier(kStoreStore);
  }

  // TODO: base this on target.
  if (size == kWord) {
    size = k32;
  }
  LIR* store;
  if (size == k64 || size == kDouble) {
    store = StoreBaseDispBody(r_base, displacement, r_src.GetLow(), r_src.GetHigh(), size);
  } else {
    store = StoreBaseDispBody(r_base, displacement, r_src, RegStorage::InvalidReg(), size);
  }

  if (UNLIKELY(is_volatile == kVolatile)) {
    // A load might follow the volatile store so insert a StoreLoad barrier.
    GenMemBarrier(kStoreLoad);
  }

  return store;
}

// Thread-relative memory op (32-bit offsets): not used on MIPS; aborts.
LIR* MipsMir2Lir::OpThreadMem(OpKind op, ThreadOffset<4> thread_offset) {
  LOG(FATAL) << "Unexpected use of OpThreadMem for MIPS";
  return NULL;
}

// Thread-relative memory op (64-bit offsets): MIPS32 target; never valid.
LIR* MipsMir2Lir::OpThreadMem(OpKind op, ThreadOffset<8> thread_offset) {
  UNIMPLEMENTED(FATAL) << "Should not be called.";
  return nullptr;
}

// Direct memory-operand ALU op: not used on MIPS; aborts.
LIR* MipsMir2Lir::OpMem(OpKind op, RegStorage r_base, int disp) {
  LOG(FATAL) << "Unexpected use of OpMem for MIPS";
  return NULL;
}

// Indexed store with displacement: not used on MIPS; aborts.
LIR* MipsMir2Lir::StoreBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale,
                                       int displacement, RegStorage r_src, OpSize size) {
  LOG(FATAL) << "Unexpected use of StoreBaseIndexedDisp for MIPS";
  return NULL;
}

// Reg-op-mem: not used on MIPS; aborts.
LIR* MipsMir2Lir::OpRegMem(OpKind op, RegStorage r_dest, RegStorage r_base, int offset) {
  LOG(FATAL) << "Unexpected use of OpRegMem for MIPS";
  return NULL;
}

// Indexed load with displacement: not used on MIPS; aborts.
LIR* MipsMir2Lir::LoadBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale,
                                      int displacement, RegStorage r_dest, OpSize size) {
  LOG(FATAL) << "Unexpected use of LoadBaseIndexedDisp for MIPS";
  return NULL;
}

// Generic conditional branch: not used on MIPS (compare-and-branch is
// fused elsewhere); aborts.
LIR* MipsMir2Lir::OpCondBranch(ConditionCode cc, LIR* target) {
  LOG(FATAL) << "Unexpected use of OpCondBranch for MIPS";
  return NULL;
}

}  // namespace art