//===--- HexagonBitTracker.cpp --------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"

#include "Hexagon.h"
#include "HexagonInstrInfo.h"
#include "HexagonRegisterInfo.h"
#include "HexagonTargetMachine.h"
#include "HexagonBitTracker.h"

using namespace llvm;

typedef BitTracker BT;

HexagonEvaluator::HexagonEvaluator(const HexagonRegisterInfo &tri,
                                   MachineRegisterInfo &mri,
                                   const HexagonInstrInfo &tii,
                                   MachineFunction &mf)
    : MachineEvaluator(tri, mri), MF(mf), MFI(*mf.getFrameInfo()), TII(tii) {
  // Populate the VRX map (VR to extension-type).
  // Go over all the formal parameters of the function. If a given parameter
  // P is sign- or zero-extended, locate the virtual register holding that
  // parameter and create an entry in the VRX map indicating the type of
  // extension (and the source type).
  // This is a bit complicated to do accurately, since the memory layout
  // information is necessary to precisely determine whether an aggregate
  // parameter will be passed in a register or in memory. What is given in
  // MRI is the association between the physical register that is live-in
  // (i.e. holds an argument), and the virtual register that this value will
  // be copied into. This, by itself, is not sufficient to map the virtual
  // register back to a formal parameter from Function (since consecutive
  // live-ins from MRI may not correspond to consecutive formal parameters
  // from Function). To avoid the complications with in-memory arguments,
  // only consider the initial sequence of formal parameters that are known
  // to be passed via registers.
  unsigned AttrIdx = 0;
  unsigned InVirtReg, InPhysReg = 0;
  const Function &F = *MF.getFunction();
  typedef Function::const_arg_iterator arg_iterator;
  for (arg_iterator I = F.arg_begin(), E = F.arg_end(); I != E; ++I) {
    AttrIdx++;
    const Argument &Arg = *I;
    Type *ATy = Arg.getType();
    unsigned Width = 0;
    if (ATy->isIntegerTy())
      Width = ATy->getIntegerBitWidth();
    else if (ATy->isPointerTy())
      Width = 32;
    // If pointer size is not set through target data, it will default to
    // Module::AnyPointerSize.
    if (Width == 0 || Width > 64)
      break;
    InPhysReg = getNextPhysReg(InPhysReg, Width);
    if (!InPhysReg)
      break;
    InVirtReg = getVirtRegFor(InPhysReg);
    if (!InVirtReg)
      continue;
    AttributeSet Attrs = F.getAttributes();
    if (Attrs.hasAttribute(AttrIdx, Attribute::SExt))
      VRX.insert(std::make_pair(InVirtReg, ExtType(ExtType::SExt, Width)));
    else if (Attrs.hasAttribute(AttrIdx, Attribute::ZExt))
      VRX.insert(std::make_pair(InVirtReg, ExtType(ExtType::ZExt, Width)));
  }
}
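
// Illustration (not in the original sources): for a 64-bit double register,
// e.g. D0 = R1:R0, the function below maps the low subregister to the bit
// range [0..RW-1] and the high one to [RW..2*RW-1], where RW = 32 is the
// subregister width:
//   mask(D0, subreg_loreg) == BT::BitMask(0, 31)   // the bits of R0
//   mask(D0, subreg_hireg) == BT::BitMask(32, 63)  // the bits of R1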

BT::BitMask HexagonEvaluator::mask(unsigned Reg, unsigned Sub) const {
  if (Sub == 0)
    return MachineEvaluator::mask(Reg, 0);
  using namespace Hexagon;
  const TargetRegisterClass *RC = MRI.getRegClass(Reg);
  unsigned ID = RC->getID();
  uint16_t RW = getRegBitWidth(RegisterRef(Reg, Sub));
  switch (ID) {
    case DoubleRegsRegClassID:
    case VecDblRegsRegClassID:
    case VecDblRegs128BRegClassID:
      return (Sub == subreg_loreg) ? BT::BitMask(0, RW-1)
                                   : BT::BitMask(RW, 2*RW-1);
    default:
      break;
  }
#ifndef NDEBUG
  dbgs() << PrintReg(Reg, &TRI, Sub) << '\n';
#endif
  llvm_unreachable("Unexpected register/subregister");
}

namespace {
class RegisterRefs {
  std::vector<BT::RegisterRef> Vector;

public:
  RegisterRefs(const MachineInstr &MI) : Vector(MI.getNumOperands()) {
    for (unsigned i = 0, n = Vector.size(); i < n; ++i) {
      const MachineOperand &MO = MI.getOperand(i);
      if (MO.isReg())
        Vector[i] = BT::RegisterRef(MO);
      // For indices that don't correspond to registers, the entry will
      // remain constructed via the default constructor.
    }
  }

  size_t size() const { return Vector.size(); }
  const BT::RegisterRef &operator[](unsigned n) const {
    // The main purpose of this operator is to assert if n is out of range.
    assert(n < Vector.size());
    return Vector[n];
  }
};
}

bool HexagonEvaluator::evaluate(const MachineInstr &MI,
                                const CellMapType &Inputs,
                                CellMapType &Outputs) const {
  unsigned NumDefs = 0;

  // Basic correctness check: there should not be any defs with subregisters.
  for (unsigned i = 0, n = MI.getNumOperands(); i < n; ++i) {
    const MachineOperand &MO = MI.getOperand(i);
    if (!MO.isReg() || !MO.isDef())
      continue;
    NumDefs++;
    assert(MO.getSubReg() == 0);
  }

  if (NumDefs == 0)
    return false;

  if (MI.mayLoad())
    return evaluateLoad(MI, Inputs, Outputs);

  // Check COPY instructions that copy formal parameters into virtual
  // registers. Such parameters can be sign- or zero-extended at the
  // call site, and we should take advantage of this knowledge. The MRI
  // keeps a list of pairs of live-in physical and virtual registers,
  // which provides information about which virtual registers will hold
  // the argument values. The function will still contain instructions
  // defining those virtual registers, and in practice those are COPY
  // instructions from a physical to a virtual register. In such cases,
  // applying the argument extension to the virtual register can be seen
  // as simply mirroring the extension that had already been applied to
  // the physical register at the call site. If the defining instruction
  // was not a COPY, it would not be clear how to mirror that extension
  // on the callee's side. For that reason, only check COPY instructions
  // for potential extensions.
  if (MI.isCopy()) {
    if (evaluateFormalCopy(MI, Inputs, Outputs))
      return true;
  }

  // Beyond this point, if any operand is a global, skip that instruction.
  // The reason is that certain instructions that can take an immediate
  // operand can also have a global symbol in that operand. To avoid
  // checking what kind of operand a given instruction has individually
  // for each instruction, do it here. Global symbols as operands
  // generally do not provide any useful information.
  for (unsigned i = 0, n = MI.getNumOperands(); i < n; ++i) {
    const MachineOperand &MO = MI.getOperand(i);
    if (MO.isGlobal() || MO.isBlockAddress() || MO.isSymbol() || MO.isJTI() ||
        MO.isCPI())
      return false;
  }

  RegisterRefs Reg(MI);
  unsigned Opc = MI.getOpcode();
  using namespace Hexagon;
#define op(i) MI.getOperand(i)
#define rc(i) RegisterCell::ref(getCell(Reg[i], Inputs))
#define im(i) MI.getOperand(i).getImm()
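
// Illustration (not in the original sources): with these shorthands, the
// evaluation of "Rd = add(Rs, #s16)" (A2_addi) below reads as
//   rr0(eADD(rc(1), eIMM(im(2), W0)), Outputs);
// where rc(1) is the bit cell of the Rs operand and im(2) is the immediate.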

  // If the instruction has no register operands, skip it.
  if (Reg.size() == 0)
    return false;

  // Record the result for the register in operand 0.
  auto rr0 = [this,Reg] (const BT::RegisterCell &Val, CellMapType &Outputs)
        -> bool {
    putCell(Reg[0], Val, Outputs);
    return true;
  };
  // Get the cell corresponding to the N-th operand.
  auto cop = [this, &Reg, &MI, &Inputs](unsigned N,
                                        uint16_t W) -> BT::RegisterCell {
    const MachineOperand &Op = MI.getOperand(N);
    if (Op.isImm())
      return eIMM(Op.getImm(), W);
    if (!Op.isReg())
      return RegisterCell::self(0, W);
    assert(getRegBitWidth(Reg[N]) == W && "Register width mismatch");
    return rc(N);
  };
  // Extract the RW low bits of the cell.
  auto lo = [this] (const BT::RegisterCell &RC, uint16_t RW)
        -> BT::RegisterCell {
    assert(RW <= RC.width());
    return eXTR(RC, 0, RW);
  };
  // Extract the RW high bits of the cell.
  auto hi = [this] (const BT::RegisterCell &RC, uint16_t RW)
        -> BT::RegisterCell {
    uint16_t W = RC.width();
    assert(RW <= W);
    return eXTR(RC, W-RW, W);
  };
  // Extract the N-th halfword (counting from the least significant position).
  auto half = [this] (const BT::RegisterCell &RC, unsigned N)
        -> BT::RegisterCell {
    assert(N*16+16 <= RC.width());
    return eXTR(RC, N*16, N*16+16);
  };
  // Shuffle bits (pick even/odd from the cells and merge into the result).
  auto shuffle = [this] (const BT::RegisterCell &Rs, const BT::RegisterCell &Rt,
                         uint16_t BW, bool Odd) -> BT::RegisterCell {
    uint16_t I = Odd, Ws = Rs.width();
    assert(Ws == Rt.width());
    RegisterCell RC = eXTR(Rt, I*BW, I*BW+BW).cat(eXTR(Rs, I*BW, I*BW+BW));
    I += 2;
    while (I*BW < Ws) {
      RC.cat(eXTR(Rt, I*BW, I*BW+BW)).cat(eXTR(Rs, I*BW, I*BW+BW));
      I += 2;
    }
    return RC;
  };
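
  // Illustration (not in the original sources): for S2_shuffeb (BW = 8,
  // Odd = false), the concatenation above produces, from the least
  // significant byte up:
  //   Rt.b0, Rs.b0, Rt.b2, Rs.b2, Rt.b4, Rs.b4, Rt.b6, Rs.b6
  // i.e. the even bytes of both sources, interleaved.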

  // The bitwidth of the 0th operand. In most (if not all) of the
  // instructions below, the 0th operand is the defined register.
  // Pre-compute the bitwidth here, because it is needed in many of the
  // cases below.
  uint16_t W0 = (Reg[0].Reg != 0) ? getRegBitWidth(Reg[0]) : 0;

  switch (Opc) {
    // Transfer immediate:

    case A2_tfrsi:
    case A2_tfrpi:
    case CONST32:
    case CONST32_Float_Real:
    case CONST32_Int_Real:
    case CONST64_Float_Real:
    case CONST64_Int_Real:
      return rr0(eIMM(im(1), W0), Outputs);
    case TFR_PdFalse:
      return rr0(RegisterCell(W0).fill(0, W0, BT::BitValue::Zero), Outputs);
    case TFR_PdTrue:
      return rr0(RegisterCell(W0).fill(0, W0, BT::BitValue::One), Outputs);
    case TFR_FI: {
      int FI = op(1).getIndex();
      int Off = op(2).getImm();
      unsigned A = MFI.getObjectAlignment(FI) + std::abs(Off);
      unsigned L = Log2_32(A);
      RegisterCell RC = RegisterCell::self(Reg[0].Reg, W0);
      RC.fill(0, L, BT::BitValue::Zero);
      return rr0(RC, Outputs);
    }

    // Transfer register:

    case A2_tfr:
    case A2_tfrp:
    case C2_pxfer_map:
      return rr0(rc(1), Outputs);
    case C2_tfrpr: {
      uint16_t RW = W0;
      uint16_t PW = 8; // XXX Pred size: getRegBitWidth(Reg[1]);
      assert(PW <= RW);
      RegisterCell PC = eXTR(rc(1), 0, PW);
      RegisterCell RC = RegisterCell(RW).insert(PC, BT::BitMask(0, PW-1));
      RC.fill(PW, RW, BT::BitValue::Zero);
      return rr0(RC, Outputs);
    }
    case C2_tfrrp: {
      RegisterCell RC = RegisterCell::self(Reg[0].Reg, W0);
      W0 = 8; // XXX Pred size
      return rr0(eINS(RC, eXTR(rc(1), 0, W0), 0), Outputs);
    }

    // Arithmetic:

    case A2_abs:
    case A2_absp:
      // TODO
      break;

    case A2_addsp: {
      uint16_t W1 = getRegBitWidth(Reg[1]);
      assert(W0 == 64 && W1 == 32);
      RegisterCell CW = RegisterCell(W0).insert(rc(1), BT::BitMask(0, W1-1));
      RegisterCell RC = eADD(eSXT(CW, W1), rc(2));
      return rr0(RC, Outputs);
    }
    case A2_add:
    case A2_addp:
      return rr0(eADD(rc(1), rc(2)), Outputs);
    case A2_addi:
      return rr0(eADD(rc(1), eIMM(im(2), W0)), Outputs);
    case S4_addi_asl_ri: {
      RegisterCell RC = eADD(eIMM(im(1), W0), eASL(rc(2), im(3)));
      return rr0(RC, Outputs);
    }
    case S4_addi_lsr_ri: {
      RegisterCell RC = eADD(eIMM(im(1), W0), eLSR(rc(2), im(3)));
      return rr0(RC, Outputs);
    }
    case S4_addaddi: {
      RegisterCell RC = eADD(rc(1), eADD(rc(2), eIMM(im(3), W0)));
      return rr0(RC, Outputs);
    }
    case M4_mpyri_addi: {
      RegisterCell M = eMLS(rc(2), eIMM(im(3), W0));
      RegisterCell RC = eADD(eIMM(im(1), W0), lo(M, W0));
      return rr0(RC, Outputs);
    }
    case M4_mpyrr_addi: {
      RegisterCell M = eMLS(rc(2), rc(3));
      RegisterCell RC = eADD(eIMM(im(1), W0), lo(M, W0));
      return rr0(RC, Outputs);
    }
    case M4_mpyri_addr_u2: {
      RegisterCell M = eMLS(eIMM(im(2), W0), rc(3));
      RegisterCell RC = eADD(rc(1), lo(M, W0));
      return rr0(RC, Outputs);
    }
    case M4_mpyri_addr: {
      RegisterCell M = eMLS(rc(2), eIMM(im(3), W0));
      RegisterCell RC = eADD(rc(1), lo(M, W0));
      return rr0(RC, Outputs);
    }
    case M4_mpyrr_addr: {
      RegisterCell M = eMLS(rc(2), rc(3));
      RegisterCell RC = eADD(rc(1), lo(M, W0));
      return rr0(RC, Outputs);
    }
    case S4_subaddi: {
      RegisterCell RC = eADD(rc(1), eSUB(eIMM(im(2), W0), rc(3)));
      return rr0(RC, Outputs);
    }
    case M2_accii: {
      RegisterCell RC = eADD(rc(1), eADD(rc(2), eIMM(im(3), W0)));
      return rr0(RC, Outputs);
    }
    case M2_acci: {
      RegisterCell RC = eADD(rc(1), eADD(rc(2), rc(3)));
      return rr0(RC, Outputs);
    }
    case M2_subacc: {
      RegisterCell RC = eADD(rc(1), eSUB(rc(2), rc(3)));
      return rr0(RC, Outputs);
    }
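
    // Illustration (not in the original sources): S2_addasl_rrri computes
    // "Rd = add(Rt, asl(Rs, #u3))", which the case below models as
    //   eADD(rc(1), eASL(rc(2), im(3)))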
    case S2_addasl_rrri: {
      RegisterCell RC = eADD(rc(1), eASL(rc(2), im(3)));
      return rr0(RC, Outputs);
    }
    case C4_addipc: {
      RegisterCell RPC = RegisterCell::self(Reg[0].Reg, W0);
      RPC.fill(0, 2, BT::BitValue::Zero);
      return rr0(eADD(RPC, eIMM(im(2), W0)), Outputs);
    }
    case A2_sub:
    case A2_subp:
      return rr0(eSUB(rc(1), rc(2)), Outputs);
    case A2_subri:
      return rr0(eSUB(eIMM(im(1), W0), rc(2)), Outputs);
    case S4_subi_asl_ri: {
      RegisterCell RC = eSUB(eIMM(im(1), W0), eASL(rc(2), im(3)));
      return rr0(RC, Outputs);
    }
    case S4_subi_lsr_ri: {
      RegisterCell RC = eSUB(eIMM(im(1), W0), eLSR(rc(2), im(3)));
      return rr0(RC, Outputs);
    }
    case M2_naccii: {
      RegisterCell RC = eSUB(rc(1), eADD(rc(2), eIMM(im(3), W0)));
      return rr0(RC, Outputs);
    }
    case M2_nacci: {
      RegisterCell RC = eSUB(rc(1), eADD(rc(2), rc(3)));
      return rr0(RC, Outputs);
    }
    // 32-bit negation is done by "Rd = A2_subri 0, Rs".
    case A2_negp:
      return rr0(eSUB(eIMM(0, W0), rc(1)), Outputs);

    case M2_mpy_up: {
      RegisterCell M = eMLS(rc(1), rc(2));
      return rr0(hi(M, W0), Outputs);
    }
    case M2_dpmpyss_s0:
      return rr0(eMLS(rc(1), rc(2)), Outputs);
    case M2_dpmpyss_acc_s0:
      return rr0(eADD(rc(1), eMLS(rc(2), rc(3))), Outputs);
    case M2_dpmpyss_nac_s0:
      return rr0(eSUB(rc(1), eMLS(rc(2), rc(3))), Outputs);
    case M2_mpyi: {
      RegisterCell M = eMLS(rc(1), rc(2));
      return rr0(lo(M, W0), Outputs);
    }
    case M2_macsip: {
      RegisterCell M = eMLS(rc(2), eIMM(im(3), W0));
      RegisterCell RC = eADD(rc(1), lo(M, W0));
      return rr0(RC, Outputs);
    }
    case M2_macsin: {
      RegisterCell M = eMLS(rc(2), eIMM(im(3), W0));
      RegisterCell RC = eSUB(rc(1), lo(M, W0));
      return rr0(RC, Outputs);
    }
    case M2_maci: {
      RegisterCell M = eMLS(rc(2), rc(3));
      RegisterCell RC = eADD(rc(1), lo(M, W0));
      return rr0(RC, Outputs);
    }
    case M2_mpysmi: {
      RegisterCell M = eMLS(rc(1), eIMM(im(2), W0));
      return rr0(lo(M, 32), Outputs);
    }
    case M2_mpysin: {
      RegisterCell M = eMLS(rc(1), eIMM(-im(2), W0));
      return rr0(lo(M, 32), Outputs);
    }
    case M2_mpysip: {
      RegisterCell M = eMLS(rc(1), eIMM(im(2), W0));
      return rr0(lo(M, 32), Outputs);
    }
    case M2_mpyu_up: {
      RegisterCell M = eMLU(rc(1), rc(2));
      return rr0(hi(M, W0), Outputs);
    }
    case M2_dpmpyuu_s0:
      return rr0(eMLU(rc(1), rc(2)), Outputs);
    case M2_dpmpyuu_acc_s0:
      return rr0(eADD(rc(1), eMLU(rc(2), rc(3))), Outputs);
    case M2_dpmpyuu_nac_s0:
      return rr0(eSUB(rc(1), eMLU(rc(2), rc(3))), Outputs);
    //case M2_mpysu_up:
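
    // Illustration (not in the original sources): eMLS/eMLU model signed
    // and unsigned multiplies with a double-width product. M2_mpyi above
    // keeps the low W0 bits of the product (lo(M, W0)); M2_mpy_up keeps
    // the high bits (hi(M, W0)).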

    // Logical/bitwise:

    case A2_andir:
      return rr0(eAND(rc(1), eIMM(im(2), W0)), Outputs);
    case A2_and:
    case A2_andp:
      return rr0(eAND(rc(1), rc(2)), Outputs);
    case A4_andn:
    case A4_andnp:
      return rr0(eAND(rc(1), eNOT(rc(2))), Outputs);
    case S4_andi_asl_ri: {
      RegisterCell RC = eAND(eIMM(im(1), W0), eASL(rc(2), im(3)));
      return rr0(RC, Outputs);
    }
    case S4_andi_lsr_ri: {
      RegisterCell RC = eAND(eIMM(im(1), W0), eLSR(rc(2), im(3)));
      return rr0(RC, Outputs);
    }
    case M4_and_and:
      return rr0(eAND(rc(1), eAND(rc(2), rc(3))), Outputs);
    case M4_and_andn:
      return rr0(eAND(rc(1), eAND(rc(2), eNOT(rc(3)))), Outputs);
    case M4_and_or:
      return rr0(eAND(rc(1), eORL(rc(2), rc(3))), Outputs);
    case M4_and_xor:
      return rr0(eAND(rc(1), eXOR(rc(2), rc(3))), Outputs);
    case A2_orir:
      return rr0(eORL(rc(1), eIMM(im(2), W0)), Outputs);
    case A2_or:
    case A2_orp:
      return rr0(eORL(rc(1), rc(2)), Outputs);
    case A4_orn:
    case A4_ornp:
      return rr0(eORL(rc(1), eNOT(rc(2))), Outputs);
    case S4_ori_asl_ri: {
      RegisterCell RC = eORL(eIMM(im(1), W0), eASL(rc(2), im(3)));
      return rr0(RC, Outputs);
    }
    case S4_ori_lsr_ri: {
      RegisterCell RC = eORL(eIMM(im(1), W0), eLSR(rc(2), im(3)));
      return rr0(RC, Outputs);
    }
    case M4_or_and:
      return rr0(eORL(rc(1), eAND(rc(2), rc(3))), Outputs);
    case M4_or_andn:
      return rr0(eORL(rc(1), eAND(rc(2), eNOT(rc(3)))), Outputs);
    case S4_or_andi:
    case S4_or_andix: {
      RegisterCell RC = eORL(rc(1), eAND(rc(2), eIMM(im(3), W0)));
      return rr0(RC, Outputs);
    }
    case S4_or_ori: {
      RegisterCell RC = eORL(rc(1), eORL(rc(2), eIMM(im(3), W0)));
      return rr0(RC, Outputs);
    }
    case M4_or_or:
      return rr0(eORL(rc(1), eORL(rc(2), rc(3))), Outputs);
    case M4_or_xor:
      return rr0(eORL(rc(1), eXOR(rc(2), rc(3))), Outputs);
    case A2_xor:
    case A2_xorp:
      return rr0(eXOR(rc(1), rc(2)), Outputs);
    case M4_xor_and:
      return rr0(eXOR(rc(1), eAND(rc(2), rc(3))), Outputs);
    case M4_xor_andn:
      return rr0(eXOR(rc(1), eAND(rc(2), eNOT(rc(3)))), Outputs);
    case M4_xor_or:
      return rr0(eXOR(rc(1), eORL(rc(2), rc(3))), Outputs);
    case M4_xor_xacc:
      return rr0(eXOR(rc(1), eXOR(rc(2), rc(3))), Outputs);
    case A2_not:
    case A2_notp:
      return rr0(eNOT(rc(1)), Outputs);

    case S2_asl_i_r:
    case S2_asl_i_p:
      return rr0(eASL(rc(1), im(2)), Outputs);
    case A2_aslh:
      return rr0(eASL(rc(1), 16), Outputs);
    case S2_asl_i_r_acc:
    case S2_asl_i_p_acc:
      return rr0(eADD(rc(1), eASL(rc(2), im(3))), Outputs);
    case S2_asl_i_r_nac:
    case S2_asl_i_p_nac:
      return rr0(eSUB(rc(1), eASL(rc(2), im(3))), Outputs);
    case S2_asl_i_r_and:
    case S2_asl_i_p_and:
      return rr0(eAND(rc(1), eASL(rc(2), im(3))), Outputs);
    case S2_asl_i_r_or:
    case S2_asl_i_p_or:
      return rr0(eORL(rc(1), eASL(rc(2), im(3))), Outputs);
    case S2_asl_i_r_xacc:
    case S2_asl_i_p_xacc:
      return rr0(eXOR(rc(1), eASL(rc(2), im(3))), Outputs);
    case S2_asl_i_vh:
    case S2_asl_i_vw:
      // TODO
      break;

    case S2_asr_i_r:
    case S2_asr_i_p:
      return rr0(eASR(rc(1), im(2)), Outputs);
    case A2_asrh:
      return rr0(eASR(rc(1), 16), Outputs);
    case S2_asr_i_r_acc:
    case S2_asr_i_p_acc:
      return rr0(eADD(rc(1), eASR(rc(2), im(3))), Outputs);
    case S2_asr_i_r_nac:
    case S2_asr_i_p_nac:
      return rr0(eSUB(rc(1), eASR(rc(2), im(3))), Outputs);
    case S2_asr_i_r_and:
    case S2_asr_i_p_and:
      return rr0(eAND(rc(1), eASR(rc(2), im(3))), Outputs);
    case S2_asr_i_r_or:
    case S2_asr_i_p_or:
      return rr0(eORL(rc(1), eASR(rc(2), im(3))), Outputs);
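
    // Illustration (not in the original sources): in the rounding shifts
    // below, the eADD(..., eIMM(1, 2*W0)) followed by a final shift right
    // by one rounds the last halving upward instead of truncating it.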
    case S2_asr_i_r_rnd: {
      // The input is first sign-extended to 64 bits, then the output
      // is truncated back to 32 bits.
      assert(W0 == 32);
      RegisterCell XC = eSXT(rc(1).cat(eIMM(0, W0)), W0);
      RegisterCell RC = eASR(eADD(eASR(XC, im(2)), eIMM(1, 2*W0)), 1);
      return rr0(eXTR(RC, 0, W0), Outputs);
    }
    case S2_asr_i_r_rnd_goodsyntax: {
      int64_t S = im(2);
      if (S == 0)
        return rr0(rc(1), Outputs);
      // Result: S2_asr_i_r_rnd Rs, u5-1
      RegisterCell XC = eSXT(rc(1).cat(eIMM(0, W0)), W0);
      RegisterCell RC = eLSR(eADD(eASR(XC, S-1), eIMM(1, 2*W0)), 1);
      return rr0(eXTR(RC, 0, W0), Outputs);
    }
    case S2_asr_r_vh:
    case S2_asr_i_vw:
    case S2_asr_i_svw_trun:
      // TODO
      break;

    case S2_lsr_i_r:
    case S2_lsr_i_p:
      return rr0(eLSR(rc(1), im(2)), Outputs);
    case S2_lsr_i_r_acc:
    case S2_lsr_i_p_acc:
      return rr0(eADD(rc(1), eLSR(rc(2), im(3))), Outputs);
    case S2_lsr_i_r_nac:
    case S2_lsr_i_p_nac:
      return rr0(eSUB(rc(1), eLSR(rc(2), im(3))), Outputs);
    case S2_lsr_i_r_and:
    case S2_lsr_i_p_and:
      return rr0(eAND(rc(1), eLSR(rc(2), im(3))), Outputs);
    case S2_lsr_i_r_or:
    case S2_lsr_i_p_or:
      return rr0(eORL(rc(1), eLSR(rc(2), im(3))), Outputs);
    case S2_lsr_i_r_xacc:
    case S2_lsr_i_p_xacc:
      return rr0(eXOR(rc(1), eLSR(rc(2), im(3))), Outputs);

    case S2_clrbit_i: {
      RegisterCell RC = rc(1);
      RC[im(2)] = BT::BitValue::Zero;
      return rr0(RC, Outputs);
    }
    case S2_setbit_i: {
      RegisterCell RC = rc(1);
      RC[im(2)] = BT::BitValue::One;
      return rr0(RC, Outputs);
    }
    case S2_togglebit_i: {
      RegisterCell RC = rc(1);
      uint16_t BX = im(2);
      RC[BX] = RC[BX].is(0) ? BT::BitValue::One
                            : RC[BX].is(1) ? BT::BitValue::Zero
                                           : BT::BitValue::self();
      return rr0(RC, Outputs);
    }

    case A4_bitspliti: {
      uint16_t W1 = getRegBitWidth(Reg[1]);
      uint16_t BX = im(2);
      // Res.uw[1] = Rs[bx+1:], Res.uw[0] = Rs[0:bx]
      const BT::BitValue Zero = BT::BitValue::Zero;
      RegisterCell RZ = RegisterCell(W0).fill(BX, W1, Zero)
                                        .fill(W1+(W1-BX), W0, Zero);
      RegisterCell BF1 = eXTR(rc(1), 0, BX), BF2 = eXTR(rc(1), BX, W1);
      RegisterCell RC = eINS(eINS(RZ, BF1, 0), BF2, W1);
      return rr0(RC, Outputs);
    }
    case S4_extract:
    case S4_extractp:
    case S2_extractu:
    case S2_extractup: {
      uint16_t Wd = im(2), Of = im(3);
      assert(Wd <= W0);
      if (Wd == 0)
        return rr0(eIMM(0, W0), Outputs);
      // If the width extends beyond the register size, pad the register
      // with 0 bits.
      RegisterCell Pad = (Wd+Of > W0) ? rc(1).cat(eIMM(0, Wd+Of-W0)) : rc(1);
      RegisterCell Ext = eXTR(Pad, Of, Wd+Of);
      // Ext is short; extend it with 0s or the sign bit.
      RegisterCell RC = RegisterCell(W0).insert(Ext, BT::BitMask(0, Wd-1));
      if (Opc == S2_extractu || Opc == S2_extractup)
        return rr0(eZXT(RC, Wd), Outputs);
      return rr0(eSXT(RC, Wd), Outputs);
    }
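
    // Illustration (not in the original sources): "S2_extractu Rs, #8, #4"
    // takes bits [4..11] of Rs and zero-extends them, so for Rs = 0xABCD
    // the extracted field is 0xBC. S2_insert below is the inverse: it
    // writes the low Wd bits of the source into bits [Of..Of+Wd-1] of the
    // destination.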
    case S2_insert:
    case S2_insertp: {
      uint16_t Wd = im(3), Of = im(4);
      assert(Wd < W0 && Of < W0);
      // If Wd+Of exceeds W0, the inserted bits are truncated.
      if (Wd+Of > W0)
        Wd = W0-Of;
      if (Wd == 0)
        return rr0(rc(1), Outputs);
      return rr0(eINS(rc(1), eXTR(rc(2), 0, Wd), Of), Outputs);
    }

    // Bit permutations:

    case A2_combineii:
    case A4_combineii:
    case A4_combineir:
    case A4_combineri:
    case A2_combinew:
      assert(W0 % 2 == 0);
      return rr0(cop(2, W0/2).cat(cop(1, W0/2)), Outputs);
    case A2_combine_ll:
    case A2_combine_lh:
    case A2_combine_hl:
    case A2_combine_hh: {
      assert(W0 == 32);
      assert(getRegBitWidth(Reg[1]) == 32 && getRegBitWidth(Reg[2]) == 32);
      // The low half of the output is 0 for _ll and _hl, 1 otherwise:
      unsigned LoH = !(Opc == A2_combine_ll || Opc == A2_combine_hl);
      // The high half of the output is 0 for _ll and _lh, 1 otherwise:
      unsigned HiH = !(Opc == A2_combine_ll || Opc == A2_combine_lh);
      RegisterCell R1 = rc(1);
      RegisterCell R2 = rc(2);
      RegisterCell RC = half(R2, LoH).cat(half(R1, HiH));
      return rr0(RC, Outputs);
    }
    case S2_packhl: {
      assert(W0 == 64);
      assert(getRegBitWidth(Reg[1]) == 32 && getRegBitWidth(Reg[2]) == 32);
      RegisterCell R1 = rc(1);
      RegisterCell R2 = rc(2);
      RegisterCell RC = half(R2, 0).cat(half(R1, 0)).cat(half(R2, 1))
                                   .cat(half(R1, 1));
      return rr0(RC, Outputs);
    }
    case S2_shuffeb: {
      RegisterCell RC = shuffle(rc(1), rc(2), 8, false);
      return rr0(RC, Outputs);
    }
    case S2_shuffeh: {
      RegisterCell RC = shuffle(rc(1), rc(2), 16, false);
      return rr0(RC, Outputs);
    }
    case S2_shuffob: {
      RegisterCell RC = shuffle(rc(1), rc(2), 8, true);
      return rr0(RC, Outputs);
    }
    case S2_shuffoh: {
      RegisterCell RC = shuffle(rc(1), rc(2), 16, true);
      return rr0(RC, Outputs);
    }
    case C2_mask: {
      uint16_t WR = W0;
      uint16_t WP = 8; // XXX Pred size: getRegBitWidth(Reg[1]);
      assert(WR == 64 && WP == 8);
      RegisterCell R1 = rc(1);
      RegisterCell RC(WR);
      for (uint16_t i = 0; i < WP; ++i) {
        const BT::BitValue &V = R1[i];
        BT::BitValue F = (V.is(0) || V.is(1)) ? V : BT::BitValue::self();
        RC.fill(i*8, i*8+8, F);
      }
      return rr0(RC, Outputs);
    }

    // Mux:

    case C2_muxii:
    case C2_muxir:
    case C2_muxri:
    case C2_mux: {
      BT::BitValue PC0 = rc(1)[0];
      RegisterCell R2 = cop(2, W0);
      RegisterCell R3 = cop(3, W0);
      if (PC0.is(0) || PC0.is(1))
        return rr0(RegisterCell::ref(PC0 ? R2 : R3), Outputs);
      R2.meet(R3, Reg[0].Reg);
      return rr0(R2, Outputs);
    }
    case C2_vmux:
      // TODO
      break;

    // Sign- and zero-extension:

    case A2_sxtb:
      return rr0(eSXT(rc(1), 8), Outputs);
    case A2_sxth:
      return rr0(eSXT(rc(1), 16), Outputs);
    case A2_sxtw: {
      uint16_t W1 = getRegBitWidth(Reg[1]);
      assert(W0 == 64 && W1 == 32);
      RegisterCell RC = eSXT(rc(1).cat(eIMM(0, W1)), W1);
      return rr0(RC, Outputs);
    }
    case A2_zxtb:
      return rr0(eZXT(rc(1), 8), Outputs);
    case A2_zxth:
      return rr0(eZXT(rc(1), 16), Outputs);
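
    // Illustration (not in the original sources): in A2_sxtw above, the
    // 32-bit source cell is first padded to 64 bits with zeros
    // (rc(1).cat(eIMM(0, W1))) and then sign-extended from bit W1-1,
    // which models "Rdd = sxtw(Rs)".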

    // Bit count:

    case S2_cl0:
    case S2_cl0p:
      // Always produce a 32-bit result.
      return rr0(eCLB(rc(1), 0/*bit*/, 32), Outputs);
    case S2_cl1:
    case S2_cl1p:
      return rr0(eCLB(rc(1), 1/*bit*/, 32), Outputs);
    case S2_clb:
    case S2_clbp: {
      uint16_t W1 = getRegBitWidth(Reg[1]);
      RegisterCell R1 = rc(1);
      BT::BitValue TV = R1[W1-1];
      if (TV.is(0) || TV.is(1))
        return rr0(eCLB(R1, TV, 32), Outputs);
      break;
    }
    case S2_ct0:
    case S2_ct0p:
      return rr0(eCTB(rc(1), 0/*bit*/, 32), Outputs);
    case S2_ct1:
    case S2_ct1p:
      return rr0(eCTB(rc(1), 1/*bit*/, 32), Outputs);
    case S5_popcountp:
      // TODO
      break;

    case C2_all8: {
      RegisterCell P1 = rc(1);
      bool Has0 = false, All1 = true;
      for (uint16_t i = 0; i < 8/*XXX*/; ++i) {
        if (!P1[i].is(1))
          All1 = false;
        if (!P1[i].is(0))
          continue;
        Has0 = true;
        break;
      }
      if (!Has0 && !All1)
        break;
      RegisterCell RC(W0);
      RC.fill(0, W0, (All1 ? BT::BitValue::One : BT::BitValue::Zero));
      return rr0(RC, Outputs);
    }
    case C2_any8: {
      RegisterCell P1 = rc(1);
      bool Has1 = false, All0 = true;
      for (uint16_t i = 0; i < 8/*XXX*/; ++i) {
        if (!P1[i].is(0))
          All0 = false;
        if (!P1[i].is(1))
          continue;
        Has1 = true;
        break;
      }
      if (!Has1 && !All0)
        break;
      RegisterCell RC(W0);
      RC.fill(0, W0, (Has1 ? BT::BitValue::One : BT::BitValue::Zero));
      return rr0(RC, Outputs);
    }
    case C2_and:
      return rr0(eAND(rc(1), rc(2)), Outputs);
    case C2_andn:
      return rr0(eAND(rc(1), eNOT(rc(2))), Outputs);
    case C2_not:
      return rr0(eNOT(rc(1)), Outputs);
    case C2_or:
      return rr0(eORL(rc(1), rc(2)), Outputs);
    case C2_orn:
      return rr0(eORL(rc(1), eNOT(rc(2))), Outputs);
    case C2_xor:
      return rr0(eXOR(rc(1), rc(2)), Outputs);
    case C4_and_and:
      return rr0(eAND(rc(1), eAND(rc(2), rc(3))), Outputs);
    case C4_and_andn:
      return rr0(eAND(rc(1), eAND(rc(2), eNOT(rc(3)))), Outputs);
    case C4_and_or:
      return rr0(eAND(rc(1), eORL(rc(2), rc(3))), Outputs);
    case C4_and_orn:
      return rr0(eAND(rc(1), eORL(rc(2), eNOT(rc(3)))), Outputs);
    case C4_or_and:
      return rr0(eORL(rc(1), eAND(rc(2), rc(3))), Outputs);
    case C4_or_andn:
      return rr0(eORL(rc(1), eAND(rc(2), eNOT(rc(3)))), Outputs);
    case C4_or_or:
      return rr0(eORL(rc(1), eORL(rc(2), rc(3))), Outputs);
    case C4_or_orn:
      return rr0(eORL(rc(1), eORL(rc(2), eNOT(rc(3)))), Outputs);
    case C2_bitsclr:
    case C2_bitsclri:
    case C2_bitsset:
    case C4_nbitsclr:
    case C4_nbitsclri:
    case C4_nbitsset:
      // TODO
      break;
    case S2_tstbit_i:
    case S4_ntstbit_i: {
      BT::BitValue V = rc(1)[im(2)];
      if (V.is(0) || V.is(1)) {
        // If the instruction is S2_tstbit_i, test for 1, otherwise test for 0.
        bool TV = (Opc == S2_tstbit_i);
        BT::BitValue F = V.is(TV) ? BT::BitValue::One : BT::BitValue::Zero;
        return rr0(RegisterCell(W0).fill(0, W0, F), Outputs);
      }
      break;
    }

    default:
      return MachineEvaluator::evaluate(MI, Inputs, Outputs);
  }
#undef im
#undef rc
#undef op
  return false;
}
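
// Illustration (not in the original sources): the branch evaluator below
// reports its conclusions through two outputs: Targets collects the branch
// targets known to be takeable, and FallsThru says whether control may
// continue into the layout successor.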

bool HexagonEvaluator::evaluate(const MachineInstr &BI,
                                const CellMapType &Inputs,
                                BranchTargetList &Targets,
                                bool &FallsThru) const {
  // We need to evaluate one branch at a time. TII::AnalyzeBranch checks
  // all the branches in a basic block at once, so we cannot use it.
  unsigned Opc = BI.getOpcode();
  bool SimpleBranch = false;
  bool Negated = false;
  switch (Opc) {
    case Hexagon::J2_jumpf:
    case Hexagon::J2_jumpfnew:
    case Hexagon::J2_jumpfnewpt:
      Negated = true;
      // Fall through to the positive-predicate cases.
    case Hexagon::J2_jumpt:
    case Hexagon::J2_jumptnew:
    case Hexagon::J2_jumptnewpt:
      // Simple branch: if([!]Pn) jump ...
      // i.e. Op0 = predicate, Op1 = branch target.
      SimpleBranch = true;
      break;
    case Hexagon::J2_jump:
      Targets.insert(BI.getOperand(0).getMBB());
      FallsThru = false;
      return true;
    default:
      // If the branch is of unknown type, assume that all successors are
      // executable.
      return false;
  }

  if (!SimpleBranch)
    return false;

  // BI is a conditional branch if we got here.
  RegisterRef PR = BI.getOperand(0);
  RegisterCell PC = getCell(PR, Inputs);
  const BT::BitValue &Test = PC[0];

  // If the condition is neither true nor false, then it's unknown.
  if (!Test.is(0) && !Test.is(1))
    return false;

  // "Test.is(!Negated)" means "branch condition is true".
  if (!Test.is(!Negated)) {
    // The condition is known to be false.
    FallsThru = true;
    return true;
  }

  Targets.insert(BI.getOperand(1).getMBB());
  FallsThru = false;
  return true;
}

bool HexagonEvaluator::evaluateLoad(const MachineInstr &MI,
                                    const CellMapType &Inputs,
                                    CellMapType &Outputs) const {
  if (TII.isPredicated(MI))
    return false;
  assert(MI.mayLoad() && "A load that mayn't?");
  unsigned Opc = MI.getOpcode();

  uint16_t BitNum;
  bool SignEx;
  using namespace Hexagon;

  switch (Opc) {
    default:
      return false;

#if 0
    // memb_fifo
    case L2_loadalignb_pbr:
    case L2_loadalignb_pcr:
    case L2_loadalignb_pi:
    // memh_fifo
    case L2_loadalignh_pbr:
    case L2_loadalignh_pcr:
    case L2_loadalignh_pi:
    // membh
    case L2_loadbsw2_pbr:
    case L2_loadbsw2_pci:
    case L2_loadbsw2_pcr:
    case L2_loadbsw2_pi:
    case L2_loadbsw4_pbr:
    case L2_loadbsw4_pci:
    case L2_loadbsw4_pcr:
    case L2_loadbsw4_pi:
    // memubh
    case L2_loadbzw2_pbr:
    case L2_loadbzw2_pci:
    case L2_loadbzw2_pcr:
    case L2_loadbzw2_pi:
    case L2_loadbzw4_pbr:
    case L2_loadbzw4_pci:
    case L2_loadbzw4_pcr:
    case L2_loadbzw4_pi:
#endif

    case L2_loadrbgp:
    case L2_loadrb_io:
    case L2_loadrb_pbr:
    case L2_loadrb_pci:
    case L2_loadrb_pcr:
    case L2_loadrb_pi:
    case L4_loadrb_abs:
    case L4_loadrb_ap:
    case L4_loadrb_rr:
    case L4_loadrb_ur:
      BitNum = 8;
      SignEx = true;
      break;

    case L2_loadrubgp:
    case L2_loadrub_io:
    case L2_loadrub_pbr:
    case L2_loadrub_pci:
    case L2_loadrub_pcr:
    case L2_loadrub_pi:
    case L4_loadrub_abs:
    case L4_loadrub_ap:
    case L4_loadrub_rr:
    case L4_loadrub_ur:
      BitNum = 8;
      SignEx = false;
      break;

    case L2_loadrhgp:
    case L2_loadrh_io:
    case L2_loadrh_pbr:
    case L2_loadrh_pci:
    case L2_loadrh_pcr:
    case L2_loadrh_pi:
    case L4_loadrh_abs:
    case L4_loadrh_ap:
    case L4_loadrh_rr:
    case L4_loadrh_ur:
      BitNum = 16;
      SignEx = true;
      break;

    case L2_loadruhgp:
    case L2_loadruh_io:
    case L2_loadruh_pbr:
    case L2_loadruh_pci:
    case L2_loadruh_pcr:
    case L2_loadruh_pi:
    case L4_loadruh_rr:
    case L4_loadruh_abs:
    case L4_loadruh_ap:
    case L4_loadruh_ur:
      BitNum = 16;
      SignEx = false;
      break;
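
    // Note (not in the original sources): for the word and doubleword loads
    // below, BitNum typically equals the destination width, so the
    // extension loop later in this function has nothing left to do; SignEx
    // is set for consistency.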

    case L2_loadrigp:
    case L2_loadri_io:
    case L2_loadri_pbr:
    case L2_loadri_pci:
    case L2_loadri_pcr:
    case L2_loadri_pi:
    case L2_loadw_locked:
    case L4_loadri_abs:
    case L4_loadri_ap:
    case L4_loadri_rr:
    case L4_loadri_ur:
    case LDriw_pred:
      BitNum = 32;
      SignEx = true;
      break;

    case L2_loadrdgp:
    case L2_loadrd_io:
    case L2_loadrd_pbr:
    case L2_loadrd_pci:
    case L2_loadrd_pcr:
    case L2_loadrd_pi:
    case L4_loadd_locked:
    case L4_loadrd_abs:
    case L4_loadrd_ap:
    case L4_loadrd_rr:
    case L4_loadrd_ur:
      BitNum = 64;
      SignEx = true;
      break;
  }

  const MachineOperand &MD = MI.getOperand(0);
  assert(MD.isReg() && MD.isDef());
  RegisterRef RD = MD;

  uint16_t W = getRegBitWidth(RD);
  assert(W >= BitNum && BitNum > 0);
  RegisterCell Res(W);

  for (uint16_t i = 0; i < BitNum; ++i)
    Res[i] = BT::BitValue::self(BT::BitRef(RD.Reg, i));

  if (SignEx) {
    const BT::BitValue &Sign = Res[BitNum-1];
    for (uint16_t i = BitNum; i < W; ++i)
      Res[i] = BT::BitValue::ref(Sign);
  } else {
    for (uint16_t i = BitNum; i < W; ++i)
      Res[i] = BT::BitValue::Zero;
  }

  putCell(RD, Res, Outputs);
  return true;
}

bool HexagonEvaluator::evaluateFormalCopy(const MachineInstr &MI,
                                          const CellMapType &Inputs,
                                          CellMapType &Outputs) const {
  // If MI defines a formal parameter, but is not a copy (loads are handled
  // in evaluateLoad), then it's not clear what to do.
  assert(MI.isCopy());

  RegisterRef RD = MI.getOperand(0);
  RegisterRef RS = MI.getOperand(1);
  assert(RD.Sub == 0);
  if (!TargetRegisterInfo::isPhysicalRegister(RS.Reg))
    return false;
  RegExtMap::const_iterator F = VRX.find(RD.Reg);
  if (F == VRX.end())
    return false;

  uint16_t EW = F->second.Width;
  // Store RD's cell into the map. This will associate the cell with a
  // virtual register, and make zero-/sign-extends possible (otherwise we
  // would be extending "self" bit values, which will have no effect, since
  // "self" values cannot be references to anything).
  putCell(RD, getCell(RS, Inputs), Outputs);

  RegisterCell Res;
  // Read RD's cell from the outputs instead of RS's cell from the inputs:
  if (F->second.Type == ExtType::SExt)
    Res = eSXT(getCell(RD, Outputs), EW);
  else if (F->second.Type == ExtType::ZExt)
    Res = eZXT(getCell(RD, Outputs), EW);

  putCell(RD, Res, Outputs);
  return true;
}

unsigned HexagonEvaluator::getNextPhysReg(unsigned PReg, unsigned Width) const {
  using namespace Hexagon;
  bool Is64 = DoubleRegsRegClass.contains(PReg);
  assert(PReg == 0 || Is64 || IntRegsRegClass.contains(PReg));

  static const unsigned Phys32[] = { R0, R1, R2, R3, R4, R5 };
  static const unsigned Phys64[] = { D0, D1, D2 };
  const unsigned Num32 = sizeof(Phys32)/sizeof(unsigned);
  const unsigned Num64 = sizeof(Phys64)/sizeof(unsigned);

  // Return the first parameter register of the required width.
  if (PReg == 0)
    return (Width <= 32) ? Phys32[0] : Phys64[0];
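
  // Illustration (not in the original sources): 32-bit arguments go to
  // R0..R5 and 64-bit arguments to D0..D2 (where D1 = R3:R2); the index
  // arithmetic below keeps the two sequences in step. For example, after
  // R1, a 64-bit parameter lands in D1 and a 32-bit one in R2.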

  // Set Idx32, Idx64 in such a way that Idx+1 would give the index of the
  // next register.
  unsigned Idx32 = 0, Idx64 = 0;
  if (!Is64) {
    while (Idx32 < Num32) {
      if (Phys32[Idx32] == PReg)
        break;
      Idx32++;
    }
    Idx64 = Idx32/2;
  } else {
    while (Idx64 < Num64) {
      if (Phys64[Idx64] == PReg)
        break;
      Idx64++;
    }
    Idx32 = Idx64*2+1;
  }

  if (Width <= 32)
    return (Idx32+1 < Num32) ? Phys32[Idx32+1] : 0;
  return (Idx64+1 < Num64) ? Phys64[Idx64+1] : 0;
}

unsigned HexagonEvaluator::getVirtRegFor(unsigned PReg) const {
  typedef MachineRegisterInfo::livein_iterator iterator;
  for (iterator I = MRI.livein_begin(), E = MRI.livein_end(); I != E; ++I) {
    if (I->first == PReg)
      return I->second;
  }
  return 0;
}