// Copyright 2013, ARM Limited
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//   * Redistributions of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//   * Redistributions in binary form must reproduce the above copyright
//     notice, this list of conditions and the following disclaimer in the
//     documentation and/or other materials provided with the distribution.
//   * Neither the name of ARM Limited nor the names of its contributors may
//     be used to endorse or promote products derived from this software
//     without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.

#include "a64/instructions-a64.h"
#include "a64/assembler-a64.h"

namespace vixl {


static uint64_t RotateRight(uint64_t value,
                            unsigned int rotate,
                            unsigned int width) {
  VIXL_ASSERT(width <= 64);
  rotate &= 63;
  if (rotate == 0) {
    // Rotating by zero is the identity operation; returning early also avoids
    // the undefined left shift by (width - 0) bits below.
    return value;
  }
  return ((value & ((UINT64_C(1) << rotate) - 1)) <<
          (width - rotate)) | (value >> rotate);
}


static uint64_t RepeatBitsAcrossReg(unsigned reg_size,
                                    uint64_t value,
                                    unsigned width) {
  VIXL_ASSERT((width == 2) || (width == 4) || (width == 8) || (width == 16) ||
              (width == 32));
  VIXL_ASSERT((reg_size == kWRegSize) || (reg_size == kXRegSize));
  uint64_t result = value & ((UINT64_C(1) << width) - 1);
  // Double the populated width until the pattern fills the register.
  for (unsigned i = width; i < reg_size; i *= 2) {
    result |= (result << i);
  }
  return result;
}


// Logical immediates can't encode zero, so a return value of zero is used to
// indicate a failure case. Specifically, where the constraints on imm_s are
// not met.
uint64_t Instruction::ImmLogical() {
  unsigned reg_size = SixtyFourBits() ? kXRegSize : kWRegSize;
  int64_t n = BitN();
  int64_t imm_s = ImmSetBits();
  int64_t imm_r = ImmRotate();

  // An integer is constructed from the n, imm_s and imm_r bits according to
  // the following table:
  //
  //  N   imms    immr    size        S             R
  //  1  ssssss  rrrrrr    64    UInt(ssssss)  UInt(rrrrrr)
  //  0  0sssss  xrrrrr    32    UInt(sssss)   UInt(rrrrr)
  //  0  10ssss  xxrrrr    16    UInt(ssss)    UInt(rrrr)
  //  0  110sss  xxxrrr     8    UInt(sss)     UInt(rrr)
  //  0  1110ss  xxxxrr     4    UInt(ss)      UInt(rr)
  //  0  11110s  xxxxxr     2    UInt(s)       UInt(r)
  //  (s bits must not be all set)
  //
  // A pattern is constructed of size bits, where the least significant S+1
  // bits are set. The pattern is rotated right by R, and repeated across a
  // 32 or 64-bit value, depending on destination register width.
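  //
  // For example, N = 0, imms = 0b110101 and immr = 0b000010 select the 8-bit
  // row with S = 5 and R = 2: the six least significant bits give 0b00111111,
  // rotating right by two gives 0b11001111 (0xcf), and replication produces
  // 0xcfcfcfcf for a W destination or 0xcfcfcfcfcfcfcfcf for an X one.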
  //

  if (n == 1) {
    if (imm_s == 0x3F) {
      return 0;
    }
    uint64_t bits = (UINT64_C(1) << (imm_s + 1)) - 1;
    return RotateRight(bits, imm_r, 64);
  } else {
    if ((imm_s >> 1) == 0x1F) {
      return 0;
    }
    for (int width = 0x20; width >= 0x2; width >>= 1) {
      if ((imm_s & width) == 0) {
        int mask = width - 1;
        if ((imm_s & mask) == mask) {
          return 0;
        }
        uint64_t bits = (UINT64_C(1) << ((imm_s & mask) + 1)) - 1;
        return RepeatBitsAcrossReg(reg_size,
                                   RotateRight(bits, imm_r & mask, width),
                                   width);
      }
    }
  }
  VIXL_UNREACHABLE();
  return 0;
}


float Instruction::ImmFP32() {
  //  ImmFP: abcdefgh (8 bits)
  // Single: aBbb.bbbc.defg.h000.0000.0000.0000.0000 (32 bits)
  // where B is b ^ 1
  uint32_t bits = ImmFP();
  uint32_t bit7 = (bits >> 7) & 0x1;
  uint32_t bit6 = (bits >> 6) & 0x1;
  uint32_t bit5_to_0 = bits & 0x3f;
  // (32 - bit6) sets bit 30 to B and replicates b across bits 29 to 25.
  uint32_t result = (bit7 << 31) | ((32 - bit6) << 25) | (bit5_to_0 << 19);

  return rawbits_to_float(result);
}


double Instruction::ImmFP64() {
  //  ImmFP: abcdefgh (8 bits)
  // Double: aBbb.bbbb.bbcd.efgh.0000.0000.0000.0000
  //         0000.0000.0000.0000.0000.0000.0000.0000 (64 bits)
  // where B is b ^ 1
  uint32_t bits = ImmFP();
  uint64_t bit7 = (bits >> 7) & 0x1;
  uint64_t bit6 = (bits >> 6) & 0x1;
  uint64_t bit5_to_0 = bits & 0x3f;
  // (256 - bit6) sets bit 62 to B and replicates b across bits 61 to 54.
  uint64_t result = (bit7 << 63) | ((256 - bit6) << 54) | (bit5_to_0 << 48);

  return rawbits_to_double(result);
}


LSDataSize CalcLSPairDataSize(LoadStorePairOp op) {
  switch (op) {
    case STP_x:
    case LDP_x:
    case STP_d:
    case LDP_d: return LSDoubleWord;
    default: return LSWord;
  }
}


Instruction* Instruction::ImmPCOffsetTarget() {
  ptrdiff_t offset;
  if (IsPCRelAddressing()) {
    // PC-relative addressing. Only ADR is supported.
    offset = ImmPCRel();
  } else {
    // All PC-relative branches.
    VIXL_ASSERT(BranchType() != UnknownBranchType);
    // Relative branch offsets are instruction-size-aligned.
    offset = ImmBranch() << kInstructionSizeLog2;
  }
  return this + offset;
}


inline int Instruction::ImmBranch() const {
  switch (BranchType()) {
    case CondBranchType: return ImmCondBranch();
    case UncondBranchType: return ImmUncondBranch();
    case CompareBranchType: return ImmCmpBranch();
    case TestBranchType: return ImmTestBranch();
    default: VIXL_UNREACHABLE();
  }
  return 0;
}


void Instruction::SetImmPCOffsetTarget(Instruction* target) {
  if (IsPCRelAddressing()) {
    SetPCRelImmTarget(target);
  } else {
    SetBranchImmTarget(target);
  }
}


void Instruction::SetPCRelImmTarget(Instruction* target) {
  // ADRP is not supported, so 'this' must point to an ADR instruction.
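  // ADR immediates are plain byte offsets (a signed 21-bit field, giving a
  // range of +/- 1MB), so 'target - this' is passed through unscaled, unlike
  // the instruction-size-scaled offsets used in SetBranchImmTarget below.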
  VIXL_ASSERT(Mask(PCRelAddressingMask) == ADR);

  Instr imm = Assembler::ImmPCRelAddress(target - this);

  SetInstructionBits(Mask(~ImmPCRel_mask) | imm);
}


void Instruction::SetBranchImmTarget(Instruction* target) {
  VIXL_ASSERT(((target - this) & 3) == 0);
  Instr branch_imm = 0;
  uint32_t imm_mask = 0;
  int offset = (target - this) >> kInstructionSizeLog2;
  switch (BranchType()) {
    case CondBranchType: {
      branch_imm = Assembler::ImmCondBranch(offset);
      imm_mask = ImmCondBranch_mask;
      break;
    }
    case UncondBranchType: {
      branch_imm = Assembler::ImmUncondBranch(offset);
      imm_mask = ImmUncondBranch_mask;
      break;
    }
    case CompareBranchType: {
      branch_imm = Assembler::ImmCmpBranch(offset);
      imm_mask = ImmCmpBranch_mask;
      break;
    }
    case TestBranchType: {
      branch_imm = Assembler::ImmTestBranch(offset);
      imm_mask = ImmTestBranch_mask;
      break;
    }
    default: VIXL_UNREACHABLE();
  }
  SetInstructionBits(Mask(~imm_mask) | branch_imm);
}


void Instruction::SetImmLLiteral(Instruction* source) {
  VIXL_ASSERT(((source - this) & 3) == 0);
  int offset = (source - this) >> kLiteralEntrySizeLog2;
  Instr imm = Assembler::ImmLLiteral(offset);
  Instr mask = ImmLLiteral_mask;

  SetInstructionBits(Mask(~mask) | imm);
}
}  // namespace vixl
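
// A minimal usage sketch (illustrative only; 'branch' and 'new_target' are
// hypothetical values, and buffer management is elided): ImmPCOffsetTarget()
// recovers where a PC-relative instruction points, and SetImmPCOffsetTarget()
// patches it in place.
//
//   vixl::Instruction* branch = ...;  // Points at a B instruction.
//   vixl::Instruction* old_target = branch->ImmPCOffsetTarget();
//   branch->SetImmPCOffsetTarget(new_target);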