//===-- ARMInstrVFP.td - VFP support for ARM ---------------*- tablegen -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file describes the ARM VFP instruction set.
//
//===----------------------------------------------------------------------===//

def SDT_FTOI    : SDTypeProfile<1, 1, [SDTCisVT<0, f32>, SDTCisFP<1>]>;
def SDT_ITOF    : SDTypeProfile<1, 1, [SDTCisFP<0>, SDTCisVT<1, f32>]>;
def SDT_CMPFP0  : SDTypeProfile<0, 1, [SDTCisFP<0>]>;
def SDT_VMOVDRR : SDTypeProfile<1, 2, [SDTCisVT<0, f64>, SDTCisVT<1, i32>,
                                       SDTCisSameAs<1, 2>]>;

def arm_ftoui  : SDNode<"ARMISD::FTOUI",   SDT_FTOI>;
def arm_ftosi  : SDNode<"ARMISD::FTOSI",   SDT_FTOI>;
def arm_sitof  : SDNode<"ARMISD::SITOF",   SDT_ITOF>;
def arm_uitof  : SDNode<"ARMISD::UITOF",   SDT_ITOF>;
def arm_fmstat : SDNode<"ARMISD::FMSTAT",  SDTNone, [SDNPInGlue, SDNPOutGlue]>;
def arm_cmpfp  : SDNode<"ARMISD::CMPFP",   SDT_ARMCmp, [SDNPOutGlue]>;
def arm_cmpfp0 : SDNode<"ARMISD::CMPFPw0", SDT_CMPFP0, [SDNPOutGlue]>;
def arm_fmdrr  : SDNode<"ARMISD::VMOVDRR", SDT_VMOVDRR>;


//===----------------------------------------------------------------------===//
// Operand Definitions.
//

// 8-bit floating-point immediate encodings.
def FPImmOperand : AsmOperandClass {
  let Name = "FPImm";
  let ParserMethod = "parseFPImm";
}

def vfp_f32imm : Operand<f32>,
                 PatLeaf<(f32 fpimm), [{
      return ARM_AM::getFP32Imm(N->getValueAPF()) != -1;
    }], SDNodeXForm<fpimm, [{
      APFloat InVal = N->getValueAPF();
      uint32_t enc = ARM_AM::getFP32Imm(InVal);
      return CurDAG->getTargetConstant(enc, MVT::i32);
    }]>> {
  let PrintMethod = "printFPImmOperand";
  let ParserMatchClass = FPImmOperand;
}

def vfp_f64imm : Operand<f64>,
                 PatLeaf<(f64 fpimm), [{
      return ARM_AM::getFP64Imm(N->getValueAPF()) != -1;
    }], SDNodeXForm<fpimm, [{
      APFloat InVal = N->getValueAPF();
      uint32_t enc = ARM_AM::getFP64Imm(InVal);
      return CurDAG->getTargetConstant(enc, MVT::i32);
    }]>> {
  let PrintMethod = "printFPImmOperand";
  let ParserMatchClass = FPImmOperand;
}

def alignedload32 : PatFrag<(ops node:$ptr), (load node:$ptr), [{
  return cast<LoadSDNode>(N)->getAlignment() >= 4;
}]>;

def alignedstore32 : PatFrag<(ops node:$val, node:$ptr),
                             (store node:$val, node:$ptr), [{
  return cast<StoreSDNode>(N)->getAlignment() >= 4;
}]>;

// The VCVT to/from fixed-point instructions encode the 'fbits' operand
// (the number of fixed bits) differently than it appears in the assembly
// source. It's encoded as "Size - fbits" where Size is the size of the
// fixed-point representation (32 or 16) and fbits is the value appearing
// in the assembly source, an integer in [0,16] or (0,32], depending on size.
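// For example, with the 32-bit forms an assembly operand of "#16" (fbits = 16)
// is encoded as 32 - 16 = 16 and "#1" is encoded as 31; printFBits32 and
// printFBits16 below apply the inverse transform when printing.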
def fbits32_asm_operand : AsmOperandClass { let Name = "FBits32"; }
def fbits32 : Operand<i32> {
  let PrintMethod = "printFBits32";
  let ParserMatchClass = fbits32_asm_operand;
}

def fbits16_asm_operand : AsmOperandClass { let Name = "FBits16"; }
def fbits16 : Operand<i32> {
  let PrintMethod = "printFBits16";
  let ParserMatchClass = fbits16_asm_operand;
}

//===----------------------------------------------------------------------===//
// Load / store Instructions.
//

let canFoldAsLoad = 1, isReMaterializable = 1 in {

def VLDRD : ADI5<0b1101, 0b01, (outs DPR:$Dd), (ins addrmode5:$addr),
                 IIC_fpLoad64, "vldr", "\t$Dd, $addr",
                 [(set DPR:$Dd, (f64 (alignedload32 addrmode5:$addr)))]>;

def VLDRS : ASI5<0b1101, 0b01, (outs SPR:$Sd), (ins addrmode5:$addr),
                 IIC_fpLoad32, "vldr", "\t$Sd, $addr",
                 [(set SPR:$Sd, (load addrmode5:$addr))]> {
  // Some single precision VFP instructions may be executed on both NEON and VFP
  // pipelines.
  let D = VFPNeonDomain;
}

} // End of 'let canFoldAsLoad = 1, isReMaterializable = 1 in'

def VSTRD : ADI5<0b1101, 0b00, (outs), (ins DPR:$Dd, addrmode5:$addr),
                 IIC_fpStore64, "vstr", "\t$Dd, $addr",
                 [(alignedstore32 (f64 DPR:$Dd), addrmode5:$addr)]>;

def VSTRS : ASI5<0b1101, 0b00, (outs), (ins SPR:$Sd, addrmode5:$addr),
                 IIC_fpStore32, "vstr", "\t$Sd, $addr",
                 [(store SPR:$Sd, addrmode5:$addr)]> {
  // Some single precision VFP instructions may be executed on both NEON and VFP
  // pipelines.
  let D = VFPNeonDomain;
}

//===----------------------------------------------------------------------===//
// Load / store multiple Instructions.
//

multiclass vfp_ldst_mult<string asm, bit L_bit,
                         InstrItinClass itin, InstrItinClass itin_upd> {
  // Double Precision
  def DIA :
    AXDI4<(outs), (ins GPR:$Rn, pred:$p, dpr_reglist:$regs, variable_ops),
          IndexModeNone, itin,
          !strconcat(asm, "ia${p}\t$Rn, $regs"), "", []> {
    let Inst{24-23} = 0b01;       // Increment After
    let Inst{21}    = 0;          // No writeback
    let Inst{20}    = L_bit;
  }
  def DIA_UPD :
    AXDI4<(outs GPR:$wb), (ins GPR:$Rn, pred:$p, dpr_reglist:$regs,
                                variable_ops),
          IndexModeUpd, itin_upd,
          !strconcat(asm, "ia${p}\t$Rn!, $regs"), "$Rn = $wb", []> {
    let Inst{24-23} = 0b01;       // Increment After
    let Inst{21}    = 1;          // Writeback
    let Inst{20}    = L_bit;
  }
  def DDB_UPD :
    AXDI4<(outs GPR:$wb), (ins GPR:$Rn, pred:$p, dpr_reglist:$regs,
                                variable_ops),
          IndexModeUpd, itin_upd,
          !strconcat(asm, "db${p}\t$Rn!, $regs"), "$Rn = $wb", []> {
    let Inst{24-23} = 0b10;       // Decrement Before
    let Inst{21}    = 1;          // Writeback
    let Inst{20}    = L_bit;
  }

  // Single Precision
  def SIA :
    AXSI4<(outs), (ins GPR:$Rn, pred:$p, spr_reglist:$regs, variable_ops),
          IndexModeNone, itin,
          !strconcat(asm, "ia${p}\t$Rn, $regs"), "", []> {
    let Inst{24-23} = 0b01;       // Increment After
    let Inst{21}    = 0;          // No writeback
    let Inst{20}    = L_bit;

    // Some single precision VFP instructions may be executed on both NEON and
    // VFP pipelines.
    let D = VFPNeonDomain;
  }
  def SIA_UPD :
    AXSI4<(outs GPR:$wb), (ins GPR:$Rn, pred:$p, spr_reglist:$regs,
                                variable_ops),
          IndexModeUpd, itin_upd,
          !strconcat(asm, "ia${p}\t$Rn!, $regs"), "$Rn = $wb", []> {
    let Inst{24-23} = 0b01;       // Increment After
    let Inst{21}    = 1;          // Writeback
    let Inst{20}    = L_bit;

    // Some single precision VFP instructions may be executed on both NEON and
    // VFP pipelines.
    let D = VFPNeonDomain;
  }
  def SDB_UPD :
    AXSI4<(outs GPR:$wb), (ins GPR:$Rn, pred:$p, spr_reglist:$regs,
                                variable_ops),
          IndexModeUpd, itin_upd,
          !strconcat(asm, "db${p}\t$Rn!, $regs"), "$Rn = $wb", []> {
    let Inst{24-23} = 0b10;       // Decrement Before
    let Inst{21}    = 1;          // Writeback
    let Inst{20}    = L_bit;

    // Some single precision VFP instructions may be executed on both NEON and
    // VFP pipelines.
    let D = VFPNeonDomain;
  }
}

let neverHasSideEffects = 1 in {

let mayLoad = 1, hasExtraDefRegAllocReq = 1 in
defm VLDM : vfp_ldst_mult<"vldm", 1, IIC_fpLoad_m, IIC_fpLoad_mu>;

let mayStore = 1, hasExtraSrcRegAllocReq = 1 in
defm VSTM : vfp_ldst_mult<"vstm", 0, IIC_fpStore_m, IIC_fpStore_mu>;

} // neverHasSideEffects

def : MnemonicAlias<"vldm", "vldmia">;
def : MnemonicAlias<"vstm", "vstmia">;

// FLDM/FSTM - Load / Store multiple single / double precision registers for
// pre-ARMv6 cores.
// These instructions are deprecated!
def : VFP2MnemonicAlias<"fldmias", "vldmia">;
def : VFP2MnemonicAlias<"fldmdbs", "vldmdb">;
def : VFP2MnemonicAlias<"fldmeas", "vldmdb">;
def : VFP2MnemonicAlias<"fldmfds", "vldmia">;
def : VFP2MnemonicAlias<"fldmiad", "vldmia">;
def : VFP2MnemonicAlias<"fldmdbd", "vldmdb">;
def : VFP2MnemonicAlias<"fldmead", "vldmdb">;
def : VFP2MnemonicAlias<"fldmfdd", "vldmia">;

def : VFP2MnemonicAlias<"fstmias", "vstmia">;
def : VFP2MnemonicAlias<"fstmdbs", "vstmdb">;
def : VFP2MnemonicAlias<"fstmeas", "vstmia">;
def : VFP2MnemonicAlias<"fstmfds", "vstmdb">;
def : VFP2MnemonicAlias<"fstmiad", "vstmia">;
def : VFP2MnemonicAlias<"fstmdbd", "vstmdb">;
def : VFP2MnemonicAlias<"fstmead", "vstmia">;
def : VFP2MnemonicAlias<"fstmfdd", "vstmdb">;

def : InstAlias<"vpush${p} $r",  (VSTMDDB_UPD SP, pred:$p, dpr_reglist:$r)>,
      Requires<[HasVFP2]>;
def : InstAlias<"vpush${p} $r",  (VSTMSDB_UPD SP, pred:$p, spr_reglist:$r)>,
      Requires<[HasVFP2]>;
def : InstAlias<"vpop${p} $r",   (VLDMDIA_UPD SP, pred:$p, dpr_reglist:$r)>,
      Requires<[HasVFP2]>;
def : InstAlias<"vpop${p} $r",   (VLDMSIA_UPD SP, pred:$p, spr_reglist:$r)>,
      Requires<[HasVFP2]>;
defm : VFPDTAnyInstAlias<"vpush${p}", "$r",
                         (VSTMSDB_UPD SP, pred:$p, spr_reglist:$r)>;
defm : VFPDTAnyInstAlias<"vpush${p}", "$r",
                         (VSTMDDB_UPD SP, pred:$p, dpr_reglist:$r)>;
defm : VFPDTAnyInstAlias<"vpop${p}", "$r",
                         (VLDMSIA_UPD SP, pred:$p, spr_reglist:$r)>;
defm : VFPDTAnyInstAlias<"vpop${p}", "$r",
                         (VLDMDIA_UPD SP, pred:$p, dpr_reglist:$r)>;

// FLDMX, FSTMX - Load and store multiple unknown precision registers for
// pre-armv6 cores.
// These instructions are deprecated so we don't want them to get selected.
multiclass vfp_ldstx_mult<string asm, bit L_bit> {
  // Unknown precision
  def XIA :
    AXXI4<(outs), (ins GPR:$Rn, pred:$p, dpr_reglist:$regs, variable_ops),
          IndexModeNone, !strconcat(asm, "iax${p}\t$Rn, $regs"), "", []> {
    let Inst{24-23} = 0b01;       // Increment After
    let Inst{21}    = 0;          // No writeback
    let Inst{20}    = L_bit;
  }
  def XIA_UPD :
    AXXI4<(outs GPR:$wb), (ins GPR:$Rn, pred:$p, dpr_reglist:$regs, variable_ops),
          IndexModeUpd, !strconcat(asm, "iax${p}\t$Rn!, $regs"), "$Rn = $wb", []> {
    let Inst{24-23} = 0b01;       // Increment After
    let Inst{21}    = 1;          // Writeback
    let Inst{20}    = L_bit;
  }
  def XDB_UPD :
    AXXI4<(outs GPR:$wb), (ins GPR:$Rn, pred:$p, dpr_reglist:$regs, variable_ops),
          IndexModeUpd, !strconcat(asm, "dbx${p}\t$Rn!, $regs"), "$Rn = $wb", []> {
    let Inst{24-23} = 0b10;       // Decrement Before
    let Inst{21}    = 1;          // Writeback
    let Inst{20}    = L_bit;
  }
}

defm FLDM : vfp_ldstx_mult<"fldm", 1>;
defm FSTM : vfp_ldstx_mult<"fstm", 0>;

def : VFP2MnemonicAlias<"fldmeax", "fldmdbx">;
def : VFP2MnemonicAlias<"fldmfdx", "fldmiax">;

def : VFP2MnemonicAlias<"fstmeax", "fstmiax">;
def : VFP2MnemonicAlias<"fstmfdx", "fstmdbx">;

//===----------------------------------------------------------------------===//
// FP Binary Operations.
//

let TwoOperandAliasConstraint = "$Dn = $Dd" in
def VADDD  : ADbI<0b11100, 0b11, 0, 0,
                  (outs DPR:$Dd), (ins DPR:$Dn, DPR:$Dm),
                  IIC_fpALU64, "vadd", ".f64\t$Dd, $Dn, $Dm",
                  [(set DPR:$Dd, (fadd DPR:$Dn, (f64 DPR:$Dm)))]>;

let TwoOperandAliasConstraint = "$Sn = $Sd" in
def VADDS  : ASbIn<0b11100, 0b11, 0, 0,
                   (outs SPR:$Sd), (ins SPR:$Sn, SPR:$Sm),
                   IIC_fpALU32, "vadd", ".f32\t$Sd, $Sn, $Sm",
                   [(set SPR:$Sd, (fadd SPR:$Sn, SPR:$Sm))]> {
  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}

let TwoOperandAliasConstraint = "$Dn = $Dd" in
def VSUBD  : ADbI<0b11100, 0b11, 1, 0,
                  (outs DPR:$Dd), (ins DPR:$Dn, DPR:$Dm),
                  IIC_fpALU64, "vsub", ".f64\t$Dd, $Dn, $Dm",
                  [(set DPR:$Dd, (fsub DPR:$Dn, (f64 DPR:$Dm)))]>;

let TwoOperandAliasConstraint = "$Sn = $Sd" in
def VSUBS  : ASbIn<0b11100, 0b11, 1, 0,
                   (outs SPR:$Sd), (ins SPR:$Sn, SPR:$Sm),
                   IIC_fpALU32, "vsub", ".f32\t$Sd, $Sn, $Sm",
                   [(set SPR:$Sd, (fsub SPR:$Sn, SPR:$Sm))]> {
  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}

let TwoOperandAliasConstraint = "$Dn = $Dd" in
def VDIVD  : ADbI<0b11101, 0b00, 0, 0,
                  (outs DPR:$Dd), (ins DPR:$Dn, DPR:$Dm),
                  IIC_fpDIV64, "vdiv", ".f64\t$Dd, $Dn, $Dm",
                  [(set DPR:$Dd, (fdiv DPR:$Dn, (f64 DPR:$Dm)))]>;

let TwoOperandAliasConstraint = "$Sn = $Sd" in
def VDIVS  : ASbI<0b11101, 0b00, 0, 0,
                  (outs SPR:$Sd), (ins SPR:$Sn, SPR:$Sm),
                  IIC_fpDIV32, "vdiv", ".f32\t$Sd, $Sn, $Sm",
                  [(set SPR:$Sd, (fdiv SPR:$Sn, SPR:$Sm))]>;

let TwoOperandAliasConstraint = "$Dn = $Dd" in
def VMULD  : ADbI<0b11100, 0b10, 0, 0,
                  (outs DPR:$Dd), (ins DPR:$Dn, DPR:$Dm),
                  IIC_fpMUL64, "vmul", ".f64\t$Dd, $Dn, $Dm",
                  [(set DPR:$Dd, (fmul DPR:$Dn, (f64 DPR:$Dm)))]>;

let TwoOperandAliasConstraint = "$Sn = $Sd" in
def VMULS  : ASbIn<0b11100, 0b10, 0, 0,
                   (outs SPR:$Sd), (ins SPR:$Sn, SPR:$Sm),
                   IIC_fpMUL32, "vmul", ".f32\t$Sd, $Sn, $Sm",
                   [(set SPR:$Sd, (fmul SPR:$Sn, SPR:$Sm))]> {
  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}

def VNMULD : ADbI<0b11100, 0b10, 1, 0,
                  (outs DPR:$Dd), (ins DPR:$Dn, DPR:$Dm),
                  IIC_fpMUL64, "vnmul", ".f64\t$Dd, $Dn, $Dm",
                  [(set DPR:$Dd, (fneg (fmul DPR:$Dn, (f64 DPR:$Dm))))]>;

def VNMULS : ASbI<0b11100, 0b10, 1, 0,
                  (outs SPR:$Sd), (ins SPR:$Sn, SPR:$Sm),
                  IIC_fpMUL32, "vnmul", ".f32\t$Sd, $Sn, $Sm",
                  [(set SPR:$Sd, (fneg (fmul SPR:$Sn, SPR:$Sm)))]> {
  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}

multiclass vsel_inst<string op, bits<2> opc, int CC> {
  let DecoderNamespace = "VFPV8", PostEncoderMethod = "",
      Uses = [CPSR], AddedComplexity = 4 in {
    def S : ASbInp<0b11100, opc, 0,
                   (outs SPR:$Sd), (ins SPR:$Sn, SPR:$Sm),
                   NoItinerary, !strconcat("vsel", op, ".f32\t$Sd, $Sn, $Sm"),
                   [(set SPR:$Sd, (ARMcmov SPR:$Sm, SPR:$Sn, CC))]>,
                   Requires<[HasFPARMv8]>;

    def D : ADbInp<0b11100, opc, 0,
                   (outs DPR:$Dd), (ins DPR:$Dn, DPR:$Dm),
                   NoItinerary, !strconcat("vsel", op, ".f64\t$Dd, $Dn, $Dm"),
                   [(set DPR:$Dd, (ARMcmov (f64 DPR:$Dm), (f64 DPR:$Dn), CC))]>,
                   Requires<[HasFPARMv8, HasDPVFP]>;
  }
}

// The CC constants here match ARMCC::CondCodes.
defm VSELGT : vsel_inst<"gt", 0b11, 12>;
defm VSELGE : vsel_inst<"ge", 0b10, 10>;
defm VSELEQ : vsel_inst<"eq", 0b00, 0>;
defm VSELVS : vsel_inst<"vs", 0b01, 6>;

multiclass vmaxmin_inst<string op, bit opc, SDNode SD> {
  let DecoderNamespace = "VFPV8", PostEncoderMethod = "" in {
    def S : ASbInp<0b11101, 0b00, opc,
                   (outs SPR:$Sd), (ins SPR:$Sn, SPR:$Sm),
                   NoItinerary, !strconcat(op, ".f32\t$Sd, $Sn, $Sm"),
                   [(set SPR:$Sd, (SD SPR:$Sn, SPR:$Sm))]>,
                   Requires<[HasFPARMv8]>;

    def D : ADbInp<0b11101, 0b00, opc,
                   (outs DPR:$Dd), (ins DPR:$Dn, DPR:$Dm),
                   NoItinerary, !strconcat(op, ".f64\t$Dd, $Dn, $Dm"),
                   [(set DPR:$Dd, (f64 (SD (f64 DPR:$Dn), (f64 DPR:$Dm))))]>,
                   Requires<[HasFPARMv8, HasDPVFP]>;
  }
}

defm VMAXNM : vmaxmin_inst<"vmaxnm", 0, ARMvmaxnm>;
defm VMINNM : vmaxmin_inst<"vminnm", 1, ARMvminnm>;

// Match reassociated forms only if not sign dependent rounding.
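// Note: ((-a) * b) and -(a * b) round identically under round-to-nearest and
// round-towards-zero, but can differ by one ULP under the directed rounding
// modes, which is why the fold is gated on NoHonorSignDependentRounding.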
def : Pat<(fmul (fneg DPR:$a), (f64 DPR:$b)),
          (VNMULD DPR:$a, DPR:$b)>,
          Requires<[NoHonorSignDependentRounding,HasDPVFP]>;
def : Pat<(fmul (fneg SPR:$a), SPR:$b),
          (VNMULS SPR:$a, SPR:$b)>, Requires<[NoHonorSignDependentRounding]>;

// These are encoded as unary instructions.
let Defs = [FPSCR_NZCV] in {
def VCMPED : ADuI<0b11101, 0b11, 0b0100, 0b11, 0,
                  (outs), (ins DPR:$Dd, DPR:$Dm),
                  IIC_fpCMP64, "vcmpe", ".f64\t$Dd, $Dm",
                  [(arm_cmpfp DPR:$Dd, (f64 DPR:$Dm))]>;

def VCMPES : ASuI<0b11101, 0b11, 0b0100, 0b11, 0,
                  (outs), (ins SPR:$Sd, SPR:$Sm),
                  IIC_fpCMP32, "vcmpe", ".f32\t$Sd, $Sm",
                  [(arm_cmpfp SPR:$Sd, SPR:$Sm)]> {
  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}

// FIXME: Verify encoding after integrated assembler is working.
def VCMPD  : ADuI<0b11101, 0b11, 0b0100, 0b01, 0,
                  (outs), (ins DPR:$Dd, DPR:$Dm),
                  IIC_fpCMP64, "vcmp", ".f64\t$Dd, $Dm",
                  [/* For disassembly only; pattern left blank */]>;

def VCMPS  : ASuI<0b11101, 0b11, 0b0100, 0b01, 0,
                  (outs), (ins SPR:$Sd, SPR:$Sm),
                  IIC_fpCMP32, "vcmp", ".f32\t$Sd, $Sm",
                  [/* For disassembly only; pattern left blank */]> {
  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}
} // Defs = [FPSCR_NZCV]

//===----------------------------------------------------------------------===//
// FP Unary Operations.
//

def VABSD  : ADuI<0b11101, 0b11, 0b0000, 0b11, 0,
                  (outs DPR:$Dd), (ins DPR:$Dm),
                  IIC_fpUNA64, "vabs", ".f64\t$Dd, $Dm",
                  [(set DPR:$Dd, (fabs (f64 DPR:$Dm)))]>;

def VABSS  : ASuIn<0b11101, 0b11, 0b0000, 0b11, 0,
                   (outs SPR:$Sd), (ins SPR:$Sm),
                   IIC_fpUNA32, "vabs", ".f32\t$Sd, $Sm",
                   [(set SPR:$Sd, (fabs SPR:$Sm))]> {
  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}

let Defs = [FPSCR_NZCV] in {
def VCMPEZD : ADuI<0b11101, 0b11, 0b0101, 0b11, 0,
                   (outs), (ins DPR:$Dd),
                   IIC_fpCMP64, "vcmpe", ".f64\t$Dd, #0",
                   [(arm_cmpfp0 (f64 DPR:$Dd))]> {
  let Inst{3-0} = 0b0000;
  let Inst{5}   = 0;
}

def VCMPEZS : ASuI<0b11101, 0b11, 0b0101, 0b11, 0,
                   (outs), (ins SPR:$Sd),
                   IIC_fpCMP32, "vcmpe", ".f32\t$Sd, #0",
                   [(arm_cmpfp0 SPR:$Sd)]> {
  let Inst{3-0} = 0b0000;
  let Inst{5}   = 0;

  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}

// FIXME: Verify encoding after integrated assembler is working.
def VCMPZD  : ADuI<0b11101, 0b11, 0b0101, 0b01, 0,
                   (outs), (ins DPR:$Dd),
                   IIC_fpCMP64, "vcmp", ".f64\t$Dd, #0",
                   [/* For disassembly only; pattern left blank */]> {
  let Inst{3-0} = 0b0000;
  let Inst{5}   = 0;
}

def VCMPZS  : ASuI<0b11101, 0b11, 0b0101, 0b01, 0,
                   (outs), (ins SPR:$Sd),
                   IIC_fpCMP32, "vcmp", ".f32\t$Sd, #0",
                   [/* For disassembly only; pattern left blank */]> {
  let Inst{3-0} = 0b0000;
  let Inst{5}   = 0;

  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}
} // Defs = [FPSCR_NZCV]

def VCVTDS  : ASuI<0b11101, 0b11, 0b0111, 0b11, 0,
                   (outs DPR:$Dd), (ins SPR:$Sm),
                   IIC_fpCVTDS, "vcvt", ".f64.f32\t$Dd, $Sm",
                   [(set DPR:$Dd, (fextend SPR:$Sm))]> {
  // Instruction operands.
  bits<5> Dd;
  bits<5> Sm;

  // Encode instruction operands.
  let Inst{3-0}   = Sm{4-1};
  let Inst{5}     = Sm{0};
  let Inst{15-12} = Dd{3-0};
  let Inst{22}    = Dd{4};
}

// Special case encoding: bits 11-8 is 0b1011.
def VCVTSD  : VFPAI<(outs SPR:$Sd), (ins DPR:$Dm), VFPUnaryFrm,
                    IIC_fpCVTSD, "vcvt", ".f32.f64\t$Sd, $Dm",
                    [(set SPR:$Sd, (fround DPR:$Dm))]> {
  // Instruction operands.
  bits<5> Sd;
  bits<5> Dm;

  // Encode instruction operands.
  let Inst{3-0}   = Dm{3-0};
  let Inst{5}     = Dm{4};
  let Inst{15-12} = Sd{4-1};
  let Inst{22}    = Sd{0};

  let Inst{27-23} = 0b11101;
  let Inst{21-16} = 0b110111;
  let Inst{11-8}  = 0b1011;
  let Inst{7-6}   = 0b11;
  let Inst{4}     = 0;

  let Predicates = [HasVFP2, HasDPVFP];
}

// Between half, single and double-precision. For disassembly only.

// FIXME: Verify encoding after integrated assembler is working.
def VCVTBHS: ASuI<0b11101, 0b11, 0b0010, 0b01, 0, (outs SPR:$Sd), (ins SPR:$Sm),
                  /* FIXME */ IIC_fpCVTSH, "vcvtb", ".f32.f16\t$Sd, $Sm",
                  [/* For disassembly only; pattern left blank */]>;

def VCVTBSH: ASuI<0b11101, 0b11, 0b0011, 0b01, 0, (outs SPR:$Sd), (ins SPR:$Sm),
                  /* FIXME */ IIC_fpCVTHS, "vcvtb", ".f16.f32\t$Sd, $Sm",
                  [/* For disassembly only; pattern left blank */]>;

def : Pat<(f32_to_f16 SPR:$a),
          (i32 (COPY_TO_REGCLASS (VCVTBSH SPR:$a), GPR))>;

def : Pat<(f16_to_f32 GPR:$a),
          (VCVTBHS (COPY_TO_REGCLASS GPR:$a, SPR))>;

def VCVTTHS: ASuI<0b11101, 0b11, 0b0010, 0b11, 0, (outs SPR:$Sd), (ins SPR:$Sm),
                  /* FIXME */ IIC_fpCVTSH, "vcvtt", ".f32.f16\t$Sd, $Sm",
                  [/* For disassembly only; pattern left blank */]>;

def VCVTTSH: ASuI<0b11101, 0b11, 0b0011, 0b11, 0, (outs SPR:$Sd), (ins SPR:$Sm),
                  /* FIXME */ IIC_fpCVTHS, "vcvtt", ".f16.f32\t$Sd, $Sm",
                  [/* For disassembly only; pattern left blank */]>;

def VCVTBHD : ADuI<0b11101, 0b11, 0b0010, 0b01, 0,
                   (outs DPR:$Dd), (ins SPR:$Sm),
                   NoItinerary, "vcvtb", ".f64.f16\t$Dd, $Sm",
                   []>, Requires<[HasFPARMv8, HasDPVFP]> {
  // Instruction operands.
  bits<5> Sm;

  // Encode instruction operands.
  let Inst{3-0} = Sm{4-1};
  let Inst{5}   = Sm{0};
}

def VCVTBDH : ADuI<0b11101, 0b11, 0b0011, 0b01, 0,
                   (outs SPR:$Sd), (ins DPR:$Dm),
                   NoItinerary, "vcvtb", ".f16.f64\t$Sd, $Dm",
                   []>, Requires<[HasFPARMv8, HasDPVFP]> {
  // Instruction operands.
  bits<5> Sd;
  bits<5> Dm;

  // Encode instruction operands.
  let Inst{3-0}   = Dm{3-0};
  let Inst{5}     = Dm{4};
  let Inst{15-12} = Sd{4-1};
  let Inst{22}    = Sd{0};
}

def VCVTTHD : ADuI<0b11101, 0b11, 0b0010, 0b11, 0,
                   (outs DPR:$Dd), (ins SPR:$Sm),
                   NoItinerary, "vcvtt", ".f64.f16\t$Dd, $Sm",
                   []>, Requires<[HasFPARMv8, HasDPVFP]> {
  // Instruction operands.
  bits<5> Sm;

  // Encode instruction operands.
  let Inst{3-0} = Sm{4-1};
  let Inst{5}   = Sm{0};
}

def VCVTTDH : ADuI<0b11101, 0b11, 0b0011, 0b11, 0,
                   (outs SPR:$Sd), (ins DPR:$Dm),
                   NoItinerary, "vcvtt", ".f16.f64\t$Sd, $Dm",
                   []>, Requires<[HasFPARMv8, HasDPVFP]> {
  // Instruction operands.
  bits<5> Sd;
  bits<5> Dm;

  // Encode instruction operands.
  let Inst{15-12} = Sd{4-1};
  let Inst{22}    = Sd{0};
  let Inst{3-0}   = Dm{3-0};
  let Inst{5}     = Dm{4};
}

multiclass vcvt_inst<string opc, bits<2> rm> {
  let PostEncoderMethod = "", DecoderNamespace = "VFPV8" in {
    def SS : ASuInp<0b11101, 0b11, 0b1100, 0b11, 0,
                    (outs SPR:$Sd), (ins SPR:$Sm),
                    NoItinerary, !strconcat("vcvt", opc, ".s32.f32\t$Sd, $Sm"),
                    []>, Requires<[HasFPARMv8]> {
      let Inst{17-16} = rm;
    }

    def US : ASuInp<0b11101, 0b11, 0b1100, 0b01, 0,
                    (outs SPR:$Sd), (ins SPR:$Sm),
                    NoItinerary, !strconcat("vcvt", opc, ".u32.f32\t$Sd, $Sm"),
                    []>, Requires<[HasFPARMv8]> {
      let Inst{17-16} = rm;
    }

    def SD : ASuInp<0b11101, 0b11, 0b1100, 0b11, 0,
                    (outs SPR:$Sd), (ins DPR:$Dm),
                    NoItinerary, !strconcat("vcvt", opc, ".s32.f64\t$Sd, $Dm"),
                    []>, Requires<[HasFPARMv8, HasDPVFP]> {
      bits<5> Dm;

      let Inst{17-16} = rm;

      // Encode instruction operands
      let Inst{3-0} = Dm{3-0};
      let Inst{5}   = Dm{4};
      let Inst{8}   = 1;
    }

    def UD : ASuInp<0b11101, 0b11, 0b1100, 0b01, 0,
                    (outs SPR:$Sd), (ins DPR:$Dm),
                    NoItinerary, !strconcat("vcvt", opc, ".u32.f64\t$Sd, $Dm"),
                    []>, Requires<[HasFPARMv8, HasDPVFP]> {
      bits<5> Dm;

      let Inst{17-16} = rm;

      // Encode instruction operands
      let Inst{3-0} = Dm{3-0};
      let Inst{5}   = Dm{4};
      let Inst{8}   = 1;
    }
  }
}

defm VCVTA : vcvt_inst<"a", 0b00>;
defm VCVTN : vcvt_inst<"n", 0b01>;
defm VCVTP : vcvt_inst<"p", 0b10>;
defm VCVTM : vcvt_inst<"m", 0b11>;

def VNEGD  : ADuI<0b11101, 0b11, 0b0001, 0b01, 0,
                  (outs DPR:$Dd), (ins DPR:$Dm),
                  IIC_fpUNA64, "vneg", ".f64\t$Dd, $Dm",
                  [(set DPR:$Dd, (fneg (f64 DPR:$Dm)))]>;

def VNEGS  : ASuIn<0b11101, 0b11, 0b0001, 0b01, 0,
                   (outs SPR:$Sd), (ins SPR:$Sm),
                   IIC_fpUNA32, "vneg", ".f32\t$Sd, $Sm",
                   [(set SPR:$Sd, (fneg SPR:$Sm))]> {
  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}

multiclass vrint_inst_zrx<string opc, bit op, bit op2> {
  def S : ASuI<0b11101, 0b11, 0b0110, 0b11, 0,
               (outs SPR:$Sd), (ins SPR:$Sm),
               NoItinerary, !strconcat("vrint", opc), ".f32\t$Sd, $Sm",
               []>, Requires<[HasFPARMv8]> {
    let Inst{7}  = op2;
    let Inst{16} = op;
  }
  def D : ADuI<0b11101, 0b11, 0b0110, 0b11, 0,
               (outs DPR:$Dd), (ins DPR:$Dm),
               NoItinerary, !strconcat("vrint", opc), ".f64\t$Dd, $Dm",
               []>, Requires<[HasFPARMv8, HasDPVFP]> {
    let Inst{7}  = op2;
    let Inst{16} = op;
  }

  def : InstAlias<!strconcat("vrint", opc, "$p.f32.f32\t$Sd, $Sm"),
                  (!cast<Instruction>(NAME#"S") SPR:$Sd, SPR:$Sm, pred:$p)>,
        Requires<[HasFPARMv8]>;
  def : InstAlias<!strconcat("vrint", opc, "$p.f64.f64\t$Dd, $Dm"),
                  (!cast<Instruction>(NAME#"D") DPR:$Dd, DPR:$Dm, pred:$p)>,
        Requires<[HasFPARMv8,HasDPVFP]>;
}

defm VRINTZ : vrint_inst_zrx<"z", 0, 1>;
defm VRINTR : vrint_inst_zrx<"r", 0, 0>;
defm VRINTX : vrint_inst_zrx<"x", 1, 0>;

multiclass vrint_inst_anpm<string opc, bits<2> rm> {
  let PostEncoderMethod = "", DecoderNamespace = "VFPV8" in {
    def S : ASuInp<0b11101, 0b11, 0b1000, 0b01, 0,
                   (outs SPR:$Sd), (ins SPR:$Sm),
                   NoItinerary, !strconcat("vrint", opc, ".f32\t$Sd, $Sm"),
                   []>, Requires<[HasFPARMv8]> {
      let Inst{17-16} = rm;
    }
    def D : ADuInp<0b11101, 0b11, 0b1000, 0b01, 0,
                   (outs DPR:$Dd), (ins DPR:$Dm),
                   NoItinerary, !strconcat("vrint", opc, ".f64\t$Dd, $Dm"),
                   []>, Requires<[HasFPARMv8, HasDPVFP]> {
      let Inst{17-16} = rm;
    }
  }

  def : InstAlias<!strconcat("vrint", opc, ".f32.f32\t$Sd, $Sm"),
                  (!cast<Instruction>(NAME#"S") SPR:$Sd, SPR:$Sm)>,
        Requires<[HasFPARMv8]>;
  def : InstAlias<!strconcat("vrint", opc, ".f64.f64\t$Dd, $Dm"),
                  (!cast<Instruction>(NAME#"D") DPR:$Dd, DPR:$Dm)>,
        Requires<[HasFPARMv8,HasDPVFP]>;
}

defm VRINTA : vrint_inst_anpm<"a", 0b00>;
defm VRINTN : vrint_inst_anpm<"n", 0b01>;
defm VRINTP : vrint_inst_anpm<"p", 0b10>;
defm VRINTM : vrint_inst_anpm<"m", 0b11>;

def VSQRTD : ADuI<0b11101, 0b11, 0b0001, 0b11, 0,
                  (outs DPR:$Dd), (ins DPR:$Dm),
                  IIC_fpSQRT64, "vsqrt", ".f64\t$Dd, $Dm",
                  [(set DPR:$Dd, (fsqrt (f64 DPR:$Dm)))]>;

def VSQRTS : ASuI<0b11101, 0b11, 0b0001, 0b11, 0,
                  (outs SPR:$Sd), (ins SPR:$Sm),
                  IIC_fpSQRT32, "vsqrt", ".f32\t$Sd, $Sm",
                  [(set SPR:$Sd, (fsqrt SPR:$Sm))]>;

let neverHasSideEffects = 1 in {
def VMOVD  : ADuI<0b11101, 0b11, 0b0000, 0b01, 0,
                  (outs DPR:$Dd), (ins DPR:$Dm),
                  IIC_fpUNA64, "vmov", ".f64\t$Dd, $Dm", []>;

def VMOVS  : ASuI<0b11101, 0b11, 0b0000, 0b01, 0,
                  (outs SPR:$Sd), (ins SPR:$Sm),
                  IIC_fpUNA32, "vmov", ".f32\t$Sd, $Sm", []>;
} // neverHasSideEffects

//===----------------------------------------------------------------------===//
// FP <-> GPR Copies. Int <-> FP Conversions.
//

def VMOVRS : AVConv2I<0b11100001, 0b1010,
                      (outs GPR:$Rt), (ins SPR:$Sn),
                      IIC_fpMOVSI, "vmov", "\t$Rt, $Sn",
                      [(set GPR:$Rt, (bitconvert SPR:$Sn))]> {
  // Instruction operands.
  bits<4> Rt;
  bits<5> Sn;

  // Encode instruction operands.
  let Inst{19-16} = Sn{4-1};
  let Inst{7}     = Sn{0};
  let Inst{15-12} = Rt;

  let Inst{6-5}   = 0b00;
  let Inst{3-0}   = 0b0000;

  // Some single precision VFP instructions may be executed on both NEON and VFP
  // pipelines.
  let D = VFPNeonDomain;
}

// Bitcast i32 -> f32.  NEON prefers to use VMOVDRR.
def VMOVSR : AVConv4I<0b11100000, 0b1010,
                      (outs SPR:$Sn), (ins GPR:$Rt),
                      IIC_fpMOVIS, "vmov", "\t$Sn, $Rt",
                      [(set SPR:$Sn, (bitconvert GPR:$Rt))]>,
             Requires<[HasVFP2, UseVMOVSR]> {
  // Instruction operands.
  bits<5> Sn;
  bits<4> Rt;

  // Encode instruction operands.
  let Inst{19-16} = Sn{4-1};
  let Inst{7}     = Sn{0};
  let Inst{15-12} = Rt;

  let Inst{6-5}   = 0b00;
  let Inst{3-0}   = 0b0000;

  // Some single precision VFP instructions may be executed on both NEON and VFP
  // pipelines.
  let D = VFPNeonDomain;
}

let neverHasSideEffects = 1 in {
def VMOVRRD  : AVConv3I<0b11000101, 0b1011,
                        (outs GPR:$Rt, GPR:$Rt2), (ins DPR:$Dm),
                        IIC_fpMOVDI, "vmov", "\t$Rt, $Rt2, $Dm",
                        [/* FIXME: Can't write pattern for multiple result instr*/]> {
  // Instruction operands.
  bits<5> Dm;
  bits<4> Rt;
  bits<4> Rt2;

  // Encode instruction operands.
  let Inst{3-0}   = Dm{3-0};
  let Inst{5}     = Dm{4};
  let Inst{15-12} = Rt;
  let Inst{19-16} = Rt2;

  let Inst{7-6} = 0b00;

  // Some single precision VFP instructions may be executed on both NEON and VFP
  // pipelines.
  let D = VFPNeonDomain;
}

def VMOVRRS  : AVConv3I<0b11000101, 0b1010,
                        (outs GPR:$Rt, GPR:$Rt2), (ins SPR:$src1, SPR:$src2),
                        IIC_fpMOVDI, "vmov", "\t$Rt, $Rt2, $src1, $src2",
                        [/* For disassembly only; pattern left blank */]> {
  bits<5> src1;
  bits<4> Rt;
  bits<4> Rt2;

  // Encode instruction operands.
  let Inst{3-0}   = src1{4-1};
  let Inst{5}     = src1{0};
  let Inst{15-12} = Rt;
  let Inst{19-16} = Rt2;

  let Inst{7-6} = 0b00;

  // Some single precision VFP instructions may be executed on both NEON and VFP
  // pipelines.
  let D = VFPNeonDomain;
  let DecoderMethod = "DecodeVMOVRRS";
}
} // neverHasSideEffects

// FMDHR: GPR -> SPR
// FMDLR: GPR -> SPR

def VMOVDRR : AVConv5I<0b11000100, 0b1011,
                       (outs DPR:$Dm), (ins GPR:$Rt, GPR:$Rt2),
                       IIC_fpMOVID, "vmov", "\t$Dm, $Rt, $Rt2",
                       [(set DPR:$Dm, (arm_fmdrr GPR:$Rt, GPR:$Rt2))]> {
  // Instruction operands.
  bits<5> Dm;
  bits<4> Rt;
  bits<4> Rt2;

  // Encode instruction operands.
  let Inst{3-0}   = Dm{3-0};
  let Inst{5}     = Dm{4};
  let Inst{15-12} = Rt;
  let Inst{19-16} = Rt2;

  let Inst{7-6} = 0b00;

  // Some single precision VFP instructions may be executed on both NEON and VFP
  // pipelines.
  let D = VFPNeonDomain;
}

let neverHasSideEffects = 1 in
def VMOVSRR : AVConv5I<0b11000100, 0b1010,
                       (outs SPR:$dst1, SPR:$dst2), (ins GPR:$src1, GPR:$src2),
                       IIC_fpMOVID, "vmov", "\t$dst1, $dst2, $src1, $src2",
                       [/* For disassembly only; pattern left blank */]> {
  // Instruction operands.
  bits<5> dst1;
  bits<4> src1;
  bits<4> src2;

  // Encode instruction operands.
  let Inst{3-0}   = dst1{4-1};
  let Inst{5}     = dst1{0};
  let Inst{15-12} = src1;
  let Inst{19-16} = src2;

  let Inst{7-6} = 0b00;

  // Some single precision VFP instructions may be executed on both NEON and VFP
  // pipelines.
  let D = VFPNeonDomain;

  let DecoderMethod = "DecodeVMOVSRR";
}

// FMRDH: SPR -> GPR
// FMRDL: SPR -> GPR
// FMRRS: SPR -> GPR
// FMRX: SPR system reg -> GPR
// FMSRR: GPR -> SPR
// FMXR: GPR -> VFP system reg


// Int -> FP:

class AVConv1IDs_Encode<bits<5> opcod1, bits<2> opcod2, bits<4> opcod3,
                        bits<4> opcod4, dag oops, dag iops,
                        InstrItinClass itin, string opc, string asm,
                        list<dag> pattern>
  : AVConv1I<opcod1, opcod2, opcod3, opcod4, oops, iops, itin, opc, asm,
             pattern> {
  // Instruction operands.
  bits<5> Dd;
  bits<5> Sm;

  // Encode instruction operands.
  let Inst{3-0}   = Sm{4-1};
  let Inst{5}     = Sm{0};
  let Inst{15-12} = Dd{3-0};
  let Inst{22}    = Dd{4};

  let Predicates = [HasVFP2, HasDPVFP];
}

class AVConv1InSs_Encode<bits<5> opcod1, bits<2> opcod2, bits<4> opcod3,
                         bits<4> opcod4, dag oops, dag iops, InstrItinClass itin,
                         string opc, string asm, list<dag> pattern>
  : AVConv1In<opcod1, opcod2, opcod3, opcod4, oops, iops, itin, opc, asm,
              pattern> {
  // Instruction operands.
  bits<5> Sd;
  bits<5> Sm;

  // Encode instruction operands.
  let Inst{3-0}   = Sm{4-1};
  let Inst{5}     = Sm{0};
  let Inst{15-12} = Sd{4-1};
  let Inst{22}    = Sd{0};
}

def VSITOD : AVConv1IDs_Encode<0b11101, 0b11, 0b1000, 0b1011,
                               (outs DPR:$Dd), (ins SPR:$Sm),
                               IIC_fpCVTID, "vcvt", ".f64.s32\t$Dd, $Sm",
                               [(set DPR:$Dd, (f64 (arm_sitof SPR:$Sm)))]> {
  let Inst{7} = 1; // s32
}

def VSITOS : AVConv1InSs_Encode<0b11101, 0b11, 0b1000, 0b1010,
                                (outs SPR:$Sd),(ins SPR:$Sm),
                                IIC_fpCVTIS, "vcvt", ".f32.s32\t$Sd, $Sm",
                                [(set SPR:$Sd, (arm_sitof SPR:$Sm))]> {
  let Inst{7} = 1; // s32

  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}

def VUITOD : AVConv1IDs_Encode<0b11101, 0b11, 0b1000, 0b1011,
                               (outs DPR:$Dd), (ins SPR:$Sm),
                               IIC_fpCVTID, "vcvt", ".f64.u32\t$Dd, $Sm",
                               [(set DPR:$Dd, (f64 (arm_uitof SPR:$Sm)))]> {
  let Inst{7} = 0; // u32
}

def VUITOS : AVConv1InSs_Encode<0b11101, 0b11, 0b1000, 0b1010,
                                (outs SPR:$Sd), (ins SPR:$Sm),
                                IIC_fpCVTIS, "vcvt", ".f32.u32\t$Sd, $Sm",
                                [(set SPR:$Sd, (arm_uitof SPR:$Sm))]> {
  let Inst{7} = 0; // u32

  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}

// FP -> Int:

class AVConv1IsD_Encode<bits<5> opcod1, bits<2> opcod2, bits<4> opcod3,
                        bits<4> opcod4, dag oops, dag iops,
                        InstrItinClass itin, string opc, string asm,
                        list<dag> pattern>
  : AVConv1I<opcod1, opcod2, opcod3, opcod4, oops, iops, itin, opc, asm,
             pattern> {
  // Instruction operands.
  bits<5> Sd;
  bits<5> Dm;

  // Encode instruction operands.
  let Inst{3-0}   = Dm{3-0};
  let Inst{5}     = Dm{4};
  let Inst{15-12} = Sd{4-1};
  let Inst{22}    = Sd{0};

  let Predicates = [HasVFP2, HasDPVFP];
}

class AVConv1InsS_Encode<bits<5> opcod1, bits<2> opcod2, bits<4> opcod3,
                         bits<4> opcod4, dag oops, dag iops,
                         InstrItinClass itin, string opc, string asm,
                         list<dag> pattern>
  : AVConv1In<opcod1, opcod2, opcod3, opcod4, oops, iops, itin, opc, asm,
              pattern> {
  // Instruction operands.
  bits<5> Sd;
  bits<5> Sm;

  // Encode instruction operands.
  let Inst{3-0}   = Sm{4-1};
  let Inst{5}     = Sm{0};
  let Inst{15-12} = Sd{4-1};
  let Inst{22}    = Sd{0};
}

// Always set Z bit in the instruction, i.e. "round towards zero" variants.
def VTOSIZD : AVConv1IsD_Encode<0b11101, 0b11, 0b1101, 0b1011,
                                (outs SPR:$Sd), (ins DPR:$Dm),
                                IIC_fpCVTDI, "vcvt", ".s32.f64\t$Sd, $Dm",
                                [(set SPR:$Sd, (arm_ftosi (f64 DPR:$Dm)))]> {
  let Inst{7} = 1; // Z bit
}

def VTOSIZS : AVConv1InsS_Encode<0b11101, 0b11, 0b1101, 0b1010,
                                 (outs SPR:$Sd), (ins SPR:$Sm),
                                 IIC_fpCVTSI, "vcvt", ".s32.f32\t$Sd, $Sm",
                                 [(set SPR:$Sd, (arm_ftosi SPR:$Sm))]> {
  let Inst{7} = 1; // Z bit

  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}

def VTOUIZD : AVConv1IsD_Encode<0b11101, 0b11, 0b1100, 0b1011,
                                (outs SPR:$Sd), (ins DPR:$Dm),
                                IIC_fpCVTDI, "vcvt", ".u32.f64\t$Sd, $Dm",
                                [(set SPR:$Sd, (arm_ftoui (f64 DPR:$Dm)))]> {
  let Inst{7} = 1; // Z bit
}

def VTOUIZS : AVConv1InsS_Encode<0b11101, 0b11, 0b1100, 0b1010,
                                 (outs SPR:$Sd), (ins SPR:$Sm),
                                 IIC_fpCVTSI, "vcvt", ".u32.f32\t$Sd, $Sm",
                                 [(set SPR:$Sd, (arm_ftoui SPR:$Sm))]> {
  let Inst{7} = 1; // Z bit

  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}

// And the Z bit '0' variants, i.e. use the rounding mode specified by FPSCR.
let Uses = [FPSCR] in {
// FIXME: Verify encoding after integrated assembler is working.
def VTOSIRD : AVConv1IsD_Encode<0b11101, 0b11, 0b1101, 0b1011,
                                (outs SPR:$Sd), (ins DPR:$Dm),
                                IIC_fpCVTDI, "vcvtr", ".s32.f64\t$Sd, $Dm",
                                [(set SPR:$Sd, (int_arm_vcvtr (f64 DPR:$Dm)))]>{
  let Inst{7} = 0; // Z bit
}

def VTOSIRS : AVConv1InsS_Encode<0b11101, 0b11, 0b1101, 0b1010,
                                 (outs SPR:$Sd), (ins SPR:$Sm),
                                 IIC_fpCVTSI, "vcvtr", ".s32.f32\t$Sd, $Sm",
                                 [(set SPR:$Sd, (int_arm_vcvtr SPR:$Sm))]> {
  let Inst{7} = 0; // Z bit
}

def VTOUIRD : AVConv1IsD_Encode<0b11101, 0b11, 0b1100, 0b1011,
                                (outs SPR:$Sd), (ins DPR:$Dm),
                                IIC_fpCVTDI, "vcvtr", ".u32.f64\t$Sd, $Dm",
                                [(set SPR:$Sd, (int_arm_vcvtru(f64 DPR:$Dm)))]>{
  let Inst{7} = 0; // Z bit
}

def VTOUIRS : AVConv1InsS_Encode<0b11101, 0b11, 0b1100, 0b1010,
                                 (outs SPR:$Sd), (ins SPR:$Sm),
                                 IIC_fpCVTSI, "vcvtr", ".u32.f32\t$Sd, $Sm",
                                 [(set SPR:$Sd, (int_arm_vcvtru SPR:$Sm))]> {
  let Inst{7} = 0; // Z bit
}
}

// Convert between floating-point and fixed-point
// Data type for fixed-point naming convention:
//   S16 (U=0, sx=0) -> SH
//   U16 (U=1, sx=0) -> UH
//   S32 (U=0, sx=1) -> SL
//   U32 (U=1, sx=1) -> UL

let Constraints = "$a = $dst" in {

// FP to Fixed-Point:

// Single Precision register
class AVConv1XInsS_Encode<bits<5> op1, bits<2> op2, bits<4> op3, bits<4> op4,
                          bit op5, dag oops, dag iops, InstrItinClass itin,
                          string opc, string asm, list<dag> pattern>
  : AVConv1XI<op1, op2, op3, op4, op5, oops, iops, itin, opc, asm, pattern>,
    Sched<[WriteCvtFP]> {
  bits<5> dst;
  // if dp_operation then UInt(D:Vd) else UInt(Vd:D);
  let Inst{22}    = dst{0};
  let Inst{15-12} = dst{4-1};
}

// Double Precision register
class AVConv1XInsD_Encode<bits<5> op1, bits<2> op2, bits<4> op3, bits<4> op4,
                          bit op5, dag oops, dag iops, InstrItinClass itin,
                          string opc, string asm, list<dag> pattern>
  : AVConv1XI<op1, op2, op3, op4, op5, oops, iops, itin, opc, asm, pattern>,
    Sched<[WriteCvtFP]> {
  bits<5> dst;
  // if dp_operation then UInt(D:Vd) else UInt(Vd:D);
  let Inst{22}    = dst{4};
  let Inst{15-12} = dst{3-0};

  let Predicates = [HasVFP2, HasDPVFP];
}

def VTOSHS : AVConv1XInsS_Encode<0b11101, 0b11, 0b1110, 0b1010, 0,
                                 (outs SPR:$dst), (ins SPR:$a, fbits16:$fbits),
                                 IIC_fpCVTSI, "vcvt", ".s16.f32\t$dst, $a, $fbits", []> {
  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}

def VTOUHS : AVConv1XInsS_Encode<0b11101, 0b11, 0b1111, 0b1010, 0,
                                 (outs SPR:$dst), (ins SPR:$a, fbits16:$fbits),
                                 IIC_fpCVTSI, "vcvt", ".u16.f32\t$dst, $a, $fbits", []> {
  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}

def VTOSLS : AVConv1XInsS_Encode<0b11101, 0b11, 0b1110, 0b1010, 1,
                                 (outs SPR:$dst), (ins SPR:$a, fbits32:$fbits),
                                 IIC_fpCVTSI, "vcvt", ".s32.f32\t$dst, $a, $fbits", []> {
  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}

def VTOULS : AVConv1XInsS_Encode<0b11101, 0b11, 0b1111, 0b1010, 1,
                                 (outs SPR:$dst), (ins SPR:$a, fbits32:$fbits),
                                 IIC_fpCVTSI, "vcvt", ".u32.f32\t$dst, $a, $fbits", []> {
  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}

def VTOSHD : AVConv1XInsD_Encode<0b11101, 0b11, 0b1110, 0b1011, 0,
                                 (outs DPR:$dst), (ins DPR:$a, fbits16:$fbits),
                                 IIC_fpCVTDI, "vcvt", ".s16.f64\t$dst, $a, $fbits", []>;

def VTOUHD : AVConv1XInsD_Encode<0b11101, 0b11, 0b1111, 0b1011, 0,
                                 (outs DPR:$dst), (ins DPR:$a, fbits16:$fbits),
                                 IIC_fpCVTDI, "vcvt", ".u16.f64\t$dst, $a, $fbits", []>;

def VTOSLD : AVConv1XInsD_Encode<0b11101, 0b11, 0b1110, 0b1011, 1,
                                 (outs DPR:$dst), (ins DPR:$a, fbits32:$fbits),
                                 IIC_fpCVTDI, "vcvt", ".s32.f64\t$dst, $a, $fbits", []>;

def VTOULD : AVConv1XInsD_Encode<0b11101, 0b11, 0b1111, 0b1011, 1,
                                 (outs DPR:$dst), (ins DPR:$a, fbits32:$fbits),
                                 IIC_fpCVTDI, "vcvt", ".u32.f64\t$dst, $a, $fbits", []>;

// Fixed-Point to FP:

def VSHTOS : AVConv1XInsS_Encode<0b11101, 0b11, 0b1010, 0b1010, 0,
                                 (outs SPR:$dst), (ins SPR:$a, fbits16:$fbits),
                                 IIC_fpCVTIS, "vcvt", ".f32.s16\t$dst, $a, $fbits", []> {
  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}

def VUHTOS : AVConv1XInsS_Encode<0b11101, 0b11, 0b1011, 0b1010, 0,
                                 (outs SPR:$dst), (ins SPR:$a, fbits16:$fbits),
                                 IIC_fpCVTIS, "vcvt", ".f32.u16\t$dst, $a, $fbits", []> {
  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}

def VSLTOS : AVConv1XInsS_Encode<0b11101, 0b11, 0b1010, 0b1010, 1,
                                 (outs SPR:$dst), (ins SPR:$a, fbits32:$fbits),
                                 IIC_fpCVTIS, "vcvt", ".f32.s32\t$dst, $a, $fbits", []> {
  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}

def VULTOS : AVConv1XInsS_Encode<0b11101, 0b11, 0b1011, 0b1010, 1,
                                 (outs SPR:$dst), (ins SPR:$a, fbits32:$fbits),
                                 IIC_fpCVTIS, "vcvt", ".f32.u32\t$dst, $a, $fbits", []> {
  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}

def VSHTOD : AVConv1XInsD_Encode<0b11101, 0b11, 0b1010, 0b1011, 0,
                                 (outs DPR:$dst), (ins DPR:$a, fbits16:$fbits),
                                 IIC_fpCVTID, "vcvt", ".f64.s16\t$dst, $a, $fbits", []>;

def VUHTOD : AVConv1XInsD_Encode<0b11101, 0b11, 0b1011, 0b1011, 0,
                                 (outs DPR:$dst), (ins DPR:$a, fbits16:$fbits),
                                 IIC_fpCVTID, "vcvt", ".f64.u16\t$dst, $a, $fbits", []>;

def VSLTOD : AVConv1XInsD_Encode<0b11101, 0b11, 0b1010, 0b1011, 1,
                                 (outs DPR:$dst), (ins DPR:$a, fbits32:$fbits),
                                 IIC_fpCVTID, "vcvt", ".f64.s32\t$dst, $a, $fbits", []>;

def VULTOD : AVConv1XInsD_Encode<0b11101, 0b11, 0b1011, 0b1011, 1,
                                 (outs DPR:$dst), (ins DPR:$a, fbits32:$fbits),
                                 IIC_fpCVTID, "vcvt", ".f64.u32\t$dst, $a, $fbits", []>;

} // End of 'let Constraints = "$a = $dst" in'

//===----------------------------------------------------------------------===//
// FP Multiply-Accumulate Operations.
//

def VMLAD : ADbI<0b11100, 0b00, 0, 0,
                 (outs DPR:$Dd), (ins DPR:$Ddin, DPR:$Dn, DPR:$Dm),
                 IIC_fpMAC64, "vmla", ".f64\t$Dd, $Dn, $Dm",
                 [(set DPR:$Dd, (fadd_mlx (fmul_su DPR:$Dn, DPR:$Dm),
                                          (f64 DPR:$Ddin)))]>,
              RegConstraint<"$Ddin = $Dd">,
              Requires<[HasVFP2,HasDPVFP,UseFPVMLx,DontUseFusedMAC]>;

def VMLAS : ASbIn<0b11100, 0b00, 0, 0,
                  (outs SPR:$Sd), (ins SPR:$Sdin, SPR:$Sn, SPR:$Sm),
                  IIC_fpMAC32, "vmla", ".f32\t$Sd, $Sn, $Sm",
                  [(set SPR:$Sd, (fadd_mlx (fmul_su SPR:$Sn, SPR:$Sm),
                                           SPR:$Sdin))]>,
              RegConstraint<"$Sdin = $Sd">,
              Requires<[HasVFP2,DontUseNEONForFP,UseFPVMLx,DontUseFusedMAC]> {
  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}

def : Pat<(fadd_mlx DPR:$dstin, (fmul_su DPR:$a, (f64 DPR:$b))),
          (VMLAD DPR:$dstin, DPR:$a, DPR:$b)>,
          Requires<[HasVFP2,HasDPVFP,UseFPVMLx,DontUseFusedMAC]>;
def : Pat<(fadd_mlx SPR:$dstin, (fmul_su SPR:$a, SPR:$b)),
          (VMLAS SPR:$dstin, SPR:$a, SPR:$b)>,
          Requires<[HasVFP2,DontUseNEONForFP, UseFPVMLx,DontUseFusedMAC]>;

def VMLSD : ADbI<0b11100, 0b00, 1, 0,
                 (outs DPR:$Dd), (ins DPR:$Ddin, DPR:$Dn, DPR:$Dm),
                 IIC_fpMAC64, "vmls", ".f64\t$Dd, $Dn, $Dm",
                 [(set DPR:$Dd, (fadd_mlx (fneg (fmul_su DPR:$Dn,DPR:$Dm)),
                                          (f64 DPR:$Ddin)))]>,
              RegConstraint<"$Ddin = $Dd">,
              Requires<[HasVFP2,HasDPVFP,UseFPVMLx,DontUseFusedMAC]>;

def VMLSS : ASbIn<0b11100, 0b00, 1, 0,
                  (outs SPR:$Sd), (ins SPR:$Sdin, SPR:$Sn, SPR:$Sm),
                  IIC_fpMAC32, "vmls", ".f32\t$Sd, $Sn, $Sm",
                  [(set SPR:$Sd, (fadd_mlx (fneg (fmul_su SPR:$Sn, SPR:$Sm)),
                                           SPR:$Sdin))]>,
              RegConstraint<"$Sdin = $Sd">,
              Requires<[HasVFP2,DontUseNEONForFP,UseFPVMLx,DontUseFusedMAC]> {
  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}

def : Pat<(fsub_mlx DPR:$dstin, (fmul_su DPR:$a, (f64 DPR:$b))),
          (VMLSD DPR:$dstin, DPR:$a, DPR:$b)>,
          Requires<[HasVFP2,HasDPVFP,UseFPVMLx,DontUseFusedMAC]>;
def : Pat<(fsub_mlx SPR:$dstin, (fmul_su SPR:$a, SPR:$b)),
          (VMLSS SPR:$dstin, SPR:$a, SPR:$b)>,
          Requires<[HasVFP2,DontUseNEONForFP,UseFPVMLx,DontUseFusedMAC]>;

def VNMLAD : ADbI<0b11100, 0b01, 1, 0,
                  (outs DPR:$Dd), (ins DPR:$Ddin, DPR:$Dn, DPR:$Dm),
                  IIC_fpMAC64, "vnmla", ".f64\t$Dd, $Dn, $Dm",
                  [(set DPR:$Dd,(fsub_mlx (fneg (fmul_su DPR:$Dn,DPR:$Dm)),
                                          (f64 DPR:$Ddin)))]>,
               RegConstraint<"$Ddin = $Dd">,
               Requires<[HasVFP2,HasDPVFP,UseFPVMLx,DontUseFusedMAC]>;

def VNMLAS : ASbI<0b11100, 0b01, 1, 0,
                  (outs SPR:$Sd), (ins SPR:$Sdin, SPR:$Sn, SPR:$Sm),
                  IIC_fpMAC32, "vnmla", ".f32\t$Sd, $Sn, $Sm",
                  [(set SPR:$Sd, (fsub_mlx (fneg (fmul_su SPR:$Sn, SPR:$Sm)),
                                           SPR:$Sdin))]>,
               RegConstraint<"$Sdin = $Sd">,
               Requires<[HasVFP2,DontUseNEONForFP,UseFPVMLx,DontUseFusedMAC]> {
  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}

def : Pat<(fsub_mlx (fneg (fmul_su DPR:$a, (f64 DPR:$b))), DPR:$dstin),
          (VNMLAD DPR:$dstin, DPR:$a, DPR:$b)>,
          Requires<[HasVFP2,HasDPVFP,UseFPVMLx,DontUseFusedMAC]>;
def : Pat<(fsub_mlx (fneg (fmul_su SPR:$a, SPR:$b)), SPR:$dstin),
          (VNMLAS SPR:$dstin, SPR:$a, SPR:$b)>,
          Requires<[HasVFP2,DontUseNEONForFP,UseFPVMLx,DontUseFusedMAC]>;

def VNMLSD : ADbI<0b11100, 0b01, 0, 0,
                  (outs DPR:$Dd), (ins DPR:$Ddin, DPR:$Dn, DPR:$Dm),
                  IIC_fpMAC64, "vnmls", ".f64\t$Dd, $Dn, $Dm",
                  [(set DPR:$Dd, (fsub_mlx (fmul_su DPR:$Dn, DPR:$Dm),
                                           (f64 DPR:$Ddin)))]>,
               RegConstraint<"$Ddin = $Dd">,
               Requires<[HasVFP2,HasDPVFP,UseFPVMLx,DontUseFusedMAC]>;

def VNMLSS : ASbI<0b11100, 0b01, 0, 0,
                  (outs SPR:$Sd), (ins SPR:$Sdin, SPR:$Sn, SPR:$Sm),
                  IIC_fpMAC32, "vnmls", ".f32\t$Sd, $Sn, $Sm",
                  [(set SPR:$Sd, (fsub_mlx (fmul_su SPR:$Sn, SPR:$Sm), SPR:$Sdin))]>,
               RegConstraint<"$Sdin = $Sd">,
               Requires<[HasVFP2,DontUseNEONForFP,UseFPVMLx,DontUseFusedMAC]> {
  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}

def : Pat<(fsub_mlx (fmul_su DPR:$a, (f64 DPR:$b)), DPR:$dstin),
          (VNMLSD DPR:$dstin, DPR:$a, DPR:$b)>,
          Requires<[HasVFP2,HasDPVFP,UseFPVMLx,DontUseFusedMAC]>;
def : Pat<(fsub_mlx (fmul_su SPR:$a, SPR:$b), SPR:$dstin),
          (VNMLSS SPR:$dstin, SPR:$a, SPR:$b)>,
          Requires<[HasVFP2,DontUseNEONForFP,UseFPVMLx,DontUseFusedMAC]>;

//===----------------------------------------------------------------------===//
// Fused FP Multiply-Accumulate Operations.
//
def VFMAD : ADbI<0b11101, 0b10, 0, 0,
                 (outs DPR:$Dd), (ins DPR:$Ddin, DPR:$Dn, DPR:$Dm),
                 IIC_fpFMAC64, "vfma", ".f64\t$Dd, $Dn, $Dm",
                 [(set DPR:$Dd, (fadd_mlx (fmul_su DPR:$Dn, DPR:$Dm),
                                          (f64 DPR:$Ddin)))]>,
              RegConstraint<"$Ddin = $Dd">,
              Requires<[HasVFP4,HasDPVFP,UseFusedMAC]>;

def VFMAS : ASbIn<0b11101, 0b10, 0, 0,
                  (outs SPR:$Sd), (ins SPR:$Sdin, SPR:$Sn, SPR:$Sm),
                  IIC_fpFMAC32, "vfma", ".f32\t$Sd, $Sn, $Sm",
                  [(set SPR:$Sd, (fadd_mlx (fmul_su SPR:$Sn, SPR:$Sm),
                                           SPR:$Sdin))]>,
              RegConstraint<"$Sdin = $Sd">,
              Requires<[HasVFP4,DontUseNEONForFP,UseFusedMAC]> {
  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines.
}

def : Pat<(fadd_mlx DPR:$dstin, (fmul_su DPR:$a, (f64 DPR:$b))),
          (VFMAD DPR:$dstin, DPR:$a, DPR:$b)>,
          Requires<[HasVFP4,HasDPVFP,UseFusedMAC]>;
def : Pat<(fadd_mlx SPR:$dstin, (fmul_su SPR:$a, SPR:$b)),
          (VFMAS SPR:$dstin, SPR:$a, SPR:$b)>,
          Requires<[HasVFP4,DontUseNEONForFP,UseFusedMAC]>;

// Match @llvm.fma.* intrinsics
// (fma x, y, z) -> (vfma z, x, y)
def : Pat<(f64 (fma DPR:$Dn, DPR:$Dm, DPR:$Ddin)),
          (VFMAD DPR:$Ddin, DPR:$Dn, DPR:$Dm)>,
      Requires<[HasVFP4,HasDPVFP]>;
def : Pat<(f32 (fma SPR:$Sn, SPR:$Sm, SPR:$Sdin)),
          (VFMAS SPR:$Sdin, SPR:$Sn, SPR:$Sm)>,
      Requires<[HasVFP4]>;

def VFMSD : ADbI<0b11101, 0b10, 1, 0,
                 (outs DPR:$Dd), (ins DPR:$Ddin, DPR:$Dn, DPR:$Dm),
                 IIC_fpFMAC64, "vfms", ".f64\t$Dd, $Dn, $Dm",
                 [(set DPR:$Dd, (fadd_mlx (fneg (fmul_su DPR:$Dn,DPR:$Dm)),
                                          (f64 DPR:$Ddin)))]>,
              RegConstraint<"$Ddin = $Dd">,
              Requires<[HasVFP4,HasDPVFP,UseFusedMAC]>;

def VFMSS : ASbIn<0b11101, 0b10, 1, 0,
                  (outs SPR:$Sd), (ins SPR:$Sdin, SPR:$Sn, SPR:$Sm),
                  IIC_fpFMAC32, "vfms", ".f32\t$Sd, $Sn, $Sm",
                  [(set SPR:$Sd, (fadd_mlx (fneg (fmul_su SPR:$Sn, SPR:$Sm)),
                                           SPR:$Sdin))]>,
              RegConstraint<"$Sdin = $Sd">,
              Requires<[HasVFP4,DontUseNEONForFP,UseFusedMAC]> {
  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines.
}

def : Pat<(fsub_mlx DPR:$dstin, (fmul_su DPR:$a, (f64 DPR:$b))),
          (VFMSD DPR:$dstin, DPR:$a, DPR:$b)>,
          Requires<[HasVFP4,HasDPVFP,UseFusedMAC]>;
def : Pat<(fsub_mlx SPR:$dstin, (fmul_su SPR:$a, SPR:$b)),
          (VFMSS SPR:$dstin, SPR:$a, SPR:$b)>,
          Requires<[HasVFP4,DontUseNEONForFP,UseFusedMAC]>;

// Match @llvm.fma.* intrinsics
// (fma (fneg x), y, z) -> (vfms z, x, y)
def : Pat<(f64 (fma (fneg DPR:$Dn), DPR:$Dm, DPR:$Ddin)),
          (VFMSD DPR:$Ddin, DPR:$Dn, DPR:$Dm)>,
      Requires<[HasVFP4,HasDPVFP]>;
def : Pat<(f32 (fma (fneg SPR:$Sn), SPR:$Sm, SPR:$Sdin)),
          (VFMSS SPR:$Sdin, SPR:$Sn, SPR:$Sm)>,
      Requires<[HasVFP4]>;
// (fma x, (fneg y), z) -> (vfms z, x, y)
def : Pat<(f64 (fma DPR:$Dn, (fneg DPR:$Dm), DPR:$Ddin)),
          (VFMSD DPR:$Ddin, DPR:$Dn, DPR:$Dm)>,
      Requires<[HasVFP4,HasDPVFP]>;
def : Pat<(f32 (fma SPR:$Sn, (fneg SPR:$Sm), SPR:$Sdin)),
          (VFMSS SPR:$Sdin, SPR:$Sn, SPR:$Sm)>,
      Requires<[HasVFP4]>;

def VFNMAD : ADbI<0b11101, 0b01, 1, 0,
                  (outs DPR:$Dd), (ins DPR:$Ddin, DPR:$Dn, DPR:$Dm),
                  IIC_fpFMAC64, "vfnma", ".f64\t$Dd, $Dn, $Dm",
                  [(set DPR:$Dd,(fsub_mlx (fneg (fmul_su DPR:$Dn,DPR:$Dm)),
                                          (f64 DPR:$Ddin)))]>,
               RegConstraint<"$Ddin = $Dd">,
               Requires<[HasVFP4,HasDPVFP,UseFusedMAC]>;

def VFNMAS : ASbI<0b11101, 0b01, 1, 0,
                  (outs SPR:$Sd), (ins SPR:$Sdin, SPR:$Sn, SPR:$Sm),
                  IIC_fpFMAC32, "vfnma", ".f32\t$Sd, $Sn, $Sm",
                  [(set SPR:$Sd, (fsub_mlx (fneg (fmul_su SPR:$Sn, SPR:$Sm)),
                                           SPR:$Sdin))]>,
               RegConstraint<"$Sdin = $Sd">,
               Requires<[HasVFP4,DontUseNEONForFP,UseFusedMAC]> {
  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines.
}

def : Pat<(fsub_mlx (fneg (fmul_su DPR:$a, (f64 DPR:$b))), DPR:$dstin),
          (VFNMAD DPR:$dstin, DPR:$a, DPR:$b)>,
          Requires<[HasVFP4,HasDPVFP,UseFusedMAC]>;
def : Pat<(fsub_mlx (fneg (fmul_su SPR:$a, SPR:$b)), SPR:$dstin),
          (VFNMAS SPR:$dstin, SPR:$a, SPR:$b)>,
          Requires<[HasVFP4,DontUseNEONForFP,UseFusedMAC]>;

// Match @llvm.fma.* intrinsics
// (fneg (fma x, y, z)) -> (vfnma z, x, y)
def : Pat<(fneg (fma (f64 DPR:$Dn), (f64 DPR:$Dm), (f64 DPR:$Ddin))),
          (VFNMAD DPR:$Ddin, DPR:$Dn, DPR:$Dm)>,
      Requires<[HasVFP4,HasDPVFP]>;
def : Pat<(fneg (fma (f32 SPR:$Sn), (f32 SPR:$Sm), (f32 SPR:$Sdin))),
          (VFNMAS SPR:$Sdin, SPR:$Sn, SPR:$Sm)>,
      Requires<[HasVFP4]>;
// (fma (fneg x), y, (fneg z)) -> (vfnma z, x, y)
def : Pat<(f64 (fma (fneg DPR:$Dn), DPR:$Dm, (fneg DPR:$Ddin))),
          (VFNMAD DPR:$Ddin, DPR:$Dn, DPR:$Dm)>,
      Requires<[HasVFP4,HasDPVFP]>;
def : Pat<(f32 (fma (fneg SPR:$Sn), SPR:$Sm, (fneg SPR:$Sdin))),
          (VFNMAS SPR:$Sdin, SPR:$Sn, SPR:$Sm)>,
      Requires<[HasVFP4]>;

def VFNMSD : ADbI<0b11101, 0b01, 0, 0,
                  (outs DPR:$Dd), (ins DPR:$Ddin, DPR:$Dn, DPR:$Dm),
                  IIC_fpFMAC64, "vfnms", ".f64\t$Dd, $Dn, $Dm",
                  [(set DPR:$Dd, (fsub_mlx (fmul_su DPR:$Dn, DPR:$Dm),
                                           (f64 DPR:$Ddin)))]>,
               RegConstraint<"$Ddin = $Dd">,
               Requires<[HasVFP4,HasDPVFP,UseFusedMAC]>;

def VFNMSS : ASbI<0b11101, 0b01, 0, 0,
                  (outs SPR:$Sd), (ins SPR:$Sdin, SPR:$Sn, SPR:$Sm),
                  IIC_fpFMAC32, "vfnms", ".f32\t$Sd, $Sn, $Sm",
                  [(set SPR:$Sd, (fsub_mlx (fmul_su SPR:$Sn, SPR:$Sm), SPR:$Sdin))]>,
               RegConstraint<"$Sdin = $Sd">,
               Requires<[HasVFP4,DontUseNEONForFP,UseFusedMAC]> {
  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines.
}

def : Pat<(fsub_mlx (fmul_su DPR:$a, (f64 DPR:$b)), DPR:$dstin),
          (VFNMSD DPR:$dstin, DPR:$a, DPR:$b)>,
          Requires<[HasVFP4,HasDPVFP,UseFusedMAC]>;
def : Pat<(fsub_mlx (fmul_su SPR:$a, SPR:$b), SPR:$dstin),
          (VFNMSS SPR:$dstin, SPR:$a, SPR:$b)>,
          Requires<[HasVFP4,DontUseNEONForFP,UseFusedMAC]>;

// Match @llvm.fma.* intrinsics

// (fma x, y, (fneg z)) -> (vfnms z, x, y)
def : Pat<(f64 (fma DPR:$Dn, DPR:$Dm, (fneg DPR:$Ddin))),
          (VFNMSD DPR:$Ddin, DPR:$Dn, DPR:$Dm)>,
      Requires<[HasVFP4,HasDPVFP]>;
def : Pat<(f32 (fma SPR:$Sn, SPR:$Sm, (fneg SPR:$Sdin))),
          (VFNMSS SPR:$Sdin, SPR:$Sn, SPR:$Sm)>,
      Requires<[HasVFP4]>;
// (fneg (fma (fneg x), y, z)) -> (vfnms z, x, y)
def : Pat<(fneg (f64 (fma (fneg DPR:$Dn), DPR:$Dm, DPR:$Ddin))),
          (VFNMSD DPR:$Ddin, DPR:$Dn, DPR:$Dm)>,
      Requires<[HasVFP4,HasDPVFP]>;
def : Pat<(fneg (f32 (fma (fneg SPR:$Sn), SPR:$Sm, SPR:$Sdin))),
          (VFNMSS SPR:$Sdin, SPR:$Sn, SPR:$Sm)>,
      Requires<[HasVFP4]>;
// (fneg (fma x, (fneg y), z)) -> (vfnms z, x, y)
def : Pat<(fneg (f64 (fma DPR:$Dn, (fneg DPR:$Dm), DPR:$Ddin))),
          (VFNMSD DPR:$Ddin, DPR:$Dn, DPR:$Dm)>,
      Requires<[HasVFP4,HasDPVFP]>;
def : Pat<(fneg (f32 (fma SPR:$Sn, (fneg SPR:$Sm), SPR:$Sdin))),
          (VFNMSS SPR:$Sdin, SPR:$Sn, SPR:$Sm)>,
      Requires<[HasVFP4]>;

//===----------------------------------------------------------------------===//
// FP Conditional moves.
//

let neverHasSideEffects = 1 in {
def VMOVDcc : PseudoInst<(outs DPR:$Dd), (ins DPR:$Dn, DPR:$Dm, cmovpred:$p),
                         IIC_fpUNA64,
                         [(set (f64 DPR:$Dd),
                               (ARMcmov DPR:$Dn, DPR:$Dm, cmovpred:$p))]>,
              RegConstraint<"$Dn = $Dd">, Requires<[HasVFP2,HasDPVFP]>;

def VMOVScc : PseudoInst<(outs SPR:$Sd), (ins SPR:$Sn, SPR:$Sm, cmovpred:$p),
                         IIC_fpUNA32,
                         [(set (f32 SPR:$Sd),
                               (ARMcmov SPR:$Sn, SPR:$Sm, cmovpred:$p))]>,
              RegConstraint<"$Sn = $Sd">, Requires<[HasVFP2]>;
} // neverHasSideEffects

//===----------------------------------------------------------------------===//
// Move from VFP System Register to ARM core register.
//

class MovFromVFP<bits<4> opc19_16, dag oops, dag iops, string opc, string asm,
                 list<dag> pattern>:
  VFPAI<oops, iops, VFPMiscFrm, IIC_fpSTAT, opc, asm, pattern> {

  // Instruction operand.
  bits<4> Rt;

  let Inst{27-20} = 0b11101111;
  let Inst{19-16} = opc19_16;
  let Inst{15-12} = Rt;
  let Inst{11-8}  = 0b1010;
  let Inst{7}     = 0;
  let Inst{6-5}   = 0b00;
  let Inst{4}     = 1;
  let Inst{3-0}   = 0b0000;
}

// APSR is the application level alias of CPSR. This moves the FPSCR N, Z, C, V
// flags to APSR.
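// That is, "vmrs APSR_nzcv, fpscr" copies the FPSCR condition flags produced
// by a preceding VCMP/VCMPE into the APSR, so ordinary predicated instructions
// can test the result of a floating-point comparison.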
let Defs = [CPSR], Uses = [FPSCR_NZCV], Rt = 0b1111 /* apsr_nzcv */ in
def FMSTAT : MovFromVFP<0b0001 /* fpscr */, (outs), (ins),
                        "vmrs", "\tAPSR_nzcv, fpscr", [(arm_fmstat)]>;

// Application level FPSCR -> GPR
let hasSideEffects = 1, Uses = [FPSCR] in
def VMRS : MovFromVFP<0b0001 /* fpscr */, (outs GPR:$Rt), (ins),
                      "vmrs", "\t$Rt, fpscr",
                      [(set GPR:$Rt, (int_arm_get_fpscr))]>;

// System level FPEXC, FPSID -> GPR
let Uses = [FPSCR] in {
  def VMRS_FPEXC : MovFromVFP<0b1000 /* fpexc */, (outs GPR:$Rt), (ins),
                              "vmrs", "\t$Rt, fpexc", []>;
  def VMRS_FPSID : MovFromVFP<0b0000 /* fpsid */, (outs GPR:$Rt), (ins),
                              "vmrs", "\t$Rt, fpsid", []>;
  def VMRS_MVFR0 : MovFromVFP<0b0111 /* mvfr0 */, (outs GPR:$Rt), (ins),
                              "vmrs", "\t$Rt, mvfr0", []>;
  def VMRS_MVFR1 : MovFromVFP<0b0110 /* mvfr1 */, (outs GPR:$Rt), (ins),
                              "vmrs", "\t$Rt, mvfr1", []>;
  def VMRS_MVFR2 : MovFromVFP<0b0101 /* mvfr2 */, (outs GPR:$Rt), (ins),
                              "vmrs", "\t$Rt, mvfr2", []>, Requires<[HasFPARMv8]>;
  def VMRS_FPINST : MovFromVFP<0b1001 /* fpinst */, (outs GPR:$Rt), (ins),
                               "vmrs", "\t$Rt, fpinst", []>;
  def VMRS_FPINST2 : MovFromVFP<0b1010 /* fpinst2 */, (outs GPR:$Rt), (ins),
                                "vmrs", "\t$Rt, fpinst2", []>;
}

//===----------------------------------------------------------------------===//
// Move from ARM core register to VFP System Register.
//

class MovToVFP<bits<4> opc19_16, dag oops, dag iops, string opc, string asm,
               list<dag> pattern>:
  VFPAI<oops, iops, VFPMiscFrm, IIC_fpSTAT, opc, asm, pattern> {

  // Instruction operand.
  bits<4> src;

  // Encode instruction operand.
  let Inst{15-12} = src;

  let Inst{27-20} = 0b11101110;
  let Inst{19-16} = opc19_16;
  let Inst{11-8}  = 0b1010;
  let Inst{7}     = 0;
  let Inst{4}     = 1;
}

let Defs = [FPSCR] in {
  // Application level GPR -> FPSCR
  def VMSR : MovToVFP<0b0001 /* fpscr */, (outs), (ins GPR:$src),
                      "vmsr", "\tfpscr, $src", [(int_arm_set_fpscr GPR:$src)]>;
  // System level GPR -> FPEXC
  def VMSR_FPEXC : MovToVFP<0b1000 /* fpexc */, (outs), (ins GPR:$src),
                            "vmsr", "\tfpexc, $src", []>;
  // System level GPR -> FPSID
  def VMSR_FPSID : MovToVFP<0b0000 /* fpsid */, (outs), (ins GPR:$src),
                            "vmsr", "\tfpsid, $src", []>;

  def VMSR_FPINST : MovToVFP<0b1001 /* fpinst */, (outs), (ins GPR:$src),
                             "vmsr", "\tfpinst, $src", []>;
  def VMSR_FPINST2 : MovToVFP<0b1010 /* fpinst2 */, (outs), (ins GPR:$src),
                              "vmsr", "\tfpinst2, $src", []>;
}

//===----------------------------------------------------------------------===//
// Misc.
//

// Materialize FP immediates. VFP3 only.
let isReMaterializable = 1 in {
def FCONSTD : VFPAI<(outs DPR:$Dd), (ins vfp_f64imm:$imm),
                    VFPMiscFrm, IIC_fpUNA64,
                    "vmov", ".f64\t$Dd, $imm",
                    [(set DPR:$Dd, vfp_f64imm:$imm)]>,
              Requires<[HasVFP3,HasDPVFP]> {
  bits<5> Dd;
  bits<8> imm;

  let Inst{27-23} = 0b11101;
  let Inst{22}    = Dd{4};
  let Inst{21-20} = 0b11;
  let Inst{19-16} = imm{7-4};
  let Inst{15-12} = Dd{3-0};
  let Inst{11-9}  = 0b101;
  let Inst{8}     = 1;          // Double precision.

//===----------------------------------------------------------------------===//
// Misc.
//

// Materialize FP immediates. VFP3 only.
let isReMaterializable = 1 in {
def FCONSTD : VFPAI<(outs DPR:$Dd), (ins vfp_f64imm:$imm),
                    VFPMiscFrm, IIC_fpUNA64,
                    "vmov", ".f64\t$Dd, $imm",
                    [(set DPR:$Dd, vfp_f64imm:$imm)]>,
              Requires<[HasVFP3,HasDPVFP]> {
  bits<5> Dd;
  bits<8> imm;

  let Inst{27-23} = 0b11101;
  let Inst{22}    = Dd{4};
  let Inst{21-20} = 0b11;
  let Inst{19-16} = imm{7-4};
  let Inst{15-12} = Dd{3-0};
  let Inst{11-9}  = 0b101;
  let Inst{8}     = 1;          // Double precision.
  let Inst{7-4}   = 0b0000;
  let Inst{3-0}   = imm{3-0};
}

def FCONSTS : VFPAI<(outs SPR:$Sd), (ins vfp_f32imm:$imm),
                    VFPMiscFrm, IIC_fpUNA32,
                    "vmov", ".f32\t$Sd, $imm",
                    [(set SPR:$Sd, vfp_f32imm:$imm)]>, Requires<[HasVFP3]> {
  bits<5> Sd;
  bits<8> imm;

  let Inst{27-23} = 0b11101;
  let Inst{22}    = Sd{0};
  let Inst{21-20} = 0b11;
  let Inst{19-16} = imm{7-4};
  let Inst{15-12} = Sd{4-1};
  let Inst{11-9}  = 0b101;
  let Inst{8}     = 0;          // Single precision.
  let Inst{7-4}   = 0b0000;
  let Inst{3-0}   = imm{3-0};
}
}

//===----------------------------------------------------------------------===//
// Assembler aliases.
//
// A few mnemonic aliases for pre-unified (pre-UAL) syntax. We don't guarantee
// to support them all, but supporting at least some of the basics keeps the
// assembler friendly.
def : VFP2MnemonicAlias<"flds", "vldr">;
def : VFP2MnemonicAlias<"fldd", "vldr">;
def : VFP2MnemonicAlias<"fmrs", "vmov">;
def : VFP2MnemonicAlias<"fmsr", "vmov">;
def : VFP2MnemonicAlias<"fsqrts", "vsqrt">;
def : VFP2MnemonicAlias<"fsqrtd", "vsqrt">;
def : VFP2MnemonicAlias<"fadds", "vadd.f32">;
def : VFP2MnemonicAlias<"faddd", "vadd.f64">;
def : VFP2MnemonicAlias<"fmrdd", "vmov">;
def : VFP2MnemonicAlias<"fmrds", "vmov">;
def : VFP2MnemonicAlias<"fmrrd", "vmov">;
def : VFP2MnemonicAlias<"fmdrr", "vmov">;
def : VFP2MnemonicAlias<"fmuls", "vmul.f32">;
def : VFP2MnemonicAlias<"fmuld", "vmul.f64">;
def : VFP2MnemonicAlias<"fnegs", "vneg.f32">;
def : VFP2MnemonicAlias<"fnegd", "vneg.f64">;
def : VFP2MnemonicAlias<"ftosizd", "vcvt.s32.f64">;
def : VFP2MnemonicAlias<"ftosid", "vcvtr.s32.f64">;
def : VFP2MnemonicAlias<"ftosizs", "vcvt.s32.f32">;
def : VFP2MnemonicAlias<"ftosis", "vcvtr.s32.f32">;
def : VFP2MnemonicAlias<"ftouizd", "vcvt.u32.f64">;
def : VFP2MnemonicAlias<"ftouid", "vcvtr.u32.f64">;
def : VFP2MnemonicAlias<"ftouizs", "vcvt.u32.f32">;
def : VFP2MnemonicAlias<"ftouis", "vcvtr.u32.f32">;
def : VFP2MnemonicAlias<"fsitod", "vcvt.f64.s32">;
def : VFP2MnemonicAlias<"fsitos", "vcvt.f32.s32">;
def : VFP2MnemonicAlias<"fuitod", "vcvt.f64.u32">;
def : VFP2MnemonicAlias<"fuitos", "vcvt.f32.u32">;
def : VFP2MnemonicAlias<"fsts", "vstr">;
def : VFP2MnemonicAlias<"fstd", "vstr">;
def : VFP2MnemonicAlias<"fmacd", "vmla.f64">;
def : VFP2MnemonicAlias<"fmacs", "vmla.f32">;
def : VFP2MnemonicAlias<"fcpys", "vmov.f32">;
def : VFP2MnemonicAlias<"fcpyd", "vmov.f64">;
def : VFP2MnemonicAlias<"fcmps", "vcmp.f32">;
def : VFP2MnemonicAlias<"fcmpd", "vcmp.f64">;
def : VFP2MnemonicAlias<"fdivs", "vdiv.f32">;
def : VFP2MnemonicAlias<"fdivd", "vdiv.f64">;
def : VFP2MnemonicAlias<"fmrx", "vmrs">;
def : VFP2MnemonicAlias<"fmxr", "vmsr">;
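
// Illustrative only: with the mnemonic aliases above, the assembler accepts
// pre-UAL spellings and maps them onto the UAL instructions defined earlier in
// this file, e.g.
//
//   fadds s0, s1, s2       @ accepted as:  vadd.f32 s0, s1, s2
//   flds  s0, [r0]         @ accepted as:  vldr     s0, [r0]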

// Be friendly and accept the old form of zero-compare.
def : VFP2DPInstAlias<"fcmpzd${p} $val", (VCMPZD DPR:$val, pred:$p)>;
def : VFP2InstAlias<"fcmpzs${p} $val", (VCMPZS SPR:$val, pred:$p)>;

def : VFP2InstAlias<"fmstat${p}", (FMSTAT pred:$p)>;
def : VFP2InstAlias<"fadds${p} $Sd, $Sn, $Sm",
                    (VADDS SPR:$Sd, SPR:$Sn, SPR:$Sm, pred:$p)>;
def : VFP2DPInstAlias<"faddd${p} $Dd, $Dn, $Dm",
                      (VADDD DPR:$Dd, DPR:$Dn, DPR:$Dm, pred:$p)>;
def : VFP2InstAlias<"fsubs${p} $Sd, $Sn, $Sm",
                    (VSUBS SPR:$Sd, SPR:$Sn, SPR:$Sm, pred:$p)>;
def : VFP2DPInstAlias<"fsubd${p} $Dd, $Dn, $Dm",
                      (VSUBD DPR:$Dd, DPR:$Dn, DPR:$Dm, pred:$p)>;

// No need for the size suffix on VSQRT. It's implied by the register classes.
def : VFP2InstAlias<"vsqrt${p} $Sd, $Sm", (VSQRTS SPR:$Sd, SPR:$Sm, pred:$p)>;
def : VFP2DPInstAlias<"vsqrt${p} $Dd, $Dm", (VSQRTD DPR:$Dd, DPR:$Dm, pred:$p)>;

// VLDR/VSTR accept an optional type suffix.
def : VFP2InstAlias<"vldr${p}.32 $Sd, $addr",
                    (VLDRS SPR:$Sd, addrmode5:$addr, pred:$p)>;
def : VFP2InstAlias<"vstr${p}.32 $Sd, $addr",
                    (VSTRS SPR:$Sd, addrmode5:$addr, pred:$p)>;
def : VFP2InstAlias<"vldr${p}.64 $Dd, $addr",
                    (VLDRD DPR:$Dd, addrmode5:$addr, pred:$p)>;
def : VFP2InstAlias<"vstr${p}.64 $Dd, $addr",
                    (VSTRD DPR:$Dd, addrmode5:$addr, pred:$p)>;

// VMOV can accept an optional data type suffix of 32 bits or less.
def : VFP2InstAlias<"vmov${p}.8 $Rt, $Sn",
                    (VMOVRS GPR:$Rt, SPR:$Sn, pred:$p)>;
def : VFP2InstAlias<"vmov${p}.16 $Rt, $Sn",
                    (VMOVRS GPR:$Rt, SPR:$Sn, pred:$p)>;
def : VFP2InstAlias<"vmov${p}.32 $Rt, $Sn",
                    (VMOVRS GPR:$Rt, SPR:$Sn, pred:$p)>;
def : VFP2InstAlias<"vmov${p}.8 $Sn, $Rt",
                    (VMOVSR SPR:$Sn, GPR:$Rt, pred:$p)>;
def : VFP2InstAlias<"vmov${p}.16 $Sn, $Rt",
                    (VMOVSR SPR:$Sn, GPR:$Rt, pred:$p)>;
def : VFP2InstAlias<"vmov${p}.32 $Sn, $Rt",
                    (VMOVSR SPR:$Sn, GPR:$Rt, pred:$p)>;

def : VFP2InstAlias<"vmov${p}.f64 $Rt, $Rt2, $Dn",
                    (VMOVRRD GPR:$Rt, GPR:$Rt2, DPR:$Dn, pred:$p)>;
def : VFP2InstAlias<"vmov${p}.f64 $Dn, $Rt, $Rt2",
                    (VMOVDRR DPR:$Dn, GPR:$Rt, GPR:$Rt2, pred:$p)>;

// VMOVS doesn't need the .f32 to disambiguate from the NEON encoding the way
// VMOVD does.
def : VFP2InstAlias<"vmov${p} $Sd, $Sm",
                    (VMOVS SPR:$Sd, SPR:$Sm, pred:$p)>;

// FCONSTD/FCONSTS aliases for vmov.f64/vmov.f32.
// These aliases provide added functionality over the vmov.f instructions by
// allowing users to write assembly containing encoded floating point constants
// (e.g. #0x70 vs #1.0). Without these aliases there is no way for the
// assembler to accept encoded fp constants (the equivalent fp-literal is
// accepted directly by the vmov.f instructions).
def : VFP3InstAlias<"fconstd${p} $Dd, $val",
                    (FCONSTD DPR:$Dd, vfp_f64imm:$val, pred:$p)>;
def : VFP3InstAlias<"fconsts${p} $Sd, $val",
                    (FCONSTS SPR:$Sd, vfp_f32imm:$val, pred:$p)>;
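
// Illustrative only: a concrete use of the aliases above. Taking the
// "#0x70 vs #1.0" example from the comment, #0x70 is the 8-bit VFP encoding
// of 1.0, so the two spellings below should assemble to the same FCONSTS
// instruction:
//
//   fconsts  s0, #0x70     @ encoded-immediate form enabled by the alias
//   vmov.f32 s0, #1.0      @ fp-literal form accepted by vmov.f32 directly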