ARMRegisterInfo.td revision 9fe2009956fc40f3aea46fb3c38dcfb61c4aca46
//===- ARMRegisterInfo.td - ARM Register defs --------------*- tablegen -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
//  Declarations that describe the ARM register file
//===----------------------------------------------------------------------===//

// Registers are identified with 4-bit ID numbers.
class ARMReg<bits<4> num, string n, list<Register> subregs = []> : Register<n> {
  field bits<4> Num;
  let Namespace = "ARM";
  let SubRegs = subregs;
}

// Floating-point registers need a wider (6-bit) numbering (S0-S31, D16-D31).
class ARMFReg<bits<6> num, string n> : Register<n> {
  field bits<6> Num;
  let Namespace = "ARM";
}

// Subregister indices.
let Namespace = "ARM" in {
// Note: Code depends on these having consecutive numbers.
def ssub_0  : SubRegIndex;
def ssub_1  : SubRegIndex;
def ssub_2  : SubRegIndex; // In a Q reg.
def ssub_3  : SubRegIndex;
def ssub_4  : SubRegIndex; // In a QQ reg.
def ssub_5  : SubRegIndex;
def ssub_6  : SubRegIndex;
def ssub_7  : SubRegIndex;
def ssub_8  : SubRegIndex; // In a QQQQ reg.
def ssub_9  : SubRegIndex;
def ssub_10 : SubRegIndex;
def ssub_11 : SubRegIndex;
def ssub_12 : SubRegIndex;
def ssub_13 : SubRegIndex;
def ssub_14 : SubRegIndex;
def ssub_15 : SubRegIndex;

def dsub_0 : SubRegIndex;
def dsub_1 : SubRegIndex;
def dsub_2 : SubRegIndex;
def dsub_3 : SubRegIndex;
def dsub_4 : SubRegIndex;
def dsub_5 : SubRegIndex;
def dsub_6 : SubRegIndex;
def dsub_7 : SubRegIndex;

def qsub_0 : SubRegIndex;
def qsub_1 : SubRegIndex;
def qsub_2 : SubRegIndex;
def qsub_3 : SubRegIndex;

def qqsub_0 : SubRegIndex;
def qqsub_1 : SubRegIndex;
}

// Integer registers
def R0  : ARMReg< 0, "r0">,  DwarfRegNum<[0]>;
def R1  : ARMReg< 1, "r1">,  DwarfRegNum<[1]>;
def R2  : ARMReg< 2, "r2">,  DwarfRegNum<[2]>;
def R3  : ARMReg< 3, "r3">,  DwarfRegNum<[3]>;
def R4  : ARMReg< 4, "r4">,  DwarfRegNum<[4]>;
def R5  : ARMReg< 5, "r5">,  DwarfRegNum<[5]>;
def R6  : ARMReg< 6, "r6">,  DwarfRegNum<[6]>;
def R7  : ARMReg< 7, "r7">,  DwarfRegNum<[7]>;
def R8  : ARMReg< 8, "r8">,  DwarfRegNum<[8]>;
def R9  : ARMReg< 9, "r9">,  DwarfRegNum<[9]>;
def R10 : ARMReg<10, "r10">, DwarfRegNum<[10]>;
def R11 : ARMReg<11, "r11">, DwarfRegNum<[11]>;
def R12 : ARMReg<12, "r12">, DwarfRegNum<[12]>;
def SP  : ARMReg<13, "sp">,  DwarfRegNum<[13]>;
def LR  : ARMReg<14, "lr">,  DwarfRegNum<[14]>;
def PC  : ARMReg<15, "pc">,  DwarfRegNum<[15]>;

// Float registers
def S0  : ARMFReg< 0, "s0">;  def S1  : ARMFReg< 1, "s1">;
def S2  : ARMFReg< 2, "s2">;  def S3  : ARMFReg< 3, "s3">;
def S4  : ARMFReg< 4, "s4">;  def S5  : ARMFReg< 5, "s5">;
def S6  : ARMFReg< 6, "s6">;  def S7  : ARMFReg< 7, "s7">;
def S8  : ARMFReg< 8, "s8">;  def S9  : ARMFReg< 9, "s9">;
def S10 : ARMFReg<10, "s10">; def S11 : ARMFReg<11, "s11">;
def S12 : ARMFReg<12, "s12">; def S13 : ARMFReg<13, "s13">;
def S14 : ARMFReg<14, "s14">; def S15 : ARMFReg<15, "s15">;
def S16 : ARMFReg<16, "s16">; def S17 : ARMFReg<17, "s17">;
def S18 : ARMFReg<18, "s18">; def S19 : ARMFReg<19, "s19">;
def S20 : ARMFReg<20, "s20">; def S21 : ARMFReg<21, "s21">;
def S22 : ARMFReg<22, "s22">; def S23 : ARMFReg<23, "s23">;
def S24 : ARMFReg<24, "s24">; def S25 : ARMFReg<25, "s25">;
def S26 : ARMFReg<26, "s26">; def S27 : ARMFReg<27, "s27">;
def S28 : ARMFReg<28, "s28">; def S29 : ARMFReg<29, "s29">;
def S30 : ARMFReg<30, "s30">; def S31 : ARMFReg<31, "s31">;

// Aliases of the F* registers used to hold 64-bit fp values (doubles)
let SubRegIndices = [ssub_0, ssub_1] in {
def D0  : ARMReg< 0,  "d0", [S0,  S1]>;
def D1  : ARMReg< 1,  "d1", [S2,  S3]>;
def D2  : ARMReg< 2,  "d2", [S4,  S5]>;
def D3  : ARMReg< 3,  "d3", [S6,  S7]>;
def D4  : ARMReg< 4,  "d4", [S8,  S9]>;
def D5  : ARMReg< 5,  "d5", [S10, S11]>;
def D6  : ARMReg< 6,  "d6", [S12, S13]>;
def D7  : ARMReg< 7,  "d7", [S14, S15]>;
def D8  : ARMReg< 8,  "d8", [S16, S17]>;
def D9  : ARMReg< 9,  "d9", [S18, S19]>;
def D10 : ARMReg<10, "d10", [S20, S21]>;
def D11 : ARMReg<11, "d11", [S22, S23]>;
def D12 : ARMReg<12, "d12", [S24, S25]>;
def D13 : ARMReg<13, "d13", [S26, S27]>;
def D14 : ARMReg<14, "d14", [S28, S29]>;
def D15 : ARMReg<15, "d15", [S30, S31]>;
}

// VFP3 defines 16 additional double registers
def D16 : ARMFReg<16, "d16">; def D17 : ARMFReg<17, "d17">;
def D18 : ARMFReg<18, "d18">; def D19 : ARMFReg<19, "d19">;
def D20 : ARMFReg<20, "d20">; def D21 : ARMFReg<21, "d21">;
def D22 : ARMFReg<22, "d22">; def D23 : ARMFReg<23, "d23">;
def D24 : ARMFReg<24, "d24">; def D25 : ARMFReg<25, "d25">;
def D26 : ARMFReg<26, "d26">; def D27 : ARMFReg<27, "d27">;
def D28 : ARMFReg<28, "d28">; def D29 : ARMFReg<29, "d29">;
def D30 : ARMFReg<30, "d30">; def D31 : ARMFReg<31, "d31">;

// Advanced SIMD (NEON) defines 16 quad-word aliases
let SubRegIndices = [dsub_0, dsub_1],
    CompositeIndices = [(ssub_2 dsub_1, ssub_0),
                        (ssub_3 dsub_1, ssub_1)] in {
def Q0  : ARMReg< 0,  "q0", [D0,  D1]>;
def Q1  : ARMReg< 1,  "q1", [D2,  D3]>;
def Q2  : ARMReg< 2,  "q2", [D4,  D5]>;
def Q3  : ARMReg< 3,  "q3", [D6,  D7]>;
def Q4  : ARMReg< 4,  "q4", [D8,  D9]>;
def Q5  : ARMReg< 5,  "q5", [D10, D11]>;
def Q6  : ARMReg< 6,  "q6", [D12, D13]>;
def Q7  : ARMReg< 7,  "q7", [D14, D15]>;
}
// Q8-Q15 alias the VFP3-only D16-D31, so they have no S subregisters and no
// ssub composite indices.
let SubRegIndices = [dsub_0, dsub_1] in {
def Q8  : ARMReg< 8,  "q8", [D16, D17]>;
def Q9  : ARMReg< 9,  "q9", [D18, D19]>;
def Q10 : ARMReg<10, "q10", [D20, D21]>;
def Q11 : ARMReg<11, "q11", [D22, D23]>;
def Q12 : ARMReg<12, "q12", [D24, D25]>;
def Q13 : ARMReg<13, "q13", [D26, D27]>;
def Q14 : ARMReg<14, "q14", [D28, D29]>;
def Q15 : ARMReg<15, "q15", [D30, D31]>;
}

// Pseudo 256-bit registers to represent pairs of Q registers. These should
// never be present in the emitted code.
// These are used for NEON load / store instructions, e.g., vld4, vst3.
// NOTE: It's possible to define more QQ registers since technically the
// starting D register number doesn't have to be multiple of 4, e.g.,
// D1, D2, D3, D4 would be a legal quad, but that would make the subregister
// stuff very messy.
let SubRegIndices = [qsub_0, qsub_1] in {
let CompositeIndices = [(dsub_2 qsub_1, dsub_0), (dsub_3 qsub_1, dsub_1),
                        (ssub_4 qsub_1, ssub_0), (ssub_5 qsub_1, ssub_1),
                        (ssub_6 qsub_1, ssub_2), (ssub_7 qsub_1, ssub_3)] in {
def QQ0 : ARMReg<0, "qq0", [Q0, Q1]>;
def QQ1 : ARMReg<1, "qq1", [Q2, Q3]>;
def QQ2 : ARMReg<2, "qq2", [Q4, Q5]>;
def QQ3 : ARMReg<3, "qq3", [Q6, Q7]>;
}
// QQ4-QQ7 are built from Q8-Q15, which have no S subregisters.
let CompositeIndices = [(dsub_2 qsub_1, dsub_0), (dsub_3 qsub_1, dsub_1)] in {
def QQ4 : ARMReg<4, "qq4", [Q8,  Q9]>;
def QQ5 : ARMReg<5, "qq5", [Q10, Q11]>;
def QQ6 : ARMReg<6, "qq6", [Q12, Q13]>;
def QQ7 : ARMReg<7, "qq7", [Q14, Q15]>;
}
}

// Pseudo 512-bit registers to represent four consecutive Q registers.
let SubRegIndices = [qqsub_0, qqsub_1] in {
let CompositeIndices = [(qsub_2  qqsub_1, qsub_0), (qsub_3  qqsub_1, qsub_1),
                        (dsub_4  qqsub_1, dsub_0), (dsub_5  qqsub_1, dsub_1),
                        (dsub_6  qqsub_1, dsub_2), (dsub_7  qqsub_1, dsub_3),
                        (ssub_8  qqsub_1, ssub_0), (ssub_9  qqsub_1, ssub_1),
                        (ssub_10 qqsub_1, ssub_2), (ssub_11 qqsub_1, ssub_3),
                        (ssub_12 qqsub_1, ssub_4), (ssub_13 qqsub_1, ssub_5),
                        (ssub_14 qqsub_1, ssub_6), (ssub_15 qqsub_1, ssub_7)] in
{
def QQQQ0 : ARMReg<0, "qqqq0", [QQ0, QQ1]>;
def QQQQ1 : ARMReg<1, "qqqq1", [QQ2, QQ3]>;
}
// QQQQ2-QQQQ3 are built from QQ4-QQ7, which have no S subregisters.
let CompositeIndices = [(qsub_2 qqsub_1, qsub_0), (qsub_3 qqsub_1, qsub_1),
                        (dsub_4 qqsub_1, dsub_0), (dsub_5 qqsub_1, dsub_1),
                        (dsub_6 qqsub_1, dsub_2), (dsub_7 qqsub_1, dsub_3)] in {
def QQQQ2 : ARMReg<2, "qqqq2", [QQ4, QQ5]>;
def QQQQ3 : ARMReg<3, "qqqq3", [QQ6, QQ7]>;
}
}

// Current Program Status Register.
def CPSR    : ARMReg<0, "cpsr">;
def FPSCR   : ARMReg<1, "fpscr">;
def ITSTATE : ARMReg<2, "itstate">;

// Special Registers - only available in privileged mode.
def FPSID : ARMReg<0, "fpsid">;
def FPEXC : ARMReg<8, "fpexc">;

// Register classes.
//
// pc  == Program Counter
// lr  == Link Register
// sp  == Stack Pointer
// r12 == ip (scratch)
// r7  == Frame Pointer (thumb-style backtraces)
// r9  == May be reserved as Thread Register
// r11 == Frame Pointer (arm-style backtraces)
// r10 == Stack Limit
//
def GPR : RegisterClass<"ARM", [i32], 32, [R0, R1, R2, R3, R4, R5, R6,
                                           R7, R8, R9, R10, R11, R12,
                                           SP, LR, PC]> {
  let MethodProtos = [{
    iterator allocation_order_begin(const MachineFunction &MF) const;
    iterator allocation_order_end(const MachineFunction &MF) const;
  }];
  let MethodBodies = [{
    // Allocate caller-saved regs (and LR) first, then callee-saved R4-R11.
    static const unsigned ARM_GPR_AO[] = {
      ARM::R0, ARM::R1, ARM::R2, ARM::R3,
      ARM::R12,ARM::LR,
      ARM::R4, ARM::R5, ARM::R6, ARM::R7,
      ARM::R8, ARM::R9, ARM::R10, ARM::R11 };

    // For Thumb1 mode, we don't want to allocate hi regs at all, as we
    // don't know how to spill them. If we make our prologue/epilogue code
    // smarter at some point, we can go back to using the above allocation
    // orders for the Thumb1 instructions that know how to use hi regs.
    static const unsigned THUMB_GPR_AO[] = {
      ARM::R0, ARM::R1, ARM::R2, ARM::R3,
      ARM::R4, ARM::R5, ARM::R6, ARM::R7 };

    GPRClass::iterator
    GPRClass::allocation_order_begin(const MachineFunction &MF) const {
      const TargetMachine &TM = MF.getTarget();
      const ARMSubtarget &Subtarget = TM.getSubtarget<ARMSubtarget>();
      if (Subtarget.isThumb1Only())
        return THUMB_GPR_AO;
      return ARM_GPR_AO;
    }

    GPRClass::iterator
    GPRClass::allocation_order_end(const MachineFunction &MF) const {
      const TargetMachine &TM = MF.getTarget();
      const ARMSubtarget &Subtarget = TM.getSubtarget<ARMSubtarget>();
      if (Subtarget.isThumb1Only())
        return THUMB_GPR_AO + (sizeof(THUMB_GPR_AO)/sizeof(unsigned));
      return ARM_GPR_AO + (sizeof(ARM_GPR_AO)/sizeof(unsigned));
    }
  }];
}

// restricted GPR register class. Many Thumb2 instructions allow the full
// register range for operands, but have undefined behaviours when PC
// or SP (R13 or R15) are used. The ARM ISA refers to these operands
// via the BadReg() pseudo-code description.
def rGPR : RegisterClass<"ARM", [i32], 32, [R0, R1, R2, R3, R4, R5, R6,
                                            R7, R8, R9, R10, R11, R12, LR]> {
  let MethodProtos = [{
    iterator allocation_order_begin(const MachineFunction &MF) const;
    iterator allocation_order_end(const MachineFunction &MF) const;
  }];
  let MethodBodies = [{
    static const unsigned ARM_rGPR_AO[] = {
      ARM::R0, ARM::R1, ARM::R2, ARM::R3,
      ARM::R12,ARM::LR,
      ARM::R4, ARM::R5, ARM::R6, ARM::R7,
      ARM::R8, ARM::R9, ARM::R10,
      ARM::R11 };

    // For Thumb1 mode, we don't want to allocate hi regs at all, as we
    // don't know how to spill them. If we make our prologue/epilogue code
    // smarter at some point, we can go back to using the above allocation
    // orders for the Thumb1 instructions that know how to use hi regs.
    static const unsigned THUMB_rGPR_AO[] = {
      ARM::R0, ARM::R1, ARM::R2, ARM::R3,
      ARM::R4, ARM::R5, ARM::R6, ARM::R7 };

    rGPRClass::iterator
    rGPRClass::allocation_order_begin(const MachineFunction &MF) const {
      const TargetMachine &TM = MF.getTarget();
      const ARMSubtarget &Subtarget = TM.getSubtarget<ARMSubtarget>();
      if (Subtarget.isThumb1Only())
        return THUMB_rGPR_AO;
      return ARM_rGPR_AO;
    }

    rGPRClass::iterator
    rGPRClass::allocation_order_end(const MachineFunction &MF) const {
      const TargetMachine &TM = MF.getTarget();
      const ARMSubtarget &Subtarget = TM.getSubtarget<ARMSubtarget>();

      if (Subtarget.isThumb1Only())
        return THUMB_rGPR_AO + (sizeof(THUMB_rGPR_AO)/sizeof(unsigned));
      return ARM_rGPR_AO + (sizeof(ARM_rGPR_AO)/sizeof(unsigned));
    }
  }];
}

// Thumb registers are R0-R7 normally. Some instructions can still use
// the general GPR register class above (MOV, e.g.)
def tGPR : RegisterClass<"ARM", [i32], 32, [R0, R1, R2, R3, R4, R5, R6, R7]> {}

// For tail calls, we can't use callee-saved registers, as they are restored
// to the saved value before the tail call, which would clobber a call address.
// Note, getMinimalPhysRegClass(R0) returns tGPR because of the names of
// this class and the preceding one(!)  This is what we want.
def tcGPR : RegisterClass<"ARM", [i32], 32, [R0, R1, R2, R3, R9, R12]> {
  let MethodProtos = [{
    iterator allocation_order_begin(const MachineFunction &MF) const;
    iterator allocation_order_end(const MachineFunction &MF) const;
  }];
  let MethodBodies = [{
    // R9 is available.
    static const unsigned ARM_GPR_R9_TC[] = {
      ARM::R0, ARM::R1, ARM::R2, ARM::R3,
      ARM::R9, ARM::R12 };
    // R9 is not available.
    static const unsigned ARM_GPR_NOR9_TC[] = {
      ARM::R0, ARM::R1, ARM::R2, ARM::R3,
      ARM::R12 };

    // For Thumb1 mode, we don't want to allocate hi regs at all, as we
    // don't know how to spill them. If we make our prologue/epilogue code
    // smarter at some point, we can go back to using the above allocation
    // orders for the Thumb1 instructions that know how to use hi regs.
    static const unsigned THUMB_GPR_AO_TC[] = {
      ARM::R0, ARM::R1, ARM::R2, ARM::R3 };

    tcGPRClass::iterator
    tcGPRClass::allocation_order_begin(const MachineFunction &MF) const {
      const TargetMachine &TM = MF.getTarget();
      const ARMSubtarget &Subtarget = TM.getSubtarget<ARMSubtarget>();
      if (Subtarget.isThumb1Only())
        return THUMB_GPR_AO_TC;
      // R9 may be reserved as a thread register on some targets; Darwin
      // keeps it allocatable for tail calls.
      return Subtarget.isTargetDarwin() ? ARM_GPR_R9_TC : ARM_GPR_NOR9_TC;
    }

    tcGPRClass::iterator
    tcGPRClass::allocation_order_end(const MachineFunction &MF) const {
      const TargetMachine &TM = MF.getTarget();
      const ARMSubtarget &Subtarget = TM.getSubtarget<ARMSubtarget>();

      if (Subtarget.isThumb1Only())
        return THUMB_GPR_AO_TC + (sizeof(THUMB_GPR_AO_TC)/sizeof(unsigned));

      return Subtarget.isTargetDarwin() ?
        ARM_GPR_R9_TC + (sizeof(ARM_GPR_R9_TC)/sizeof(unsigned)) :
        ARM_GPR_NOR9_TC + (sizeof(ARM_GPR_NOR9_TC)/sizeof(unsigned));
    }
  }];
}


// Scalar single precision floating point register class.
def SPR : RegisterClass<"ARM", [f32], 32, [S0, S1, S2, S3, S4, S5, S6, S7, S8,
  S9, S10, S11, S12, S13, S14, S15, S16, S17, S18, S19, S20, S21, S22,
  S23, S24, S25, S26, S27, S28, S29, S30, S31]>;

// Subset of SPR which can be used as a source of NEON scalars for 16-bit
// operations
def SPR_8 : RegisterClass<"ARM", [f32], 32,
                          [S0, S1,  S2,  S3,  S4,  S5,  S6,  S7,
                           S8, S9, S10, S11, S12, S13, S14, S15]>;

// Scalar double precision floating point / generic 64-bit vector register
// class.
// ARM requires only word alignment for double. It's more performant if it
// is double-word alignment though.
def DPR : RegisterClass<"ARM", [f64, v8i8, v4i16, v2i32, v1i64, v2f32], 64,
                        [D0,  D1,  D2,  D3,  D4,  D5,  D6,  D7,
                         D8,  D9,  D10, D11, D12, D13, D14, D15,
                         D16, D17, D18, D19, D20, D21, D22, D23,
                         D24, D25, D26, D27, D28, D29, D30, D31]> {
  let MethodProtos = [{
    iterator allocation_order_begin(const MachineFunction &MF) const;
    iterator allocation_order_end(const MachineFunction &MF) const;
  }];
  let MethodBodies = [{
    // VFP2 / VFPv3-D16
    static const unsigned ARM_DPR_VFP2[] = {
      ARM::D0,  ARM::D1,  ARM::D2,  ARM::D3,
      ARM::D4,  ARM::D5,  ARM::D6,  ARM::D7,
      ARM::D8,  ARM::D9,  ARM::D10, ARM::D11,
      ARM::D12, ARM::D13, ARM::D14, ARM::D15 };
    // VFP3: D8-D15 are callee saved and should be allocated last.
    // Save other low registers for use as DPR_VFP2 and DPR_8 classes.
    static const unsigned ARM_DPR_VFP3[] = {
      ARM::D16, ARM::D17, ARM::D18, ARM::D19,
      ARM::D20, ARM::D21, ARM::D22, ARM::D23,
      ARM::D24, ARM::D25, ARM::D26, ARM::D27,
      ARM::D28, ARM::D29, ARM::D30, ARM::D31,
      ARM::D0,  ARM::D1,  ARM::D2,  ARM::D3,
      ARM::D4,  ARM::D5,  ARM::D6,  ARM::D7,
      ARM::D8,  ARM::D9,  ARM::D10, ARM::D11,
      ARM::D12, ARM::D13, ARM::D14, ARM::D15 };

    DPRClass::iterator
    DPRClass::allocation_order_begin(const MachineFunction &MF) const {
      const TargetMachine &TM = MF.getTarget();
      const ARMSubtarget &Subtarget = TM.getSubtarget<ARMSubtarget>();
      if (Subtarget.hasVFP3() && !Subtarget.hasD16())
        return ARM_DPR_VFP3;
      return ARM_DPR_VFP2;
    }

    DPRClass::iterator
    DPRClass::allocation_order_end(const MachineFunction &MF) const {
      const TargetMachine &TM = MF.getTarget();
      const ARMSubtarget &Subtarget = TM.getSubtarget<ARMSubtarget>();
      if (Subtarget.hasVFP3() && !Subtarget.hasD16())
        return ARM_DPR_VFP3 + (sizeof(ARM_DPR_VFP3)/sizeof(unsigned));
      else
        return ARM_DPR_VFP2 + (sizeof(ARM_DPR_VFP2)/sizeof(unsigned));
    }
  }];
}

// Subset of DPR that are accessible with VFP2 (and so that also have
// 32-bit SPR subregs).
def DPR_VFP2 : RegisterClass<"ARM", [f64, v8i8, v4i16, v2i32, v1i64, v2f32], 64,
                             [D0, D1,  D2,  D3,  D4,  D5,  D6,  D7,
                              D8, D9, D10, D11, D12, D13, D14, D15]> {
  let SubRegClasses = [(SPR ssub_0, ssub_1)];
}

// Subset of DPR which can be used as a source of NEON scalars for 16-bit
// operations
def DPR_8 : RegisterClass<"ARM", [f64, v8i8, v4i16, v2i32, v1i64, v2f32], 64,
                          [D0, D1, D2, D3, D4, D5, D6, D7]> {
  let SubRegClasses = [(SPR_8 ssub_0, ssub_1)];
}

// Generic 128-bit vector register class.
def QPR : RegisterClass<"ARM", [v16i8, v8i16, v4i32, v2i64, v4f32, v2f64], 128,
                        [Q0, Q1, Q2,  Q3,  Q4,  Q5,  Q6,  Q7,
                         Q8, Q9, Q10, Q11, Q12, Q13, Q14, Q15]> {
  let SubRegClasses = [(DPR dsub_0, dsub_1)];
  let MethodProtos = [{
    iterator allocation_order_begin(const MachineFunction &MF) const;
    iterator allocation_order_end(const MachineFunction &MF) const;
  }];
  let MethodBodies = [{
    // Q4-Q7 are callee saved and should be allocated last.
    // Save other low registers for use as QPR_VFP2 and QPR_8 classes.
    static const unsigned ARM_QPR[] = {
      ARM::Q8,  ARM::Q9,  ARM::Q10, ARM::Q11,
      ARM::Q12, ARM::Q13, ARM::Q14, ARM::Q15,
      ARM::Q0,  ARM::Q1,  ARM::Q2,  ARM::Q3,
      ARM::Q4,  ARM::Q5,  ARM::Q6,  ARM::Q7 };

    QPRClass::iterator
    QPRClass::allocation_order_begin(const MachineFunction &MF) const {
      return ARM_QPR;
    }

    QPRClass::iterator
    QPRClass::allocation_order_end(const MachineFunction &MF) const {
      return ARM_QPR + (sizeof(ARM_QPR)/sizeof(unsigned));
    }
  }];
}

// Subset of QPR that have 32-bit SPR subregs.
def QPR_VFP2 : RegisterClass<"ARM", [v16i8, v8i16, v4i32, v2i64, v4f32, v2f64],
                             128,
                             [Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]> {
  let SubRegClasses = [(SPR      ssub_0, ssub_1, ssub_2, ssub_3),
                       (DPR_VFP2 dsub_0, dsub_1)];
}

// Subset of QPR that have DPR_8 and SPR_8 subregs.
def QPR_8 : RegisterClass<"ARM", [v16i8, v8i16, v4i32, v2i64, v4f32, v2f64],
                          128,
                          [Q0, Q1, Q2, Q3]> {
  let SubRegClasses = [(SPR_8 ssub_0, ssub_1, ssub_2, ssub_3),
                       (DPR_8 dsub_0, dsub_1)];
}

// Pseudo 256-bit vector register class to model pairs of Q registers
// (4 consecutive D registers).
def QQPR : RegisterClass<"ARM", [v4i64],
                         256,
                         [QQ0, QQ1, QQ2, QQ3, QQ4, QQ5, QQ6, QQ7]> {
  let SubRegClasses = [(DPR dsub_0, dsub_1, dsub_2, dsub_3),
                       (QPR qsub_0, qsub_1)];
  let MethodProtos = [{
    iterator allocation_order_begin(const MachineFunction &MF) const;
    iterator allocation_order_end(const MachineFunction &MF) const;
  }];
  let MethodBodies = [{
    // QQ2-QQ3 are callee saved and should be allocated last.
    // Save other low registers for use as QPR_VFP2 and QPR_8 classes.
    static const unsigned ARM_QQPR[] = {
      ARM::QQ4, ARM::QQ5, ARM::QQ6, ARM::QQ7,
      ARM::QQ0, ARM::QQ1, ARM::QQ2, ARM::QQ3 };

    QQPRClass::iterator
    QQPRClass::allocation_order_begin(const MachineFunction &MF) const {
      return ARM_QQPR;
    }

    QQPRClass::iterator
    QQPRClass::allocation_order_end(const MachineFunction &MF) const {
      return ARM_QQPR + (sizeof(ARM_QQPR)/sizeof(unsigned));
    }
  }];
}

// Subset of QQPR that have 32-bit SPR subregs.
def QQPR_VFP2 : RegisterClass<"ARM", [v4i64],
                              256,
                              [QQ0, QQ1, QQ2, QQ3]> {
  let SubRegClasses = [(SPR      ssub_0, ssub_1, ssub_2, ssub_3),
                       (DPR_VFP2 dsub_0, dsub_1, dsub_2, dsub_3),
                       (QPR_VFP2 qsub_0, qsub_1)];

}

// Pseudo 512-bit vector register class to model 4 consecutive Q registers
// (8 consecutive D registers).
def QQQQPR : RegisterClass<"ARM", [v8i64],
                           256,
                           [QQQQ0, QQQQ1, QQQQ2, QQQQ3]> {
  let SubRegClasses = [(DPR dsub_0, dsub_1, dsub_2, dsub_3,
                            dsub_4, dsub_5, dsub_6, dsub_7),
                       (QPR qsub_0, qsub_1, qsub_2, qsub_3)];
  let MethodProtos = [{
    iterator allocation_order_begin(const MachineFunction &MF) const;
    iterator allocation_order_end(const MachineFunction &MF) const;
  }];
  let MethodBodies = [{
    // QQQQ1 is callee saved and should be allocated last.
    // Save QQQQ0 for use as QPR_VFP2 and QPR_8 classes.
    static const unsigned ARM_QQQQPR[] = {
      ARM::QQQQ2, ARM::QQQQ3, ARM::QQQQ0, ARM::QQQQ1 };

    QQQQPRClass::iterator
    QQQQPRClass::allocation_order_begin(const MachineFunction &MF) const {
      return ARM_QQQQPR;
    }

    QQQQPRClass::iterator
    QQQQPRClass::allocation_order_end(const MachineFunction &MF) const {
      return ARM_QQQQPR + (sizeof(ARM_QQQQPR)/sizeof(unsigned));
    }
  }];
}

// Condition code registers.
def CCR : RegisterClass<"ARM", [i32], 32, [CPSR]>;