// ARMRegisterInfo.td -- revision ca66226e7e8368719f5b9937ed6523c7117a9d63
//===- ARMRegisterInfo.td - ARM Register defs --------------*- tablegen -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
//  Declarations that describe the ARM register file
//===----------------------------------------------------------------------===//

// Registers are identified with 4-bit ID numbers.
class ARMReg<bits<4> num, string n, list<Register> subregs = []> : Register<n> {
  field bits<4> Num;
  let Namespace = "ARM";
  let SubRegs = subregs;
}

// Floating-point registers use 6-bit ID numbers (S0-S31 / D0-D31).
class ARMFReg<bits<6> num, string n> : Register<n> {
  field bits<6> Num;
  let Namespace = "ARM";
}

// Subregister indices.
let Namespace = "ARM" in {
// Note: Code depends on these having consecutive numbers.
def ssub_0 : SubRegIndex;
def ssub_1 : SubRegIndex;
def ssub_2 : SubRegIndex; // In a Q reg.
def ssub_3 : SubRegIndex;

def dsub_0 : SubRegIndex;
def dsub_1 : SubRegIndex;
def dsub_2 : SubRegIndex;
def dsub_3 : SubRegIndex;
def dsub_4 : SubRegIndex;
def dsub_5 : SubRegIndex;
def dsub_6 : SubRegIndex;
def dsub_7 : SubRegIndex;

def qsub_0 : SubRegIndex;
def qsub_1 : SubRegIndex;
def qsub_2 : SubRegIndex;
def qsub_3 : SubRegIndex;

def qqsub_0 : SubRegIndex;
def qqsub_1 : SubRegIndex;
}

// Integer registers
def R0  : ARMReg< 0, "r0">,  DwarfRegNum<[0]>;
def R1  : ARMReg< 1, "r1">,  DwarfRegNum<[1]>;
def R2  : ARMReg< 2, "r2">,  DwarfRegNum<[2]>;
def R3  : ARMReg< 3, "r3">,  DwarfRegNum<[3]>;
def R4  : ARMReg< 4, "r4">,  DwarfRegNum<[4]>;
def R5  : ARMReg< 5, "r5">,  DwarfRegNum<[5]>;
def R6  : ARMReg< 6, "r6">,  DwarfRegNum<[6]>;
def R7  : ARMReg< 7, "r7">,  DwarfRegNum<[7]>;
// These require 32-bit instructions.
// In Thumb mode, accessing R8-R15 generally requires 32-bit instructions,
// so give them a non-zero allocation cost.
let CostPerUse = 1 in {
def R8  : ARMReg< 8,  "r8">, DwarfRegNum<[8]>;
def R9  : ARMReg< 9,  "r9">, DwarfRegNum<[9]>;
def R10 : ARMReg<10, "r10">, DwarfRegNum<[10]>;
def R11 : ARMReg<11, "r11">, DwarfRegNum<[11]>;
def R12 : ARMReg<12, "r12">, DwarfRegNum<[12]>;
def SP  : ARMReg<13, "sp">,  DwarfRegNum<[13]>;
def LR  : ARMReg<14, "lr">,  DwarfRegNum<[14]>;
def PC  : ARMReg<15, "pc">,  DwarfRegNum<[15]>;
}

// Float registers
def S0  : ARMFReg< 0, "s0">;  def S1  : ARMFReg< 1, "s1">;
def S2  : ARMFReg< 2, "s2">;  def S3  : ARMFReg< 3, "s3">;
def S4  : ARMFReg< 4, "s4">;  def S5  : ARMFReg< 5, "s5">;
def S6  : ARMFReg< 6, "s6">;  def S7  : ARMFReg< 7, "s7">;
def S8  : ARMFReg< 8, "s8">;  def S9  : ARMFReg< 9, "s9">;
def S10 : ARMFReg<10, "s10">; def S11 : ARMFReg<11, "s11">;
def S12 : ARMFReg<12, "s12">; def S13 : ARMFReg<13, "s13">;
def S14 : ARMFReg<14, "s14">; def S15 : ARMFReg<15, "s15">;
def S16 : ARMFReg<16, "s16">; def S17 : ARMFReg<17, "s17">;
def S18 : ARMFReg<18, "s18">; def S19 : ARMFReg<19, "s19">;
def S20 : ARMFReg<20, "s20">; def S21 : ARMFReg<21, "s21">;
def S22 : ARMFReg<22, "s22">; def S23 : ARMFReg<23, "s23">;
def S24 : ARMFReg<24, "s24">; def S25 : ARMFReg<25, "s25">;
def S26 : ARMFReg<26, "s26">; def S27 : ARMFReg<27, "s27">;
def S28 : ARMFReg<28, "s28">; def S29 : ARMFReg<29, "s29">;
def S30 : ARMFReg<30, "s30">; def S31 : ARMFReg<31, "s31">;

// Aliases of the F* registers used to hold 64-bit fp values (doubles)
let SubRegIndices = [ssub_0, ssub_1] in {
def D0  : ARMReg< 0,  "d0", [S0,   S1]>, DwarfRegNum<[256]>;
def D1  : ARMReg< 1,  "d1", [S2,   S3]>, DwarfRegNum<[257]>;
def D2  : ARMReg< 2,  "d2", [S4,   S5]>, DwarfRegNum<[258]>;
def D3  : ARMReg< 3,  "d3", [S6,   S7]>, DwarfRegNum<[259]>;
def D4  : ARMReg< 4,  "d4", [S8,   S9]>, DwarfRegNum<[260]>;
def D5  : ARMReg< 5,  "d5", [S10, S11]>, DwarfRegNum<[261]>;
def D6  : ARMReg< 6,  "d6", [S12, S13]>, DwarfRegNum<[262]>;
def D7  : ARMReg< 7,  "d7", [S14, S15]>, DwarfRegNum<[263]>;
def D8  : ARMReg< 8,  "d8", [S16, S17]>, DwarfRegNum<[264]>;
def D9  : ARMReg< 9,  "d9", [S18, S19]>, DwarfRegNum<[265]>;
def D10 : ARMReg<10, "d10", [S20, S21]>, DwarfRegNum<[266]>;
def D11 : ARMReg<11, "d11", [S22, S23]>, DwarfRegNum<[267]>;
def D12 : ARMReg<12, "d12", [S24, S25]>, DwarfRegNum<[268]>;
def D13 : ARMReg<13, "d13", [S26, S27]>, DwarfRegNum<[269]>;
def D14 : ARMReg<14, "d14", [S28, S29]>, DwarfRegNum<[270]>;
def D15 : ARMReg<15, "d15", [S30, S31]>, DwarfRegNum<[271]>;
}

// VFP3 defines 16 additional double registers
def D16 : ARMFReg<16, "d16">, DwarfRegNum<[272]>;
def D17 : ARMFReg<17, "d17">, DwarfRegNum<[273]>;
def D18 : ARMFReg<18, "d18">, DwarfRegNum<[274]>;
def D19 : ARMFReg<19, "d19">, DwarfRegNum<[275]>;
def D20 : ARMFReg<20, "d20">, DwarfRegNum<[276]>;
def D21 : ARMFReg<21, "d21">, DwarfRegNum<[277]>;
def D22 : ARMFReg<22, "d22">, DwarfRegNum<[278]>;
def D23 : ARMFReg<23, "d23">, DwarfRegNum<[279]>;
def D24 : ARMFReg<24, "d24">, DwarfRegNum<[280]>;
def D25 : ARMFReg<25, "d25">, DwarfRegNum<[281]>;
def D26 : ARMFReg<26, "d26">, DwarfRegNum<[282]>;
def D27 : ARMFReg<27, "d27">, DwarfRegNum<[283]>;
def D28 : ARMFReg<28, "d28">, DwarfRegNum<[284]>;
def D29 : ARMFReg<29, "d29">, DwarfRegNum<[285]>;
def D30 : ARMFReg<30, "d30">, DwarfRegNum<[286]>;
def D31 : ARMFReg<31, "d31">, DwarfRegNum<[287]>;

// Advanced SIMD (NEON) defines 16 quad-word aliases
let SubRegIndices = [dsub_0, dsub_1],
    CompositeIndices = [(ssub_2 dsub_1, ssub_0),
                        (ssub_3 dsub_1, ssub_1)] in {
def Q0  : ARMReg< 0,  "q0", [D0,   D1]>;
def Q1  : ARMReg< 1,  "q1", [D2,   D3]>;
def Q2  : ARMReg< 2,  "q2", [D4,   D5]>;
def Q3  : ARMReg< 3,  "q3", [D6,   D7]>;
def Q4  : ARMReg< 4,  "q4", [D8,   D9]>;
def Q5  : ARMReg< 5,  "q5", [D10, D11]>;
def Q6  : ARMReg< 6,  "q6", [D12, D13]>;
def Q7  : ARMReg< 7,  "q7", [D14, D15]>;
}
// Q8-Q15 are composed of VFP3-only D registers, which have no S subregs,
// so no ssub_* composite indices here.
let SubRegIndices = [dsub_0, dsub_1] in {
def Q8  : ARMReg< 8,  "q8", [D16, D17]>;
def Q9  : ARMReg< 9,  "q9", [D18, D19]>;
def Q10 : ARMReg<10, "q10", [D20, D21]>;
def Q11 : ARMReg<11, "q11", [D22, D23]>;
def Q12 : ARMReg<12, "q12", [D24, D25]>;
def Q13 : ARMReg<13, "q13", [D26, D27]>;
def Q14 : ARMReg<14, "q14", [D28, D29]>;
def Q15 : ARMReg<15, "q15", [D30, D31]>;
}

// Pseudo 256-bit registers to represent pairs of Q registers. These should
// never be present in the emitted code.
// These are used for NEON load / store instructions, e.g., vld4, vst3.
// NOTE: It's possible to define more QQ registers since technically the
// starting D register number doesn't have to be multiple of 4, e.g.,
// D1, D2, D3, D4 would be a legal quad, but that would make the subregister
// stuff very messy.
let SubRegIndices = [qsub_0, qsub_1],
    CompositeIndices = [(dsub_2 qsub_1, dsub_0), (dsub_3 qsub_1, dsub_1)] in {
def QQ0 : ARMReg<0, "qq0", [Q0,  Q1]>;
def QQ1 : ARMReg<1, "qq1", [Q2,  Q3]>;
def QQ2 : ARMReg<2, "qq2", [Q4,  Q5]>;
def QQ3 : ARMReg<3, "qq3", [Q6,  Q7]>;
def QQ4 : ARMReg<4, "qq4", [Q8,  Q9]>;
def QQ5 : ARMReg<5, "qq5", [Q10, Q11]>;
def QQ6 : ARMReg<6, "qq6", [Q12, Q13]>;
def QQ7 : ARMReg<7, "qq7", [Q14, Q15]>;
}

// Pseudo 512-bit registers to represent four consecutive Q registers.
let SubRegIndices = [qqsub_0, qqsub_1],
    CompositeIndices = [(qsub_2 qqsub_1, qsub_0), (qsub_3 qqsub_1, qsub_1),
                        (dsub_4 qqsub_1, dsub_0), (dsub_5 qqsub_1, dsub_1),
                        (dsub_6 qqsub_1, dsub_2), (dsub_7 qqsub_1, dsub_3)] in {
def QQQQ0 : ARMReg<0, "qqqq0", [QQ0, QQ1]>;
def QQQQ1 : ARMReg<1, "qqqq1", [QQ2, QQ3]>;
def QQQQ2 : ARMReg<2, "qqqq2", [QQ4, QQ5]>;
def QQQQ3 : ARMReg<3, "qqqq3", [QQ6, QQ7]>;
}

// Current Program Status Register.
def CPSR    : ARMReg<0, "cpsr">;
def FPSCR   : ARMReg<1, "fpscr">;
def ITSTATE : ARMReg<2, "itstate">;

// Special Registers - only available in privileged mode.
def FPSID : ARMReg<0, "fpsid">;
def FPEXC : ARMReg<8, "fpexc">;

// Register classes.
//
// pc  == Program Counter
// lr  == Link Register
// sp  == Stack Pointer
// r12 == ip (scratch)
// r7  == Frame Pointer (thumb-style backtraces)
// r9  == May be reserved as Thread Register
// r11 == Frame Pointer (arm-style backtraces)
// r10 == Stack Limit
//
def GPR : RegisterClass<"ARM", [i32], 32, [R0, R1, R2, R3, R4, R5, R6,
                                           R7, R8, R9, R10, R11, R12,
                                           SP, LR, PC]> {
  let MethodProtos = [{
    iterator allocation_order_begin(const MachineFunction &MF) const;
    iterator allocation_order_end(const MachineFunction &MF) const;
  }];
  let MethodBodies = [{
    static const unsigned ARM_GPR_AO[] = {
      ARM::R0, ARM::R1, ARM::R2, ARM::R3,
      ARM::R12,ARM::LR,
      ARM::R4, ARM::R5, ARM::R6, ARM::R7,
      ARM::R8, ARM::R9, ARM::R10, ARM::R11 };

    // For Thumb1 mode, we don't want to allocate hi regs at all, as we
    // don't know how to spill them. If we make our prologue/epilogue code
    // smarter at some point, we can go back to using the above allocation
    // orders for the Thumb1 instructions that know how to use hi regs.
    static const unsigned THUMB_GPR_AO[] = {
      ARM::R0, ARM::R1, ARM::R2, ARM::R3,
      ARM::R4, ARM::R5, ARM::R6, ARM::R7 };

    GPRClass::iterator
    GPRClass::allocation_order_begin(const MachineFunction &MF) const {
      const TargetMachine &TM = MF.getTarget();
      const ARMSubtarget &Subtarget = TM.getSubtarget<ARMSubtarget>();
      if (Subtarget.isThumb1Only())
        return THUMB_GPR_AO;
      return ARM_GPR_AO;
    }

    GPRClass::iterator
    GPRClass::allocation_order_end(const MachineFunction &MF) const {
      const TargetMachine &TM = MF.getTarget();
      const ARMSubtarget &Subtarget = TM.getSubtarget<ARMSubtarget>();
      if (Subtarget.isThumb1Only())
        return THUMB_GPR_AO + (sizeof(THUMB_GPR_AO)/sizeof(unsigned));
      return ARM_GPR_AO + (sizeof(ARM_GPR_AO)/sizeof(unsigned));
    }
  }];
}

// restricted GPR register class. Many Thumb2 instructions allow the full
// register range for operands, but have undefined behaviours when PC
// or SP (R13 or R15) are used. The ARM ISA refers to these operands
// via the BadReg() pseudo-code description.
def rGPR : RegisterClass<"ARM", [i32], 32, [R0, R1, R2, R3, R4, R5, R6,
                                            R7, R8, R9, R10, R11, R12, LR]> {
  let MethodProtos = [{
    iterator allocation_order_begin(const MachineFunction &MF) const;
    iterator allocation_order_end(const MachineFunction &MF) const;
  }];
  let MethodBodies = [{
    static const unsigned ARM_rGPR_AO[] = {
      ARM::R0, ARM::R1, ARM::R2, ARM::R3,
      ARM::R12,ARM::LR,
      ARM::R4, ARM::R5, ARM::R6, ARM::R7,
      ARM::R8, ARM::R9, ARM::R10,
      ARM::R11 };

    // For Thumb1 mode, we don't want to allocate hi regs at all, as we
    // don't know how to spill them. If we make our prologue/epilogue code
    // smarter at some point, we can go back to using the above allocation
    // orders for the Thumb1 instructions that know how to use hi regs.
    static const unsigned THUMB_rGPR_AO[] = {
      ARM::R0, ARM::R1, ARM::R2, ARM::R3,
      ARM::R4, ARM::R5, ARM::R6, ARM::R7 };

    rGPRClass::iterator
    rGPRClass::allocation_order_begin(const MachineFunction &MF) const {
      const TargetMachine &TM = MF.getTarget();
      const ARMSubtarget &Subtarget = TM.getSubtarget<ARMSubtarget>();
      if (Subtarget.isThumb1Only())
        return THUMB_rGPR_AO;
      return ARM_rGPR_AO;
    }

    rGPRClass::iterator
    rGPRClass::allocation_order_end(const MachineFunction &MF) const {
      const TargetMachine &TM = MF.getTarget();
      const ARMSubtarget &Subtarget = TM.getSubtarget<ARMSubtarget>();

      if (Subtarget.isThumb1Only())
        return THUMB_rGPR_AO + (sizeof(THUMB_rGPR_AO)/sizeof(unsigned));
      return ARM_rGPR_AO + (sizeof(ARM_rGPR_AO)/sizeof(unsigned));
    }
  }];
}

// Thumb registers are R0-R7 normally. Some instructions can still use
// the general GPR register class above (MOV, e.g.)
def tGPR : RegisterClass<"ARM", [i32], 32, [R0, R1, R2, R3, R4, R5, R6, R7]> {}

// For tail calls, we can't use callee-saved registers, as they are restored
// to the saved value before the tail call, which would clobber a call address.
// Note, getMinimalPhysRegClass(R0) returns tGPR because of the names of
// this class and the preceding one(!)  This is what we want.
def tcGPR : RegisterClass<"ARM", [i32], 32, [R0, R1, R2, R3, R9, R12]> {
  let MethodProtos = [{
    iterator allocation_order_begin(const MachineFunction &MF) const;
    iterator allocation_order_end(const MachineFunction &MF) const;
  }];
  let MethodBodies = [{
    // R9 is available.
    static const unsigned ARM_GPR_R9_TC[] = {
      ARM::R0, ARM::R1, ARM::R2, ARM::R3,
      ARM::R9, ARM::R12 };
    // R9 is not available.
    static const unsigned ARM_GPR_NOR9_TC[] = {
      ARM::R0, ARM::R1, ARM::R2, ARM::R3,
      ARM::R12 };

    // For Thumb1 mode, we don't want to allocate hi regs at all, as we
    // don't know how to spill them. If we make our prologue/epilogue code
    // smarter at some point, we can go back to using the above allocation
    // orders for the Thumb1 instructions that know how to use hi regs.
    static const unsigned THUMB_GPR_AO_TC[] = {
      ARM::R0, ARM::R1, ARM::R2, ARM::R3 };

    tcGPRClass::iterator
    tcGPRClass::allocation_order_begin(const MachineFunction &MF) const {
      const TargetMachine &TM = MF.getTarget();
      const ARMSubtarget &Subtarget = TM.getSubtarget<ARMSubtarget>();
      if (Subtarget.isThumb1Only())
        return THUMB_GPR_AO_TC;
      return Subtarget.isTargetDarwin() ? ARM_GPR_R9_TC : ARM_GPR_NOR9_TC;
    }

    tcGPRClass::iterator
    tcGPRClass::allocation_order_end(const MachineFunction &MF) const {
      const TargetMachine &TM = MF.getTarget();
      const ARMSubtarget &Subtarget = TM.getSubtarget<ARMSubtarget>();

      if (Subtarget.isThumb1Only())
        return THUMB_GPR_AO_TC + (sizeof(THUMB_GPR_AO_TC)/sizeof(unsigned));

      return Subtarget.isTargetDarwin() ?
        ARM_GPR_R9_TC + (sizeof(ARM_GPR_R9_TC)/sizeof(unsigned)) :
        ARM_GPR_NOR9_TC + (sizeof(ARM_GPR_NOR9_TC)/sizeof(unsigned));
    }
  }];
}


// Scalar single precision floating point register class..
def SPR : RegisterClass<"ARM", [f32], 32, [S0, S1, S2, S3, S4, S5, S6, S7, S8,
  S9, S10, S11, S12, S13, S14, S15, S16, S17, S18, S19, S20, S21, S22,
  S23, S24, S25, S26, S27, S28, S29, S30, S31]>;

// Subset of SPR which can be used as a source of NEON scalars for 16-bit
// operations
def SPR_8 : RegisterClass<"ARM", [f32], 32,
                          [S0, S1,  S2,  S3,  S4,  S5,  S6,  S7,
                           S8, S9, S10, S11, S12, S13, S14, S15]>;

// Scalar double precision floating point / generic 64-bit vector register
// class.
// ARM requires only word alignment for double. It's more performant if it
// is double-word alignment though.
def DPR : RegisterClass<"ARM", [f64, v8i8, v4i16, v2i32, v1i64, v2f32], 64,
                        [D0,  D1,  D2,  D3,  D4,  D5,  D6,  D7,
                         D8,  D9,  D10, D11, D12, D13, D14, D15,
                         D16, D17, D18, D19, D20, D21, D22, D23,
                         D24, D25, D26, D27, D28, D29, D30, D31]> {
  let MethodProtos = [{
    iterator allocation_order_begin(const MachineFunction &MF) const;
    iterator allocation_order_end(const MachineFunction &MF) const;
  }];
  let MethodBodies = [{
    // VFP2 / VFPv3-D16
    static const unsigned ARM_DPR_VFP2[] = {
      ARM::D0,  ARM::D1,  ARM::D2,  ARM::D3,
      ARM::D4,  ARM::D5,  ARM::D6,  ARM::D7,
      ARM::D8,  ARM::D9,  ARM::D10, ARM::D11,
      ARM::D12, ARM::D13, ARM::D14, ARM::D15 };
    // VFP3: D8-D15 are callee saved and should be allocated last.
    // Save other low registers for use as DPR_VFP2 and DPR_8 classes.
    static const unsigned ARM_DPR_VFP3[] = {
      ARM::D16, ARM::D17, ARM::D18, ARM::D19,
      ARM::D20, ARM::D21, ARM::D22, ARM::D23,
      ARM::D24, ARM::D25, ARM::D26, ARM::D27,
      ARM::D28, ARM::D29, ARM::D30, ARM::D31,
      ARM::D0,  ARM::D1,  ARM::D2,  ARM::D3,
      ARM::D4,  ARM::D5,  ARM::D6,  ARM::D7,
      ARM::D8,  ARM::D9,  ARM::D10, ARM::D11,
      ARM::D12, ARM::D13, ARM::D14, ARM::D15 };

    DPRClass::iterator
    DPRClass::allocation_order_begin(const MachineFunction &MF) const {
      const TargetMachine &TM = MF.getTarget();
      const ARMSubtarget &Subtarget = TM.getSubtarget<ARMSubtarget>();
      if (Subtarget.hasVFP3() && !Subtarget.hasD16())
        return ARM_DPR_VFP3;
      return ARM_DPR_VFP2;
    }

    DPRClass::iterator
    DPRClass::allocation_order_end(const MachineFunction &MF) const {
      const TargetMachine &TM = MF.getTarget();
      const ARMSubtarget &Subtarget = TM.getSubtarget<ARMSubtarget>();
      if (Subtarget.hasVFP3() && !Subtarget.hasD16())
        return ARM_DPR_VFP3 + (sizeof(ARM_DPR_VFP3)/sizeof(unsigned));
      else
        return ARM_DPR_VFP2 + (sizeof(ARM_DPR_VFP2)/sizeof(unsigned));
    }
  }];
}

// Subset of DPR that are accessible with VFP2 (and so that also have
// 32-bit SPR subregs).
def DPR_VFP2 : RegisterClass<"ARM", [f64, v8i8, v4i16, v2i32, v1i64, v2f32], 64,
                             [D0,  D1,  D2,  D3,  D4,  D5,  D6,  D7,
                              D8,  D9,  D10, D11, D12, D13, D14, D15]> {
  let SubRegClasses = [(SPR ssub_0, ssub_1)];
}

// Subset of DPR which can be used as a source of NEON scalars for 16-bit
// operations
def DPR_8 : RegisterClass<"ARM", [f64, v8i8, v4i16, v2i32, v1i64, v2f32], 64,
                          [D0, D1, D2, D3, D4, D5, D6, D7]> {
  let SubRegClasses = [(SPR_8 ssub_0, ssub_1)];
}

// Generic 128-bit vector register class.
def QPR : RegisterClass<"ARM", [v16i8, v8i16, v4i32, v2i64, v4f32, v2f64], 128,
                        [Q0,  Q1,  Q2,  Q3,  Q4,  Q5,  Q6,  Q7,
                         Q8,  Q9,  Q10, Q11, Q12, Q13, Q14, Q15]> {
  let SubRegClasses = [(DPR dsub_0, dsub_1)];
  let MethodProtos = [{
    iterator allocation_order_begin(const MachineFunction &MF) const;
    iterator allocation_order_end(const MachineFunction &MF) const;
  }];
  let MethodBodies = [{
    // Q4-Q7 are callee saved and should be allocated last.
    // Save other low registers for use as QPR_VFP2 and QPR_8 classes.
    static const unsigned ARM_QPR[] = {
      ARM::Q8,  ARM::Q9,  ARM::Q10, ARM::Q11,
      ARM::Q12, ARM::Q13, ARM::Q14, ARM::Q15,
      ARM::Q0,  ARM::Q1,  ARM::Q2,  ARM::Q3,
      ARM::Q4,  ARM::Q5,  ARM::Q6,  ARM::Q7 };

    QPRClass::iterator
    QPRClass::allocation_order_begin(const MachineFunction &MF) const {
      return ARM_QPR;
    }

    QPRClass::iterator
    QPRClass::allocation_order_end(const MachineFunction &MF) const {
      return ARM_QPR + (sizeof(ARM_QPR)/sizeof(unsigned));
    }
  }];
}

// Subset of QPR that have 32-bit SPR subregs.
def QPR_VFP2 : RegisterClass<"ARM", [v16i8, v8i16, v4i32, v2i64, v4f32, v2f64],
                             128,
                             [Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]> {
  let SubRegClasses = [(SPR      ssub_0, ssub_1, ssub_2, ssub_3),
                       (DPR_VFP2 dsub_0, dsub_1)];
}

// Subset of QPR that have DPR_8 and SPR_8 subregs.
def QPR_8 : RegisterClass<"ARM", [v16i8, v8i16, v4i32, v2i64, v4f32, v2f64],
                          128,
                          [Q0, Q1, Q2, Q3]> {
  let SubRegClasses = [(SPR_8 ssub_0, ssub_1, ssub_2, ssub_3),
                       (DPR_8 dsub_0, dsub_1)];
}

// Pseudo 256-bit vector register class to model pairs of Q registers
// (4 consecutive D registers).
def QQPR : RegisterClass<"ARM", [v4i64],
                         256,
                         [QQ0, QQ1, QQ2, QQ3, QQ4, QQ5, QQ6, QQ7]> {
  let SubRegClasses = [(DPR dsub_0, dsub_1, dsub_2, dsub_3),
                       (QPR qsub_0, qsub_1)];
  let MethodProtos = [{
    iterator allocation_order_begin(const MachineFunction &MF) const;
    iterator allocation_order_end(const MachineFunction &MF) const;
  }];
  let MethodBodies = [{
    // QQ2-QQ3 are callee saved and should be allocated last.
    // Save other low registers for use as QPR_VFP2 and QPR_8 classes.
    static const unsigned ARM_QQPR[] = {
      ARM::QQ4, ARM::QQ5, ARM::QQ6, ARM::QQ7,
      ARM::QQ0, ARM::QQ1, ARM::QQ2, ARM::QQ3 };

    QQPRClass::iterator
    QQPRClass::allocation_order_begin(const MachineFunction &MF) const {
      return ARM_QQPR;
    }

    QQPRClass::iterator
    QQPRClass::allocation_order_end(const MachineFunction &MF) const {
      return ARM_QQPR + (sizeof(ARM_QQPR)/sizeof(unsigned));
    }
  }];
}

// Subset of QQPR that have 32-bit SPR subregs.
def QQPR_VFP2 : RegisterClass<"ARM", [v4i64],
                              256,
                              [QQ0, QQ1, QQ2, QQ3]> {
  let SubRegClasses = [(SPR      ssub_0, ssub_1, ssub_2, ssub_3),
                       (DPR_VFP2 dsub_0, dsub_1, dsub_2, dsub_3),
                       (QPR_VFP2 qsub_0, qsub_1)];

}

// Pseudo 512-bit vector register class to model 4 consecutive Q registers
// (8 consecutive D registers).
def QQQQPR : RegisterClass<"ARM", [v8i64],
                           256,
                           [QQQQ0, QQQQ1, QQQQ2, QQQQ3]> {
  let SubRegClasses = [(DPR dsub_0, dsub_1, dsub_2, dsub_3,
                            dsub_4, dsub_5, dsub_6, dsub_7),
                       (QPR qsub_0, qsub_1, qsub_2, qsub_3)];
  let MethodProtos = [{
    iterator allocation_order_begin(const MachineFunction &MF) const;
    iterator allocation_order_end(const MachineFunction &MF) const;
  }];
  let MethodBodies = [{
    // QQQQ1 is callee saved and should be allocated last.
    // Save QQQQ0 for use as QPR_VFP2 and QPR_8 classes.
    static const unsigned ARM_QQQQPR[] = {
      ARM::QQQQ2, ARM::QQQQ3, ARM::QQQQ0, ARM::QQQQ1 };

    QQQQPRClass::iterator
    QQQQPRClass::allocation_order_begin(const MachineFunction &MF) const {
      return ARM_QQQQPR;
    }

    QQQQPRClass::iterator
    QQQQPRClass::allocation_order_end(const MachineFunction &MF) const {
      return ARM_QQQQPR + (sizeof(ARM_QQQQPR)/sizeof(unsigned));
    }
  }];
}

// Condition code registers.
def CCR : RegisterClass<"ARM", [i32], 32, [CPSR]>;