// AMDGPUInstructions.td — revision a2b4eb6d15a13de257319ac6231b5ab622cd02b1
//===-- AMDGPUInstructions.td - Common instruction defs ---*- tablegen -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains instruction defs that are common to all hw codegen
// targets.
//
//===----------------------------------------------------------------------===//

// Base class for every AMDGPU instruction.  The top two TSFlags bits mark the
// pseudo register-indirect load/store instructions so the backend can
// recognize them after instruction selection.
class AMDGPUInst <dag outs, dag ins, string asm, list<dag> pattern> : Instruction {
  field bit isRegisterLoad = 0;
  field bit isRegisterStore = 0;

  let Namespace = "AMDGPU";
  let OutOperandList = outs;
  let InOperandList = ins;
  let AsmString = asm;
  let Pattern = pattern;
  let Itinerary = NullALU;

  let TSFlags{63} = isRegisterLoad;
  let TSFlags{62} = isRegisterStore;
}

// Shader instructions carry a 32-bit encoding field; 0xffffffff is a
// placeholder for pseudo instructions that are never encoded.
class AMDGPUShaderInst <dag outs, dag ins, string asm, list<dag> pattern>
    : AMDGPUInst<outs, ins, asm, pattern> {

  field bits<32> Inst = 0xffffffff;

}

def InstFlag : OperandWithDefaultOps <i32, (ops (i32 0))>;
def ADDRIndirect : ComplexPattern<iPTR, 2, "SelectADDRIndirect", [], []>;

//===----------------------------------------------------------------------===//
// Condition-code pattern leaves.
//
// COND_EQ/NE/GT/GE/LT/LE accept the ordered, unordered and plain integer
// forms of the comparison; the COND_O*/COND_U* variants accept only the
// listed subset.  COND_NULL never matches.
//===----------------------------------------------------------------------===//

def COND_EQ : PatLeaf <
  (cond),
  [{switch(N->get()){{default: return false;
                     case ISD::SETOEQ: case ISD::SETUEQ:
                     case ISD::SETEQ: return true;}}}]
>;

def COND_OEQ : PatLeaf <
  (cond),
  [{return N->get() == ISD::SETOEQ || N->get() == ISD::SETEQ;}]
>;

def COND_NE : PatLeaf <
  (cond),
  [{switch(N->get()){{default: return false;
                     case ISD::SETONE: case ISD::SETUNE:
                     case ISD::SETNE: return true;}}}]
>;

def COND_UNE : PatLeaf <
  (cond),
  [{return N->get() == ISD::SETUNE || N->get() == ISD::SETNE;}]
>;

def COND_GT : PatLeaf <
  (cond),
  [{switch(N->get()){{default: return false;
                     case ISD::SETOGT: case ISD::SETUGT:
                     case ISD::SETGT: return true;}}}]
>;

def COND_OGT : PatLeaf <
  (cond),
  [{return N->get() == ISD::SETOGT || N->get() == ISD::SETGT;}]
>;

def COND_GE : PatLeaf <
  (cond),
  [{switch(N->get()){{default: return false;
                     case ISD::SETOGE: case ISD::SETUGE:
                     case ISD::SETGE: return true;}}}]
>;

def COND_OGE : PatLeaf <
  (cond),
  [{return N->get() == ISD::SETOGE || N->get() == ISD::SETGE;}]
>;

def COND_LT : PatLeaf <
  (cond),
  [{switch(N->get()){{default: return false;
                     case ISD::SETOLT: case ISD::SETULT:
                     case ISD::SETLT: return true;}}}]
>;

def COND_LE : PatLeaf <
  (cond),
  [{switch(N->get()){{default: return false;
                     case ISD::SETOLE: case ISD::SETULE:
                     case ISD::SETLE: return true;}}}]
>;

def COND_NULL : PatLeaf <
  (cond),
  [{return false;}]
>;

//===----------------------------------------------------------------------===//
// Load/Store Pattern Fragments
//===----------------------------------------------------------------------===//

// "az" = any-extend or zero-extend load (EXTLOAD or ZEXTLOAD, not SEXTLOAD).
def az_extload : PatFrag<(ops node:$ptr), (unindexedload node:$ptr), [{
  LoadSDNode *L = cast<LoadSDNode>(N);
  return L->getExtensionType() == ISD::ZEXTLOAD ||
         L->getExtensionType() == ISD::EXTLOAD;
}]>;

def az_extloadi8 : PatFrag<(ops node:$ptr), (az_extload node:$ptr), [{
  return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i8;
}]>;

// NOTE(review): these predicates pass dyn_cast results straight to the
// isGlobalLoad/isConstantLoad/isLocalLoad helpers; since the fragments only
// ever match load nodes the dyn_cast presumably never fails — confirm the
// helpers tolerate null before relying on it.
def az_extloadi8_global : PatFrag<(ops node:$ptr), (az_extloadi8 node:$ptr), [{
  return isGlobalLoad(dyn_cast<LoadSDNode>(N));
}]>;

def sextloadi8_global : PatFrag<(ops node:$ptr), (sextloadi8 node:$ptr), [{
  return isGlobalLoad(dyn_cast<LoadSDNode>(N));
}]>;

def az_extloadi8_constant : PatFrag<(ops node:$ptr), (az_extloadi8 node:$ptr), [{
  return isConstantLoad(dyn_cast<LoadSDNode>(N), -1);
}]>;

def sextloadi8_constant : PatFrag<(ops node:$ptr), (sextloadi8 node:$ptr), [{
  return isConstantLoad(dyn_cast<LoadSDNode>(N), -1);
}]>;

def az_extloadi8_local : PatFrag<(ops node:$ptr), (az_extloadi8 node:$ptr), [{
  return isLocalLoad(dyn_cast<LoadSDNode>(N));
}]>;

def sextloadi8_local : PatFrag<(ops node:$ptr), (sextloadi8 node:$ptr), [{
  return isLocalLoad(dyn_cast<LoadSDNode>(N));
}]>;

def az_extloadi16 : PatFrag<(ops node:$ptr), (az_extload node:$ptr), [{
  return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i16;
}]>;

def az_extloadi16_global : PatFrag<(ops node:$ptr), (az_extloadi16 node:$ptr), [{
  return isGlobalLoad(dyn_cast<LoadSDNode>(N));
}]>;

def sextloadi16_global : PatFrag<(ops node:$ptr), (sextloadi16 node:$ptr), [{
  return isGlobalLoad(dyn_cast<LoadSDNode>(N));
}]>;

def az_extloadi16_constant : PatFrag<(ops node:$ptr), (az_extloadi16 node:$ptr), [{
  return isConstantLoad(dyn_cast<LoadSDNode>(N), -1);
}]>;

def sextloadi16_constant : PatFrag<(ops node:$ptr), (sextloadi16 node:$ptr), [{
  return isConstantLoad(dyn_cast<LoadSDNode>(N), -1);
}]>;

def az_extloadi16_local : PatFrag<(ops node:$ptr), (az_extloadi16 node:$ptr), [{
  return isLocalLoad(dyn_cast<LoadSDNode>(N));
}]>;

def sextloadi16_local : PatFrag<(ops node:$ptr), (sextloadi16 node:$ptr), [{
  return isLocalLoad(dyn_cast<LoadSDNode>(N));
}]>;

def az_extloadi32 : PatFrag<(ops node:$ptr), (az_extload node:$ptr), [{
  return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i32;
}]>;

def az_extloadi32_global : PatFrag<(ops node:$ptr),
                                   (az_extloadi32 node:$ptr), [{
  return isGlobalLoad(dyn_cast<LoadSDNode>(N));
}]>;

def az_extloadi32_constant : PatFrag<(ops node:$ptr),
                                     (az_extloadi32 node:$ptr), [{
  return isConstantLoad(dyn_cast<LoadSDNode>(N), -1);
}]>;

def truncstorei8_global : PatFrag<(ops node:$val, node:$ptr),
                                  (truncstorei8 node:$val, node:$ptr), [{
  return isGlobalStore(dyn_cast<StoreSDNode>(N));
}]>;

def truncstorei16_global : PatFrag<(ops node:$val, node:$ptr),
                                   (truncstorei16 node:$val, node:$ptr), [{
  return isGlobalStore(dyn_cast<StoreSDNode>(N));
}]>;

def local_store : PatFrag<(ops node:$val, node:$ptr),
                          (store node:$val, node:$ptr), [{
  return isLocalStore(dyn_cast<StoreSDNode>(N));
}]>;

def truncstorei8_local : PatFrag<(ops node:$val, node:$ptr),
                                 (truncstorei8 node:$val, node:$ptr), [{
  return isLocalStore(dyn_cast<StoreSDNode>(N));
}]>;

def truncstorei16_local : PatFrag<(ops node:$val, node:$ptr),
                                  (truncstorei16 node:$val, node:$ptr), [{
  return isLocalStore(dyn_cast<StoreSDNode>(N));
}]>;

def local_load : PatFrag<(ops node:$ptr), (load node:$ptr), [{
  return isLocalLoad(dyn_cast<LoadSDNode>(N));
}]>;

def atomic_load_add_local : PatFrag<(ops node:$ptr, node:$value),
                                    (atomic_load_add node:$ptr, node:$value), [{
  return dyn_cast<MemSDNode>(N)->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS;
}]>;

def atomic_load_sub_local : PatFrag<(ops node:$ptr, node:$value),
                                    (atomic_load_sub node:$ptr, node:$value), [{
  return dyn_cast<MemSDNode>(N)->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS;
}]>;

def mskor_global : PatFrag<(ops node:$val, node:$ptr),
                           (AMDGPUstore_mskor node:$val, node:$ptr), [{
  return dyn_cast<MemSDNode>(N)->getAddressSpace() == AMDGPUAS::GLOBAL_ADDRESS;
}]>;

// Single-precision float constants as raw IEEE-754 bit patterns.
class Constants {
int TWO_PI = 0x40c90fdb;
int PI = 0x40490fdb;
int TWO_PI_INV = 0x3e22f983;
int FP_UINT_MAX_PLUS_1 = 0x4f800000;    // 1 << 32 in floating point encoding
}
def CONST : Constants;

def FP_ZERO : PatLeaf <
  (fpimm),
  [{return N->getValueAPF().isZero();}]
>;

def FP_ONE : PatLeaf <
  (fpimm),
  [{return N->isExactlyValue(1.0);}]
>;

// Operands provably representable in 24 bits (unsigned / signed).
def U24 : ComplexPattern<i32, 1, "SelectU24", [], []>;
def I24 : ComplexPattern<i32, 1, "SelectI24", [], []>;

let isCodeGenOnly = 1, isPseudo = 1 in {

let usesCustomInserter = 1 in {

class CLAMP <RegisterClass rc> : AMDGPUShaderInst <
  (outs rc:$dst),
  (ins rc:$src0),
  "CLAMP $dst, $src0",
  [(set f32:$dst, (int_AMDIL_clamp f32:$src0, (f32 FP_ZERO), (f32 FP_ONE)))]
>;

class FABS <RegisterClass rc> : AMDGPUShaderInst <
  (outs rc:$dst),
  (ins rc:$src0),
  "FABS $dst, $src0",
  [(set f32:$dst, (fabs f32:$src0))]
>;

class FNEG <RegisterClass rc> : AMDGPUShaderInst <
  (outs rc:$dst),
  (ins rc:$src0),
  "FNEG $dst, $src0",
  [(set f32:$dst, (fneg f32:$src0))]
>;

} // usesCustomInserter = 1

// Pseudo instructions for register-indirect addressing; lowered later by
// each target.  The isRegisterLoad/isRegisterStore TSFlags bits (see
// AMDGPUInst) identify them.
multiclass RegisterLoadStore <RegisterClass dstClass, Operand addrClass,
                    ComplexPattern addrPat> {
let UseNamedOperandTable = 1 in {

  def RegisterLoad : AMDGPUShaderInst <
    (outs dstClass:$dst),
    (ins addrClass:$addr, i32imm:$chan),
    "RegisterLoad $dst, $addr",
    [(set i32:$dst, (AMDGPUregister_load addrPat:$addr, (i32 timm:$chan)))]
  > {
    let isRegisterLoad = 1;
  }

  def RegisterStore : AMDGPUShaderInst <
    (outs),
    (ins dstClass:$val, addrClass:$addr, i32imm:$chan),
    "RegisterStore $val, $addr",
    [(AMDGPUregister_store i32:$val, addrPat:$addr, (i32 timm:$chan))]
  > {
    let isRegisterStore = 1;
  }
}
}

} // End isCodeGenOnly = 1, isPseudo = 1

/* Generic helper patterns for intrinsics */
/* -------------------------------------- */

// pow(x, y) = exp2(y * log2(x))
class POW_Common <AMDGPUInst log_ieee, AMDGPUInst exp_ieee, AMDGPUInst mul>
  : Pat <
  (fpow f32:$src0, f32:$src1),
  (exp_ieee (mul f32:$src1, (log_ieee f32:$src0)))
>;

/* Other helper patterns */
/* --------------------- */

/* Extract element pattern */
class Extract_Element <ValueType sub_type, ValueType vec_type, int sub_idx,
                       SubRegIndex sub_reg>
  : Pat<
  (sub_type (vector_extract vec_type:$src, sub_idx)),
  (EXTRACT_SUBREG $src, sub_reg)
>;

/* Insert element pattern */
class Insert_Element <ValueType elem_type, ValueType vec_type,
                      int sub_idx, SubRegIndex sub_reg>
  : Pat <
  (vector_insert vec_type:$vec, elem_type:$elem, sub_idx),
  (INSERT_SUBREG $vec, $elem, sub_reg)
>;

class Vector4_Build <ValueType vecType, ValueType elemType> : Pat <
  (vecType (build_vector elemType:$x, elemType:$y, elemType:$z, elemType:$w)),
  (INSERT_SUBREG (INSERT_SUBREG (INSERT_SUBREG (INSERT_SUBREG
  (vecType (IMPLICIT_DEF)), $x, sub0), $y, sub1), $z, sub2), $w, sub3)
>;

// XXX: Convert to new syntax and use COPY_TO_REG, once the DFAPacketizer
// can handle COPY instructions.
// bitconvert pattern
class BitConvert <ValueType dt, ValueType st, RegisterClass rc> : Pat <
  (dt (bitconvert (st rc:$src0))),
  (dt rc:$src0)
>;

// XXX: Convert to new syntax and use COPY_TO_REG, once the DFAPacketizer
// can handle COPY instructions.
class DwordAddrPat<ValueType vt, RegisterClass rc> : Pat <
  (vt (AMDGPUdwordaddr (vt rc:$addr))),
  (vt rc:$addr)
>;

// BFI_INT patterns

multiclass BFIPatterns <Instruction BFI_INT> {

  // Definition from ISA doc:
  //     (y & x) | (z & ~x)
  def : Pat <
    (or (and i32:$y, i32:$x), (and i32:$z, (not i32:$x))),
    (BFI_INT $x, $y, $z)
  >;

  // SHA-256 Ch function
  //    z ^ (x & (y ^ z))
  def : Pat <
    (xor i32:$z, (and i32:$x, (xor i32:$y, i32:$z))),
    (BFI_INT $x, $y, $z)
  >;

}

// SHA-256 Ma patterns

// ((x & z) | (y & (x | z))) -> BFI_INT (XOR x, y), z, y
class SHA256MaPattern <Instruction BFI_INT, Instruction XOR> : Pat <
  (or (and i32:$x, i32:$z), (and i32:$y, (or i32:$x, i32:$z))),
  (BFI_INT (XOR i32:$x, i32:$y), i32:$z, i32:$y)
>;

// Bitfield extract patterns

def legalshift32 : ImmLeaf <i32, [{return Imm >=0 && Imm < 32;}]>;
// Matches a contiguous low-bit mask; the XForm rewrites it to its width
// (number of trailing ones) for the BFE instruction.
def bfemask : PatLeaf <(imm), [{return isMask_32(N->getZExtValue());}],
                       SDNodeXForm<imm, [{ return CurDAG->getTargetConstant(CountTrailingOnes_32(N->getZExtValue()), MVT::i32);}]>>;

class BFEPattern <Instruction BFE> : Pat <
  (and (srl i32:$x, legalshift32:$y), bfemask:$z),
  (BFE $x, $y, $z)
>;

// rotr pattern
class ROTRPattern <Instruction BIT_ALIGN> : Pat <
  (rotr i32:$src0, i32:$src1),
  (BIT_ALIGN $src0, $src0, $src1)
>;

// 24-bit arithmetic patterns
def umul24 : PatFrag <(ops node:$x, node:$y), (mul node:$x, node:$y)>;

/*
class UMUL24Pattern <Instruction UMUL24> : Pat <
  (mul U24:$x, U24:$y),
  (UMUL24 $x, $y)
>;
*/

include "R600Instructions.td"

include "SIInstrInfo.td"