// PPCISelLowering.cpp, revision 257f75d0b88a7d3d5ba5b7b7908a97f6dd56e27d
//===-- PPCISelLowering.cpp - PPC DAG Lowering Implementation -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the PPCISelLowering class.
//
//===----------------------------------------------------------------------===//

#include "PPCISelLowering.h"
#include "PPCMachineFunctionInfo.h"
#include "PPCPredicates.h"
#include "PPCTargetMachine.h"
#include "PPCPerfectShuffle.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/VectorExtras.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/Constants.h"
#include "llvm/Function.h"
#include "llvm/Intrinsics.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/CommandLine.h"
using namespace llvm;

static cl::opt<bool> EnablePPCPreinc("enable-ppc-preinc",
  cl::desc("enable preincrement load/store generation on PPC (experimental)"),
  cl::Hidden);

PPCTargetLowering::PPCTargetLowering(PPCTargetMachine &TM)
  : TargetLowering(TM), PPCSubTarget(*TM.getSubtargetImpl()) {

  setPow2DivIsCheap();

  // Use _setjmp/_longjmp instead of setjmp/longjmp.
  setUseUnderscoreSetJmp(true);
  setUseUnderscoreLongJmp(true);

  // Set up the register classes.
  addRegisterClass(MVT::i32, PPC::GPRCRegisterClass);
  addRegisterClass(MVT::f32, PPC::F4RCRegisterClass);
  addRegisterClass(MVT::f64, PPC::F8RCRegisterClass);

  // PowerPC has an i16 but no i8 (or i1) SEXTLOAD
  setLoadXAction(ISD::SEXTLOAD, MVT::i1, Promote);
  setLoadXAction(ISD::SEXTLOAD, MVT::i8, Expand);

  setTruncStoreAction(MVT::f64, MVT::f32, Expand);

  // PowerPC has pre-inc loads and stores.
  setIndexedLoadAction(ISD::PRE_INC, MVT::i1, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i8, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i16, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i32, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i64, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i1, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i8, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i16, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i32, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i64, Legal);

  // Shortening conversions involving ppcf128 get expanded (2 regs -> 1 reg)
  setConvertAction(MVT::ppcf128, MVT::f64, Expand);
  setConvertAction(MVT::ppcf128, MVT::f32, Expand);
  // This is used in the ppcf128->int sequence.  Note it has different semantics
  // from FP_ROUND: that rounds to nearest, this rounds to zero.
  setOperationAction(ISD::FP_ROUND_INREG, MVT::ppcf128, Custom);

  // PowerPC has no intrinsics for these particular operations
  setOperationAction(ISD::MEMMOVE, MVT::Other, Expand);
  setOperationAction(ISD::MEMSET, MVT::Other, Expand);
  setOperationAction(ISD::MEMCPY, MVT::Other, Expand);
  setOperationAction(ISD::MEMBARRIER, MVT::Other, Expand);

  // PowerPC has no SREM/UREM instructions
  setOperationAction(ISD::SREM, MVT::i32, Expand);
  setOperationAction(ISD::UREM, MVT::i32, Expand);
  setOperationAction(ISD::SREM, MVT::i64, Expand);
  setOperationAction(ISD::UREM, MVT::i64, Expand);

  // Don't use SMUL_LOHI/UMUL_LOHI or SDIVREM/UDIVREM to lower SREM/UREM.
  setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
  setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
  setOperationAction(ISD::UMUL_LOHI, MVT::i64, Expand);
  setOperationAction(ISD::SMUL_LOHI, MVT::i64, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i32, Expand);
  setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i64, Expand);
  setOperationAction(ISD::SDIVREM, MVT::i64, Expand);

  // We don't support sin/cos/sqrt/fmod/pow
  setOperationAction(ISD::FSIN , MVT::f64, Expand);
  setOperationAction(ISD::FCOS , MVT::f64, Expand);
  setOperationAction(ISD::FREM , MVT::f64, Expand);
  setOperationAction(ISD::FPOW , MVT::f64, Expand);
  setOperationAction(ISD::FSIN , MVT::f32, Expand);
  setOperationAction(ISD::FCOS , MVT::f32, Expand);
  setOperationAction(ISD::FREM , MVT::f32, Expand);
  setOperationAction(ISD::FPOW , MVT::f32, Expand);

  setOperationAction(ISD::FLT_ROUNDS_, MVT::i32, Custom);

  // If we're enabling GP optimizations, use hardware square root
  if (!TM.getSubtarget<PPCSubtarget>().hasFSQRT()) {
    setOperationAction(ISD::FSQRT, MVT::f64, Expand);
    setOperationAction(ISD::FSQRT, MVT::f32, Expand);
  }

  setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
  setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);

  // PowerPC does not have BSWAP, CTPOP or CTTZ
  setOperationAction(ISD::BSWAP, MVT::i32, Expand);
  setOperationAction(ISD::CTPOP, MVT::i32, Expand);
  setOperationAction(ISD::CTTZ , MVT::i32, Expand);
  setOperationAction(ISD::BSWAP, MVT::i64, Expand);
  setOperationAction(ISD::CTPOP, MVT::i64, Expand);
  setOperationAction(ISD::CTTZ , MVT::i64, Expand);

  // PowerPC does not have ROTR
  setOperationAction(ISD::ROTR, MVT::i32, Expand);

  // PowerPC does not have Select
  setOperationAction(ISD::SELECT, MVT::i32, Expand);
  setOperationAction(ISD::SELECT, MVT::i64, Expand);
  setOperationAction(ISD::SELECT, MVT::f32, Expand);
  setOperationAction(ISD::SELECT, MVT::f64, Expand);

  // PowerPC wants to turn select_cc of FP into fsel when possible.
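  // (Note: fsel FRT,FRA,FRC,FRB selects FRC when FRA >= 0.0 and FRB otherwise,
  // so a select_cc whose condition can be rewritten as a ">= 0" comparison can
  // map to a single instruction.)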
  setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::f64, Custom);

  // PowerPC wants to optimize integer setcc a bit
  setOperationAction(ISD::SETCC, MVT::i32, Custom);

  // PowerPC does not have BRCOND which requires SetCC
  setOperationAction(ISD::BRCOND, MVT::Other, Expand);

  setOperationAction(ISD::BR_JT, MVT::Other, Expand);

  // PowerPC turns FP_TO_SINT into FCTIWZ and some load/stores.
  setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);

  // PowerPC does not have [U|S]INT_TO_FP
  setOperationAction(ISD::SINT_TO_FP, MVT::i32, Expand);
  setOperationAction(ISD::UINT_TO_FP, MVT::i32, Expand);

  setOperationAction(ISD::BIT_CONVERT, MVT::f32, Expand);
  setOperationAction(ISD::BIT_CONVERT, MVT::i32, Expand);
  setOperationAction(ISD::BIT_CONVERT, MVT::i64, Expand);
  setOperationAction(ISD::BIT_CONVERT, MVT::f64, Expand);

  // We cannot sextinreg(i1).  Expand to shifts.
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);

  // Support label based line numbers.
  setOperationAction(ISD::LOCATION, MVT::Other, Expand);
  setOperationAction(ISD::DEBUG_LOC, MVT::Other, Expand);

  setOperationAction(ISD::EXCEPTIONADDR, MVT::i64, Expand);
  setOperationAction(ISD::EHSELECTION,   MVT::i64, Expand);
  setOperationAction(ISD::EXCEPTIONADDR, MVT::i32, Expand);
  setOperationAction(ISD::EHSELECTION,   MVT::i32, Expand);


  // We want to legalize GlobalAddress and ConstantPool nodes into the
  // appropriate instructions to materialize the address.
  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom);
  setOperationAction(ISD::ConstantPool,  MVT::i32, Custom);
  setOperationAction(ISD::JumpTable,     MVT::i32, Custom);
  setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i64, Custom);
  setOperationAction(ISD::ConstantPool,  MVT::i64, Custom);
  setOperationAction(ISD::JumpTable,     MVT::i64, Custom);

  // RET must be custom lowered, to meet ABI requirements
  setOperationAction(ISD::RET, MVT::Other, Custom);

  // VASTART needs to be custom lowered to use the VarArgsFrameIndex
  setOperationAction(ISD::VASTART, MVT::Other, Custom);

  // VAARG is custom lowered with ELF 32 ABI
  if (TM.getSubtarget<PPCSubtarget>().isELF32_ABI())
    setOperationAction(ISD::VAARG, MVT::Other, Custom);
  else
    setOperationAction(ISD::VAARG, MVT::Other, Expand);

  // Use the default implementation.
  setOperationAction(ISD::VACOPY            , MVT::Other, Expand);
  setOperationAction(ISD::VAEND             , MVT::Other, Expand);
  setOperationAction(ISD::STACKSAVE         , MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE      , MVT::Other, Custom);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32  , Custom);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64  , Custom);

  // We want to custom lower some of our intrinsics.
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);

  if (TM.getSubtarget<PPCSubtarget>().has64BitSupport()) {
    // They also have instructions for converting between i64 and fp.
    setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
    setOperationAction(ISD::FP_TO_UINT, MVT::i64, Expand);
    setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::i64, Expand);
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand);

    // FIXME: disable this lowered code.  This generates 64-bit register values,
    // and we don't model the fact that the top part is clobbered by calls.  We
    // need to flag these together so that the value isn't live across a call.
    //setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);

    // To take advantage of the above i64 FP_TO_SINT, promote i32 FP_TO_UINT
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Promote);
  } else {
    // PowerPC does not have FP_TO_UINT on 32-bit implementations.
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand);
  }

  if (TM.getSubtarget<PPCSubtarget>().use64BitRegs()) {
    // 64-bit PowerPC implementations can support i64 types directly
    addRegisterClass(MVT::i64, PPC::G8RCRegisterClass);
    // BUILD_PAIR can't be handled natively, and should be expanded to shl/or
    setOperationAction(ISD::BUILD_PAIR, MVT::i64, Expand);
    // 64-bit PowerPC wants to expand i128 shifts itself.
    setOperationAction(ISD::SHL_PARTS, MVT::i64, Custom);
    setOperationAction(ISD::SRA_PARTS, MVT::i64, Custom);
    setOperationAction(ISD::SRL_PARTS, MVT::i64, Custom);
  } else {
    // 32-bit PowerPC wants to expand i64 shifts itself.
    setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom);
    setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom);
    setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom);
  }

  if (TM.getSubtarget<PPCSubtarget>().hasAltivec()) {
    // First set operation action for all vector types to expand. Then we
    // will selectively turn on ones that can be effectively codegen'd.
    for (unsigned VT = (unsigned)MVT::FIRST_VECTOR_VALUETYPE;
         VT <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++VT) {
      // add/sub are legal for all supported vector VT's.
      setOperationAction(ISD::ADD , (MVT::ValueType)VT, Legal);
      setOperationAction(ISD::SUB , (MVT::ValueType)VT, Legal);

      // We promote all shuffles to v16i8.
      setOperationAction(ISD::VECTOR_SHUFFLE, (MVT::ValueType)VT, Promote);
      AddPromotedToType (ISD::VECTOR_SHUFFLE, (MVT::ValueType)VT, MVT::v16i8);

      // We promote all non-typed operations to v4i32.
      setOperationAction(ISD::AND   , (MVT::ValueType)VT, Promote);
      AddPromotedToType (ISD::AND   , (MVT::ValueType)VT, MVT::v4i32);
      setOperationAction(ISD::OR    , (MVT::ValueType)VT, Promote);
      AddPromotedToType (ISD::OR    , (MVT::ValueType)VT, MVT::v4i32);
      setOperationAction(ISD::XOR   , (MVT::ValueType)VT, Promote);
      AddPromotedToType (ISD::XOR   , (MVT::ValueType)VT, MVT::v4i32);
      setOperationAction(ISD::LOAD  , (MVT::ValueType)VT, Promote);
      AddPromotedToType (ISD::LOAD  , (MVT::ValueType)VT, MVT::v4i32);
      setOperationAction(ISD::SELECT, (MVT::ValueType)VT, Promote);
      AddPromotedToType (ISD::SELECT, (MVT::ValueType)VT, MVT::v4i32);
      setOperationAction(ISD::STORE , (MVT::ValueType)VT, Promote);
      AddPromotedToType (ISD::STORE , (MVT::ValueType)VT, MVT::v4i32);

      // No other operations are legal.
      setOperationAction(ISD::MUL , (MVT::ValueType)VT, Expand);
      setOperationAction(ISD::SDIV, (MVT::ValueType)VT, Expand);
      setOperationAction(ISD::SREM, (MVT::ValueType)VT, Expand);
      setOperationAction(ISD::UDIV, (MVT::ValueType)VT, Expand);
      setOperationAction(ISD::UREM, (MVT::ValueType)VT, Expand);
      setOperationAction(ISD::FDIV, (MVT::ValueType)VT, Expand);
      setOperationAction(ISD::FNEG, (MVT::ValueType)VT, Expand);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, (MVT::ValueType)VT, Expand);
      setOperationAction(ISD::INSERT_VECTOR_ELT, (MVT::ValueType)VT, Expand);
      setOperationAction(ISD::BUILD_VECTOR, (MVT::ValueType)VT, Expand);
      setOperationAction(ISD::UMUL_LOHI, (MVT::ValueType)VT, Expand);
      setOperationAction(ISD::SMUL_LOHI, (MVT::ValueType)VT, Expand);
      setOperationAction(ISD::UDIVREM, (MVT::ValueType)VT, Expand);
      setOperationAction(ISD::SDIVREM, (MVT::ValueType)VT, Expand);
      setOperationAction(ISD::SCALAR_TO_VECTOR, (MVT::ValueType)VT, Expand);
      setOperationAction(ISD::FPOW, (MVT::ValueType)VT, Expand);
      setOperationAction(ISD::CTPOP, (MVT::ValueType)VT, Expand);
      setOperationAction(ISD::CTLZ, (MVT::ValueType)VT, Expand);
      setOperationAction(ISD::CTTZ, (MVT::ValueType)VT, Expand);
    }

    // We can custom expand all VECTOR_SHUFFLEs to VPERM, others we can handle
    // with merges, splats, etc.
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16i8, Custom);

    setOperationAction(ISD::AND   , MVT::v4i32, Legal);
    setOperationAction(ISD::OR    , MVT::v4i32, Legal);
    setOperationAction(ISD::XOR   , MVT::v4i32, Legal);
    setOperationAction(ISD::LOAD  , MVT::v4i32, Legal);
    setOperationAction(ISD::SELECT, MVT::v4i32, Expand);
    setOperationAction(ISD::STORE , MVT::v4i32, Legal);

    addRegisterClass(MVT::v4f32, PPC::VRRCRegisterClass);
    addRegisterClass(MVT::v4i32, PPC::VRRCRegisterClass);
    addRegisterClass(MVT::v8i16, PPC::VRRCRegisterClass);
    addRegisterClass(MVT::v16i8, PPC::VRRCRegisterClass);

    setOperationAction(ISD::MUL, MVT::v4f32, Legal);
    setOperationAction(ISD::MUL, MVT::v4i32, Custom);
    setOperationAction(ISD::MUL, MVT::v8i16, Custom);
    setOperationAction(ISD::MUL, MVT::v16i8, Custom);

    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i32, Custom);

    setOperationAction(ISD::BUILD_VECTOR, MVT::v16i8, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v8i16, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4i32, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);
  }

  setShiftAmountType(MVT::i32);
  setSetCCResultContents(ZeroOrOneSetCCResult);

  if (TM.getSubtarget<PPCSubtarget>().isPPC64()) {
    setStackPointerRegisterToSaveRestore(PPC::X1);
    setExceptionPointerRegister(PPC::X3);
    setExceptionSelectorRegister(PPC::X4);
  } else {
    setStackPointerRegisterToSaveRestore(PPC::R1);
    setExceptionPointerRegister(PPC::R3);
    setExceptionSelectorRegister(PPC::R4);
  }

  // We have target-specific dag combine patterns for the following nodes:
  setTargetDAGCombine(ISD::SINT_TO_FP);
  setTargetDAGCombine(ISD::STORE);
  setTargetDAGCombine(ISD::BR_CC);
  setTargetDAGCombine(ISD::BSWAP);

  // Darwin long double math library functions have $LDBL128 appended.
  if (TM.getSubtarget<PPCSubtarget>().isDarwin()) {
    setLibcallName(RTLIB::COS_PPCF128, "cosl$LDBL128");
    setLibcallName(RTLIB::POW_PPCF128, "powl$LDBL128");
    setLibcallName(RTLIB::REM_PPCF128, "fmodl$LDBL128");
    setLibcallName(RTLIB::SIN_PPCF128, "sinl$LDBL128");
    setLibcallName(RTLIB::SQRT_PPCF128, "sqrtl$LDBL128");
  }

  computeRegisterProperties();
}

/// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
/// function arguments in the caller parameter area.
unsigned PPCTargetLowering::getByValTypeAlignment(const Type *Ty) const {
  TargetMachine &TM = getTargetMachine();
  // Darwin passes everything on 4 byte boundary.
  if (TM.getSubtarget<PPCSubtarget>().isDarwin())
    return 4;
  // FIXME Elf TBD
  return 4;
}

const char *PPCTargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch (Opcode) {
  default: return 0;
  case PPCISD::FSEL:          return "PPCISD::FSEL";
  case PPCISD::FCFID:         return "PPCISD::FCFID";
  case PPCISD::FCTIDZ:        return "PPCISD::FCTIDZ";
  case PPCISD::FCTIWZ:        return "PPCISD::FCTIWZ";
  case PPCISD::STFIWX:        return "PPCISD::STFIWX";
  case PPCISD::VMADDFP:       return "PPCISD::VMADDFP";
  case PPCISD::VNMSUBFP:      return "PPCISD::VNMSUBFP";
  case PPCISD::VPERM:         return "PPCISD::VPERM";
  case PPCISD::Hi:            return "PPCISD::Hi";
  case PPCISD::Lo:            return "PPCISD::Lo";
  case PPCISD::DYNALLOC:      return "PPCISD::DYNALLOC";
  case PPCISD::GlobalBaseReg: return "PPCISD::GlobalBaseReg";
  case PPCISD::SRL:           return "PPCISD::SRL";
  case PPCISD::SRA:           return "PPCISD::SRA";
  case PPCISD::SHL:           return "PPCISD::SHL";
  case PPCISD::EXTSW_32:      return "PPCISD::EXTSW_32";
  case PPCISD::STD_32:        return "PPCISD::STD_32";
  case PPCISD::CALL_ELF:      return "PPCISD::CALL_ELF";
  case PPCISD::CALL_Macho:    return "PPCISD::CALL_Macho";
  case PPCISD::MTCTR:         return "PPCISD::MTCTR";
  case PPCISD::BCTRL_Macho:   return "PPCISD::BCTRL_Macho";
  case PPCISD::BCTRL_ELF:     return "PPCISD::BCTRL_ELF";
  case PPCISD::RET_FLAG:      return "PPCISD::RET_FLAG";
  case PPCISD::MFCR:          return "PPCISD::MFCR";
  case PPCISD::VCMP:          return "PPCISD::VCMP";
  case PPCISD::VCMPo:         return "PPCISD::VCMPo";
  case PPCISD::LBRX:          return "PPCISD::LBRX";
  case PPCISD::STBRX:         return "PPCISD::STBRX";
  case PPCISD::COND_BRANCH:   return "PPCISD::COND_BRANCH";
  case PPCISD::MFFS:          return "PPCISD::MFFS";
  case PPCISD::MTFSB0:        return "PPCISD::MTFSB0";
  case PPCISD::MTFSB1:        return "PPCISD::MTFSB1";
  case PPCISD::FADDRTZ:       return "PPCISD::FADDRTZ";
  case PPCISD::MTFSF:         return "PPCISD::MTFSF";
  }
}


MVT::ValueType
PPCTargetLowering::getSetCCResultType(const SDOperand &) const {
  return MVT::i32;
}


//===----------------------------------------------------------------------===//
// Node matching predicates, for use by the tblgen matching code.
//===----------------------------------------------------------------------===//

/// isFloatingPointZero - Return true if this is 0.0 or -0.0.
static bool isFloatingPointZero(SDOperand Op) {
  if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op))
    return CFP->getValueAPF().isZero();
  else if (ISD::isEXTLoad(Op.Val) || ISD::isNON_EXTLoad(Op.Val)) {
    // Maybe this has already been legalized into the constant pool?
    if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(Op.getOperand(1)))
      if (ConstantFP *CFP = dyn_cast<ConstantFP>(CP->getConstVal()))
        return CFP->getValueAPF().isZero();
  }
  return false;
}

/// isConstantOrUndef - Op is either an undef node or a ConstantSDNode.  Return
/// true if Op is undef or if it matches the specified value.
static bool isConstantOrUndef(SDOperand Op, unsigned Val) {
  return Op.getOpcode() == ISD::UNDEF ||
         cast<ConstantSDNode>(Op)->getValue() == Val;
}

/// isVPKUHUMShuffleMask - Return true if this is the shuffle mask for a
/// VPKUHUM instruction.
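/// (In big-endian byte numbering, vpkuhum keeps the low-order byte of each
/// halfword, so the non-unary form corresponds to the byte shuffle mask
/// <1,3,5,...,31>; the unary form feeds the same vector to both inputs, so
/// each half of the mask repeats <1,3,...,15>.)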
bool PPC::isVPKUHUMShuffleMask(SDNode *N, bool isUnary) {
  if (!isUnary) {
    for (unsigned i = 0; i != 16; ++i)
      if (!isConstantOrUndef(N->getOperand(i), i*2+1))
        return false;
  } else {
    for (unsigned i = 0; i != 8; ++i)
      if (!isConstantOrUndef(N->getOperand(i), i*2+1) ||
          !isConstantOrUndef(N->getOperand(i+8), i*2+1))
        return false;
  }
  return true;
}

/// isVPKUWUMShuffleMask - Return true if this is the shuffle mask for a
/// VPKUWUM instruction.
bool PPC::isVPKUWUMShuffleMask(SDNode *N, bool isUnary) {
  if (!isUnary) {
    for (unsigned i = 0; i != 16; i += 2)
      if (!isConstantOrUndef(N->getOperand(i  ), i*2+2) ||
          !isConstantOrUndef(N->getOperand(i+1), i*2+3))
        return false;
  } else {
    for (unsigned i = 0; i != 8; i += 2)
      if (!isConstantOrUndef(N->getOperand(i  ), i*2+2) ||
          !isConstantOrUndef(N->getOperand(i+1), i*2+3) ||
          !isConstantOrUndef(N->getOperand(i+8), i*2+2) ||
          !isConstantOrUndef(N->getOperand(i+9), i*2+3))
        return false;
  }
  return true;
}

/// isVMerge - Common function, used to match vmrg* shuffles.
///
static bool isVMerge(SDNode *N, unsigned UnitSize,
                     unsigned LHSStart, unsigned RHSStart) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR &&
         N->getNumOperands() == 16 && "PPC only supports shuffles by bytes!");
  assert((UnitSize == 1 || UnitSize == 2 || UnitSize == 4) &&
         "Unsupported merge size!");

  for (unsigned i = 0; i != 8/UnitSize; ++i)     // Step over units
    for (unsigned j = 0; j != UnitSize; ++j) {   // Step over bytes within unit
      if (!isConstantOrUndef(N->getOperand(i*UnitSize*2+j),
                             LHSStart+j+i*UnitSize) ||
          !isConstantOrUndef(N->getOperand(i*UnitSize*2+UnitSize+j),
                             RHSStart+j+i*UnitSize))
        return false;
    }
  return true;
}

/// isVMRGLShuffleMask - Return true if this is a shuffle mask suitable for
/// a VMRGL* instruction with the specified unit size (1,2 or 4 bytes).
bool PPC::isVMRGLShuffleMask(SDNode *N, unsigned UnitSize, bool isUnary) {
  if (!isUnary)
    return isVMerge(N, UnitSize, 8, 24);
  return isVMerge(N, UnitSize, 8, 8);
}

/// isVMRGHShuffleMask - Return true if this is a shuffle mask suitable for
/// a VMRGH* instruction with the specified unit size (1,2 or 4 bytes).
bool PPC::isVMRGHShuffleMask(SDNode *N, unsigned UnitSize, bool isUnary) {
  if (!isUnary)
    return isVMerge(N, UnitSize, 0, 16);
  return isVMerge(N, UnitSize, 0, 0);
}


/// isVSLDOIShuffleMask - If this is a vsldoi shuffle mask, return the shift
/// amount, otherwise return -1.
int PPC::isVSLDOIShuffleMask(SDNode *N, bool isUnary) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR &&
         N->getNumOperands() == 16 && "PPC only supports shuffles by bytes!");
  // Find the first non-undef value in the shuffle mask.
  unsigned i;
  for (i = 0; i != 16 && N->getOperand(i).getOpcode() == ISD::UNDEF; ++i)
    /*search*/;

  if (i == 16) return -1;  // all undef.

  // Otherwise, check to see if the rest of the elements are consecutively
  // numbered from this value.
  unsigned ShiftAmt = cast<ConstantSDNode>(N->getOperand(i))->getValue();
  if (ShiftAmt < i) return -1;
  ShiftAmt -= i;

  if (!isUnary) {
    // Check the rest of the elements to see if they are consecutive.
    for (++i; i != 16; ++i)
      if (!isConstantOrUndef(N->getOperand(i), ShiftAmt+i))
        return -1;
  } else {
    // Check the rest of the elements to see if they are consecutive.
    for (++i; i != 16; ++i)
      if (!isConstantOrUndef(N->getOperand(i), (ShiftAmt+i) & 15))
        return -1;
  }

  return ShiftAmt;
}

/// isSplatShuffleMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a splat of a single element that is suitable for input to
/// VSPLTB/VSPLTH/VSPLTW.
bool PPC::isSplatShuffleMask(SDNode *N, unsigned EltSize) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR &&
         N->getNumOperands() == 16 &&
         (EltSize == 1 || EltSize == 2 || EltSize == 4));

  // This is a splat operation if each element of the permute is the same, and
  // if the value doesn't reference the second vector.
  unsigned ElementBase = 0;
  SDOperand Elt = N->getOperand(0);
  if (ConstantSDNode *EltV = dyn_cast<ConstantSDNode>(Elt))
    ElementBase = EltV->getValue();
  else
    return false;   // FIXME: Handle UNDEF elements too!

  if (cast<ConstantSDNode>(Elt)->getValue() >= 16)
    return false;

  // Check that they are consecutive.
  for (unsigned i = 1; i != EltSize; ++i) {
    if (!isa<ConstantSDNode>(N->getOperand(i)) ||
        cast<ConstantSDNode>(N->getOperand(i))->getValue() != i+ElementBase)
      return false;
  }

  assert(isa<ConstantSDNode>(Elt) && "Invalid VECTOR_SHUFFLE mask!");
  for (unsigned i = EltSize, e = 16; i != e; i += EltSize) {
    if (N->getOperand(i).getOpcode() == ISD::UNDEF) continue;
    assert(isa<ConstantSDNode>(N->getOperand(i)) &&
           "Invalid VECTOR_SHUFFLE mask!");
    for (unsigned j = 0; j != EltSize; ++j)
      if (N->getOperand(i+j) != N->getOperand(j))
        return false;
  }

  return true;
}

/// isAllNegativeZeroVector - Returns true if all elements of build_vector
/// are -0.0.
bool PPC::isAllNegativeZeroVector(SDNode *N) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR);
  if (PPC::isSplatShuffleMask(N, N->getNumOperands()))
    if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(N))
      return CFP->getValueAPF().isNegZero();
  return false;
}

/// getVSPLTImmediate - Return the appropriate VSPLT* immediate to splat the
/// specified isSplatShuffleMask VECTOR_SHUFFLE mask.
unsigned PPC::getVSPLTImmediate(SDNode *N, unsigned EltSize) {
  assert(isSplatShuffleMask(N, EltSize));
  return cast<ConstantSDNode>(N->getOperand(0))->getValue() / EltSize;
}

/// get_VSPLTI_elt - If this is a build_vector of constants which can be formed
/// by using a vspltis[bhw] instruction of the specified element size, return
/// the constant being splatted.  The ByteSize field indicates the number of
/// bytes of each element [124] -> [bhw].
SDOperand PPC::get_VSPLTI_elt(SDNode *N, unsigned ByteSize, SelectionDAG &DAG) {
  SDOperand OpVal(0, 0);

  // If ByteSize of the splat is bigger than the element size of the
  // build_vector, then we have a case where we are checking for a splat where
  // multiple elements of the buildvector are folded together into a single
  // logical element of the splat (e.g. "vspltish 1" to splat {0,1}*8).
  unsigned EltSize = 16/N->getNumOperands();
  if (EltSize < ByteSize) {
    unsigned Multiple = ByteSize/EltSize;   // Number of BV entries per spltval.
    SDOperand UniquedVals[4];
    assert(Multiple > 1 && Multiple <= 4 && "How can this happen?");

    // See if all of the elements in the buildvector agree across.
    for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
      if (N->getOperand(i).getOpcode() == ISD::UNDEF) continue;
      // If the element isn't a constant, bail fully out.
      if (!isa<ConstantSDNode>(N->getOperand(i))) return SDOperand();


      if (UniquedVals[i&(Multiple-1)].Val == 0)
        UniquedVals[i&(Multiple-1)] = N->getOperand(i);
      else if (UniquedVals[i&(Multiple-1)] != N->getOperand(i))
        return SDOperand();  // no match.
    }

    // Okay, if we reached this point, UniquedVals[0..Multiple-1] contains
    // either constant or undef values that are identical for each chunk.  See
    // if these chunks can form into a larger vspltis*.

    // Check to see if all of the leading entries are either 0 or -1.  If
    // neither, then this won't fit into the immediate field.
    bool LeadingZero = true;
    bool LeadingOnes = true;
    for (unsigned i = 0; i != Multiple-1; ++i) {
      if (UniquedVals[i].Val == 0) continue;  // Must have been undefs.

      LeadingZero &= cast<ConstantSDNode>(UniquedVals[i])->isNullValue();
      LeadingOnes &= cast<ConstantSDNode>(UniquedVals[i])->isAllOnesValue();
    }
    // Finally, check the least significant entry.
    if (LeadingZero) {
      if (UniquedVals[Multiple-1].Val == 0)
        return DAG.getTargetConstant(0, MVT::i32);  // 0,0,0,undef
      int Val = cast<ConstantSDNode>(UniquedVals[Multiple-1])->getValue();
      if (Val < 16)
        return DAG.getTargetConstant(Val, MVT::i32);  // 0,0,0,4 -> vspltisw(4)
    }
    if (LeadingOnes) {
      if (UniquedVals[Multiple-1].Val == 0)
        return DAG.getTargetConstant(~0U, MVT::i32);  // -1,-1,-1,undef
      int Val = cast<ConstantSDNode>(UniquedVals[Multiple-1])->getSignExtended();
      if (Val >= -16)  // -1,-1,-1,-2 -> vspltisw(-2)
        return DAG.getTargetConstant(Val, MVT::i32);
    }

    return SDOperand();
  }

  // Check to see if this buildvec has a single non-undef value in its elements.
  for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
    if (N->getOperand(i).getOpcode() == ISD::UNDEF) continue;
    if (OpVal.Val == 0)
      OpVal = N->getOperand(i);
    else if (OpVal != N->getOperand(i))
      return SDOperand();
  }

  if (OpVal.Val == 0) return SDOperand();  // All UNDEF: use implicit def.

  unsigned ValSizeInBytes = 0;
  uint64_t Value = 0;
  if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(OpVal)) {
    Value = CN->getValue();
    ValSizeInBytes = MVT::getSizeInBits(CN->getValueType(0))/8;
  } else if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(OpVal)) {
    assert(CN->getValueType(0) == MVT::f32 && "Only one legal FP vector type!");
    Value = FloatToBits(CN->getValueAPF().convertToFloat());
    ValSizeInBytes = 4;
  }

  // If the splat value is larger than the element value, then we can never do
  // this splat.  The only case that we could fit the replicated bits into our
  // immediate field for would be zero, and we prefer to use vxor for it.
  if (ValSizeInBytes < ByteSize) return SDOperand();

  // If the element value is larger than the splat value, cut it in half and
  // check to see if the two halves are equal.  Continue doing this until we
  // get to ByteSize.  This allows us to handle 0x01010101 as 0x01.
  while (ValSizeInBytes > ByteSize) {
    ValSizeInBytes >>= 1;

    // If the top half equals the bottom half, we're still ok.
    if (((Value >> (ValSizeInBytes*8)) & ((1 << (8*ValSizeInBytes))-1)) !=
        (Value                         & ((1 << (8*ValSizeInBytes))-1)))
      return SDOperand();
  }

  // Properly sign extend the value.
  int ShAmt = (4-ByteSize)*8;
  int MaskVal = ((int)Value << ShAmt) >> ShAmt;

  // If this is zero, don't match, zero matches ISD::isBuildVectorAllZeros.
  if (MaskVal == 0) return SDOperand();

  // Finally, if this value fits in a 5 bit sext field, return it
  if (((MaskVal << (32-5)) >> (32-5)) == MaskVal)
    return DAG.getTargetConstant(MaskVal, MVT::i32);
  return SDOperand();
}

//===----------------------------------------------------------------------===//
// Addressing Mode Selection
//===----------------------------------------------------------------------===//

/// isIntS16Immediate - This method tests to see if the node is either a 32-bit
/// or 64-bit immediate, and if the value can be accurately represented as a
/// sign extension from a 16-bit value.  If so, this returns true and the
/// immediate.
static bool isIntS16Immediate(SDNode *N, short &Imm) {
  if (N->getOpcode() != ISD::Constant)
    return false;

  Imm = (short)cast<ConstantSDNode>(N)->getValue();
  if (N->getValueType(0) == MVT::i32)
    return Imm == (int32_t)cast<ConstantSDNode>(N)->getValue();
  else
    return Imm == (int64_t)cast<ConstantSDNode>(N)->getValue();
}
static bool isIntS16Immediate(SDOperand Op, short &Imm) {
  return isIntS16Immediate(Op.Val, Imm);
}


/// SelectAddressRegReg - Given the specified address, check to see if it
/// can be represented as an indexed [r+r] operation.  Returns false if it
/// can be more efficiently represented with [r+imm].
bool PPCTargetLowering::SelectAddressRegReg(SDOperand N, SDOperand &Base,
                                            SDOperand &Index,
                                            SelectionDAG &DAG) {
  short imm = 0;
  if (N.getOpcode() == ISD::ADD) {
    if (isIntS16Immediate(N.getOperand(1), imm))
      return false;    // r+i
    if (N.getOperand(1).getOpcode() == PPCISD::Lo)
      return false;    // r+i

    Base = N.getOperand(0);
    Index = N.getOperand(1);
    return true;
  } else if (N.getOpcode() == ISD::OR) {
    if (isIntS16Immediate(N.getOperand(1), imm))
      return false;    // r+i can fold it if we can.

    // If this is an or of disjoint bitfields, we can codegen this as an add
    // (for better address arithmetic) if the LHS and RHS of the OR are provably
    // disjoint.
    APInt LHSKnownZero, LHSKnownOne;
    APInt RHSKnownZero, RHSKnownOne;
    DAG.ComputeMaskedBits(N.getOperand(0),
                          APInt::getAllOnesValue(N.getOperand(0)
                                                  .getValueSizeInBits()),
                          LHSKnownZero, LHSKnownOne);

    if (LHSKnownZero.getBoolValue()) {
      DAG.ComputeMaskedBits(N.getOperand(1),
                            APInt::getAllOnesValue(N.getOperand(1)
                                                    .getValueSizeInBits()),
                            RHSKnownZero, RHSKnownOne);
      // If all of the bits are known zero on the LHS or RHS, the add won't
      // carry.
      if (~(LHSKnownZero | RHSKnownZero) == 0) {
        Base = N.getOperand(0);
        Index = N.getOperand(1);
        return true;
      }
    }
  }

  return false;
}

/// Returns true if the address N can be represented by a base register plus
/// a signed 16-bit displacement [r+imm], and if it is not better
/// represented as reg+reg.
bool PPCTargetLowering::SelectAddressRegImm(SDOperand N, SDOperand &Disp,
                                            SDOperand &Base, SelectionDAG &DAG){
  // If this can be more profitably realized as r+r, fail.
  if (SelectAddressRegReg(N, Disp, Base, DAG))
    return false;

  if (N.getOpcode() == ISD::ADD) {
    short imm = 0;
    if (isIntS16Immediate(N.getOperand(1), imm)) {
      Disp = DAG.getTargetConstant((int)imm & 0xFFFF, MVT::i32);
      if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N.getOperand(0))) {
        Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
      } else {
        Base = N.getOperand(0);
      }
      return true;  // [r+i]
    } else if (N.getOperand(1).getOpcode() == PPCISD::Lo) {
      // Match LOAD (ADD (X, Lo(G))).
      assert(!cast<ConstantSDNode>(N.getOperand(1).getOperand(1))->getValue()
             && "Cannot handle constant offsets yet!");
      Disp = N.getOperand(1).getOperand(0);  // The global address.
      assert(Disp.getOpcode() == ISD::TargetGlobalAddress ||
             Disp.getOpcode() == ISD::TargetConstantPool ||
             Disp.getOpcode() == ISD::TargetJumpTable);
      Base = N.getOperand(0);
      return true;  // [&g+r]
    }
  } else if (N.getOpcode() == ISD::OR) {
    short imm = 0;
    if (isIntS16Immediate(N.getOperand(1), imm)) {
      // If this is an or of disjoint bitfields, we can codegen this as an add
      // (for better address arithmetic) if the LHS and RHS of the OR are
      // provably disjoint.
      APInt LHSKnownZero, LHSKnownOne;
      DAG.ComputeMaskedBits(N.getOperand(0),
                            APInt::getAllOnesValue(32),
                            LHSKnownZero, LHSKnownOne);
      if ((LHSKnownZero.getZExtValue()|~(uint64_t)imm) == ~0ULL) {
        // If all of the bits are known zero on the LHS or RHS, the add won't
        // carry.
        Base = N.getOperand(0);
        Disp = DAG.getTargetConstant((int)imm & 0xFFFF, MVT::i32);
        return true;
      }
    }
  } else if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N)) {
    // Loading from a constant address.

    // If this address fits entirely in a 16-bit sext immediate field, codegen
    // this as "d, 0"
    short Imm;
    if (isIntS16Immediate(CN, Imm)) {
      Disp = DAG.getTargetConstant(Imm, CN->getValueType(0));
      Base = DAG.getRegister(PPC::R0, CN->getValueType(0));
      return true;
    }

    // Handle 32-bit sext immediates with LIS + addr mode.
    if (CN->getValueType(0) == MVT::i32 ||
        (int64_t)CN->getValue() == (int)CN->getValue()) {
      int Addr = (int)CN->getValue();

      // Otherwise, break this down into an LIS + disp.
      Disp = DAG.getTargetConstant((short)Addr, MVT::i32);

      Base = DAG.getTargetConstant((Addr - (signed short)Addr) >> 16, MVT::i32);
      unsigned Opc = CN->getValueType(0) == MVT::i32 ? PPC::LIS : PPC::LIS8;
      Base = SDOperand(DAG.getTargetNode(Opc, CN->getValueType(0), Base), 0);
      return true;
    }
  }

  Disp = DAG.getTargetConstant(0, getPointerTy());
  if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N))
    Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
  else
    Base = N;
  return true;  // [r+0]
}

/// SelectAddressRegRegOnly - Given the specified address, force it to be
/// represented as an indexed [r+r] operation.
bool PPCTargetLowering::SelectAddressRegRegOnly(SDOperand N, SDOperand &Base,
                                                SDOperand &Index,
                                                SelectionDAG &DAG) {
  // Check to see if we can easily represent this as an [r+r] address.  This
  // will fail if it thinks that the address is more profitably represented as
  // reg+imm, e.g. where imm = 0.
  if (SelectAddressRegReg(N, Base, Index, DAG))
    return true;

  // If the operand is an addition, always emit this as [r+r], since this is
  // better (for code size, and execution, as the memop does the add for free)
  // than emitting an explicit add.
  if (N.getOpcode() == ISD::ADD) {
    Base = N.getOperand(0);
    Index = N.getOperand(1);
    return true;
  }

  // Otherwise, do it the hard way, using R0 as the base register.
  Base = DAG.getRegister(PPC::R0, N.getValueType());
  Index = N;
  return true;
}

/// SelectAddressRegImmShift - Returns true if the address N can be
/// represented by a base register plus a signed 14-bit displacement
/// [r+imm*4].  Suitable for use by STD and friends.
bool PPCTargetLowering::SelectAddressRegImmShift(SDOperand N, SDOperand &Disp,
                                                 SDOperand &Base,
                                                 SelectionDAG &DAG) {
  // If this can be more profitably realized as r+r, fail.
  if (SelectAddressRegReg(N, Disp, Base, DAG))
    return false;

  if (N.getOpcode() == ISD::ADD) {
    short imm = 0;
    if (isIntS16Immediate(N.getOperand(1), imm) && (imm & 3) == 0) {
      Disp = DAG.getTargetConstant(((int)imm & 0xFFFF) >> 2, MVT::i32);
      if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N.getOperand(0))) {
        Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
      } else {
        Base = N.getOperand(0);
      }
      return true;  // [r+i]
    } else if (N.getOperand(1).getOpcode() == PPCISD::Lo) {
      // Match LOAD (ADD (X, Lo(G))).
      assert(!cast<ConstantSDNode>(N.getOperand(1).getOperand(1))->getValue()
             && "Cannot handle constant offsets yet!");
      Disp = N.getOperand(1).getOperand(0);  // The global address.
      assert(Disp.getOpcode() == ISD::TargetGlobalAddress ||
             Disp.getOpcode() == ISD::TargetConstantPool ||
             Disp.getOpcode() == ISD::TargetJumpTable);
      Base = N.getOperand(0);
      return true;  // [&g+r]
    }
  } else if (N.getOpcode() == ISD::OR) {
    short imm = 0;
    if (isIntS16Immediate(N.getOperand(1), imm) && (imm & 3) == 0) {
      // If this is an or of disjoint bitfields, we can codegen this as an add
      // (for better address arithmetic) if the LHS and RHS of the OR are
      // provably disjoint.
      APInt LHSKnownZero, LHSKnownOne;
      DAG.ComputeMaskedBits(N.getOperand(0),
                            APInt::getAllOnesValue(32),
                            LHSKnownZero, LHSKnownOne);
      if ((LHSKnownZero.getZExtValue()|~(uint64_t)imm) == ~0ULL) {
        // If all of the bits are known zero on the LHS or RHS, the add won't
        // carry.
        Base = N.getOperand(0);
        Disp = DAG.getTargetConstant(((int)imm & 0xFFFF) >> 2, MVT::i32);
        return true;
      }
    }
  } else if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N)) {
    // Loading from a constant address.  Verify low two bits are clear.
    if ((CN->getValue() & 3) == 0) {
      // If this address fits entirely in a 14-bit sext immediate field, codegen
      // this as "d, 0"
      short Imm;
      if (isIntS16Immediate(CN, Imm)) {
        Disp = DAG.getTargetConstant((unsigned short)Imm >> 2, getPointerTy());
        Base = DAG.getRegister(PPC::R0, CN->getValueType(0));
        return true;
      }

      // Fold the low-part of 32-bit absolute addresses into addr mode.
      if (CN->getValueType(0) == MVT::i32 ||
          (int64_t)CN->getValue() == (int)CN->getValue()) {
        int Addr = (int)CN->getValue();

        // Otherwise, break this down into an LIS + disp.
        Disp = DAG.getTargetConstant((short)Addr >> 2, MVT::i32);

        Base = DAG.getTargetConstant((Addr-(signed short)Addr) >> 16, MVT::i32);
        unsigned Opc = CN->getValueType(0) == MVT::i32 ? PPC::LIS : PPC::LIS8;
        Base = SDOperand(DAG.getTargetNode(Opc, CN->getValueType(0), Base), 0);
        return true;
      }
    }
  }

  Disp = DAG.getTargetConstant(0, getPointerTy());
  if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N))
    Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
  else
    Base = N;
  return true;  // [r+0]
}


/// getPreIndexedAddressParts - returns true by value, base pointer and
/// offset pointer and addressing mode by reference if the node's address
/// can be legally represented as pre-indexed load / store address.
bool PPCTargetLowering::getPreIndexedAddressParts(SDNode *N, SDOperand &Base,
                                                  SDOperand &Offset,
                                                  ISD::MemIndexedMode &AM,
                                                  SelectionDAG &DAG) {
  // Disabled by default for now.
  if (!EnablePPCPreinc) return false;

  SDOperand Ptr;
  MVT::ValueType VT;
  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
    Ptr = LD->getBasePtr();
    VT = LD->getMemoryVT();

  } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
    ST = ST;
    Ptr = ST->getBasePtr();
    VT  = ST->getMemoryVT();
  } else
    return false;

  // PowerPC doesn't have preinc load/store instructions for vectors.
  if (MVT::isVector(VT))
    return false;

  // TODO: Check reg+reg first.

  // LDU/STU use reg+imm*4, others use reg+imm.
  if (VT != MVT::i64) {
    // reg + imm
    if (!SelectAddressRegImm(Ptr, Offset, Base, DAG))
      return false;
  } else {
    // reg + imm * 4.
    if (!SelectAddressRegImmShift(Ptr, Offset, Base, DAG))
      return false;
  }

  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
    // PPC64 doesn't have lwau, but it does have lwaux.  Reject preinc load of
    // sext i32 to i64 when addr mode is r+i.
    if (LD->getValueType(0) == MVT::i64 && LD->getMemoryVT() == MVT::i32 &&
        LD->getExtensionType() == ISD::SEXTLOAD &&
        isa<ConstantSDNode>(Offset))
      return false;
  }

  AM = ISD::PRE_INC;
  return true;
}

//===----------------------------------------------------------------------===//
//  LowerOperation implementation
//===----------------------------------------------------------------------===//

SDOperand PPCTargetLowering::LowerConstantPool(SDOperand Op,
                                               SelectionDAG &DAG) {
  MVT::ValueType PtrVT = Op.getValueType();
  ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
  Constant *C = CP->getConstVal();
  SDOperand CPI = DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment());
  SDOperand Zero = DAG.getConstant(0, PtrVT);

  const TargetMachine &TM = DAG.getTarget();

  SDOperand Hi = DAG.getNode(PPCISD::Hi, PtrVT, CPI, Zero);
  SDOperand Lo = DAG.getNode(PPCISD::Lo, PtrVT, CPI, Zero);

  // If this is a non-darwin platform, we don't support non-static relo models
  // yet.
  if (TM.getRelocationModel() == Reloc::Static ||
      !TM.getSubtarget<PPCSubtarget>().isDarwin()) {
    // Generate non-pic code that has direct accesses to the constant pool.
    // The address of the global is just (hi(&g)+lo(&g)).
    return DAG.getNode(ISD::ADD, PtrVT, Hi, Lo);
  }

  if (TM.getRelocationModel() == Reloc::PIC_) {
    // With PIC, the first instruction is actually "GR+hi(&G)".
    Hi = DAG.getNode(ISD::ADD, PtrVT,
                     DAG.getNode(PPCISD::GlobalBaseReg, PtrVT), Hi);
  }

  Lo = DAG.getNode(ISD::ADD, PtrVT, Hi, Lo);
  return Lo;
}

SDOperand PPCTargetLowering::LowerJumpTable(SDOperand Op, SelectionDAG &DAG) {
  MVT::ValueType PtrVT = Op.getValueType();
  JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
  SDOperand JTI = DAG.getTargetJumpTable(JT->getIndex(), PtrVT);
  SDOperand Zero = DAG.getConstant(0, PtrVT);

  const TargetMachine &TM = DAG.getTarget();

  SDOperand Hi = DAG.getNode(PPCISD::Hi, PtrVT, JTI, Zero);
  SDOperand Lo = DAG.getNode(PPCISD::Lo, PtrVT, JTI, Zero);

  // If this is a non-darwin platform, we don't support non-static relo models
  // yet.
  if (TM.getRelocationModel() == Reloc::Static ||
      !TM.getSubtarget<PPCSubtarget>().isDarwin()) {
    // Generate non-pic code that has direct accesses to the constant pool.
    // The address of the global is just (hi(&g)+lo(&g)).
    return DAG.getNode(ISD::ADD, PtrVT, Hi, Lo);
  }

  if (TM.getRelocationModel() == Reloc::PIC_) {
    // With PIC, the first instruction is actually "GR+hi(&G)".
    Hi = DAG.getNode(ISD::ADD, PtrVT,
                     DAG.getNode(PPCISD::GlobalBaseReg, PtrVT), Hi);
  }

  Lo = DAG.getNode(ISD::ADD, PtrVT, Hi, Lo);
  return Lo;
}

SDOperand PPCTargetLowering::LowerGlobalTLSAddress(SDOperand Op,
                                                   SelectionDAG &DAG) {
  assert(0 && "TLS not implemented for PPC.");
}

SDOperand PPCTargetLowering::LowerGlobalAddress(SDOperand Op,
                                                SelectionDAG &DAG) {
  MVT::ValueType PtrVT = Op.getValueType();
  GlobalAddressSDNode *GSDN = cast<GlobalAddressSDNode>(Op);
  GlobalValue *GV = GSDN->getGlobal();
  SDOperand GA = DAG.getTargetGlobalAddress(GV, PtrVT, GSDN->getOffset());
  // If it's a debug information descriptor, don't mess with it.
  if (DAG.isVerifiedDebugInfoDesc(Op))
    return GA;
  SDOperand Zero = DAG.getConstant(0, PtrVT);

  const TargetMachine &TM = DAG.getTarget();

  SDOperand Hi = DAG.getNode(PPCISD::Hi, PtrVT, GA, Zero);
  SDOperand Lo = DAG.getNode(PPCISD::Lo, PtrVT, GA, Zero);

  // If this is a non-darwin platform, we don't support non-static relo models
  // yet.
  if (TM.getRelocationModel() == Reloc::Static ||
      !TM.getSubtarget<PPCSubtarget>().isDarwin()) {
    // Generate non-pic code that has direct accesses to globals.
    // The address of the global is just (hi(&g)+lo(&g)).
    return DAG.getNode(ISD::ADD, PtrVT, Hi, Lo);
  }

  if (TM.getRelocationModel() == Reloc::PIC_) {
    // With PIC, the first instruction is actually "GR+hi(&G)".
    Hi = DAG.getNode(ISD::ADD, PtrVT,
                     DAG.getNode(PPCISD::GlobalBaseReg, PtrVT), Hi);
  }

  Lo = DAG.getNode(ISD::ADD, PtrVT, Hi, Lo);

  if (!TM.getSubtarget<PPCSubtarget>().hasLazyResolverStub(GV))
    return Lo;

  // If the global is weak or external, we have to go through the lazy
  // resolution stub.
  return DAG.getLoad(PtrVT, DAG.getEntryNode(), Lo, NULL, 0);
}

SDOperand PPCTargetLowering::LowerSETCC(SDOperand Op, SelectionDAG &DAG) {
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();

  // If we're comparing for equality to zero, expose the fact that this is
  // implemented as a ctlz/srl pair on ppc, so that the dag combiner can
  // fold the new nodes.
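  // (For an i32 value X, "seteq X, 0" becomes (srl (ctlz X), 5): ctlz yields
  // the full bit width only when X is zero, so the shift produces 1 for zero
  // and 0 otherwise.)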
1153 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) { 1154 if (C->isNullValue() && CC == ISD::SETEQ) { 1155 MVT::ValueType VT = Op.getOperand(0).getValueType(); 1156 SDOperand Zext = Op.getOperand(0); 1157 if (VT < MVT::i32) { 1158 VT = MVT::i32; 1159 Zext = DAG.getNode(ISD::ZERO_EXTEND, VT, Op.getOperand(0)); 1160 } 1161 unsigned Log2b = Log2_32(MVT::getSizeInBits(VT)); 1162 SDOperand Clz = DAG.getNode(ISD::CTLZ, VT, Zext); 1163 SDOperand Scc = DAG.getNode(ISD::SRL, VT, Clz, 1164 DAG.getConstant(Log2b, MVT::i32)); 1165 return DAG.getNode(ISD::TRUNCATE, MVT::i32, Scc); 1166 } 1167 // Leave comparisons against 0 and -1 alone for now, since they're usually 1168 // optimized. FIXME: revisit this when we can custom lower all setcc 1169 // optimizations. 1170 if (C->isAllOnesValue() || C->isNullValue()) 1171 return SDOperand(); 1172 } 1173 1174 // If we have an integer seteq/setne, turn it into a compare against zero 1175 // by xor'ing the rhs with the lhs, which is faster than setting a 1176 // condition register, reading it back out, and masking the correct bit. The 1177 // normal approach here uses sub to do this instead of xor. Using xor exposes 1178 // the result to other bit-twiddling opportunities. 1179 MVT::ValueType LHSVT = Op.getOperand(0).getValueType(); 1180 if (MVT::isInteger(LHSVT) && (CC == ISD::SETEQ || CC == ISD::SETNE)) { 1181 MVT::ValueType VT = Op.getValueType(); 1182 SDOperand Sub = DAG.getNode(ISD::XOR, LHSVT, Op.getOperand(0), 1183 Op.getOperand(1)); 1184 return DAG.getSetCC(VT, Sub, DAG.getConstant(0, LHSVT), CC); 1185 } 1186 return SDOperand(); 1187} 1188 1189SDOperand PPCTargetLowering::LowerVAARG(SDOperand Op, SelectionDAG &DAG, 1190 int VarArgsFrameIndex, 1191 int VarArgsStackOffset, 1192 unsigned VarArgsNumGPR, 1193 unsigned VarArgsNumFPR, 1194 const PPCSubtarget &Subtarget) { 1195 1196 assert(0 && "VAARG in ELF32 ABI not implemented yet!"); 1197} 1198 1199SDOperand PPCTargetLowering::LowerVASTART(SDOperand Op, SelectionDAG &DAG, 1200 int VarArgsFrameIndex, 1201 int VarArgsStackOffset, 1202 unsigned VarArgsNumGPR, 1203 unsigned VarArgsNumFPR, 1204 const PPCSubtarget &Subtarget) { 1205 1206 if (Subtarget.isMachoABI()) { 1207 // vastart just stores the address of the VarArgsFrameIndex slot into the 1208 // memory location argument. 1209 MVT::ValueType PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 1210 SDOperand FR = DAG.getFrameIndex(VarArgsFrameIndex, PtrVT); 1211 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue(); 1212 return DAG.getStore(Op.getOperand(0), FR, Op.getOperand(1), SV, 0); 1213 } 1214 1215 // For ELF 32 ABI we follow the layout of the va_list struct. 1216 // We suppose the given va_list is already allocated. 1217 // 1218 // typedef struct { 1219 // char gpr; /* index into the array of 8 GPRs 1220 // * stored in the register save area 1221 // * gpr=0 corresponds to r3, 1222 // * gpr=1 to r4, etc. 1223 // */ 1224 // char fpr; /* index into the array of 8 FPRs 1225 // * stored in the register save area 1226 // * fpr=0 corresponds to f1, 1227 // * fpr=1 to f2, etc. 
1228 // */ 1229 // char *overflow_arg_area; 1230 // /* location on stack that holds 1231 // * the next overflow argument 1232 // */ 1233 // char *reg_save_area; 1234 // /* where r3:r10 and f1:f8 (if saved) 1235 // * are stored 1236 // */ 1237 // } va_list[1]; 1238 1239 1240 SDOperand ArgGPR = DAG.getConstant(VarArgsNumGPR, MVT::i8); 1241 SDOperand ArgFPR = DAG.getConstant(VarArgsNumFPR, MVT::i8); 1242 1243 1244 MVT::ValueType PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 1245 1246 SDOperand StackOffsetFI = DAG.getFrameIndex(VarArgsStackOffset, PtrVT); 1247 SDOperand FR = DAG.getFrameIndex(VarArgsFrameIndex, PtrVT); 1248 1249 uint64_t FrameOffset = MVT::getSizeInBits(PtrVT)/8; 1250 SDOperand ConstFrameOffset = DAG.getConstant(FrameOffset, PtrVT); 1251 1252 uint64_t StackOffset = MVT::getSizeInBits(PtrVT)/8 - 1; 1253 SDOperand ConstStackOffset = DAG.getConstant(StackOffset, PtrVT); 1254 1255 uint64_t FPROffset = 1; 1256 SDOperand ConstFPROffset = DAG.getConstant(FPROffset, PtrVT); 1257 1258 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue(); 1259 1260 // Store first byte : number of int regs 1261 SDOperand firstStore = DAG.getStore(Op.getOperand(0), ArgGPR, 1262 Op.getOperand(1), SV, 0); 1263 uint64_t nextOffset = FPROffset; 1264 SDOperand nextPtr = DAG.getNode(ISD::ADD, PtrVT, Op.getOperand(1), 1265 ConstFPROffset); 1266 1267 // Store second byte : number of float regs 1268 SDOperand secondStore = 1269 DAG.getStore(firstStore, ArgFPR, nextPtr, SV, nextOffset); 1270 nextOffset += StackOffset; 1271 nextPtr = DAG.getNode(ISD::ADD, PtrVT, nextPtr, ConstStackOffset); 1272 1273 // Store second word : arguments given on stack 1274 SDOperand thirdStore = 1275 DAG.getStore(secondStore, StackOffsetFI, nextPtr, SV, nextOffset); 1276 nextOffset += FrameOffset; 1277 nextPtr = DAG.getNode(ISD::ADD, PtrVT, nextPtr, ConstFrameOffset); 1278 1279 // Store third word : arguments given in registers 1280 return DAG.getStore(thirdStore, FR, nextPtr, SV, nextOffset); 1281 1282} 1283 1284#include "PPCGenCallingConv.inc" 1285 1286/// GetFPR - Get the set of FP registers that should be allocated for arguments, 1287/// depending on which subtarget is selected. 1288static const unsigned *GetFPR(const PPCSubtarget &Subtarget) { 1289 if (Subtarget.isMachoABI()) { 1290 static const unsigned FPR[] = { 1291 PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, PPC::F6, PPC::F7, 1292 PPC::F8, PPC::F9, PPC::F10, PPC::F11, PPC::F12, PPC::F13 1293 }; 1294 return FPR; 1295 } 1296 1297 1298 static const unsigned FPR[] = { 1299 PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, PPC::F6, PPC::F7, 1300 PPC::F8 1301 }; 1302 return FPR; 1303} 1304 1305SDOperand 1306PPCTargetLowering::LowerFORMAL_ARGUMENTS(SDOperand Op, 1307 SelectionDAG &DAG, 1308 int &VarArgsFrameIndex, 1309 int &VarArgsStackOffset, 1310 unsigned &VarArgsNumGPR, 1311 unsigned &VarArgsNumFPR, 1312 const PPCSubtarget &Subtarget) { 1313 // TODO: add description of PPC stack frame format, or at least some docs. 
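  // (Very roughly, and only as far as this function cares: the caller's frame
  //  begins with the linkage area -- 24 bytes on 32-bit Macho, 48 on 64-bit,
  //  holding [SP][CR][LR][3 x unused] -- followed by the parameter save area
  //  that ArgOffset indexes into. In 32-bit non-varargs functions a separate
  //  Altivec parameter area follows the non-vector parameters; that is what
  //  the VecArgOffset computation below is for.)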
1314 // 1315 MachineFunction &MF = DAG.getMachineFunction(); 1316 MachineFrameInfo *MFI = MF.getFrameInfo(); 1317 MachineRegisterInfo &RegInfo = MF.getRegInfo(); 1318 SmallVector<SDOperand, 8> ArgValues; 1319 SDOperand Root = Op.getOperand(0); 1320 bool isVarArg = cast<ConstantSDNode>(Op.getOperand(2))->getValue() != 0; 1321 1322 MVT::ValueType PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 1323 bool isPPC64 = PtrVT == MVT::i64; 1324 bool isMachoABI = Subtarget.isMachoABI(); 1325 bool isELF32_ABI = Subtarget.isELF32_ABI(); 1326 unsigned PtrByteSize = isPPC64 ? 8 : 4; 1327 1328 unsigned ArgOffset = PPCFrameInfo::getLinkageSize(isPPC64, isMachoABI); 1329 1330 static const unsigned GPR_32[] = { // 32-bit registers. 1331 PPC::R3, PPC::R4, PPC::R5, PPC::R6, 1332 PPC::R7, PPC::R8, PPC::R9, PPC::R10, 1333 }; 1334 static const unsigned GPR_64[] = { // 64-bit registers. 1335 PPC::X3, PPC::X4, PPC::X5, PPC::X6, 1336 PPC::X7, PPC::X8, PPC::X9, PPC::X10, 1337 }; 1338 1339 static const unsigned *FPR = GetFPR(Subtarget); 1340 1341 static const unsigned VR[] = { 1342 PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8, 1343 PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13 1344 }; 1345 1346 const unsigned Num_GPR_Regs = array_lengthof(GPR_32); 1347 const unsigned Num_FPR_Regs = isMachoABI ? 13 : 8; 1348 const unsigned Num_VR_Regs = array_lengthof( VR); 1349 1350 unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0; 1351 1352 const unsigned *GPR = isPPC64 ? GPR_64 : GPR_32; 1353 1354 // In 32-bit non-varargs functions, the stack space for vectors is after the 1355 // stack space for non-vectors. We do not use this space unless we have 1356 // too many vectors to fit in registers, something that only occurs in 1357 // constructed examples:), but we have to walk the arglist to figure 1358 // that out...for the pathological case, compute VecArgOffset as the 1359 // start of the vector parameter area. Computing VecArgOffset is the 1360 // entire point of the following loop. 1361 // Altivec is not mentioned in the ppc32 Elf Supplement, so I'm not trying 1362 // to handle Elf here. 1363 unsigned VecArgOffset = ArgOffset; 1364 if (!isVarArg && !isPPC64) { 1365 for (unsigned ArgNo = 0, e = Op.Val->getNumValues()-1; ArgNo != e; 1366 ++ArgNo) { 1367 MVT::ValueType ObjectVT = Op.getValue(ArgNo).getValueType(); 1368 unsigned ObjSize = MVT::getSizeInBits(ObjectVT)/8; 1369 ISD::ParamFlags::ParamFlagsTy Flags = 1370 cast<ConstantSDNode>(Op.getOperand(ArgNo+3))->getValue(); 1371 unsigned isByVal = Flags & ISD::ParamFlags::ByVal; 1372 1373 if (isByVal) { 1374 // ObjSize is the true size, ArgSize rounded up to multiple of regs. 1375 ObjSize = (Flags & ISD::ParamFlags::ByValSize) >> 1376 ISD::ParamFlags::ByValSizeOffs; 1377 unsigned ArgSize = 1378 ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize; 1379 VecArgOffset += ArgSize; 1380 continue; 1381 } 1382 1383 switch(ObjectVT) { 1384 default: assert(0 && "Unhandled argument type!"); 1385 case MVT::i32: 1386 case MVT::f32: 1387 VecArgOffset += isPPC64 ? 8 : 4; 1388 break; 1389 case MVT::i64: // PPC64 1390 case MVT::f64: 1391 VecArgOffset += 8; 1392 break; 1393 case MVT::v4f32: 1394 case MVT::v4i32: 1395 case MVT::v8i16: 1396 case MVT::v16i8: 1397 // Nothing to do, we're only looking at Nonvector args here. 1398 break; 1399 } 1400 } 1401 } 1402 // We've found where the vector parameter area in memory is. Skip the 1403 // first 12 parameters; these don't use that memory. 
1404 VecArgOffset = ((VecArgOffset+15)/16)*16; 1405 VecArgOffset += 12*16; 1406 1407 // Add DAG nodes to load the arguments or copy them out of registers. On 1408 // entry to a function on PPC, the arguments start after the linkage area, 1409 // although the first ones are often in registers. 1410 // 1411 // In the ELF 32 ABI, GPRs and stack are double word align: an argument 1412 // represented with two words (long long or double) must be copied to an 1413 // even GPR_idx value or to an even ArgOffset value. 1414 1415 SmallVector<SDOperand, 8> MemOps; 1416 1417 for (unsigned ArgNo = 0, e = Op.Val->getNumValues()-1; ArgNo != e; ++ArgNo) { 1418 SDOperand ArgVal; 1419 bool needsLoad = false; 1420 MVT::ValueType ObjectVT = Op.getValue(ArgNo).getValueType(); 1421 unsigned ObjSize = MVT::getSizeInBits(ObjectVT)/8; 1422 unsigned ArgSize = ObjSize; 1423 ISD::ParamFlags::ParamFlagsTy Flags = 1424 cast<ConstantSDNode>(Op.getOperand(ArgNo+3))->getValue(); 1425 unsigned AlignFlag = ISD::ParamFlags::One 1426 << ISD::ParamFlags::OrigAlignmentOffs; 1427 unsigned isByVal = Flags & ISD::ParamFlags::ByVal; 1428 // See if next argument requires stack alignment in ELF 1429 bool Expand = (ObjectVT == MVT::f64) || ((ArgNo + 1 < e) && 1430 (cast<ConstantSDNode>(Op.getOperand(ArgNo+4))->getValue() & AlignFlag) && 1431 (!(Flags & AlignFlag))); 1432 1433 unsigned CurArgOffset = ArgOffset; 1434 1435 // FIXME alignment for ELF may not be right 1436 // FIXME the codegen can be much improved in some cases. 1437 // We do not have to keep everything in memory. 1438 if (isByVal) { 1439 // ObjSize is the true size, ArgSize rounded up to multiple of registers. 1440 ObjSize = (Flags & ISD::ParamFlags::ByValSize) >> 1441 ISD::ParamFlags::ByValSizeOffs; 1442 ArgSize = ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize; 1443 // Double word align in ELF 1444 if (Expand && isELF32_ABI) GPR_idx += (GPR_idx % 2); 1445 // Objects of size 1 and 2 are right justified, everything else is 1446 // left justified. This means the memory address is adjusted forwards. 1447 if (ObjSize==1 || ObjSize==2) { 1448 CurArgOffset = CurArgOffset + (4 - ObjSize); 1449 } 1450 // The value of the object is its address. 1451 int FI = MFI->CreateFixedObject(ObjSize, CurArgOffset); 1452 SDOperand FIN = DAG.getFrameIndex(FI, PtrVT); 1453 ArgValues.push_back(FIN); 1454 if (ObjSize==1 || ObjSize==2) { 1455 if (GPR_idx != Num_GPR_Regs) { 1456 unsigned VReg = RegInfo.createVirtualRegister(&PPC::GPRCRegClass); 1457 RegInfo.addLiveIn(GPR[GPR_idx], VReg); 1458 SDOperand Val = DAG.getCopyFromReg(Root, VReg, PtrVT); 1459 SDOperand Store = DAG.getTruncStore(Val.getValue(1), Val, FIN, 1460 NULL, 0, ObjSize==1 ? MVT::i8 : MVT::i16 ); 1461 MemOps.push_back(Store); 1462 ++GPR_idx; 1463 if (isMachoABI) ArgOffset += PtrByteSize; 1464 } else { 1465 ArgOffset += PtrByteSize; 1466 } 1467 continue; 1468 } 1469 for (unsigned j = 0; j < ArgSize; j += PtrByteSize) { 1470 // Store whatever pieces of the object are in registers 1471 // to memory. ArgVal will be address of the beginning of 1472 // the object. 
1473 if (GPR_idx != Num_GPR_Regs) { 1474 unsigned VReg = RegInfo.createVirtualRegister(&PPC::GPRCRegClass); 1475 RegInfo.addLiveIn(GPR[GPR_idx], VReg); 1476 int FI = MFI->CreateFixedObject(PtrByteSize, ArgOffset); 1477 SDOperand FIN = DAG.getFrameIndex(FI, PtrVT); 1478 SDOperand Val = DAG.getCopyFromReg(Root, VReg, PtrVT); 1479 SDOperand Store = DAG.getStore(Val.getValue(1), Val, FIN, NULL, 0); 1480 MemOps.push_back(Store); 1481 ++GPR_idx; 1482 if (isMachoABI) ArgOffset += PtrByteSize; 1483 } else { 1484 ArgOffset += ArgSize - (ArgOffset-CurArgOffset); 1485 break; 1486 } 1487 } 1488 continue; 1489 } 1490 1491 switch (ObjectVT) { 1492 default: assert(0 && "Unhandled argument type!"); 1493 case MVT::i32: 1494 if (!isPPC64) { 1495 // Double word align in ELF 1496 if (Expand && isELF32_ABI) GPR_idx += (GPR_idx % 2); 1497 1498 if (GPR_idx != Num_GPR_Regs) { 1499 unsigned VReg = RegInfo.createVirtualRegister(&PPC::GPRCRegClass); 1500 RegInfo.addLiveIn(GPR[GPR_idx], VReg); 1501 ArgVal = DAG.getCopyFromReg(Root, VReg, MVT::i32); 1502 ++GPR_idx; 1503 } else { 1504 needsLoad = true; 1505 ArgSize = PtrByteSize; 1506 } 1507 // Stack align in ELF 1508 if (needsLoad && Expand && isELF32_ABI) 1509 ArgOffset += ((ArgOffset/4) % 2) * PtrByteSize; 1510 // All int arguments reserve stack space in Macho ABI. 1511 if (isMachoABI || needsLoad) ArgOffset += PtrByteSize; 1512 break; 1513 } 1514 // FALLTHROUGH 1515 case MVT::i64: // PPC64 1516 if (GPR_idx != Num_GPR_Regs) { 1517 unsigned VReg = RegInfo.createVirtualRegister(&PPC::G8RCRegClass); 1518 RegInfo.addLiveIn(GPR[GPR_idx], VReg); 1519 ArgVal = DAG.getCopyFromReg(Root, VReg, MVT::i64); 1520 1521 if (ObjectVT == MVT::i32) { 1522 // PPC64 passes i8, i16, and i32 values in i64 registers. Promote 1523 // value to MVT::i64 and then truncate to the correct register size. 1524 if (Flags & ISD::ParamFlags::SExt) 1525 ArgVal = DAG.getNode(ISD::AssertSext, MVT::i64, ArgVal, 1526 DAG.getValueType(ObjectVT)); 1527 else if (Flags & ISD::ParamFlags::ZExt) 1528 ArgVal = DAG.getNode(ISD::AssertZext, MVT::i64, ArgVal, 1529 DAG.getValueType(ObjectVT)); 1530 1531 ArgVal = DAG.getNode(ISD::TRUNCATE, MVT::i32, ArgVal); 1532 } 1533 1534 ++GPR_idx; 1535 } else { 1536 needsLoad = true; 1537 } 1538 // All int arguments reserve stack space in Macho ABI. 1539 if (isMachoABI || needsLoad) ArgOffset += 8; 1540 break; 1541 1542 case MVT::f32: 1543 case MVT::f64: 1544 // Every 4 bytes of argument space consumes one of the GPRs available for 1545 // argument passing. 1546 if (GPR_idx != Num_GPR_Regs && isMachoABI) { 1547 ++GPR_idx; 1548 if (ObjSize == 8 && GPR_idx != Num_GPR_Regs && !isPPC64) 1549 ++GPR_idx; 1550 } 1551 if (FPR_idx != Num_FPR_Regs) { 1552 unsigned VReg; 1553 if (ObjectVT == MVT::f32) 1554 VReg = RegInfo.createVirtualRegister(&PPC::F4RCRegClass); 1555 else 1556 VReg = RegInfo.createVirtualRegister(&PPC::F8RCRegClass); 1557 RegInfo.addLiveIn(FPR[FPR_idx], VReg); 1558 ArgVal = DAG.getCopyFromReg(Root, VReg, ObjectVT); 1559 ++FPR_idx; 1560 } else { 1561 needsLoad = true; 1562 } 1563 1564 // Stack align in ELF 1565 if (needsLoad && Expand && isELF32_ABI) 1566 ArgOffset += ((ArgOffset/4) % 2) * PtrByteSize; 1567 // All FP arguments reserve stack space in Macho ABI. 1568 if (isMachoABI || needsLoad) ArgOffset += isPPC64 ? 8 : ObjSize; 1569 break; 1570 case MVT::v4f32: 1571 case MVT::v4i32: 1572 case MVT::v8i16: 1573 case MVT::v16i8: 1574 // Note that vector arguments in registers don't reserve stack space, 1575 // except in varargs functions. 
1576 if (VR_idx != Num_VR_Regs) { 1577 unsigned VReg = RegInfo.createVirtualRegister(&PPC::VRRCRegClass); 1578 RegInfo.addLiveIn(VR[VR_idx], VReg); 1579 ArgVal = DAG.getCopyFromReg(Root, VReg, ObjectVT); 1580 if (isVarArg) { 1581 while ((ArgOffset % 16) != 0) { 1582 ArgOffset += PtrByteSize; 1583 if (GPR_idx != Num_GPR_Regs) 1584 GPR_idx++; 1585 } 1586 ArgOffset += 16; 1587 GPR_idx = std::min(GPR_idx+4, Num_GPR_Regs); 1588 } 1589 ++VR_idx; 1590 } else { 1591 if (!isVarArg && !isPPC64) { 1592 // Vectors go after all the nonvectors. 1593 CurArgOffset = VecArgOffset; 1594 VecArgOffset += 16; 1595 } else { 1596 // Vectors are aligned. 1597 ArgOffset = ((ArgOffset+15)/16)*16; 1598 CurArgOffset = ArgOffset; 1599 ArgOffset += 16; 1600 } 1601 needsLoad = true; 1602 } 1603 break; 1604 } 1605 1606 // We need to load the argument to a virtual register if we determined above 1607 // that we ran out of physical registers of the appropriate type. 1608 if (needsLoad) { 1609 int FI = MFI->CreateFixedObject(ObjSize, 1610 CurArgOffset + (ArgSize - ObjSize)); 1611 SDOperand FIN = DAG.getFrameIndex(FI, PtrVT); 1612 ArgVal = DAG.getLoad(ObjectVT, Root, FIN, NULL, 0); 1613 } 1614 1615 ArgValues.push_back(ArgVal); 1616 } 1617 1618 // If the function takes variable number of arguments, make a frame index for 1619 // the start of the first vararg value... for expansion of llvm.va_start. 1620 if (isVarArg) { 1621 1622 int depth; 1623 if (isELF32_ABI) { 1624 VarArgsNumGPR = GPR_idx; 1625 VarArgsNumFPR = FPR_idx; 1626 1627 // Make room for Num_GPR_Regs, Num_FPR_Regs and for a possible frame 1628 // pointer. 1629 depth = -(Num_GPR_Regs * MVT::getSizeInBits(PtrVT)/8 + 1630 Num_FPR_Regs * MVT::getSizeInBits(MVT::f64)/8 + 1631 MVT::getSizeInBits(PtrVT)/8); 1632 1633 VarArgsStackOffset = MFI->CreateFixedObject(MVT::getSizeInBits(PtrVT)/8, 1634 ArgOffset); 1635 1636 } 1637 else 1638 depth = ArgOffset; 1639 1640 VarArgsFrameIndex = MFI->CreateFixedObject(MVT::getSizeInBits(PtrVT)/8, 1641 depth); 1642 SDOperand FIN = DAG.getFrameIndex(VarArgsFrameIndex, PtrVT); 1643 1644 // In ELF 32 ABI, the fixed integer arguments of a variadic function are 1645 // stored to the VarArgsFrameIndex on the stack. 1646 if (isELF32_ABI) { 1647 for (GPR_idx = 0; GPR_idx != VarArgsNumGPR; ++GPR_idx) { 1648 SDOperand Val = DAG.getRegister(GPR[GPR_idx], PtrVT); 1649 SDOperand Store = DAG.getStore(Root, Val, FIN, NULL, 0); 1650 MemOps.push_back(Store); 1651 // Increment the address by four for the next argument to store 1652 SDOperand PtrOff = DAG.getConstant(MVT::getSizeInBits(PtrVT)/8, PtrVT); 1653 FIN = DAG.getNode(ISD::ADD, PtrOff.getValueType(), FIN, PtrOff); 1654 } 1655 } 1656 1657 // If this function is vararg, store any remaining integer argument regs 1658 // to their spots on the stack so that they may be loaded by deferencing the 1659 // result of va_next. 
1660 for (; GPR_idx != Num_GPR_Regs; ++GPR_idx) { 1661 unsigned VReg; 1662 if (isPPC64) 1663 VReg = RegInfo.createVirtualRegister(&PPC::G8RCRegClass); 1664 else 1665 VReg = RegInfo.createVirtualRegister(&PPC::GPRCRegClass); 1666 1667 RegInfo.addLiveIn(GPR[GPR_idx], VReg); 1668 SDOperand Val = DAG.getCopyFromReg(Root, VReg, PtrVT); 1669 SDOperand Store = DAG.getStore(Val.getValue(1), Val, FIN, NULL, 0); 1670 MemOps.push_back(Store); 1671 // Increment the address by four for the next argument to store 1672 SDOperand PtrOff = DAG.getConstant(MVT::getSizeInBits(PtrVT)/8, PtrVT); 1673 FIN = DAG.getNode(ISD::ADD, PtrOff.getValueType(), FIN, PtrOff); 1674 } 1675 1676 // In ELF 32 ABI, the double arguments are stored to the VarArgsFrameIndex 1677 // on the stack. 1678 if (isELF32_ABI) { 1679 for (FPR_idx = 0; FPR_idx != VarArgsNumFPR; ++FPR_idx) { 1680 SDOperand Val = DAG.getRegister(FPR[FPR_idx], MVT::f64); 1681 SDOperand Store = DAG.getStore(Root, Val, FIN, NULL, 0); 1682 MemOps.push_back(Store); 1683 // Increment the address by eight for the next argument to store 1684 SDOperand PtrOff = DAG.getConstant(MVT::getSizeInBits(MVT::f64)/8, 1685 PtrVT); 1686 FIN = DAG.getNode(ISD::ADD, PtrOff.getValueType(), FIN, PtrOff); 1687 } 1688 1689 for (; FPR_idx != Num_FPR_Regs; ++FPR_idx) { 1690 unsigned VReg; 1691 VReg = RegInfo.createVirtualRegister(&PPC::F8RCRegClass); 1692 1693 RegInfo.addLiveIn(FPR[FPR_idx], VReg); 1694 SDOperand Val = DAG.getCopyFromReg(Root, VReg, MVT::f64); 1695 SDOperand Store = DAG.getStore(Val.getValue(1), Val, FIN, NULL, 0); 1696 MemOps.push_back(Store); 1697 // Increment the address by eight for the next argument to store 1698 SDOperand PtrOff = DAG.getConstant(MVT::getSizeInBits(MVT::f64)/8, 1699 PtrVT); 1700 FIN = DAG.getNode(ISD::ADD, PtrOff.getValueType(), FIN, PtrOff); 1701 } 1702 } 1703 } 1704 1705 if (!MemOps.empty()) 1706 Root = DAG.getNode(ISD::TokenFactor, MVT::Other,&MemOps[0],MemOps.size()); 1707 1708 ArgValues.push_back(Root); 1709 1710 // Return the new list of results. 1711 std::vector<MVT::ValueType> RetVT(Op.Val->value_begin(), 1712 Op.Val->value_end()); 1713 return DAG.getNode(ISD::MERGE_VALUES, RetVT, &ArgValues[0], ArgValues.size()); 1714} 1715 1716/// isCallCompatibleAddress - Return the immediate to use if the specified 1717/// 32-bit value is representable in the immediate field of a BxA instruction. 1718static SDNode *isBLACompatibleAddress(SDOperand Op, SelectionDAG &DAG) { 1719 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op); 1720 if (!C) return 0; 1721 1722 int Addr = C->getValue(); 1723 if ((Addr & 3) != 0 || // Low 2 bits are implicitly zero. 1724 (Addr << 6 >> 6) != Addr) 1725 return 0; // Top 6 bits have to be sext of immediate. 1726 1727 return DAG.getConstant((int)C->getValue() >> 2, 1728 DAG.getTargetLoweringInfo().getPointerTy()).Val; 1729} 1730 1731/// CreateCopyOfByValArgument - Make a copy of an aggregate at address specified 1732/// by "Src" to address "Dst" of size "Size". Alignment information is 1733/// specified by the specific parameter attribute. The copy will be passed as 1734/// a byval function parameter. 1735/// Sometimes what we are copying is the end of a larger object, the part that 1736/// does not fit in registers. 
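/// (For reference: the byval alignment is encoded in the ParamFlags word as a
/// log2 value, so the code below recovers it as
///   Align = 1 << ((Flags & ByValAlign) >> ByValAlignOffs)
/// before handing the chain, pointers, size, and alignment to DAG.getMemcpy.)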
1737static SDOperand 1738CreateCopyOfByValArgument(SDOperand Src, SDOperand Dst, SDOperand Chain, 1739 ISD::ParamFlags::ParamFlagsTy Flags, 1740 SelectionDAG &DAG, unsigned Size) { 1741 unsigned Align = ISD::ParamFlags::One << 1742 ((Flags & ISD::ParamFlags::ByValAlign) >> ISD::ParamFlags::ByValAlignOffs); 1743 SDOperand AlignNode = DAG.getConstant(Align, MVT::i32); 1744 SDOperand SizeNode = DAG.getConstant(Size, MVT::i32); 1745 SDOperand AlwaysInline = DAG.getConstant(0, MVT::i32); 1746 return DAG.getMemcpy(Chain, Dst, Src, SizeNode, AlignNode, AlwaysInline); 1747} 1748 1749SDOperand PPCTargetLowering::LowerCALL(SDOperand Op, SelectionDAG &DAG, 1750 const PPCSubtarget &Subtarget) { 1751 SDOperand Chain = Op.getOperand(0); 1752 bool isVarArg = cast<ConstantSDNode>(Op.getOperand(2))->getValue() != 0; 1753 SDOperand Callee = Op.getOperand(4); 1754 unsigned NumOps = (Op.getNumOperands() - 5) / 2; 1755 1756 bool isMachoABI = Subtarget.isMachoABI(); 1757 bool isELF32_ABI = Subtarget.isELF32_ABI(); 1758 1759 MVT::ValueType PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 1760 bool isPPC64 = PtrVT == MVT::i64; 1761 unsigned PtrByteSize = isPPC64 ? 8 : 4; 1762 1763 // args_to_use will accumulate outgoing args for the PPCISD::CALL case in 1764 // SelectExpr to use to put the arguments in the appropriate registers. 1765 std::vector<SDOperand> args_to_use; 1766 1767 // Count how many bytes are to be pushed on the stack, including the linkage 1768 // area, and parameter passing area. We start with 24/48 bytes, which is 1769 // prereserved space for [SP][CR][LR][3 x unused]. 1770 unsigned NumBytes = PPCFrameInfo::getLinkageSize(isPPC64, isMachoABI); 1771 1772 // Add up all the space actually used. 1773 // In 32-bit non-varargs calls, Altivec parameters all go at the end; usually 1774 // they all go in registers, but we must reserve stack space for them for 1775 // possible use by the caller. In varargs or 64-bit calls, parameters are 1776 // assigned stack space in order, with padding so Altivec parameters are 1777 // 16-byte aligned. 1778 unsigned nAltivecParamsAtEnd = 0; 1779 for (unsigned i = 0; i != NumOps; ++i) { 1780 SDOperand Arg = Op.getOperand(5+2*i); 1781 MVT::ValueType ArgVT = Arg.getValueType(); 1782 if (ArgVT==MVT::v4f32 || ArgVT==MVT::v4i32 || 1783 ArgVT==MVT::v8i16 || ArgVT==MVT::v16i8) { 1784 if (!isVarArg && !isPPC64) { 1785 // Non-varargs Altivec parameters go after all the non-Altivec parameters; 1786 // do those last so we know how much padding we need. 1787 nAltivecParamsAtEnd++; 1788 continue; 1789 } else { 1790 // Varargs and 64-bit Altivec parameters are padded to 16 byte boundary. 1791 NumBytes = ((NumBytes+15)/16)*16; 1792 } 1793 } 1794 ISD::ParamFlags::ParamFlagsTy Flags = 1795 cast<ConstantSDNode>(Op.getOperand(5+2*i+1))->getValue(); 1796 unsigned ArgSize =MVT::getSizeInBits(Op.getOperand(5+2*i).getValueType())/8; 1797 if (Flags & ISD::ParamFlags::ByVal) 1798 ArgSize = (Flags & ISD::ParamFlags::ByValSize) >> 1799 ISD::ParamFlags::ByValSizeOffs; 1800 ArgSize = ((ArgSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize; 1801 NumBytes += ArgSize; 1802 } 1803 // Allow for Altivec parameters at the end, if needed. 1804 if (nAltivecParamsAtEnd) { 1805 NumBytes = ((NumBytes+15)/16)*16; 1806 NumBytes += 16*nAltivecParamsAtEnd; 1807 } 1808 1809 // The prolog code of the callee may store up to 8 GPR argument registers to 1810 // the stack, allowing va_start to index over them in memory if its varargs. 
1811 // Because we cannot tell if this is needed on the caller side, we have to 1812 // conservatively assume that it is needed. As such, make sure we have at 1813 // least enough stack space for the caller to store the 8 GPRs. 1814 NumBytes = std::max(NumBytes, 1815 PPCFrameInfo::getMinCallFrameSize(isPPC64, isMachoABI)); 1816 1817 // Adjust the stack pointer for the new arguments... 1818 // These operations are automatically eliminated by the prolog/epilog pass 1819 Chain = DAG.getCALLSEQ_START(Chain, 1820 DAG.getConstant(NumBytes, PtrVT)); 1821 SDOperand CallSeqStart = Chain; 1822 1823 // Set up a copy of the stack pointer for use loading and storing any 1824 // arguments that may not fit in the registers available for argument 1825 // passing. 1826 SDOperand StackPtr; 1827 if (isPPC64) 1828 StackPtr = DAG.getRegister(PPC::X1, MVT::i64); 1829 else 1830 StackPtr = DAG.getRegister(PPC::R1, MVT::i32); 1831 1832 // Figure out which arguments are going to go in registers, and which in 1833 // memory. Also, if this is a vararg function, floating point operations 1834 // must be stored to our stack, and loaded into integer regs as well, if 1835 // any integer regs are available for argument passing. 1836 unsigned ArgOffset = PPCFrameInfo::getLinkageSize(isPPC64, isMachoABI); 1837 unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0; 1838 1839 static const unsigned GPR_32[] = { // 32-bit registers. 1840 PPC::R3, PPC::R4, PPC::R5, PPC::R6, 1841 PPC::R7, PPC::R8, PPC::R9, PPC::R10, 1842 }; 1843 static const unsigned GPR_64[] = { // 64-bit registers. 1844 PPC::X3, PPC::X4, PPC::X5, PPC::X6, 1845 PPC::X7, PPC::X8, PPC::X9, PPC::X10, 1846 }; 1847 static const unsigned *FPR = GetFPR(Subtarget); 1848 1849 static const unsigned VR[] = { 1850 PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8, 1851 PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13 1852 }; 1853 const unsigned NumGPRs = array_lengthof(GPR_32); 1854 const unsigned NumFPRs = isMachoABI ? 13 : 8; 1855 const unsigned NumVRs = array_lengthof( VR); 1856 1857 const unsigned *GPR = isPPC64 ? GPR_64 : GPR_32; 1858 1859 std::vector<std::pair<unsigned, SDOperand> > RegsToPass; 1860 SmallVector<SDOperand, 8> MemOpChains; 1861 for (unsigned i = 0; i != NumOps; ++i) { 1862 bool inMem = false; 1863 SDOperand Arg = Op.getOperand(5+2*i); 1864 ISD::ParamFlags::ParamFlagsTy Flags = 1865 cast<ConstantSDNode>(Op.getOperand(5+2*i+1))->getValue(); 1866 unsigned AlignFlag = ISD::ParamFlags::One << 1867 ISD::ParamFlags::OrigAlignmentOffs; 1868 // See if next argument requires stack alignment in ELF 1869 unsigned next = 5+2*(i+1)+1; 1870 bool Expand = (Arg.getValueType() == MVT::f64) || ((i + 1 < NumOps) && 1871 (cast<ConstantSDNode>(Op.getOperand(next))->getValue() & AlignFlag) && 1872 (!(Flags & AlignFlag))); 1873 1874 // PtrOff will be used to store the current argument to the stack if a 1875 // register cannot be found for it. 1876 SDOperand PtrOff; 1877 1878 // Stack align in ELF 32 1879 if (isELF32_ABI && Expand) 1880 PtrOff = DAG.getConstant(ArgOffset + ((ArgOffset/4) % 2) * PtrByteSize, 1881 StackPtr.getValueType()); 1882 else 1883 PtrOff = DAG.getConstant(ArgOffset, StackPtr.getValueType()); 1884 1885 PtrOff = DAG.getNode(ISD::ADD, PtrVT, StackPtr, PtrOff); 1886 1887 // On PPC64, promote integers to 64-bit values. 1888 if (isPPC64 && Arg.getValueType() == MVT::i32) { 1889 unsigned ExtOp = (Flags & 1) ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND; 1890 Arg = DAG.getNode(ExtOp, MVT::i64, Arg); 1891 } 1892 1893 // FIXME Elf untested, what are alignment rules? 
1894 // FIXME memcpy is used way more than necessary. Correctness first. 1895 if (Flags & ISD::ParamFlags::ByVal) { 1896 unsigned Size = (Flags & ISD::ParamFlags::ByValSize) >> 1897 ISD::ParamFlags::ByValSizeOffs; 1898 if (isELF32_ABI && Expand) GPR_idx += (GPR_idx % 2); 1899 if (Size==1 || Size==2) { 1900 // Very small objects are passed right-justified. 1901 // Everything else is passed left-justified. 1902 MVT::ValueType VT = (Size==1) ? MVT::i8 : MVT::i16; 1903 if (GPR_idx != NumGPRs) { 1904 SDOperand Load = DAG.getExtLoad(ISD::EXTLOAD, PtrVT, Chain, Arg, 1905 NULL, 0, VT); 1906 MemOpChains.push_back(Load.getValue(1)); 1907 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 1908 if (isMachoABI) 1909 ArgOffset += PtrByteSize; 1910 } else { 1911 SDOperand Const = DAG.getConstant(4 - Size, PtrOff.getValueType()); 1912 SDOperand AddPtr = DAG.getNode(ISD::ADD, PtrVT, PtrOff, Const); 1913 SDOperand MemcpyCall = CreateCopyOfByValArgument(Arg, AddPtr, 1914 CallSeqStart.Val->getOperand(0), 1915 Flags, DAG, Size); 1916 // This must go outside the CALLSEQ_START..END. 1917 SDOperand NewCallSeqStart = DAG.getCALLSEQ_START(MemcpyCall, 1918 CallSeqStart.Val->getOperand(1)); 1919 DAG.ReplaceAllUsesWith(CallSeqStart.Val, NewCallSeqStart.Val); 1920 Chain = CallSeqStart = NewCallSeqStart; 1921 ArgOffset += PtrByteSize; 1922 } 1923 continue; 1924 } 1925 // Copy entire object into memory. There are cases where gcc-generated 1926 // code assumes it is there, even if it could be put entirely into 1927 // registers. (This is not what the doc says.) 1928 SDOperand MemcpyCall = CreateCopyOfByValArgument(Arg, PtrOff, 1929 CallSeqStart.Val->getOperand(0), 1930 Flags, DAG, Size); 1931 // This must go outside the CALLSEQ_START..END. 1932 SDOperand NewCallSeqStart = DAG.getCALLSEQ_START(MemcpyCall, 1933 CallSeqStart.Val->getOperand(1)); 1934 DAG.ReplaceAllUsesWith(CallSeqStart.Val, NewCallSeqStart.Val); 1935 Chain = CallSeqStart = NewCallSeqStart; 1936 // And copy the pieces of it that fit into registers. 
      // FIXME memcpy is used way more than necessary. Correctness first.
      if (Flags & ISD::ParamFlags::ByVal) {
        unsigned Size = (Flags & ISD::ParamFlags::ByValSize) >>
                        ISD::ParamFlags::ByValSizeOffs;
        if (isELF32_ABI && Expand) GPR_idx += (GPR_idx % 2);
        if (Size==1 || Size==2) {
          // Very small objects are passed right-justified.
          // Everything else is passed left-justified.
          MVT::ValueType VT = (Size==1) ? MVT::i8 : MVT::i16;
          if (GPR_idx != NumGPRs) {
            SDOperand Load = DAG.getExtLoad(ISD::EXTLOAD, PtrVT, Chain, Arg,
                                            NULL, 0, VT);
            MemOpChains.push_back(Load.getValue(1));
            RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
            if (isMachoABI)
              ArgOffset += PtrByteSize;
          } else {
            SDOperand Const = DAG.getConstant(4 - Size, PtrOff.getValueType());
            SDOperand AddPtr = DAG.getNode(ISD::ADD, PtrVT, PtrOff, Const);
            SDOperand MemcpyCall = CreateCopyOfByValArgument(Arg, AddPtr,
                                          CallSeqStart.Val->getOperand(0),
                                          Flags, DAG, Size);
            // This must go outside the CALLSEQ_START..END.
            SDOperand NewCallSeqStart = DAG.getCALLSEQ_START(MemcpyCall,
                                          CallSeqStart.Val->getOperand(1));
            DAG.ReplaceAllUsesWith(CallSeqStart.Val, NewCallSeqStart.Val);
            Chain = CallSeqStart = NewCallSeqStart;
            ArgOffset += PtrByteSize;
          }
          continue;
        }
        // Copy entire object into memory. There are cases where gcc-generated
        // code assumes it is there, even if it could be put entirely into
        // registers. (This is not what the doc says.)
        SDOperand MemcpyCall = CreateCopyOfByValArgument(Arg, PtrOff,
                                      CallSeqStart.Val->getOperand(0),
                                      Flags, DAG, Size);
        // This must go outside the CALLSEQ_START..END.
        SDOperand NewCallSeqStart = DAG.getCALLSEQ_START(MemcpyCall,
                                      CallSeqStart.Val->getOperand(1));
        DAG.ReplaceAllUsesWith(CallSeqStart.Val, NewCallSeqStart.Val);
        Chain = CallSeqStart = NewCallSeqStart;
        // And copy the pieces of it that fit into registers.
1937 for (unsigned j=0; j<Size; j+=PtrByteSize) { 1938 SDOperand Const = DAG.getConstant(j, PtrOff.getValueType()); 1939 SDOperand AddArg = DAG.getNode(ISD::ADD, PtrVT, Arg, Const); 1940 if (GPR_idx != NumGPRs) { 1941 SDOperand Load = DAG.getLoad(PtrVT, Chain, AddArg, NULL, 0); 1942 MemOpChains.push_back(Load.getValue(1)); 1943 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 1944 if (isMachoABI) 1945 ArgOffset += PtrByteSize; 1946 } else { 1947 ArgOffset += ((Size - j + PtrByteSize-1)/PtrByteSize)*PtrByteSize; 1948 break; 1949 } 1950 } 1951 continue; 1952 } 1953 1954 switch (Arg.getValueType()) { 1955 default: assert(0 && "Unexpected ValueType for argument!"); 1956 case MVT::i32: 1957 case MVT::i64: 1958 // Double word align in ELF 1959 if (isELF32_ABI && Expand) GPR_idx += (GPR_idx % 2); 1960 if (GPR_idx != NumGPRs) { 1961 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Arg)); 1962 } else { 1963 MemOpChains.push_back(DAG.getStore(Chain, Arg, PtrOff, NULL, 0)); 1964 inMem = true; 1965 } 1966 if (inMem || isMachoABI) { 1967 // Stack align in ELF 1968 if (isELF32_ABI && Expand) 1969 ArgOffset += ((ArgOffset/4) % 2) * PtrByteSize; 1970 1971 ArgOffset += PtrByteSize; 1972 } 1973 break; 1974 case MVT::f32: 1975 case MVT::f64: 1976 if (FPR_idx != NumFPRs) { 1977 RegsToPass.push_back(std::make_pair(FPR[FPR_idx++], Arg)); 1978 1979 if (isVarArg) { 1980 SDOperand Store = DAG.getStore(Chain, Arg, PtrOff, NULL, 0); 1981 MemOpChains.push_back(Store); 1982 1983 // Float varargs are always shadowed in available integer registers 1984 if (GPR_idx != NumGPRs) { 1985 SDOperand Load = DAG.getLoad(PtrVT, Store, PtrOff, NULL, 0); 1986 MemOpChains.push_back(Load.getValue(1)); 1987 if (isMachoABI) RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], 1988 Load)); 1989 } 1990 if (GPR_idx != NumGPRs && Arg.getValueType() == MVT::f64 && !isPPC64){ 1991 SDOperand ConstFour = DAG.getConstant(4, PtrOff.getValueType()); 1992 PtrOff = DAG.getNode(ISD::ADD, PtrVT, PtrOff, ConstFour); 1993 SDOperand Load = DAG.getLoad(PtrVT, Store, PtrOff, NULL, 0); 1994 MemOpChains.push_back(Load.getValue(1)); 1995 if (isMachoABI) RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], 1996 Load)); 1997 } 1998 } else { 1999 // If we have any FPRs remaining, we may also have GPRs remaining. 2000 // Args passed in FPRs consume either 1 (f32) or 2 (f64) available 2001 // GPRs. 2002 if (isMachoABI) { 2003 if (GPR_idx != NumGPRs) 2004 ++GPR_idx; 2005 if (GPR_idx != NumGPRs && Arg.getValueType() == MVT::f64 && 2006 !isPPC64) // PPC64 has 64-bit GPR's obviously :) 2007 ++GPR_idx; 2008 } 2009 } 2010 } else { 2011 MemOpChains.push_back(DAG.getStore(Chain, Arg, PtrOff, NULL, 0)); 2012 inMem = true; 2013 } 2014 if (inMem || isMachoABI) { 2015 // Stack align in ELF 2016 if (isELF32_ABI && Expand) 2017 ArgOffset += ((ArgOffset/4) % 2) * PtrByteSize; 2018 if (isPPC64) 2019 ArgOffset += 8; 2020 else 2021 ArgOffset += Arg.getValueType() == MVT::f32 ? 4 : 8; 2022 } 2023 break; 2024 case MVT::v4f32: 2025 case MVT::v4i32: 2026 case MVT::v8i16: 2027 case MVT::v16i8: 2028 if (isVarArg) { 2029 // These go aligned on the stack, or in the corresponding R registers 2030 // when within range. The Darwin PPC ABI doc claims they also go in 2031 // V registers; in fact gcc does this only for arguments that are 2032 // prototyped, not for those that match the ... We do it for all 2033 // arguments, seems to work. 
2034 while (ArgOffset % 16 !=0) { 2035 ArgOffset += PtrByteSize; 2036 if (GPR_idx != NumGPRs) 2037 GPR_idx++; 2038 } 2039 // We could elide this store in the case where the object fits 2040 // entirely in R registers. Maybe later. 2041 PtrOff = DAG.getNode(ISD::ADD, PtrVT, StackPtr, 2042 DAG.getConstant(ArgOffset, PtrVT)); 2043 SDOperand Store = DAG.getStore(Chain, Arg, PtrOff, NULL, 0); 2044 MemOpChains.push_back(Store); 2045 if (VR_idx != NumVRs) { 2046 SDOperand Load = DAG.getLoad(MVT::v4f32, Store, PtrOff, NULL, 0); 2047 MemOpChains.push_back(Load.getValue(1)); 2048 RegsToPass.push_back(std::make_pair(VR[VR_idx++], Load)); 2049 } 2050 ArgOffset += 16; 2051 for (unsigned i=0; i<16; i+=PtrByteSize) { 2052 if (GPR_idx == NumGPRs) 2053 break; 2054 SDOperand Ix = DAG.getNode(ISD::ADD, PtrVT, PtrOff, 2055 DAG.getConstant(i, PtrVT)); 2056 SDOperand Load = DAG.getLoad(PtrVT, Store, Ix, NULL, 0); 2057 MemOpChains.push_back(Load.getValue(1)); 2058 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 2059 } 2060 break; 2061 } 2062 // Non-varargs Altivec params generally go in registers, but have 2063 // stack space allocated at the end. 2064 if (VR_idx != NumVRs) { 2065 // Doesn't have GPR space allocated. 2066 RegsToPass.push_back(std::make_pair(VR[VR_idx++], Arg)); 2067 } else if (nAltivecParamsAtEnd==0) { 2068 // We are emitting Altivec params in order. 2069 PtrOff = DAG.getNode(ISD::ADD, PtrVT, StackPtr, 2070 DAG.getConstant(ArgOffset, PtrVT)); 2071 SDOperand Store = DAG.getStore(Chain, Arg, PtrOff, NULL, 0); 2072 MemOpChains.push_back(Store); 2073 ArgOffset += 16; 2074 } 2075 break; 2076 } 2077 } 2078 // If all Altivec parameters fit in registers, as they usually do, 2079 // they get stack space following the non-Altivec parameters. We 2080 // don't track this here because nobody below needs it. 2081 // If there are more Altivec parameters than fit in registers emit 2082 // the stores here. 2083 if (!isVarArg && nAltivecParamsAtEnd > NumVRs) { 2084 unsigned j = 0; 2085 // Offset is aligned; skip 1st 12 params which go in V registers. 2086 ArgOffset = ((ArgOffset+15)/16)*16; 2087 ArgOffset += 12*16; 2088 for (unsigned i = 0; i != NumOps; ++i) { 2089 SDOperand Arg = Op.getOperand(5+2*i); 2090 MVT::ValueType ArgType = Arg.getValueType(); 2091 if (ArgType==MVT::v4f32 || ArgType==MVT::v4i32 || 2092 ArgType==MVT::v8i16 || ArgType==MVT::v16i8) { 2093 if (++j > NumVRs) { 2094 SDOperand PtrOff = DAG.getNode(ISD::ADD, PtrVT, StackPtr, 2095 DAG.getConstant(ArgOffset, PtrVT)); 2096 SDOperand Store = DAG.getStore(Chain, Arg, PtrOff, NULL, 0); 2097 MemOpChains.push_back(Store); 2098 ArgOffset += 16; 2099 } 2100 } 2101 } 2102 } 2103 2104 if (!MemOpChains.empty()) 2105 Chain = DAG.getNode(ISD::TokenFactor, MVT::Other, 2106 &MemOpChains[0], MemOpChains.size()); 2107 2108 // Build a sequence of copy-to-reg nodes chained together with token chain 2109 // and flag operands which copy the outgoing args into the appropriate regs. 2110 SDOperand InFlag; 2111 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) { 2112 Chain = DAG.getCopyToReg(Chain, RegsToPass[i].first, RegsToPass[i].second, 2113 InFlag); 2114 InFlag = Chain.getValue(1); 2115 } 2116 2117 // With the ELF 32 ABI, set CR6 to true if this is a vararg call. 
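  // (As I understand the SVR4 ABI, CR bit 6 tells a varargs callee whether
  //  any floating-point arguments were passed in registers; setting it
  //  unconditionally here is the conservative choice -- at worst the callee
  //  saves FPRs it did not strictly need to.)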
2118 if (isVarArg && isELF32_ABI) { 2119 SDOperand SetCR(DAG.getTargetNode(PPC::CRSET, MVT::i32), 0); 2120 Chain = DAG.getCopyToReg(Chain, PPC::CR1EQ, SetCR, InFlag); 2121 InFlag = Chain.getValue(1); 2122 } 2123 2124 std::vector<MVT::ValueType> NodeTys; 2125 NodeTys.push_back(MVT::Other); // Returns a chain 2126 NodeTys.push_back(MVT::Flag); // Returns a flag for retval copy to use. 2127 2128 SmallVector<SDOperand, 8> Ops; 2129 unsigned CallOpc = isMachoABI? PPCISD::CALL_Macho : PPCISD::CALL_ELF; 2130 2131 // If the callee is a GlobalAddress/ExternalSymbol node (quite common, every 2132 // direct call is) turn it into a TargetGlobalAddress/TargetExternalSymbol 2133 // node so that legalize doesn't hack it. 2134 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) 2135 Callee = DAG.getTargetGlobalAddress(G->getGlobal(), Callee.getValueType()); 2136 else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) 2137 Callee = DAG.getTargetExternalSymbol(S->getSymbol(), Callee.getValueType()); 2138 else if (SDNode *Dest = isBLACompatibleAddress(Callee, DAG)) 2139 // If this is an absolute destination address, use the munged value. 2140 Callee = SDOperand(Dest, 0); 2141 else { 2142 // Otherwise, this is an indirect call. We have to use a MTCTR/BCTRL pair 2143 // to do the call, we can't use PPCISD::CALL. 2144 SDOperand MTCTROps[] = {Chain, Callee, InFlag}; 2145 Chain = DAG.getNode(PPCISD::MTCTR, NodeTys, MTCTROps, 2+(InFlag.Val!=0)); 2146 InFlag = Chain.getValue(1); 2147 2148 // Copy the callee address into R12/X12 on darwin. 2149 if (isMachoABI) { 2150 unsigned Reg = Callee.getValueType() == MVT::i32 ? PPC::R12 : PPC::X12; 2151 Chain = DAG.getCopyToReg(Chain, Reg, Callee, InFlag); 2152 InFlag = Chain.getValue(1); 2153 } 2154 2155 NodeTys.clear(); 2156 NodeTys.push_back(MVT::Other); 2157 NodeTys.push_back(MVT::Flag); 2158 Ops.push_back(Chain); 2159 CallOpc = isMachoABI ? PPCISD::BCTRL_Macho : PPCISD::BCTRL_ELF; 2160 Callee.Val = 0; 2161 } 2162 2163 // If this is a direct call, pass the chain and the callee. 2164 if (Callee.Val) { 2165 Ops.push_back(Chain); 2166 Ops.push_back(Callee); 2167 } 2168 2169 // Add argument registers to the end of the list so that they are known live 2170 // into the call. 2171 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) 2172 Ops.push_back(DAG.getRegister(RegsToPass[i].first, 2173 RegsToPass[i].second.getValueType())); 2174 2175 if (InFlag.Val) 2176 Ops.push_back(InFlag); 2177 Chain = DAG.getNode(CallOpc, NodeTys, &Ops[0], Ops.size()); 2178 InFlag = Chain.getValue(1); 2179 2180 Chain = DAG.getCALLSEQ_END(Chain, 2181 DAG.getConstant(NumBytes, PtrVT), 2182 DAG.getConstant(0, PtrVT), 2183 InFlag); 2184 if (Op.Val->getValueType(0) != MVT::Other) 2185 InFlag = Chain.getValue(1); 2186 2187 SDOperand ResultVals[9]; 2188 unsigned NumResults = 0; 2189 NodeTys.clear(); 2190 2191 // If the call has results, copy the values out of the ret val registers. 2192 switch (Op.Val->getValueType(0)) { 2193 default: assert(0 && "Unexpected ret value!"); 2194 case MVT::Other: break; 2195 case MVT::i32: 2196 // There are 8 result regs for Complex double, and 4 for Complex long long. 
2197 if (Op.Val->getNumValues()>=8 && Op.Val->getValueType(7) == MVT::i32) { 2198 Chain = DAG.getCopyFromReg(Chain, PPC::R3, MVT::i32, InFlag).getValue(1); 2199 ResultVals[0] = Chain.getValue(0); 2200 Chain = DAG.getCopyFromReg(Chain, PPC::R4, MVT::i32, 2201 Chain.getValue(2)).getValue(1); 2202 ResultVals[1] = Chain.getValue(0); 2203 Chain = DAG.getCopyFromReg(Chain, PPC::R5, MVT::i32, 2204 Chain.getValue(2)).getValue(1); 2205 ResultVals[2] = Chain.getValue(0); 2206 Chain = DAG.getCopyFromReg(Chain, PPC::R6, MVT::i32, 2207 Chain.getValue(2)).getValue(1); 2208 ResultVals[3] = Chain.getValue(0); 2209 Chain = DAG.getCopyFromReg(Chain, PPC::R7, MVT::i32, 2210 Chain.getValue(2)).getValue(1); 2211 ResultVals[4] = Chain.getValue(0); 2212 Chain = DAG.getCopyFromReg(Chain, PPC::R8, MVT::i32, 2213 Chain.getValue(2)).getValue(1); 2214 ResultVals[5] = Chain.getValue(0); 2215 Chain = DAG.getCopyFromReg(Chain, PPC::R9, MVT::i32, 2216 Chain.getValue(2)).getValue(1); 2217 ResultVals[6] = Chain.getValue(0); 2218 Chain = DAG.getCopyFromReg(Chain, PPC::R10, MVT::i32, 2219 Chain.getValue(2)).getValue(1); 2220 ResultVals[7] = Chain.getValue(0); 2221 NumResults = 8; 2222 NodeTys.push_back(MVT::i32); 2223 NodeTys.push_back(MVT::i32); 2224 NodeTys.push_back(MVT::i32); 2225 NodeTys.push_back(MVT::i32); 2226 NodeTys.push_back(MVT::i32); 2227 NodeTys.push_back(MVT::i32); 2228 NodeTys.push_back(MVT::i32); 2229 } else if (Op.Val->getNumValues()>=4 && 2230 Op.Val->getValueType(3) == MVT::i32) { 2231 Chain = DAG.getCopyFromReg(Chain, PPC::R3, MVT::i32, InFlag).getValue(1); 2232 ResultVals[0] = Chain.getValue(0); 2233 Chain = DAG.getCopyFromReg(Chain, PPC::R4, MVT::i32, 2234 Chain.getValue(2)).getValue(1); 2235 ResultVals[1] = Chain.getValue(0); 2236 Chain = DAG.getCopyFromReg(Chain, PPC::R5, MVT::i32, 2237 Chain.getValue(2)).getValue(1); 2238 ResultVals[2] = Chain.getValue(0); 2239 Chain = DAG.getCopyFromReg(Chain, PPC::R6, MVT::i32, 2240 Chain.getValue(2)).getValue(1); 2241 ResultVals[3] = Chain.getValue(0); 2242 NumResults = 4; 2243 NodeTys.push_back(MVT::i32); 2244 NodeTys.push_back(MVT::i32); 2245 NodeTys.push_back(MVT::i32); 2246 } else if (Op.Val->getValueType(1) == MVT::i32) { 2247 Chain = DAG.getCopyFromReg(Chain, PPC::R3, MVT::i32, InFlag).getValue(1); 2248 ResultVals[0] = Chain.getValue(0); 2249 Chain = DAG.getCopyFromReg(Chain, PPC::R4, MVT::i32, 2250 Chain.getValue(2)).getValue(1); 2251 ResultVals[1] = Chain.getValue(0); 2252 NumResults = 2; 2253 NodeTys.push_back(MVT::i32); 2254 } else { 2255 Chain = DAG.getCopyFromReg(Chain, PPC::R3, MVT::i32, InFlag).getValue(1); 2256 ResultVals[0] = Chain.getValue(0); 2257 NumResults = 1; 2258 } 2259 NodeTys.push_back(MVT::i32); 2260 break; 2261 case MVT::i64: 2262 if (Op.Val->getNumValues()>=4 && 2263 Op.Val->getValueType(3) == MVT::i64) { 2264 Chain = DAG.getCopyFromReg(Chain, PPC::X3, MVT::i64, InFlag).getValue(1); 2265 ResultVals[0] = Chain.getValue(0); 2266 Chain = DAG.getCopyFromReg(Chain, PPC::X4, MVT::i64, 2267 Chain.getValue(2)).getValue(1); 2268 ResultVals[1] = Chain.getValue(0); 2269 Chain = DAG.getCopyFromReg(Chain, PPC::X5, MVT::i64, 2270 Chain.getValue(2)).getValue(1); 2271 ResultVals[2] = Chain.getValue(0); 2272 Chain = DAG.getCopyFromReg(Chain, PPC::X6, MVT::i64, 2273 Chain.getValue(2)).getValue(1); 2274 ResultVals[3] = Chain.getValue(0); 2275 NumResults = 4; 2276 NodeTys.push_back(MVT::i64); 2277 NodeTys.push_back(MVT::i64); 2278 NodeTys.push_back(MVT::i64); 2279 } else if (Op.Val->getValueType(1) == MVT::i64) { 2280 Chain = DAG.getCopyFromReg(Chain, 
PPC::X3, MVT::i64, InFlag).getValue(1); 2281 ResultVals[0] = Chain.getValue(0); 2282 Chain = DAG.getCopyFromReg(Chain, PPC::X4, MVT::i64, 2283 Chain.getValue(2)).getValue(1); 2284 ResultVals[1] = Chain.getValue(0); 2285 NumResults = 2; 2286 NodeTys.push_back(MVT::i64); 2287 } else { 2288 Chain = DAG.getCopyFromReg(Chain, PPC::X3, MVT::i64, InFlag).getValue(1); 2289 ResultVals[0] = Chain.getValue(0); 2290 NumResults = 1; 2291 } 2292 NodeTys.push_back(MVT::i64); 2293 break; 2294 case MVT::f64: 2295 if (Op.Val->getValueType(1) == MVT::f64) { 2296 Chain = DAG.getCopyFromReg(Chain, PPC::F1, MVT::f64, InFlag).getValue(1); 2297 ResultVals[0] = Chain.getValue(0); 2298 Chain = DAG.getCopyFromReg(Chain, PPC::F2, MVT::f64, 2299 Chain.getValue(2)).getValue(1); 2300 ResultVals[1] = Chain.getValue(0); 2301 NumResults = 2; 2302 NodeTys.push_back(MVT::f64); 2303 NodeTys.push_back(MVT::f64); 2304 break; 2305 } 2306 // else fall through 2307 case MVT::f32: 2308 Chain = DAG.getCopyFromReg(Chain, PPC::F1, Op.Val->getValueType(0), 2309 InFlag).getValue(1); 2310 ResultVals[0] = Chain.getValue(0); 2311 NumResults = 1; 2312 NodeTys.push_back(Op.Val->getValueType(0)); 2313 break; 2314 case MVT::v4f32: 2315 case MVT::v4i32: 2316 case MVT::v8i16: 2317 case MVT::v16i8: 2318 Chain = DAG.getCopyFromReg(Chain, PPC::V2, Op.Val->getValueType(0), 2319 InFlag).getValue(1); 2320 ResultVals[0] = Chain.getValue(0); 2321 NumResults = 1; 2322 NodeTys.push_back(Op.Val->getValueType(0)); 2323 break; 2324 } 2325 2326 NodeTys.push_back(MVT::Other); 2327 2328 // If the function returns void, just return the chain. 2329 if (NumResults == 0) 2330 return Chain; 2331 2332 // Otherwise, merge everything together with a MERGE_VALUES node. 2333 ResultVals[NumResults++] = Chain; 2334 SDOperand Res = DAG.getNode(ISD::MERGE_VALUES, NodeTys, 2335 ResultVals, NumResults); 2336 return Res.getValue(Op.ResNo); 2337} 2338 2339SDOperand PPCTargetLowering::LowerRET(SDOperand Op, SelectionDAG &DAG, 2340 TargetMachine &TM) { 2341 SmallVector<CCValAssign, 16> RVLocs; 2342 unsigned CC = DAG.getMachineFunction().getFunction()->getCallingConv(); 2343 bool isVarArg = DAG.getMachineFunction().getFunction()->isVarArg(); 2344 CCState CCInfo(CC, isVarArg, TM, RVLocs); 2345 CCInfo.AnalyzeReturn(Op.Val, RetCC_PPC); 2346 2347 // If this is the first return lowered for this function, add the regs to the 2348 // liveout set for the function. 2349 if (DAG.getMachineFunction().getRegInfo().liveout_empty()) { 2350 for (unsigned i = 0; i != RVLocs.size(); ++i) 2351 DAG.getMachineFunction().getRegInfo().addLiveOut(RVLocs[i].getLocReg()); 2352 } 2353 2354 SDOperand Chain = Op.getOperand(0); 2355 SDOperand Flag; 2356 2357 // Copy the result values into the output registers. 2358 for (unsigned i = 0; i != RVLocs.size(); ++i) { 2359 CCValAssign &VA = RVLocs[i]; 2360 assert(VA.isRegLoc() && "Can only return in registers!"); 2361 Chain = DAG.getCopyToReg(Chain, VA.getLocReg(), Op.getOperand(i*2+1), Flag); 2362 Flag = Chain.getValue(1); 2363 } 2364 2365 if (Flag.Val) 2366 return DAG.getNode(PPCISD::RET_FLAG, MVT::Other, Chain, Flag); 2367 else 2368 return DAG.getNode(PPCISD::RET_FLAG, MVT::Other, Chain); 2369} 2370 2371SDOperand PPCTargetLowering::LowerSTACKRESTORE(SDOperand Op, SelectionDAG &DAG, 2372 const PPCSubtarget &Subtarget) { 2373 // When we pop the dynamic allocation we need to restore the SP link. 2374 2375 // Get the corect type for pointers. 2376 MVT::ValueType PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 2377 2378 // Construct the stack pointer operand. 
2379 bool IsPPC64 = Subtarget.isPPC64(); 2380 unsigned SP = IsPPC64 ? PPC::X1 : PPC::R1; 2381 SDOperand StackPtr = DAG.getRegister(SP, PtrVT); 2382 2383 // Get the operands for the STACKRESTORE. 2384 SDOperand Chain = Op.getOperand(0); 2385 SDOperand SaveSP = Op.getOperand(1); 2386 2387 // Load the old link SP. 2388 SDOperand LoadLinkSP = DAG.getLoad(PtrVT, Chain, StackPtr, NULL, 0); 2389 2390 // Restore the stack pointer. 2391 Chain = DAG.getCopyToReg(LoadLinkSP.getValue(1), SP, SaveSP); 2392 2393 // Store the old link SP. 2394 return DAG.getStore(Chain, LoadLinkSP, StackPtr, NULL, 0); 2395} 2396 2397SDOperand PPCTargetLowering::LowerDYNAMIC_STACKALLOC(SDOperand Op, 2398 SelectionDAG &DAG, 2399 const PPCSubtarget &Subtarget) { 2400 MachineFunction &MF = DAG.getMachineFunction(); 2401 bool IsPPC64 = Subtarget.isPPC64(); 2402 bool isMachoABI = Subtarget.isMachoABI(); 2403 2404 // Get current frame pointer save index. The users of this index will be 2405 // primarily DYNALLOC instructions. 2406 PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>(); 2407 int FPSI = FI->getFramePointerSaveIndex(); 2408 2409 // If the frame pointer save index hasn't been defined yet. 2410 if (!FPSI) { 2411 // Find out what the fix offset of the frame pointer save area. 2412 int FPOffset = PPCFrameInfo::getFramePointerSaveOffset(IsPPC64, isMachoABI); 2413 2414 // Allocate the frame index for frame pointer save area. 2415 FPSI = MF.getFrameInfo()->CreateFixedObject(IsPPC64? 8 : 4, FPOffset); 2416 // Save the result. 2417 FI->setFramePointerSaveIndex(FPSI); 2418 } 2419 2420 // Get the inputs. 2421 SDOperand Chain = Op.getOperand(0); 2422 SDOperand Size = Op.getOperand(1); 2423 2424 // Get the corect type for pointers. 2425 MVT::ValueType PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 2426 // Negate the size. 2427 SDOperand NegSize = DAG.getNode(ISD::SUB, PtrVT, 2428 DAG.getConstant(0, PtrVT), Size); 2429 // Construct a node for the frame pointer save index. 2430 SDOperand FPSIdx = DAG.getFrameIndex(FPSI, PtrVT); 2431 // Build a DYNALLOC node. 2432 SDOperand Ops[3] = { Chain, NegSize, FPSIdx }; 2433 SDVTList VTs = DAG.getVTList(PtrVT, MVT::Other); 2434 return DAG.getNode(PPCISD::DYNALLOC, VTs, Ops, 3); 2435} 2436 2437 2438/// LowerSELECT_CC - Lower floating point select_cc's into fsel instruction when 2439/// possible. 2440SDOperand PPCTargetLowering::LowerSELECT_CC(SDOperand Op, SelectionDAG &DAG) { 2441 // Not FP? Not a fsel. 2442 if (!MVT::isFloatingPoint(Op.getOperand(0).getValueType()) || 2443 !MVT::isFloatingPoint(Op.getOperand(2).getValueType())) 2444 return SDOperand(); 2445 2446 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get(); 2447 2448 // Cannot handle SETEQ/SETNE. 2449 if (CC == ISD::SETEQ || CC == ISD::SETNE) return SDOperand(); 2450 2451 MVT::ValueType ResVT = Op.getValueType(); 2452 MVT::ValueType CmpVT = Op.getOperand(0).getValueType(); 2453 SDOperand LHS = Op.getOperand(0), RHS = Op.getOperand(1); 2454 SDOperand TV = Op.getOperand(2), FV = Op.getOperand(3); 2455 2456 // If the RHS of the comparison is a 0.0, we don't need to do the 2457 // subtraction at all. 2458 if (isFloatingPointZero(RHS)) 2459 switch (CC) { 2460 default: break; // SETUO etc aren't handled by fsel. 
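      // (fsel selects its second operand when the first compares >= 0.0 and
      //  its third otherwise, so the cases below only have to get the
      //  comparison into ">= 0" form: GE maps directly, LT swaps the two
      //  select operands, LE negates the LHS, and GT negates and swaps --
      //  a sketch of the reasoning; the instruction itself is emitted by the
      //  pattern for PPCISD::FSEL.)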
2461 case ISD::SETULT: 2462 case ISD::SETOLT: 2463 case ISD::SETLT: 2464 std::swap(TV, FV); // fsel is natively setge, swap operands for setlt 2465 case ISD::SETUGE: 2466 case ISD::SETOGE: 2467 case ISD::SETGE: 2468 if (LHS.getValueType() == MVT::f32) // Comparison is always 64-bits 2469 LHS = DAG.getNode(ISD::FP_EXTEND, MVT::f64, LHS); 2470 return DAG.getNode(PPCISD::FSEL, ResVT, LHS, TV, FV); 2471 case ISD::SETUGT: 2472 case ISD::SETOGT: 2473 case ISD::SETGT: 2474 std::swap(TV, FV); // fsel is natively setge, swap operands for setlt 2475 case ISD::SETULE: 2476 case ISD::SETOLE: 2477 case ISD::SETLE: 2478 if (LHS.getValueType() == MVT::f32) // Comparison is always 64-bits 2479 LHS = DAG.getNode(ISD::FP_EXTEND, MVT::f64, LHS); 2480 return DAG.getNode(PPCISD::FSEL, ResVT, 2481 DAG.getNode(ISD::FNEG, MVT::f64, LHS), TV, FV); 2482 } 2483 2484 SDOperand Cmp; 2485 switch (CC) { 2486 default: break; // SETUO etc aren't handled by fsel. 2487 case ISD::SETULT: 2488 case ISD::SETOLT: 2489 case ISD::SETLT: 2490 Cmp = DAG.getNode(ISD::FSUB, CmpVT, LHS, RHS); 2491 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits 2492 Cmp = DAG.getNode(ISD::FP_EXTEND, MVT::f64, Cmp); 2493 return DAG.getNode(PPCISD::FSEL, ResVT, Cmp, FV, TV); 2494 case ISD::SETUGE: 2495 case ISD::SETOGE: 2496 case ISD::SETGE: 2497 Cmp = DAG.getNode(ISD::FSUB, CmpVT, LHS, RHS); 2498 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits 2499 Cmp = DAG.getNode(ISD::FP_EXTEND, MVT::f64, Cmp); 2500 return DAG.getNode(PPCISD::FSEL, ResVT, Cmp, TV, FV); 2501 case ISD::SETUGT: 2502 case ISD::SETOGT: 2503 case ISD::SETGT: 2504 Cmp = DAG.getNode(ISD::FSUB, CmpVT, RHS, LHS); 2505 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits 2506 Cmp = DAG.getNode(ISD::FP_EXTEND, MVT::f64, Cmp); 2507 return DAG.getNode(PPCISD::FSEL, ResVT, Cmp, FV, TV); 2508 case ISD::SETULE: 2509 case ISD::SETOLE: 2510 case ISD::SETLE: 2511 Cmp = DAG.getNode(ISD::FSUB, CmpVT, RHS, LHS); 2512 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits 2513 Cmp = DAG.getNode(ISD::FP_EXTEND, MVT::f64, Cmp); 2514 return DAG.getNode(PPCISD::FSEL, ResVT, Cmp, TV, FV); 2515 } 2516 return SDOperand(); 2517} 2518 2519// FIXME: Split this code up when LegalizeDAGTypes lands. 2520SDOperand PPCTargetLowering::LowerFP_TO_SINT(SDOperand Op, SelectionDAG &DAG) { 2521 assert(MVT::isFloatingPoint(Op.getOperand(0).getValueType())); 2522 SDOperand Src = Op.getOperand(0); 2523 if (Src.getValueType() == MVT::f32) 2524 Src = DAG.getNode(ISD::FP_EXTEND, MVT::f64, Src); 2525 2526 SDOperand Tmp; 2527 switch (Op.getValueType()) { 2528 default: assert(0 && "Unhandled FP_TO_SINT type in custom expander!"); 2529 case MVT::i32: 2530 Tmp = DAG.getNode(PPCISD::FCTIWZ, MVT::f64, Src); 2531 break; 2532 case MVT::i64: 2533 Tmp = DAG.getNode(PPCISD::FCTIDZ, MVT::f64, Src); 2534 break; 2535 } 2536 2537 // Convert the FP value to an int value through memory. 2538 SDOperand FIPtr = DAG.CreateStackTemporary(MVT::f64); 2539 2540 // Emit a store to the stack slot. 2541 SDOperand Chain = DAG.getStore(DAG.getEntryNode(), Tmp, FIPtr, NULL, 0); 2542 2543 // Result is a load from the stack slot. If loading 4 bytes, make sure to 2544 // add in a bias. 
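  // (The fctiwz result lives in the low 32 bits of the f64 stack slot; since
  //  PPC is big-endian that low word sits at byte offset 4, which is why the
  //  i32 load below biases the pointer by 4.)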
2545 if (Op.getValueType() == MVT::i32) 2546 FIPtr = DAG.getNode(ISD::ADD, FIPtr.getValueType(), FIPtr, 2547 DAG.getConstant(4, FIPtr.getValueType())); 2548 return DAG.getLoad(Op.getValueType(), Chain, FIPtr, NULL, 0); 2549} 2550 2551SDOperand PPCTargetLowering::LowerFP_ROUND_INREG(SDOperand Op, 2552 SelectionDAG &DAG) { 2553 assert(Op.getValueType() == MVT::ppcf128); 2554 SDNode *Node = Op.Val; 2555 assert(Node->getOperand(0).getValueType() == MVT::ppcf128); 2556 assert(Node->getOperand(0).Val->getOpcode() == ISD::BUILD_PAIR); 2557 SDOperand Lo = Node->getOperand(0).Val->getOperand(0); 2558 SDOperand Hi = Node->getOperand(0).Val->getOperand(1); 2559 2560 // This sequence changes FPSCR to do round-to-zero, adds the two halves 2561 // of the long double, and puts FPSCR back the way it was. We do not 2562 // actually model FPSCR. 2563 std::vector<MVT::ValueType> NodeTys; 2564 SDOperand Ops[4], Result, MFFSreg, InFlag, FPreg; 2565 2566 NodeTys.push_back(MVT::f64); // Return register 2567 NodeTys.push_back(MVT::Flag); // Returns a flag for later insns 2568 Result = DAG.getNode(PPCISD::MFFS, NodeTys, &InFlag, 0); 2569 MFFSreg = Result.getValue(0); 2570 InFlag = Result.getValue(1); 2571 2572 NodeTys.clear(); 2573 NodeTys.push_back(MVT::Flag); // Returns a flag 2574 Ops[0] = DAG.getConstant(31, MVT::i32); 2575 Ops[1] = InFlag; 2576 Result = DAG.getNode(PPCISD::MTFSB1, NodeTys, Ops, 2); 2577 InFlag = Result.getValue(0); 2578 2579 NodeTys.clear(); 2580 NodeTys.push_back(MVT::Flag); // Returns a flag 2581 Ops[0] = DAG.getConstant(30, MVT::i32); 2582 Ops[1] = InFlag; 2583 Result = DAG.getNode(PPCISD::MTFSB0, NodeTys, Ops, 2); 2584 InFlag = Result.getValue(0); 2585 2586 NodeTys.clear(); 2587 NodeTys.push_back(MVT::f64); // result of add 2588 NodeTys.push_back(MVT::Flag); // Returns a flag 2589 Ops[0] = Lo; 2590 Ops[1] = Hi; 2591 Ops[2] = InFlag; 2592 Result = DAG.getNode(PPCISD::FADDRTZ, NodeTys, Ops, 3); 2593 FPreg = Result.getValue(0); 2594 InFlag = Result.getValue(1); 2595 2596 NodeTys.clear(); 2597 NodeTys.push_back(MVT::f64); 2598 Ops[0] = DAG.getConstant(1, MVT::i32); 2599 Ops[1] = MFFSreg; 2600 Ops[2] = FPreg; 2601 Ops[3] = InFlag; 2602 Result = DAG.getNode(PPCISD::MTFSF, NodeTys, Ops, 4); 2603 FPreg = Result.getValue(0); 2604 2605 // We know the low half is about to be thrown away, so just use something 2606 // convenient. 2607 return DAG.getNode(ISD::BUILD_PAIR, Lo.getValueType(), FPreg, FPreg); 2608} 2609 2610SDOperand PPCTargetLowering::LowerSINT_TO_FP(SDOperand Op, SelectionDAG &DAG) { 2611 // Don't handle ppc_fp128 here; let it be lowered to a libcall. 2612 if (Op.getValueType() != MVT::f32 && Op.getValueType() != MVT::f64) 2613 return SDOperand(); 2614 2615 if (Op.getOperand(0).getValueType() == MVT::i64) { 2616 SDOperand Bits = DAG.getNode(ISD::BIT_CONVERT, MVT::f64, Op.getOperand(0)); 2617 SDOperand FP = DAG.getNode(PPCISD::FCFID, MVT::f64, Bits); 2618 if (Op.getValueType() == MVT::f32) 2619 FP = DAG.getNode(ISD::FP_ROUND, MVT::f32, FP, DAG.getIntPtrConstant(0)); 2620 return FP; 2621 } 2622 2623 assert(Op.getOperand(0).getValueType() == MVT::i32 && 2624 "Unhandled SINT_TO_FP type in custom expander!"); 2625 // Since we only generate this in 64-bit mode, we can take advantage of 2626 // 64-bit registers. In particular, sign extend the input value into the 2627 // 64-bit register with extsw, store the WHOLE 64-bit value into the stack 2628 // then lfd it and fcfid it. 
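  // Roughly, the emitted sequence is:
  //   extsw  rT, rSrc        ; sign-extend i32 -> i64
  //   std    rT, slot(r1)    ; spill the whole 64-bit value
  //   lfd    fT, slot(r1)    ; reload the bit pattern as f64
  //   fcfid  fD, fT          ; convert to double
  //   (frsp  fD, fD)         ; only if the result type is f32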
  MachineFrameInfo *FrameInfo = DAG.getMachineFunction().getFrameInfo();
  int FrameIdx = FrameInfo->CreateStackObject(8, 8);
  MVT::ValueType PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
  SDOperand FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);

  SDOperand Ext64 = DAG.getNode(PPCISD::EXTSW_32, MVT::i32,
                                Op.getOperand(0));

  // STD the extended value into the stack slot.
  MemOperand MO(PseudoSourceValue::getFixedStack(),
                MemOperand::MOStore, FrameIdx, 8, 8);
  SDOperand Store = DAG.getNode(PPCISD::STD_32, MVT::Other,
                                DAG.getEntryNode(), Ext64, FIdx,
                                DAG.getMemOperand(MO));
  // Load the value as a double.
  SDOperand Ld = DAG.getLoad(MVT::f64, Store, FIdx, NULL, 0);

  // FCFID it and return it.
  SDOperand FP = DAG.getNode(PPCISD::FCFID, MVT::f64, Ld);
  if (Op.getValueType() == MVT::f32)
    FP = DAG.getNode(ISD::FP_ROUND, MVT::f32, FP, DAG.getIntPtrConstant(0));
  return FP;
}

SDOperand PPCTargetLowering::LowerFLT_ROUNDS_(SDOperand Op, SelectionDAG &DAG) {
  /*
   The rounding mode is in bits 30:31 of FPSCR, and has the following
   settings:
     00 Round to nearest
     01 Round to 0
     10 Round to +inf
     11 Round to -inf

   FLT_ROUNDS, on the other hand, expects the following:
    -1 Undefined
     0 Round to 0
     1 Round to nearest
     2 Round to +inf
     3 Round to -inf

   To perform the conversion, we do:
     ((FPSCR & 0x3) ^ ((~FPSCR & 0x3) >> 1))
  */

  MachineFunction &MF = DAG.getMachineFunction();
  MVT::ValueType VT = Op.getValueType();
  MVT::ValueType PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
  std::vector<MVT::ValueType> NodeTys;
  SDOperand MFFSreg, InFlag;

  // Save FP Control Word to register
  NodeTys.push_back(MVT::f64);    // return register
  NodeTys.push_back(MVT::Flag);   // unused in this context
  SDOperand Chain = DAG.getNode(PPCISD::MFFS, NodeTys, &InFlag, 0);

  // Save FP register to stack slot
  int SSFI = MF.getFrameInfo()->CreateStackObject(8, 8);
  SDOperand StackSlot = DAG.getFrameIndex(SSFI, PtrVT);
  SDOperand Store = DAG.getStore(DAG.getEntryNode(), Chain,
                                 StackSlot, NULL, 0);

  // Load FP Control Word from low 32 bits of stack slot.
  SDOperand Four = DAG.getConstant(4, PtrVT);
  SDOperand Addr = DAG.getNode(ISD::ADD, PtrVT, StackSlot, Four);
  SDOperand CWD = DAG.getLoad(MVT::i32, Store, Addr, NULL, 0);

  // Transform as necessary
  SDOperand CWD1 =
    DAG.getNode(ISD::AND, MVT::i32,
                CWD, DAG.getConstant(3, MVT::i32));
  SDOperand CWD2 =
    DAG.getNode(ISD::SRL, MVT::i32,
                DAG.getNode(ISD::AND, MVT::i32,
                            DAG.getNode(ISD::XOR, MVT::i32,
                                        CWD, DAG.getConstant(3, MVT::i32)),
                            DAG.getConstant(3, MVT::i32)),
                DAG.getConstant(1, MVT::i8));

  SDOperand RetVal =
    DAG.getNode(ISD::XOR, MVT::i32, CWD1, CWD2);

  return DAG.getNode((MVT::getSizeInBits(VT) < 16 ?
                      ISD::TRUNCATE : ISD::ZERO_EXTEND), VT, RetVal);
}

SDOperand PPCTargetLowering::LowerSHL_PARTS(SDOperand Op, SelectionDAG &DAG) {
  MVT::ValueType VT = Op.getValueType();
  unsigned BitWidth = MVT::getSizeInBits(VT);
  assert(Op.getNumOperands() == 3 &&
         VT == Op.getOperand(1).getValueType() &&
         "Unexpected SHL!");

  // Expand into a bunch of logical ops.  Note that these ops
  // depend on the PPC behavior for oversized shift amounts.
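  // A sketch of what gets built, for BitWidth-bit halves Lo/Hi and amount Amt:
  //   OutLo = Lo << Amt
  //   OutHi = (Hi << Amt) | (Lo >> (BitWidth - Amt)) | (Lo << (Amt - BitWidth))
  // This is only valid because the PPCISD shift nodes yield zero for shift
  // amounts in [BitWidth, 2*BitWidth), so the term whose effective amount is
  // out of range contributes nothing (generic ISD shifts leave that case
  // undefined).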
2723 SDOperand Lo = Op.getOperand(0); 2724 SDOperand Hi = Op.getOperand(1); 2725 SDOperand Amt = Op.getOperand(2); 2726 MVT::ValueType AmtVT = Amt.getValueType(); 2727 2728 SDOperand Tmp1 = DAG.getNode(ISD::SUB, AmtVT, 2729 DAG.getConstant(BitWidth, AmtVT), Amt); 2730 SDOperand Tmp2 = DAG.getNode(PPCISD::SHL, VT, Hi, Amt); 2731 SDOperand Tmp3 = DAG.getNode(PPCISD::SRL, VT, Lo, Tmp1); 2732 SDOperand Tmp4 = DAG.getNode(ISD::OR , VT, Tmp2, Tmp3); 2733 SDOperand Tmp5 = DAG.getNode(ISD::ADD, AmtVT, Amt, 2734 DAG.getConstant(-BitWidth, AmtVT)); 2735 SDOperand Tmp6 = DAG.getNode(PPCISD::SHL, VT, Lo, Tmp5); 2736 SDOperand OutHi = DAG.getNode(ISD::OR, VT, Tmp4, Tmp6); 2737 SDOperand OutLo = DAG.getNode(PPCISD::SHL, VT, Lo, Amt); 2738 SDOperand OutOps[] = { OutLo, OutHi }; 2739 return DAG.getNode(ISD::MERGE_VALUES, DAG.getVTList(VT, VT), 2740 OutOps, 2); 2741} 2742 2743SDOperand PPCTargetLowering::LowerSRL_PARTS(SDOperand Op, SelectionDAG &DAG) { 2744 MVT::ValueType VT = Op.getValueType(); 2745 unsigned BitWidth = MVT::getSizeInBits(VT); 2746 assert(Op.getNumOperands() == 3 && 2747 VT == Op.getOperand(1).getValueType() && 2748 "Unexpected SRL!"); 2749 2750 // Expand into a bunch of logical ops. Note that these ops 2751 // depend on the PPC behavior for oversized shift amounts. 2752 SDOperand Lo = Op.getOperand(0); 2753 SDOperand Hi = Op.getOperand(1); 2754 SDOperand Amt = Op.getOperand(2); 2755 MVT::ValueType AmtVT = Amt.getValueType(); 2756 2757 SDOperand Tmp1 = DAG.getNode(ISD::SUB, AmtVT, 2758 DAG.getConstant(BitWidth, AmtVT), Amt); 2759 SDOperand Tmp2 = DAG.getNode(PPCISD::SRL, VT, Lo, Amt); 2760 SDOperand Tmp3 = DAG.getNode(PPCISD::SHL, VT, Hi, Tmp1); 2761 SDOperand Tmp4 = DAG.getNode(ISD::OR , VT, Tmp2, Tmp3); 2762 SDOperand Tmp5 = DAG.getNode(ISD::ADD, AmtVT, Amt, 2763 DAG.getConstant(-BitWidth, AmtVT)); 2764 SDOperand Tmp6 = DAG.getNode(PPCISD::SRL, VT, Hi, Tmp5); 2765 SDOperand OutLo = DAG.getNode(ISD::OR, VT, Tmp4, Tmp6); 2766 SDOperand OutHi = DAG.getNode(PPCISD::SRL, VT, Hi, Amt); 2767 SDOperand OutOps[] = { OutLo, OutHi }; 2768 return DAG.getNode(ISD::MERGE_VALUES, DAG.getVTList(VT, VT), 2769 OutOps, 2); 2770} 2771 2772SDOperand PPCTargetLowering::LowerSRA_PARTS(SDOperand Op, SelectionDAG &DAG) { 2773 MVT::ValueType VT = Op.getValueType(); 2774 unsigned BitWidth = MVT::getSizeInBits(VT); 2775 assert(Op.getNumOperands() == 3 && 2776 VT == Op.getOperand(1).getValueType() && 2777 "Unexpected SRA!"); 2778 2779 // Expand into a bunch of logical ops, followed by a select_cc. 2780 SDOperand Lo = Op.getOperand(0); 2781 SDOperand Hi = Op.getOperand(1); 2782 SDOperand Amt = Op.getOperand(2); 2783 MVT::ValueType AmtVT = Amt.getValueType(); 2784 2785 SDOperand Tmp1 = DAG.getNode(ISD::SUB, AmtVT, 2786 DAG.getConstant(BitWidth, AmtVT), Amt); 2787 SDOperand Tmp2 = DAG.getNode(PPCISD::SRL, VT, Lo, Amt); 2788 SDOperand Tmp3 = DAG.getNode(PPCISD::SHL, VT, Hi, Tmp1); 2789 SDOperand Tmp4 = DAG.getNode(ISD::OR , VT, Tmp2, Tmp3); 2790 SDOperand Tmp5 = DAG.getNode(ISD::ADD, AmtVT, Amt, 2791 DAG.getConstant(-BitWidth, AmtVT)); 2792 SDOperand Tmp6 = DAG.getNode(PPCISD::SRA, VT, Hi, Tmp5); 2793 SDOperand OutHi = DAG.getNode(PPCISD::SRA, VT, Hi, Amt); 2794 SDOperand OutLo = DAG.getSelectCC(Tmp5, DAG.getConstant(0, AmtVT), 2795 Tmp4, Tmp6, ISD::SETLE); 2796 SDOperand OutOps[] = { OutLo, OutHi }; 2797 return DAG.getNode(ISD::MERGE_VALUES, DAG.getVTList(VT, VT), 2798 OutOps, 2); 2799} 2800 2801//===----------------------------------------------------------------------===// 2802// Vector related lowering. 
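// (This covers BUILD_VECTOR, VECTOR_SHUFFLE, SCALAR_TO_VECTOR, vector MUL and
// the custom-lowered Altivec comparison intrinsics that follow.)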
2803// 2804 2805// If this is a vector of constants or undefs, get the bits. A bit in 2806// UndefBits is set if the corresponding element of the vector is an 2807// ISD::UNDEF value. For undefs, the corresponding VectorBits values are 2808// zero. Return true if this is not an array of constants, false if it is. 2809// 2810static bool GetConstantBuildVectorBits(SDNode *BV, uint64_t VectorBits[2], 2811 uint64_t UndefBits[2]) { 2812 // Start with zero'd results. 2813 VectorBits[0] = VectorBits[1] = UndefBits[0] = UndefBits[1] = 0; 2814 2815 unsigned EltBitSize = MVT::getSizeInBits(BV->getOperand(0).getValueType()); 2816 for (unsigned i = 0, e = BV->getNumOperands(); i != e; ++i) { 2817 SDOperand OpVal = BV->getOperand(i); 2818 2819 unsigned PartNo = i >= e/2; // In the upper 128 bits? 2820 unsigned SlotNo = e/2 - (i & (e/2-1))-1; // Which subpiece of the uint64_t. 2821 2822 uint64_t EltBits = 0; 2823 if (OpVal.getOpcode() == ISD::UNDEF) { 2824 uint64_t EltUndefBits = ~0U >> (32-EltBitSize); 2825 UndefBits[PartNo] |= EltUndefBits << (SlotNo*EltBitSize); 2826 continue; 2827 } else if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(OpVal)) { 2828 EltBits = CN->getValue() & (~0U >> (32-EltBitSize)); 2829 } else if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(OpVal)) { 2830 assert(CN->getValueType(0) == MVT::f32 && 2831 "Only one legal FP vector type!"); 2832 EltBits = FloatToBits(CN->getValueAPF().convertToFloat()); 2833 } else { 2834 // Nonconstant element. 2835 return true; 2836 } 2837 2838 VectorBits[PartNo] |= EltBits << (SlotNo*EltBitSize); 2839 } 2840 2841 //printf("%llx %llx %llx %llx\n", 2842 // VectorBits[0], VectorBits[1], UndefBits[0], UndefBits[1]); 2843 return false; 2844} 2845 2846// If this is a splat (repetition) of a value across the whole vector, return 2847// the smallest size that splats it. For example, "0x01010101010101..." is a 2848// splat of 0x01, 0x0101, and 0x01010101. We return SplatBits = 0x01 and 2849// SplatSize = 1 byte. 2850static bool isConstantSplat(const uint64_t Bits128[2], 2851 const uint64_t Undef128[2], 2852 unsigned &SplatBits, unsigned &SplatUndef, 2853 unsigned &SplatSize) { 2854 2855 // Don't let undefs prevent splats from matching. See if the top 64-bits are 2856 // the same as the lower 64-bits, ignoring undefs. 2857 if ((Bits128[0] & ~Undef128[1]) != (Bits128[1] & ~Undef128[0])) 2858 return false; // Can't be a splat if two pieces don't match. 2859 2860 uint64_t Bits64 = Bits128[0] | Bits128[1]; 2861 uint64_t Undef64 = Undef128[0] & Undef128[1]; 2862 2863 // Check that the top 32-bits are the same as the lower 32-bits, ignoring 2864 // undefs. 2865 if ((Bits64 & (~Undef64 >> 32)) != ((Bits64 >> 32) & ~Undef64)) 2866 return false; // Can't be a splat if two pieces don't match. 2867 2868 uint32_t Bits32 = uint32_t(Bits64) | uint32_t(Bits64 >> 32); 2869 uint32_t Undef32 = uint32_t(Undef64) & uint32_t(Undef64 >> 32); 2870 2871 // If the top 16-bits are different than the lower 16-bits, ignoring 2872 // undefs, we have an i32 splat. 2873 if ((Bits32 & (~Undef32 >> 16)) != ((Bits32 >> 16) & ~Undef32)) { 2874 SplatBits = Bits32; 2875 SplatUndef = Undef32; 2876 SplatSize = 4; 2877 return true; 2878 } 2879 2880 uint16_t Bits16 = uint16_t(Bits32) | uint16_t(Bits32 >> 16); 2881 uint16_t Undef16 = uint16_t(Undef32) & uint16_t(Undef32 >> 16); 2882 2883 // If the top 8-bits are different than the lower 8-bits, ignoring 2884 // undefs, we have an i16 splat. 
2885 if ((Bits16 & (uint16_t(~Undef16) >> 8)) != ((Bits16 >> 8) & ~Undef16)) { 2886 SplatBits = Bits16; 2887 SplatUndef = Undef16; 2888 SplatSize = 2; 2889 return true; 2890 } 2891 2892 // Otherwise, we have an 8-bit splat. 2893 SplatBits = uint8_t(Bits16) | uint8_t(Bits16 >> 8); 2894 SplatUndef = uint8_t(Undef16) & uint8_t(Undef16 >> 8); 2895 SplatSize = 1; 2896 return true; 2897} 2898 2899/// BuildSplatI - Build a canonical splati of Val with an element size of 2900/// SplatSize. Cast the result to VT. 2901static SDOperand BuildSplatI(int Val, unsigned SplatSize, MVT::ValueType VT, 2902 SelectionDAG &DAG) { 2903 assert(Val >= -16 && Val <= 15 && "vsplti is out of range!"); 2904 2905 static const MVT::ValueType VTys[] = { // canonical VT to use for each size. 2906 MVT::v16i8, MVT::v8i16, MVT::Other, MVT::v4i32 2907 }; 2908 2909 MVT::ValueType ReqVT = VT != MVT::Other ? VT : VTys[SplatSize-1]; 2910 2911 // Force vspltis[hw] -1 to vspltisb -1 to canonicalize. 2912 if (Val == -1) 2913 SplatSize = 1; 2914 2915 MVT::ValueType CanonicalVT = VTys[SplatSize-1]; 2916 2917 // Build a canonical splat for this value. 2918 SDOperand Elt = DAG.getConstant(Val, MVT::getVectorElementType(CanonicalVT)); 2919 SmallVector<SDOperand, 8> Ops; 2920 Ops.assign(MVT::getVectorNumElements(CanonicalVT), Elt); 2921 SDOperand Res = DAG.getNode(ISD::BUILD_VECTOR, CanonicalVT, 2922 &Ops[0], Ops.size()); 2923 return DAG.getNode(ISD::BIT_CONVERT, ReqVT, Res); 2924} 2925 2926/// BuildIntrinsicOp - Return a binary operator intrinsic node with the 2927/// specified intrinsic ID. 2928static SDOperand BuildIntrinsicOp(unsigned IID, SDOperand LHS, SDOperand RHS, 2929 SelectionDAG &DAG, 2930 MVT::ValueType DestVT = MVT::Other) { 2931 if (DestVT == MVT::Other) DestVT = LHS.getValueType(); 2932 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DestVT, 2933 DAG.getConstant(IID, MVT::i32), LHS, RHS); 2934} 2935 2936/// BuildIntrinsicOp - Return a ternary operator intrinsic node with the 2937/// specified intrinsic ID. 2938static SDOperand BuildIntrinsicOp(unsigned IID, SDOperand Op0, SDOperand Op1, 2939 SDOperand Op2, SelectionDAG &DAG, 2940 MVT::ValueType DestVT = MVT::Other) { 2941 if (DestVT == MVT::Other) DestVT = Op0.getValueType(); 2942 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DestVT, 2943 DAG.getConstant(IID, MVT::i32), Op0, Op1, Op2); 2944} 2945 2946 2947/// BuildVSLDOI - Return a VECTOR_SHUFFLE that is a vsldoi of the specified 2948/// amount. The result has the specified value type. 2949static SDOperand BuildVSLDOI(SDOperand LHS, SDOperand RHS, unsigned Amt, 2950 MVT::ValueType VT, SelectionDAG &DAG) { 2951 // Force LHS/RHS to be the right type. 2952 LHS = DAG.getNode(ISD::BIT_CONVERT, MVT::v16i8, LHS); 2953 RHS = DAG.getNode(ISD::BIT_CONVERT, MVT::v16i8, RHS); 2954 2955 SDOperand Ops[16]; 2956 for (unsigned i = 0; i != 16; ++i) 2957 Ops[i] = DAG.getConstant(i+Amt, MVT::i32); 2958 SDOperand T = DAG.getNode(ISD::VECTOR_SHUFFLE, MVT::v16i8, LHS, RHS, 2959 DAG.getNode(ISD::BUILD_VECTOR, MVT::v16i8, Ops,16)); 2960 return DAG.getNode(ISD::BIT_CONVERT, VT, T); 2961} 2962 2963// If this is a case we can't handle, return null and let the default 2964// expansion code take care of it. If we CAN select this case, and if it 2965// selects to a single instruction, return Op. Otherwise, if we can codegen 2966// this case more efficiently than a constant pool load, lower it to the 2967// sequence of ops that should be used. 
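// For example (illustrative): a v16i8 splat of 5 is selectable as a single
// vspltisb 5; a splat of 20 is built below as "vspltisb 10" followed by an
// add of the result to itself; a constant that matches none of the patterns
// here falls back to the default expansion (a constant pool load).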
SDOperand PPCTargetLowering::LowerBUILD_VECTOR(SDOperand Op,
                                               SelectionDAG &DAG) {
  // If this is a vector of constants or undefs, get the bits.  A bit in
  // UndefBits is set if the corresponding element of the vector is an
  // ISD::UNDEF value.  For undefs, the corresponding VectorBits values are
  // zero.
  uint64_t VectorBits[2];
  uint64_t UndefBits[2];
  if (GetConstantBuildVectorBits(Op.Val, VectorBits, UndefBits))
    return SDOperand();   // Not a constant vector.

  // If this is a splat (repetition) of a value across the whole vector, return
  // the smallest size that splats it.  For example, "0x01010101010101..." is a
  // splat of 0x01, 0x0101, and 0x01010101.  We return SplatBits = 0x01 and
  // SplatSize = 1 byte.
  unsigned SplatBits, SplatUndef, SplatSize;
  if (isConstantSplat(VectorBits, UndefBits, SplatBits, SplatUndef, SplatSize)){
    bool HasAnyUndefs = (UndefBits[0] | UndefBits[1]) != 0;

    // First, handle single instruction cases.

    // All zeros?
    if (SplatBits == 0) {
      // Canonicalize all zero vectors to be v4i32.
      if (Op.getValueType() != MVT::v4i32 || HasAnyUndefs) {
        SDOperand Z = DAG.getConstant(0, MVT::i32);
        Z = DAG.getNode(ISD::BUILD_VECTOR, MVT::v4i32, Z, Z, Z, Z);
        Op = DAG.getNode(ISD::BIT_CONVERT, Op.getValueType(), Z);
      }
      return Op;
    }

    // If the sign extended value is in the range [-16,15], use VSPLTI[bhw].
    int32_t SextVal= int32_t(SplatBits << (32-8*SplatSize)) >> (32-8*SplatSize);
    if (SextVal >= -16 && SextVal <= 15)
      return BuildSplatI(SextVal, SplatSize, Op.getValueType(), DAG);

    // Two instruction sequences.

    // If this value is in the range [-32,30] and is even, use:
    //    tmp = VSPLTI[bhw], result = add tmp, tmp
    if (SextVal >= -32 && SextVal <= 30 && (SextVal & 1) == 0) {
      Op = BuildSplatI(SextVal >> 1, SplatSize, Op.getValueType(), DAG);
      return DAG.getNode(ISD::ADD, Op.getValueType(), Op, Op);
    }

    // If this is 0x8000_0000 x 4, turn into vspltisw + vslw.  If it is
    // 0x7FFF_FFFF x 4, turn it into not(0x8000_0000).  This is important
    // for fneg/fabs.
    if (SplatSize == 4 && SplatBits == (0x7FFFFFFF&~SplatUndef)) {
      // Make -1 and vspltisw -1:
      SDOperand OnesV = BuildSplatI(-1, 4, MVT::v4i32, DAG);

      // Make the VSLW intrinsic, computing 0x8000_0000.
      SDOperand Res = BuildIntrinsicOp(Intrinsic::ppc_altivec_vslw, OnesV,
                                       OnesV, DAG);

      // xor by OnesV to invert it.
      Res = DAG.getNode(ISD::XOR, MVT::v4i32, Res, OnesV);
      return DAG.getNode(ISD::BIT_CONVERT, Op.getValueType(), Res);
    }

    // Check to see if this is a wide variety of vsplti*, binop self cases.
    unsigned SplatBitSize = SplatSize*8;
    static const signed char SplatCsts[] = {
      -1, 1, -2, 2, -3, 3, -4, 4, -5, 5, -6, 6, -7, 7,
      -8, 8, -9, 9, -10, 10, -11, 11, -12, 12, -13, 13, 14, -14, 15, -15, -16
    };

    for (unsigned idx = 0; idx < array_lengthof(SplatCsts); ++idx) {
      // Indirect through the SplatCsts array so that we favor 'vsplti -1' for
      // cases which are ambiguous (e.g. formation of 0x8000_0000).
      int i = SplatCsts[idx];

      // Figure out what shift amount will be used by altivec if shifted by i
      // in this splat size.
      unsigned TypeShiftAmt = i & (SplatBitSize-1);

      // vsplti + shl self.
3048 if (SextVal == (i << (int)TypeShiftAmt)) { 3049 SDOperand Res = BuildSplatI(i, SplatSize, MVT::Other, DAG); 3050 static const unsigned IIDs[] = { // Intrinsic to use for each size. 3051 Intrinsic::ppc_altivec_vslb, Intrinsic::ppc_altivec_vslh, 0, 3052 Intrinsic::ppc_altivec_vslw 3053 }; 3054 Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG); 3055 return DAG.getNode(ISD::BIT_CONVERT, Op.getValueType(), Res); 3056 } 3057 3058 // vsplti + srl self. 3059 if (SextVal == (int)((unsigned)i >> TypeShiftAmt)) { 3060 SDOperand Res = BuildSplatI(i, SplatSize, MVT::Other, DAG); 3061 static const unsigned IIDs[] = { // Intrinsic to use for each size. 3062 Intrinsic::ppc_altivec_vsrb, Intrinsic::ppc_altivec_vsrh, 0, 3063 Intrinsic::ppc_altivec_vsrw 3064 }; 3065 Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG); 3066 return DAG.getNode(ISD::BIT_CONVERT, Op.getValueType(), Res); 3067 } 3068 3069 // vsplti + sra self. 3070 if (SextVal == (int)((unsigned)i >> TypeShiftAmt)) { 3071 SDOperand Res = BuildSplatI(i, SplatSize, MVT::Other, DAG); 3072 static const unsigned IIDs[] = { // Intrinsic to use for each size. 3073 Intrinsic::ppc_altivec_vsrab, Intrinsic::ppc_altivec_vsrah, 0, 3074 Intrinsic::ppc_altivec_vsraw 3075 }; 3076 Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG); 3077 return DAG.getNode(ISD::BIT_CONVERT, Op.getValueType(), Res); 3078 } 3079 3080 // vsplti + rol self. 3081 if (SextVal == (int)(((unsigned)i << TypeShiftAmt) | 3082 ((unsigned)i >> (SplatBitSize-TypeShiftAmt)))) { 3083 SDOperand Res = BuildSplatI(i, SplatSize, MVT::Other, DAG); 3084 static const unsigned IIDs[] = { // Intrinsic to use for each size. 3085 Intrinsic::ppc_altivec_vrlb, Intrinsic::ppc_altivec_vrlh, 0, 3086 Intrinsic::ppc_altivec_vrlw 3087 }; 3088 Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG); 3089 return DAG.getNode(ISD::BIT_CONVERT, Op.getValueType(), Res); 3090 } 3091 3092 // t = vsplti c, result = vsldoi t, t, 1 3093 if (SextVal == ((i << 8) | (i >> (TypeShiftAmt-8)))) { 3094 SDOperand T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG); 3095 return BuildVSLDOI(T, T, 1, Op.getValueType(), DAG); 3096 } 3097 // t = vsplti c, result = vsldoi t, t, 2 3098 if (SextVal == ((i << 16) | (i >> (TypeShiftAmt-16)))) { 3099 SDOperand T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG); 3100 return BuildVSLDOI(T, T, 2, Op.getValueType(), DAG); 3101 } 3102 // t = vsplti c, result = vsldoi t, t, 3 3103 if (SextVal == ((i << 24) | (i >> (TypeShiftAmt-24)))) { 3104 SDOperand T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG); 3105 return BuildVSLDOI(T, T, 3, Op.getValueType(), DAG); 3106 } 3107 } 3108 3109 // Three instruction sequences. 3110 3111 // Odd, in range [17,31]: (vsplti C)-(vsplti -16). 3112 if (SextVal >= 0 && SextVal <= 31) { 3113 SDOperand LHS = BuildSplatI(SextVal-16, SplatSize, MVT::Other, DAG); 3114 SDOperand RHS = BuildSplatI(-16, SplatSize, MVT::Other, DAG); 3115 LHS = DAG.getNode(ISD::SUB, LHS.getValueType(), LHS, RHS); 3116 return DAG.getNode(ISD::BIT_CONVERT, Op.getValueType(), LHS); 3117 } 3118 // Odd, in range [-31,-17]: (vsplti C)+(vsplti -16). 
3119 if (SextVal >= -31 && SextVal <= 0) { 3120 SDOperand LHS = BuildSplatI(SextVal+16, SplatSize, MVT::Other, DAG); 3121 SDOperand RHS = BuildSplatI(-16, SplatSize, MVT::Other, DAG); 3122 LHS = DAG.getNode(ISD::ADD, LHS.getValueType(), LHS, RHS); 3123 return DAG.getNode(ISD::BIT_CONVERT, Op.getValueType(), LHS); 3124 } 3125 } 3126 3127 return SDOperand(); 3128} 3129 3130/// GeneratePerfectShuffle - Given an entry in the perfect-shuffle table, emit 3131/// the specified operations to build the shuffle. 3132static SDOperand GeneratePerfectShuffle(unsigned PFEntry, SDOperand LHS, 3133 SDOperand RHS, SelectionDAG &DAG) { 3134 unsigned OpNum = (PFEntry >> 26) & 0x0F; 3135 unsigned LHSID = (PFEntry >> 13) & ((1 << 13)-1); 3136 unsigned RHSID = (PFEntry >> 0) & ((1 << 13)-1); 3137 3138 enum { 3139 OP_COPY = 0, // Copy, used for things like <u,u,u,3> to say it is <0,1,2,3> 3140 OP_VMRGHW, 3141 OP_VMRGLW, 3142 OP_VSPLTISW0, 3143 OP_VSPLTISW1, 3144 OP_VSPLTISW2, 3145 OP_VSPLTISW3, 3146 OP_VSLDOI4, 3147 OP_VSLDOI8, 3148 OP_VSLDOI12 3149 }; 3150 3151 if (OpNum == OP_COPY) { 3152 if (LHSID == (1*9+2)*9+3) return LHS; 3153 assert(LHSID == ((4*9+5)*9+6)*9+7 && "Illegal OP_COPY!"); 3154 return RHS; 3155 } 3156 3157 SDOperand OpLHS, OpRHS; 3158 OpLHS = GeneratePerfectShuffle(PerfectShuffleTable[LHSID], LHS, RHS, DAG); 3159 OpRHS = GeneratePerfectShuffle(PerfectShuffleTable[RHSID], LHS, RHS, DAG); 3160 3161 unsigned ShufIdxs[16]; 3162 switch (OpNum) { 3163 default: assert(0 && "Unknown i32 permute!"); 3164 case OP_VMRGHW: 3165 ShufIdxs[ 0] = 0; ShufIdxs[ 1] = 1; ShufIdxs[ 2] = 2; ShufIdxs[ 3] = 3; 3166 ShufIdxs[ 4] = 16; ShufIdxs[ 5] = 17; ShufIdxs[ 6] = 18; ShufIdxs[ 7] = 19; 3167 ShufIdxs[ 8] = 4; ShufIdxs[ 9] = 5; ShufIdxs[10] = 6; ShufIdxs[11] = 7; 3168 ShufIdxs[12] = 20; ShufIdxs[13] = 21; ShufIdxs[14] = 22; ShufIdxs[15] = 23; 3169 break; 3170 case OP_VMRGLW: 3171 ShufIdxs[ 0] = 8; ShufIdxs[ 1] = 9; ShufIdxs[ 2] = 10; ShufIdxs[ 3] = 11; 3172 ShufIdxs[ 4] = 24; ShufIdxs[ 5] = 25; ShufIdxs[ 6] = 26; ShufIdxs[ 7] = 27; 3173 ShufIdxs[ 8] = 12; ShufIdxs[ 9] = 13; ShufIdxs[10] = 14; ShufIdxs[11] = 15; 3174 ShufIdxs[12] = 28; ShufIdxs[13] = 29; ShufIdxs[14] = 30; ShufIdxs[15] = 31; 3175 break; 3176 case OP_VSPLTISW0: 3177 for (unsigned i = 0; i != 16; ++i) 3178 ShufIdxs[i] = (i&3)+0; 3179 break; 3180 case OP_VSPLTISW1: 3181 for (unsigned i = 0; i != 16; ++i) 3182 ShufIdxs[i] = (i&3)+4; 3183 break; 3184 case OP_VSPLTISW2: 3185 for (unsigned i = 0; i != 16; ++i) 3186 ShufIdxs[i] = (i&3)+8; 3187 break; 3188 case OP_VSPLTISW3: 3189 for (unsigned i = 0; i != 16; ++i) 3190 ShufIdxs[i] = (i&3)+12; 3191 break; 3192 case OP_VSLDOI4: 3193 return BuildVSLDOI(OpLHS, OpRHS, 4, OpLHS.getValueType(), DAG); 3194 case OP_VSLDOI8: 3195 return BuildVSLDOI(OpLHS, OpRHS, 8, OpLHS.getValueType(), DAG); 3196 case OP_VSLDOI12: 3197 return BuildVSLDOI(OpLHS, OpRHS, 12, OpLHS.getValueType(), DAG); 3198 } 3199 SDOperand Ops[16]; 3200 for (unsigned i = 0; i != 16; ++i) 3201 Ops[i] = DAG.getConstant(ShufIdxs[i], MVT::i32); 3202 3203 return DAG.getNode(ISD::VECTOR_SHUFFLE, OpLHS.getValueType(), OpLHS, OpRHS, 3204 DAG.getNode(ISD::BUILD_VECTOR, MVT::v16i8, Ops, 16)); 3205} 3206 3207/// LowerVECTOR_SHUFFLE - Return the code we lower for VECTOR_SHUFFLE. If this 3208/// is a shuffle we can handle in a single instruction, return it. Otherwise, 3209/// return the code it can be lowered into. Worst case, it can always be 3210/// lowered into a vperm. 
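///
/// The strategy is: leave shuffles that a single Altivec instruction can do
/// (vsplt*, vpku*um, vsldoi, vmrgl*, vmrgh*) as VECTOR_SHUFFLE nodes for the
/// selector; if the mask is really a shuffle of four 4-byte elements, consult
/// the perfect-shuffle table (the cost lives in the top two bits of each
/// entry) and emit the discrete sequence when it is cheap enough; otherwise
/// build a byte-level mask and emit a vperm.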
3211SDOperand PPCTargetLowering::LowerVECTOR_SHUFFLE(SDOperand Op, 3212 SelectionDAG &DAG) { 3213 SDOperand V1 = Op.getOperand(0); 3214 SDOperand V2 = Op.getOperand(1); 3215 SDOperand PermMask = Op.getOperand(2); 3216 3217 // Cases that are handled by instructions that take permute immediates 3218 // (such as vsplt*) should be left as VECTOR_SHUFFLE nodes so they can be 3219 // selected by the instruction selector. 3220 if (V2.getOpcode() == ISD::UNDEF) { 3221 if (PPC::isSplatShuffleMask(PermMask.Val, 1) || 3222 PPC::isSplatShuffleMask(PermMask.Val, 2) || 3223 PPC::isSplatShuffleMask(PermMask.Val, 4) || 3224 PPC::isVPKUWUMShuffleMask(PermMask.Val, true) || 3225 PPC::isVPKUHUMShuffleMask(PermMask.Val, true) || 3226 PPC::isVSLDOIShuffleMask(PermMask.Val, true) != -1 || 3227 PPC::isVMRGLShuffleMask(PermMask.Val, 1, true) || 3228 PPC::isVMRGLShuffleMask(PermMask.Val, 2, true) || 3229 PPC::isVMRGLShuffleMask(PermMask.Val, 4, true) || 3230 PPC::isVMRGHShuffleMask(PermMask.Val, 1, true) || 3231 PPC::isVMRGHShuffleMask(PermMask.Val, 2, true) || 3232 PPC::isVMRGHShuffleMask(PermMask.Val, 4, true)) { 3233 return Op; 3234 } 3235 } 3236 3237 // Altivec has a variety of "shuffle immediates" that take two vector inputs 3238 // and produce a fixed permutation. If any of these match, do not lower to 3239 // VPERM. 3240 if (PPC::isVPKUWUMShuffleMask(PermMask.Val, false) || 3241 PPC::isVPKUHUMShuffleMask(PermMask.Val, false) || 3242 PPC::isVSLDOIShuffleMask(PermMask.Val, false) != -1 || 3243 PPC::isVMRGLShuffleMask(PermMask.Val, 1, false) || 3244 PPC::isVMRGLShuffleMask(PermMask.Val, 2, false) || 3245 PPC::isVMRGLShuffleMask(PermMask.Val, 4, false) || 3246 PPC::isVMRGHShuffleMask(PermMask.Val, 1, false) || 3247 PPC::isVMRGHShuffleMask(PermMask.Val, 2, false) || 3248 PPC::isVMRGHShuffleMask(PermMask.Val, 4, false)) 3249 return Op; 3250 3251 // Check to see if this is a shuffle of 4-byte values. If so, we can use our 3252 // perfect shuffle table to emit an optimal matching sequence. 3253 unsigned PFIndexes[4]; 3254 bool isFourElementShuffle = true; 3255 for (unsigned i = 0; i != 4 && isFourElementShuffle; ++i) { // Element number 3256 unsigned EltNo = 8; // Start out undef. 3257 for (unsigned j = 0; j != 4; ++j) { // Intra-element byte. 3258 if (PermMask.getOperand(i*4+j).getOpcode() == ISD::UNDEF) 3259 continue; // Undef, ignore it. 3260 3261 unsigned ByteSource = 3262 cast<ConstantSDNode>(PermMask.getOperand(i*4+j))->getValue(); 3263 if ((ByteSource & 3) != j) { 3264 isFourElementShuffle = false; 3265 break; 3266 } 3267 3268 if (EltNo == 8) { 3269 EltNo = ByteSource/4; 3270 } else if (EltNo != ByteSource/4) { 3271 isFourElementShuffle = false; 3272 break; 3273 } 3274 } 3275 PFIndexes[i] = EltNo; 3276 } 3277 3278 // If this shuffle can be expressed as a shuffle of 4-byte elements, use the 3279 // perfect shuffle vector to determine if it is cost effective to do this as 3280 // discrete instructions, or whether we should use a vperm. 3281 if (isFourElementShuffle) { 3282 // Compute the index in the perfect shuffle table. 3283 unsigned PFTableIndex = 3284 PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3]; 3285 3286 unsigned PFEntry = PerfectShuffleTable[PFTableIndex]; 3287 unsigned Cost = (PFEntry >> 30); 3288 3289 // Determining when to avoid vperm is tricky. Many things affect the cost 3290 // of vperm, particularly how many times the perm mask needs to be computed. 
3291 // For example, if the perm mask can be hoisted out of a loop or is already 3292 // used (perhaps because there are multiple permutes with the same shuffle 3293 // mask?) the vperm has a cost of 1. OTOH, hoisting the permute mask out of 3294 // the loop requires an extra register. 3295 // 3296 // As a compromise, we only emit discrete instructions if the shuffle can be 3297 // generated in 3 or fewer operations. When we have loop information 3298 // available, if this block is within a loop, we should avoid using vperm 3299 // for 3-operation perms and use a constant pool load instead. 3300 if (Cost < 3) 3301 return GeneratePerfectShuffle(PFEntry, V1, V2, DAG); 3302 } 3303 3304 // Lower this to a VPERM(V1, V2, V3) expression, where V3 is a constant 3305 // vector that will get spilled to the constant pool. 3306 if (V2.getOpcode() == ISD::UNDEF) V2 = V1; 3307 3308 // The SHUFFLE_VECTOR mask is almost exactly what we want for vperm, except 3309 // that it is in input element units, not in bytes. Convert now. 3310 MVT::ValueType EltVT = MVT::getVectorElementType(V1.getValueType()); 3311 unsigned BytesPerElement = MVT::getSizeInBits(EltVT)/8; 3312 3313 SmallVector<SDOperand, 16> ResultMask; 3314 for (unsigned i = 0, e = PermMask.getNumOperands(); i != e; ++i) { 3315 unsigned SrcElt; 3316 if (PermMask.getOperand(i).getOpcode() == ISD::UNDEF) 3317 SrcElt = 0; 3318 else 3319 SrcElt = cast<ConstantSDNode>(PermMask.getOperand(i))->getValue(); 3320 3321 for (unsigned j = 0; j != BytesPerElement; ++j) 3322 ResultMask.push_back(DAG.getConstant(SrcElt*BytesPerElement+j, 3323 MVT::i8)); 3324 } 3325 3326 SDOperand VPermMask = DAG.getNode(ISD::BUILD_VECTOR, MVT::v16i8, 3327 &ResultMask[0], ResultMask.size()); 3328 return DAG.getNode(PPCISD::VPERM, V1.getValueType(), V1, V2, VPermMask); 3329} 3330 3331/// getAltivecCompareInfo - Given an intrinsic, return false if it is not an 3332/// altivec comparison. If it is, return true and fill in Opc/isDot with 3333/// information about the intrinsic. 3334static bool getAltivecCompareInfo(SDOperand Intrin, int &CompareOpc, 3335 bool &isDot) { 3336 unsigned IntrinsicID = cast<ConstantSDNode>(Intrin.getOperand(0))->getValue(); 3337 CompareOpc = -1; 3338 isDot = false; 3339 switch (IntrinsicID) { 3340 default: return false; 3341 // Comparison predicates. 3342 case Intrinsic::ppc_altivec_vcmpbfp_p: CompareOpc = 966; isDot = 1; break; 3343 case Intrinsic::ppc_altivec_vcmpeqfp_p: CompareOpc = 198; isDot = 1; break; 3344 case Intrinsic::ppc_altivec_vcmpequb_p: CompareOpc = 6; isDot = 1; break; 3345 case Intrinsic::ppc_altivec_vcmpequh_p: CompareOpc = 70; isDot = 1; break; 3346 case Intrinsic::ppc_altivec_vcmpequw_p: CompareOpc = 134; isDot = 1; break; 3347 case Intrinsic::ppc_altivec_vcmpgefp_p: CompareOpc = 454; isDot = 1; break; 3348 case Intrinsic::ppc_altivec_vcmpgtfp_p: CompareOpc = 710; isDot = 1; break; 3349 case Intrinsic::ppc_altivec_vcmpgtsb_p: CompareOpc = 774; isDot = 1; break; 3350 case Intrinsic::ppc_altivec_vcmpgtsh_p: CompareOpc = 838; isDot = 1; break; 3351 case Intrinsic::ppc_altivec_vcmpgtsw_p: CompareOpc = 902; isDot = 1; break; 3352 case Intrinsic::ppc_altivec_vcmpgtub_p: CompareOpc = 518; isDot = 1; break; 3353 case Intrinsic::ppc_altivec_vcmpgtuh_p: CompareOpc = 582; isDot = 1; break; 3354 case Intrinsic::ppc_altivec_vcmpgtuw_p: CompareOpc = 646; isDot = 1; break; 3355 3356 // Normal Comparisons. 
3357 case Intrinsic::ppc_altivec_vcmpbfp: CompareOpc = 966; isDot = 0; break; 3358 case Intrinsic::ppc_altivec_vcmpeqfp: CompareOpc = 198; isDot = 0; break; 3359 case Intrinsic::ppc_altivec_vcmpequb: CompareOpc = 6; isDot = 0; break; 3360 case Intrinsic::ppc_altivec_vcmpequh: CompareOpc = 70; isDot = 0; break; 3361 case Intrinsic::ppc_altivec_vcmpequw: CompareOpc = 134; isDot = 0; break; 3362 case Intrinsic::ppc_altivec_vcmpgefp: CompareOpc = 454; isDot = 0; break; 3363 case Intrinsic::ppc_altivec_vcmpgtfp: CompareOpc = 710; isDot = 0; break; 3364 case Intrinsic::ppc_altivec_vcmpgtsb: CompareOpc = 774; isDot = 0; break; 3365 case Intrinsic::ppc_altivec_vcmpgtsh: CompareOpc = 838; isDot = 0; break; 3366 case Intrinsic::ppc_altivec_vcmpgtsw: CompareOpc = 902; isDot = 0; break; 3367 case Intrinsic::ppc_altivec_vcmpgtub: CompareOpc = 518; isDot = 0; break; 3368 case Intrinsic::ppc_altivec_vcmpgtuh: CompareOpc = 582; isDot = 0; break; 3369 case Intrinsic::ppc_altivec_vcmpgtuw: CompareOpc = 646; isDot = 0; break; 3370 } 3371 return true; 3372} 3373 3374/// LowerINTRINSIC_WO_CHAIN - If this is an intrinsic that we want to custom 3375/// lower, do it, otherwise return null. 3376SDOperand PPCTargetLowering::LowerINTRINSIC_WO_CHAIN(SDOperand Op, 3377 SelectionDAG &DAG) { 3378 // If this is a lowered altivec predicate compare, CompareOpc is set to the 3379 // opcode number of the comparison. 3380 int CompareOpc; 3381 bool isDot; 3382 if (!getAltivecCompareInfo(Op, CompareOpc, isDot)) 3383 return SDOperand(); // Don't custom lower most intrinsics. 3384 3385 // If this is a non-dot comparison, make the VCMP node and we are done. 3386 if (!isDot) { 3387 SDOperand Tmp = DAG.getNode(PPCISD::VCMP, Op.getOperand(2).getValueType(), 3388 Op.getOperand(1), Op.getOperand(2), 3389 DAG.getConstant(CompareOpc, MVT::i32)); 3390 return DAG.getNode(ISD::BIT_CONVERT, Op.getValueType(), Tmp); 3391 } 3392 3393 // Create the PPCISD altivec 'dot' comparison node. 3394 SDOperand Ops[] = { 3395 Op.getOperand(2), // LHS 3396 Op.getOperand(3), // RHS 3397 DAG.getConstant(CompareOpc, MVT::i32) 3398 }; 3399 std::vector<MVT::ValueType> VTs; 3400 VTs.push_back(Op.getOperand(2).getValueType()); 3401 VTs.push_back(MVT::Flag); 3402 SDOperand CompNode = DAG.getNode(PPCISD::VCMPo, VTs, Ops, 3); 3403 3404 // Now that we have the comparison, emit a copy from the CR to a GPR. 3405 // This is flagged to the above dot comparison. 3406 SDOperand Flags = DAG.getNode(PPCISD::MFCR, MVT::i32, 3407 DAG.getRegister(PPC::CR6, MVT::i32), 3408 CompNode.getValue(1)); 3409 3410 // Unpack the result based on how the target uses it. 3411 unsigned BitNo; // Bit # of CR6. 3412 bool InvertBit; // Invert result? 3413 switch (cast<ConstantSDNode>(Op.getOperand(1))->getValue()) { 3414 default: // Can't happen, don't crash on invalid number though. 3415 case 0: // Return the value of the EQ bit of CR6. 3416 BitNo = 0; InvertBit = false; 3417 break; 3418 case 1: // Return the inverted value of the EQ bit of CR6. 3419 BitNo = 0; InvertBit = true; 3420 break; 3421 case 2: // Return the value of the LT bit of CR6. 3422 BitNo = 2; InvertBit = false; 3423 break; 3424 case 3: // Return the inverted value of the LT bit of CR6. 3425 BitNo = 2; InvertBit = true; 3426 break; 3427 } 3428 3429 // Shift the bit into the low position. 3430 Flags = DAG.getNode(ISD::SRL, MVT::i32, Flags, 3431 DAG.getConstant(8-(3-BitNo), MVT::i32)); 3432 // Isolate the bit. 
3433 Flags = DAG.getNode(ISD::AND, MVT::i32, Flags, 3434 DAG.getConstant(1, MVT::i32)); 3435 3436 // If we are supposed to, toggle the bit. 3437 if (InvertBit) 3438 Flags = DAG.getNode(ISD::XOR, MVT::i32, Flags, 3439 DAG.getConstant(1, MVT::i32)); 3440 return Flags; 3441} 3442 3443SDOperand PPCTargetLowering::LowerSCALAR_TO_VECTOR(SDOperand Op, 3444 SelectionDAG &DAG) { 3445 // Create a stack slot that is 16-byte aligned. 3446 MachineFrameInfo *FrameInfo = DAG.getMachineFunction().getFrameInfo(); 3447 int FrameIdx = FrameInfo->CreateStackObject(16, 16); 3448 MVT::ValueType PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 3449 SDOperand FIdx = DAG.getFrameIndex(FrameIdx, PtrVT); 3450 3451 // Store the input value into Value#0 of the stack slot. 3452 SDOperand Store = DAG.getStore(DAG.getEntryNode(), 3453 Op.getOperand(0), FIdx, NULL, 0); 3454 // Load it out. 3455 return DAG.getLoad(Op.getValueType(), Store, FIdx, NULL, 0); 3456} 3457 3458SDOperand PPCTargetLowering::LowerMUL(SDOperand Op, SelectionDAG &DAG) { 3459 if (Op.getValueType() == MVT::v4i32) { 3460 SDOperand LHS = Op.getOperand(0), RHS = Op.getOperand(1); 3461 3462 SDOperand Zero = BuildSplatI( 0, 1, MVT::v4i32, DAG); 3463 SDOperand Neg16 = BuildSplatI(-16, 4, MVT::v4i32, DAG); // +16 as shift amt. 3464 3465 SDOperand RHSSwap = // = vrlw RHS, 16 3466 BuildIntrinsicOp(Intrinsic::ppc_altivec_vrlw, RHS, Neg16, DAG); 3467 3468 // Shrinkify inputs to v8i16. 3469 LHS = DAG.getNode(ISD::BIT_CONVERT, MVT::v8i16, LHS); 3470 RHS = DAG.getNode(ISD::BIT_CONVERT, MVT::v8i16, RHS); 3471 RHSSwap = DAG.getNode(ISD::BIT_CONVERT, MVT::v8i16, RHSSwap); 3472 3473 // Low parts multiplied together, generating 32-bit results (we ignore the 3474 // top parts). 3475 SDOperand LoProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmulouh, 3476 LHS, RHS, DAG, MVT::v4i32); 3477 3478 SDOperand HiProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmsumuhm, 3479 LHS, RHSSwap, Zero, DAG, MVT::v4i32); 3480 // Shift the high parts up 16 bits. 3481 HiProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vslw, HiProd, Neg16, DAG); 3482 return DAG.getNode(ISD::ADD, MVT::v4i32, LoProd, HiProd); 3483 } else if (Op.getValueType() == MVT::v8i16) { 3484 SDOperand LHS = Op.getOperand(0), RHS = Op.getOperand(1); 3485 3486 SDOperand Zero = BuildSplatI(0, 1, MVT::v8i16, DAG); 3487 3488 return BuildIntrinsicOp(Intrinsic::ppc_altivec_vmladduhm, 3489 LHS, RHS, Zero, DAG); 3490 } else if (Op.getValueType() == MVT::v16i8) { 3491 SDOperand LHS = Op.getOperand(0), RHS = Op.getOperand(1); 3492 3493 // Multiply the even 8-bit parts, producing 16-bit sums. 3494 SDOperand EvenParts = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmuleub, 3495 LHS, RHS, DAG, MVT::v8i16); 3496 EvenParts = DAG.getNode(ISD::BIT_CONVERT, MVT::v16i8, EvenParts); 3497 3498 // Multiply the odd 8-bit parts, producing 16-bit sums. 3499 SDOperand OddParts = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmuloub, 3500 LHS, RHS, DAG, MVT::v8i16); 3501 OddParts = DAG.getNode(ISD::BIT_CONVERT, MVT::v16i8, OddParts); 3502 3503 // Merge the results together. 3504 SDOperand Ops[16]; 3505 for (unsigned i = 0; i != 8; ++i) { 3506 Ops[i*2 ] = DAG.getConstant(2*i+1, MVT::i8); 3507 Ops[i*2+1] = DAG.getConstant(2*i+1+16, MVT::i8); 3508 } 3509 return DAG.getNode(ISD::VECTOR_SHUFFLE, MVT::v16i8, EvenParts, OddParts, 3510 DAG.getNode(ISD::BUILD_VECTOR, MVT::v16i8, Ops, 16)); 3511 } else { 3512 assert(0 && "Unknown mul to lower!"); 3513 abort(); 3514 } 3515} 3516 3517/// LowerOperation - Provide custom lowering hooks for some operations. 
3518/// 3519SDOperand PPCTargetLowering::LowerOperation(SDOperand Op, SelectionDAG &DAG) { 3520 switch (Op.getOpcode()) { 3521 default: assert(0 && "Wasn't expecting to be able to lower this!"); 3522 case ISD::ConstantPool: return LowerConstantPool(Op, DAG); 3523 case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG); 3524 case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG); 3525 case ISD::JumpTable: return LowerJumpTable(Op, DAG); 3526 case ISD::SETCC: return LowerSETCC(Op, DAG); 3527 case ISD::VASTART: 3528 return LowerVASTART(Op, DAG, VarArgsFrameIndex, VarArgsStackOffset, 3529 VarArgsNumGPR, VarArgsNumFPR, PPCSubTarget); 3530 3531 case ISD::VAARG: 3532 return LowerVAARG(Op, DAG, VarArgsFrameIndex, VarArgsStackOffset, 3533 VarArgsNumGPR, VarArgsNumFPR, PPCSubTarget); 3534 3535 case ISD::FORMAL_ARGUMENTS: 3536 return LowerFORMAL_ARGUMENTS(Op, DAG, VarArgsFrameIndex, 3537 VarArgsStackOffset, VarArgsNumGPR, 3538 VarArgsNumFPR, PPCSubTarget); 3539 3540 case ISD::CALL: return LowerCALL(Op, DAG, PPCSubTarget); 3541 case ISD::RET: return LowerRET(Op, DAG, getTargetMachine()); 3542 case ISD::STACKRESTORE: return LowerSTACKRESTORE(Op, DAG, PPCSubTarget); 3543 case ISD::DYNAMIC_STACKALLOC: 3544 return LowerDYNAMIC_STACKALLOC(Op, DAG, PPCSubTarget); 3545 3546 case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG); 3547 case ISD::FP_TO_SINT: return LowerFP_TO_SINT(Op, DAG); 3548 case ISD::SINT_TO_FP: return LowerSINT_TO_FP(Op, DAG); 3549 case ISD::FP_ROUND_INREG: return LowerFP_ROUND_INREG(Op, DAG); 3550 case ISD::FLT_ROUNDS_: return LowerFLT_ROUNDS_(Op, DAG); 3551 3552 // Lower 64-bit shifts. 3553 case ISD::SHL_PARTS: return LowerSHL_PARTS(Op, DAG); 3554 case ISD::SRL_PARTS: return LowerSRL_PARTS(Op, DAG); 3555 case ISD::SRA_PARTS: return LowerSRA_PARTS(Op, DAG); 3556 3557 // Vector-related lowering. 3558 case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG); 3559 case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG); 3560 case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG); 3561 case ISD::SCALAR_TO_VECTOR: return LowerSCALAR_TO_VECTOR(Op, DAG); 3562 case ISD::MUL: return LowerMUL(Op, DAG); 3563 3564 // Frame & Return address. 3565 case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG); 3566 case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG); 3567 } 3568 return SDOperand(); 3569} 3570 3571SDNode *PPCTargetLowering::ExpandOperationResult(SDNode *N, SelectionDAG &DAG) { 3572 switch (N->getOpcode()) { 3573 default: assert(0 && "Wasn't expecting to be able to lower this!"); 3574 case ISD::FP_TO_SINT: return LowerFP_TO_SINT(SDOperand(N, 0), DAG).Val; 3575 } 3576} 3577 3578 3579//===----------------------------------------------------------------------===// 3580// Other Lowering Code 3581//===----------------------------------------------------------------------===// 3582 3583MachineBasicBlock * 3584PPCTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI, 3585 MachineBasicBlock *BB) { 3586 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 3587 assert((MI->getOpcode() == PPC::SELECT_CC_I4 || 3588 MI->getOpcode() == PPC::SELECT_CC_I8 || 3589 MI->getOpcode() == PPC::SELECT_CC_F4 || 3590 MI->getOpcode() == PPC::SELECT_CC_F8 || 3591 MI->getOpcode() == PPC::SELECT_CC_VRRC) && 3592 "Unexpected instr type to insert"); 3593 3594 // To "insert" a SELECT_CC instruction, we actually have to insert the diamond 3595 // control-flow pattern. 
The incoming instruction knows the destination vreg 3596 // to set, the condition code register to branch on, the true/false values to 3597 // select between, and a branch opcode to use. 3598 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 3599 ilist<MachineBasicBlock>::iterator It = BB; 3600 ++It; 3601 3602 // thisMBB: 3603 // ... 3604 // TrueVal = ... 3605 // cmpTY ccX, r1, r2 3606 // bCC copy1MBB 3607 // fallthrough --> copy0MBB 3608 MachineBasicBlock *thisMBB = BB; 3609 MachineBasicBlock *copy0MBB = new MachineBasicBlock(LLVM_BB); 3610 MachineBasicBlock *sinkMBB = new MachineBasicBlock(LLVM_BB); 3611 unsigned SelectPred = MI->getOperand(4).getImm(); 3612 BuildMI(BB, TII->get(PPC::BCC)) 3613 .addImm(SelectPred).addReg(MI->getOperand(1).getReg()).addMBB(sinkMBB); 3614 MachineFunction *F = BB->getParent(); 3615 F->getBasicBlockList().insert(It, copy0MBB); 3616 F->getBasicBlockList().insert(It, sinkMBB); 3617 // Update machine-CFG edges by first adding all successors of the current 3618 // block to the new block which will contain the Phi node for the select. 3619 for(MachineBasicBlock::succ_iterator i = BB->succ_begin(), 3620 e = BB->succ_end(); i != e; ++i) 3621 sinkMBB->addSuccessor(*i); 3622 // Next, remove all successors of the current block, and add the true 3623 // and fallthrough blocks as its successors. 3624 while(!BB->succ_empty()) 3625 BB->removeSuccessor(BB->succ_begin()); 3626 BB->addSuccessor(copy0MBB); 3627 BB->addSuccessor(sinkMBB); 3628 3629 // copy0MBB: 3630 // %FalseValue = ... 3631 // # fallthrough to sinkMBB 3632 BB = copy0MBB; 3633 3634 // Update machine-CFG edges 3635 BB->addSuccessor(sinkMBB); 3636 3637 // sinkMBB: 3638 // %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ] 3639 // ... 3640 BB = sinkMBB; 3641 BuildMI(BB, TII->get(PPC::PHI), MI->getOperand(0).getReg()) 3642 .addReg(MI->getOperand(3).getReg()).addMBB(copy0MBB) 3643 .addReg(MI->getOperand(2).getReg()).addMBB(thisMBB); 3644 3645 delete MI; // The pseudo instruction is gone now. 3646 return BB; 3647} 3648 3649//===----------------------------------------------------------------------===// 3650// Target Optimization Hooks 3651//===----------------------------------------------------------------------===// 3652 3653SDOperand PPCTargetLowering::PerformDAGCombine(SDNode *N, 3654 DAGCombinerInfo &DCI) const { 3655 TargetMachine &TM = getTargetMachine(); 3656 SelectionDAG &DAG = DCI.DAG; 3657 switch (N->getOpcode()) { 3658 default: break; 3659 case PPCISD::SHL: 3660 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(0))) { 3661 if (C->getValue() == 0) // 0 << V -> 0. 3662 return N->getOperand(0); 3663 } 3664 break; 3665 case PPCISD::SRL: 3666 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(0))) { 3667 if (C->getValue() == 0) // 0 >>u V -> 0. 3668 return N->getOperand(0); 3669 } 3670 break; 3671 case PPCISD::SRA: 3672 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(0))) { 3673 if (C->getValue() == 0 || // 0 >>s V -> 0. 3674 C->isAllOnesValue()) // -1 >>s V -> -1. 3675 return N->getOperand(0); 3676 } 3677 break; 3678 3679 case ISD::SINT_TO_FP: 3680 if (TM.getSubtarget<PPCSubtarget>().has64BitSupport()) { 3681 if (N->getOperand(0).getOpcode() == ISD::FP_TO_SINT) { 3682 // Turn (sint_to_fp (fp_to_sint X)) -> fctidz/fcfid without load/stores. 3683 // We allow the src/dst to be either f32/f64, but the intermediate 3684 // type must be i64. 
3685 if (N->getOperand(0).getValueType() == MVT::i64 && 3686 N->getOperand(0).getOperand(0).getValueType() != MVT::ppcf128) { 3687 SDOperand Val = N->getOperand(0).getOperand(0); 3688 if (Val.getValueType() == MVT::f32) { 3689 Val = DAG.getNode(ISD::FP_EXTEND, MVT::f64, Val); 3690 DCI.AddToWorklist(Val.Val); 3691 } 3692 3693 Val = DAG.getNode(PPCISD::FCTIDZ, MVT::f64, Val); 3694 DCI.AddToWorklist(Val.Val); 3695 Val = DAG.getNode(PPCISD::FCFID, MVT::f64, Val); 3696 DCI.AddToWorklist(Val.Val); 3697 if (N->getValueType(0) == MVT::f32) { 3698 Val = DAG.getNode(ISD::FP_ROUND, MVT::f32, Val, 3699 DAG.getIntPtrConstant(0)); 3700 DCI.AddToWorklist(Val.Val); 3701 } 3702 return Val; 3703 } else if (N->getOperand(0).getValueType() == MVT::i32) { 3704 // If the intermediate type is i32, we can avoid the load/store here 3705 // too. 3706 } 3707 } 3708 } 3709 break; 3710 case ISD::STORE: 3711 // Turn STORE (FP_TO_SINT F) -> STFIWX(FCTIWZ(F)). 3712 if (TM.getSubtarget<PPCSubtarget>().hasSTFIWX() && 3713 !cast<StoreSDNode>(N)->isTruncatingStore() && 3714 N->getOperand(1).getOpcode() == ISD::FP_TO_SINT && 3715 N->getOperand(1).getValueType() == MVT::i32 && 3716 N->getOperand(1).getOperand(0).getValueType() != MVT::ppcf128) { 3717 SDOperand Val = N->getOperand(1).getOperand(0); 3718 if (Val.getValueType() == MVT::f32) { 3719 Val = DAG.getNode(ISD::FP_EXTEND, MVT::f64, Val); 3720 DCI.AddToWorklist(Val.Val); 3721 } 3722 Val = DAG.getNode(PPCISD::FCTIWZ, MVT::f64, Val); 3723 DCI.AddToWorklist(Val.Val); 3724 3725 Val = DAG.getNode(PPCISD::STFIWX, MVT::Other, N->getOperand(0), Val, 3726 N->getOperand(2), N->getOperand(3)); 3727 DCI.AddToWorklist(Val.Val); 3728 return Val; 3729 } 3730 3731 // Turn STORE (BSWAP) -> sthbrx/stwbrx. 3732 if (N->getOperand(1).getOpcode() == ISD::BSWAP && 3733 N->getOperand(1).Val->hasOneUse() && 3734 (N->getOperand(1).getValueType() == MVT::i32 || 3735 N->getOperand(1).getValueType() == MVT::i16)) { 3736 SDOperand BSwapOp = N->getOperand(1).getOperand(0); 3737 // Do an any-extend to 32-bits if this is a half-word input. 3738 if (BSwapOp.getValueType() == MVT::i16) 3739 BSwapOp = DAG.getNode(ISD::ANY_EXTEND, MVT::i32, BSwapOp); 3740 3741 return DAG.getNode(PPCISD::STBRX, MVT::Other, N->getOperand(0), BSwapOp, 3742 N->getOperand(2), N->getOperand(3), 3743 DAG.getValueType(N->getOperand(1).getValueType())); 3744 } 3745 break; 3746 case ISD::BSWAP: 3747 // Turn BSWAP (LOAD) -> lhbrx/lwbrx. 3748 if (ISD::isNON_EXTLoad(N->getOperand(0).Val) && 3749 N->getOperand(0).hasOneUse() && 3750 (N->getValueType(0) == MVT::i32 || N->getValueType(0) == MVT::i16)) { 3751 SDOperand Load = N->getOperand(0); 3752 LoadSDNode *LD = cast<LoadSDNode>(Load); 3753 // Create the byte-swapping load. 3754 std::vector<MVT::ValueType> VTs; 3755 VTs.push_back(MVT::i32); 3756 VTs.push_back(MVT::Other); 3757 SDOperand MO = DAG.getMemOperand(LD->getMemOperand()); 3758 SDOperand Ops[] = { 3759 LD->getChain(), // Chain 3760 LD->getBasePtr(), // Ptr 3761 MO, // MemOperand 3762 DAG.getValueType(N->getValueType(0)) // VT 3763 }; 3764 SDOperand BSLoad = DAG.getNode(PPCISD::LBRX, VTs, Ops, 4); 3765 3766 // If this is an i16 load, insert the truncate. 3767 SDOperand ResVal = BSLoad; 3768 if (N->getValueType(0) == MVT::i16) 3769 ResVal = DAG.getNode(ISD::TRUNCATE, MVT::i16, BSLoad); 3770 3771 // First, combine the bswap away. This makes the value produced by the 3772 // load dead. 3773 DCI.CombineTo(N, ResVal); 3774 3775 // Next, combine the load away, we give it a bogus result value but a real 3776 // chain result. 
The result value is dead because the bswap is dead. 3777 DCI.CombineTo(Load.Val, ResVal, BSLoad.getValue(1)); 3778 3779 // Return N so it doesn't get rechecked! 3780 return SDOperand(N, 0); 3781 } 3782 3783 break; 3784 case PPCISD::VCMP: { 3785 // If a VCMPo node already exists with exactly the same operands as this 3786 // node, use its result instead of this node (VCMPo computes both a CR6 and 3787 // a normal output). 3788 // 3789 if (!N->getOperand(0).hasOneUse() && 3790 !N->getOperand(1).hasOneUse() && 3791 !N->getOperand(2).hasOneUse()) { 3792 3793 // Scan all of the users of the LHS, looking for VCMPo's that match. 3794 SDNode *VCMPoNode = 0; 3795 3796 SDNode *LHSN = N->getOperand(0).Val; 3797 for (SDNode::use_iterator UI = LHSN->use_begin(), E = LHSN->use_end(); 3798 UI != E; ++UI) 3799 if ((*UI)->getOpcode() == PPCISD::VCMPo && 3800 (*UI)->getOperand(1) == N->getOperand(1) && 3801 (*UI)->getOperand(2) == N->getOperand(2) && 3802 (*UI)->getOperand(0) == N->getOperand(0)) { 3803 VCMPoNode = *UI; 3804 break; 3805 } 3806 3807 // If there is no VCMPo node, or if the flag value has a single use, don't 3808 // transform this. 3809 if (!VCMPoNode || VCMPoNode->hasNUsesOfValue(0, 1)) 3810 break; 3811 3812 // Look at the (necessarily single) use of the flag value. If it has a 3813 // chain, this transformation is more complex. Note that multiple things 3814 // could use the value result, which we should ignore. 3815 SDNode *FlagUser = 0; 3816 for (SDNode::use_iterator UI = VCMPoNode->use_begin(); 3817 FlagUser == 0; ++UI) { 3818 assert(UI != VCMPoNode->use_end() && "Didn't find user!"); 3819 SDNode *User = *UI; 3820 for (unsigned i = 0, e = User->getNumOperands(); i != e; ++i) { 3821 if (User->getOperand(i) == SDOperand(VCMPoNode, 1)) { 3822 FlagUser = User; 3823 break; 3824 } 3825 } 3826 } 3827 3828 // If the user is a MFCR instruction, we know this is safe. Otherwise we 3829 // give up for right now. 3830 if (FlagUser->getOpcode() == PPCISD::MFCR) 3831 return SDOperand(VCMPoNode, 0); 3832 } 3833 break; 3834 } 3835 case ISD::BR_CC: { 3836 // If this is a branch on an altivec predicate comparison, lower this so 3837 // that we don't have to do a MFCR: instead, branch directly on CR6. This 3838 // lowering is done pre-legalize, because the legalizer lowers the predicate 3839 // compare down to code that is difficult to reassemble. 3840 ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(1))->get(); 3841 SDOperand LHS = N->getOperand(2), RHS = N->getOperand(3); 3842 int CompareOpc; 3843 bool isDot; 3844 3845 if (LHS.getOpcode() == ISD::INTRINSIC_WO_CHAIN && 3846 isa<ConstantSDNode>(RHS) && (CC == ISD::SETEQ || CC == ISD::SETNE) && 3847 getAltivecCompareInfo(LHS, CompareOpc, isDot)) { 3848 assert(isDot && "Can't compare against a vector result!"); 3849 3850 // If this is a comparison against something other than 0/1, then we know 3851 // that the condition is never/always true. 3852 unsigned Val = cast<ConstantSDNode>(RHS)->getValue(); 3853 if (Val != 0 && Val != 1) { 3854 if (CC == ISD::SETEQ) // Cond never true, remove branch. 3855 return N->getOperand(0); 3856 // Always !=, turn it into an unconditional branch. 3857 return DAG.getNode(ISD::BR, MVT::Other, 3858 N->getOperand(0), N->getOperand(4)); 3859 } 3860 3861 bool BranchOnWhenPredTrue = (CC == ISD::SETEQ) ^ (Val == 0); 3862 3863 // Create the PPCISD altivec 'dot' comparison node. 
3864 std::vector<MVT::ValueType> VTs; 3865 SDOperand Ops[] = { 3866 LHS.getOperand(2), // LHS of compare 3867 LHS.getOperand(3), // RHS of compare 3868 DAG.getConstant(CompareOpc, MVT::i32) 3869 }; 3870 VTs.push_back(LHS.getOperand(2).getValueType()); 3871 VTs.push_back(MVT::Flag); 3872 SDOperand CompNode = DAG.getNode(PPCISD::VCMPo, VTs, Ops, 3); 3873 3874 // Unpack the result based on how the target uses it. 3875 PPC::Predicate CompOpc; 3876 switch (cast<ConstantSDNode>(LHS.getOperand(1))->getValue()) { 3877 default: // Can't happen, don't crash on invalid number though. 3878 case 0: // Branch on the value of the EQ bit of CR6. 3879 CompOpc = BranchOnWhenPredTrue ? PPC::PRED_EQ : PPC::PRED_NE; 3880 break; 3881 case 1: // Branch on the inverted value of the EQ bit of CR6. 3882 CompOpc = BranchOnWhenPredTrue ? PPC::PRED_NE : PPC::PRED_EQ; 3883 break; 3884 case 2: // Branch on the value of the LT bit of CR6. 3885 CompOpc = BranchOnWhenPredTrue ? PPC::PRED_LT : PPC::PRED_GE; 3886 break; 3887 case 3: // Branch on the inverted value of the LT bit of CR6. 3888 CompOpc = BranchOnWhenPredTrue ? PPC::PRED_GE : PPC::PRED_LT; 3889 break; 3890 } 3891 3892 return DAG.getNode(PPCISD::COND_BRANCH, MVT::Other, N->getOperand(0), 3893 DAG.getConstant(CompOpc, MVT::i32), 3894 DAG.getRegister(PPC::CR6, MVT::i32), 3895 N->getOperand(4), CompNode.getValue(1)); 3896 } 3897 break; 3898 } 3899 } 3900 3901 return SDOperand(); 3902} 3903 3904//===----------------------------------------------------------------------===// 3905// Inline Assembly Support 3906//===----------------------------------------------------------------------===// 3907 3908void PPCTargetLowering::computeMaskedBitsForTargetNode(const SDOperand Op, 3909 const APInt &Mask, 3910 APInt &KnownZero, 3911 APInt &KnownOne, 3912 const SelectionDAG &DAG, 3913 unsigned Depth) const { 3914 KnownZero = KnownOne = APInt(Mask.getBitWidth(), 0); 3915 switch (Op.getOpcode()) { 3916 default: break; 3917 case PPCISD::LBRX: { 3918 // lhbrx is known to have the top bits cleared out. 3919 if (cast<VTSDNode>(Op.getOperand(3))->getVT() == MVT::i16) 3920 KnownZero = 0xFFFF0000; 3921 break; 3922 } 3923 case ISD::INTRINSIC_WO_CHAIN: { 3924 switch (cast<ConstantSDNode>(Op.getOperand(0))->getValue()) { 3925 default: break; 3926 case Intrinsic::ppc_altivec_vcmpbfp_p: 3927 case Intrinsic::ppc_altivec_vcmpeqfp_p: 3928 case Intrinsic::ppc_altivec_vcmpequb_p: 3929 case Intrinsic::ppc_altivec_vcmpequh_p: 3930 case Intrinsic::ppc_altivec_vcmpequw_p: 3931 case Intrinsic::ppc_altivec_vcmpgefp_p: 3932 case Intrinsic::ppc_altivec_vcmpgtfp_p: 3933 case Intrinsic::ppc_altivec_vcmpgtsb_p: 3934 case Intrinsic::ppc_altivec_vcmpgtsh_p: 3935 case Intrinsic::ppc_altivec_vcmpgtsw_p: 3936 case Intrinsic::ppc_altivec_vcmpgtub_p: 3937 case Intrinsic::ppc_altivec_vcmpgtuh_p: 3938 case Intrinsic::ppc_altivec_vcmpgtuw_p: 3939 KnownZero = ~1U; // All bits but the low one are known to be zero. 3940 break; 3941 } 3942 } 3943 } 3944} 3945 3946 3947/// getConstraintType - Given a constraint, return the type of 3948/// constraint it is for this target. 
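/// The single-letter register constraints recognized below are the GCC
/// rs6000 ones: 'b' (base registers R1-R31), 'r' (R0-R31), 'f' (floating
/// point registers), 'v' (Altivec vector registers) and 'y' (condition
/// registers).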
3949PPCTargetLowering::ConstraintType 3950PPCTargetLowering::getConstraintType(const std::string &Constraint) const { 3951 if (Constraint.size() == 1) { 3952 switch (Constraint[0]) { 3953 default: break; 3954 case 'b': 3955 case 'r': 3956 case 'f': 3957 case 'v': 3958 case 'y': 3959 return C_RegisterClass; 3960 } 3961 } 3962 return TargetLowering::getConstraintType(Constraint); 3963} 3964 3965std::pair<unsigned, const TargetRegisterClass*> 3966PPCTargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint, 3967 MVT::ValueType VT) const { 3968 if (Constraint.size() == 1) { 3969 // GCC RS6000 Constraint Letters 3970 switch (Constraint[0]) { 3971 case 'b': // R1-R31 3972 case 'r': // R0-R31 3973 if (VT == MVT::i64 && PPCSubTarget.isPPC64()) 3974 return std::make_pair(0U, PPC::G8RCRegisterClass); 3975 return std::make_pair(0U, PPC::GPRCRegisterClass); 3976 case 'f': 3977 if (VT == MVT::f32) 3978 return std::make_pair(0U, PPC::F4RCRegisterClass); 3979 else if (VT == MVT::f64) 3980 return std::make_pair(0U, PPC::F8RCRegisterClass); 3981 break; 3982 case 'v': 3983 return std::make_pair(0U, PPC::VRRCRegisterClass); 3984 case 'y': // crrc 3985 return std::make_pair(0U, PPC::CRRCRegisterClass); 3986 } 3987 } 3988 3989 return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT); 3990} 3991 3992 3993/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops 3994/// vector. If it is invalid, don't add anything to Ops. 3995void PPCTargetLowering::LowerAsmOperandForConstraint(SDOperand Op, char Letter, 3996 std::vector<SDOperand>&Ops, 3997 SelectionDAG &DAG) { 3998 SDOperand Result(0,0); 3999 switch (Letter) { 4000 default: break; 4001 case 'I': 4002 case 'J': 4003 case 'K': 4004 case 'L': 4005 case 'M': 4006 case 'N': 4007 case 'O': 4008 case 'P': { 4009 ConstantSDNode *CST = dyn_cast<ConstantSDNode>(Op); 4010 if (!CST) return; // Must be an immediate to match. 4011 unsigned Value = CST->getValue(); 4012 switch (Letter) { 4013 default: assert(0 && "Unknown constraint letter!"); 4014 case 'I': // "I" is a signed 16-bit constant. 4015 if ((short)Value == (int)Value) 4016 Result = DAG.getTargetConstant(Value, Op.getValueType()); 4017 break; 4018 case 'J': // "J" is a constant with only the high-order 16 bits nonzero. 4019 case 'L': // "L" is a signed 16-bit constant shifted left 16 bits. 4020 if ((short)Value == 0) 4021 Result = DAG.getTargetConstant(Value, Op.getValueType()); 4022 break; 4023 case 'K': // "K" is a constant with only the low-order 16 bits nonzero. 4024 if ((Value >> 16) == 0) 4025 Result = DAG.getTargetConstant(Value, Op.getValueType()); 4026 break; 4027 case 'M': // "M" is a constant that is greater than 31. 4028 if (Value > 31) 4029 Result = DAG.getTargetConstant(Value, Op.getValueType()); 4030 break; 4031 case 'N': // "N" is a positive constant that is an exact power of two. 4032 if ((int)Value > 0 && isPowerOf2_32(Value)) 4033 Result = DAG.getTargetConstant(Value, Op.getValueType()); 4034 break; 4035 case 'O': // "O" is the constant zero. 4036 if (Value == 0) 4037 Result = DAG.getTargetConstant(Value, Op.getValueType()); 4038 break; 4039 case 'P': // "P" is a constant whose negation is a signed 16-bit constant. 4040 if ((short)-Value == (int)-Value) 4041 Result = DAG.getTargetConstant(Value, Op.getValueType()); 4042 break; 4043 } 4044 break; 4045 } 4046 } 4047 4048 if (Result.Val) { 4049 Ops.push_back(Result); 4050 return; 4051 } 4052 4053 // Handle standard constraint letters. 
// isLegalAddressingMode - Return true if the addressing mode represented
// by AM is legal for this target, for a load/store of the specified type.
bool PPCTargetLowering::isLegalAddressingMode(const AddrMode &AM,
                                              const Type *Ty) const {
  // FIXME: PPC does not allow r+i addressing modes for vectors!

  // PPC allows a sign-extended 16-bit immediate field.
  if (AM.BaseOffs <= -(1LL << 16) || AM.BaseOffs >= (1LL << 16)-1)
    return false;

  // No global is ever allowed as a base.
  if (AM.BaseGV)
    return false;

  // PPC only supports r+r addressing.
  switch (AM.Scale) {
  case 0:  // "r+i" or just "i", depending on HasBaseReg.
    break;
  case 1:
    if (AM.HasBaseReg && AM.BaseOffs)  // "r+r+i" is not allowed.
      return false;
    // Otherwise we have r+r or r+i.
    break;
  case 2:
    if (AM.HasBaseReg || AM.BaseOffs)  // 2*r+r or 2*r+i is not allowed.
      return false;
    // Allow 2*r as r+r.
    break;
  default:
    // No other scales are supported.
    return false;
  }

  return true;
}

/// isLegalAddressImmediate - Return true if the integer value can be used
/// as the offset of the target addressing mode for load / store of the
/// given type.
bool PPCTargetLowering::isLegalAddressImmediate(int64_t V,
                                                const Type *Ty) const {
  // PPC allows a sign-extended 16-bit immediate field.
  return (V > -(1 << 16) && V < (1 << 16)-1);
}

bool PPCTargetLowering::isLegalAddressImmediate(llvm::GlobalValue* GV) const {
  return false;
}
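// Illustrative note (editor's sketch, not part of the original source): a base
// register plus a small constant offset maps onto a D-form memory access such
// as "lwz r4, 12(r3)" (Scale == 0, BaseOffs == 12 above), and a two-register
// sum maps onto an X-form access such as "lwzx r4, r3, r5" (Scale == 1). A
// genuinely scaled index like 4*r has no PPC encoding, so Scale == 2 is only
// accepted when it can be re-expressed as r+r.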
SDOperand PPCTargetLowering::LowerRETURNADDR(SDOperand Op, SelectionDAG &DAG) {
  // Depths > 0 not supported yet!
  if (cast<ConstantSDNode>(Op.getOperand(0))->getValue() > 0)
    return SDOperand();

  MachineFunction &MF = DAG.getMachineFunction();
  PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
  int RAIdx = FuncInfo->getReturnAddrSaveIndex();
  if (RAIdx == 0) {
    bool isPPC64 = PPCSubTarget.isPPC64();
    int Offset =
      PPCFrameInfo::getReturnSaveOffset(isPPC64, PPCSubTarget.isMachoABI());

    // Set up a frame object for the return address.
    RAIdx = MF.getFrameInfo()->CreateFixedObject(isPPC64 ? 8 : 4, Offset);

    // Remember it for next time.
    FuncInfo->setReturnAddrSaveIndex(RAIdx);

    // Make sure the function really does not optimize away the store of the RA
    // to the stack.
    FuncInfo->setLRStoreRequired();
  }

  // Just load the return address off the stack.
  SDOperand RetAddrFI = DAG.getFrameIndex(RAIdx, getPointerTy());
  return DAG.getLoad(getPointerTy(), DAG.getEntryNode(), RetAddrFI, NULL, 0);
}

SDOperand PPCTargetLowering::LowerFRAMEADDR(SDOperand Op, SelectionDAG &DAG) {
  // Depths > 0 not supported yet!
  if (cast<ConstantSDNode>(Op.getOperand(0))->getValue() > 0)
    return SDOperand();

  MVT::ValueType PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
  bool isPPC64 = PtrVT == MVT::i64;

  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  bool is31 = (NoFramePointerElim || MFI->hasVarSizedObjects())
              && MFI->getStackSize();

  if (isPPC64)
    return DAG.getCopyFromReg(DAG.getEntryNode(), is31 ? PPC::X31 : PPC::X1,
                              MVT::i64);
  else
    return DAG.getCopyFromReg(DAG.getEntryNode(), is31 ? PPC::R31 : PPC::R1,
                              MVT::i32);
}
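// Illustrative example (editor's note, not part of the original source): IR
// such as
//   %ra = call i8* @llvm.returnaddress(i32 0)
//   %fp = call i8* @llvm.frameaddress(i32 0)
// reaches the two functions above. The return address is reloaded from its
// fixed stack slot (forcing the LR store to be kept), and the frame address is
// a copy of R1/X1, or of R31/X31 when a dedicated frame pointer is in use.
// Non-zero depths are not handled and simply return an empty SDOperand.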