// CGBuiltin.cpp revision 91b59274439f776cdd545bd7bf5849fdb1842160
//===---- CGBuiltin.cpp - Emit LLVM Code for builtins ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Builtin calls as LLVM code.
//
//===----------------------------------------------------------------------===//

14#include "CodeGenFunction.h"
15#include "CodeGenModule.h"
16#include "clang/Basic/TargetInfo.h"
17#include "clang/AST/APValue.h"
18#include "clang/AST/ASTContext.h"
19#include "clang/AST/Decl.h"
20#include "clang/Basic/TargetBuiltins.h"
21#include "llvm/Intrinsics.h"
22using namespace clang;
23using namespace CodeGen;
24using namespace llvm;
25
26/// Utility to insert an atomic instruction based on Instrinsic::ID
27/// and the expression node.
28static RValue EmitBinaryAtomic(CodeGenFunction& CGF,
29                               Intrinsic::ID Id, const CallExpr *E) {
30  const llvm::Type *ResType[2];
31  ResType[0] = CGF.ConvertType(E->getType());
32  ResType[1] = CGF.ConvertType(E->getArg(0)->getType());
33  Value *AtomF = CGF.CGM.getIntrinsic(Id, ResType, 2);
34  return RValue::get(CGF.Builder.CreateCall2(AtomF,
35                                             CGF.EmitScalarExpr(E->getArg(0)),
36                                             CGF.EmitScalarExpr(E->getArg(1))));
37}
38
39/// Utility to insert an atomic instruction based Instrinsic::ID and
40// the expression node, where the return value is the result of the
41// operation.
42static RValue EmitBinaryAtomicPost(CodeGenFunction& CGF,
43                                   Intrinsic::ID Id, const CallExpr *E,
44                                   Instruction::BinaryOps Op) {
45  const llvm::Type *ResType[2];
46  ResType[0] = CGF.ConvertType(E->getType());
47  ResType[1] = CGF.ConvertType(E->getArg(0)->getType());
48  Value *AtomF = CGF.CGM.getIntrinsic(Id, ResType, 2);
49  Value *Ptr = CGF.EmitScalarExpr(E->getArg(0));
50  Value *Operand = CGF.EmitScalarExpr(E->getArg(1));
51  Value *Result = CGF.Builder.CreateCall2(AtomF, Ptr, Operand);
52
53  if (Id == Intrinsic::atomic_load_nand)
54    Result = CGF.Builder.CreateNot(Result);
55
56
57  return RValue::get(CGF.Builder.CreateBinOp(Op, Result, Operand));
58}
59
/// EmitBuiltinExpr - Emit a call to a builtin function.  Tries constant
/// folding first, then handles builtins with dedicated lowering in the big
/// switch below, then falls back (in order) to: emitting a libm library
/// call, a target-specific GCC-named intrinsic, a target-specific lowering
/// hook, and finally an "unsupported" diagnostic with an undef result.
RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
                                        unsigned BuiltinID, const CallExpr *E) {
  // See if we can constant fold this builtin.  If so, don't emit it at all.
  Expr::EvalResult Result;
  if (E->Evaluate(Result, CGM.getContext())) {
    if (Result.Val.isInt())
      return RValue::get(llvm::ConstantInt::get(VMContext,
                                                Result.Val.getInt()));
    else if (Result.Val.isFloat())
      return RValue::get(ConstantFP::get(VMContext, Result.Val.getFloat()));
  }

  // Builtins with dedicated IR lowering.
  switch (BuiltinID) {
  default: break;  // Handle intrinsics and libm functions below.
  case Builtin::BI__builtin___CFStringMakeConstantString:
    return RValue::get(CGM.EmitConstantExpr(E, E->getType(), 0));
  case Builtin::BI__builtin_stdarg_start:
  case Builtin::BI__builtin_va_start:
  case Builtin::BI__builtin_va_end: {
    // The va_list reference is bitcast to i8* before being handed to the
    // llvm.va_start/llvm.va_end intrinsics.
    Value *ArgValue = EmitVAListRef(E->getArg(0));
    const llvm::Type *DestType = llvm::Type::getInt8PtrTy(VMContext);
    if (ArgValue->getType() != DestType)
      ArgValue = Builder.CreateBitCast(ArgValue, DestType,
                                       ArgValue->getName().data());

    Intrinsic::ID inst = (BuiltinID == Builtin::BI__builtin_va_end) ?
      Intrinsic::vaend : Intrinsic::vastart;
    return RValue::get(Builder.CreateCall(CGM.getIntrinsic(inst), ArgValue));
  }
  case Builtin::BI__builtin_va_copy: {
    Value *DstPtr = EmitVAListRef(E->getArg(0));
    Value *SrcPtr = EmitVAListRef(E->getArg(1));

    const llvm::Type *Type = llvm::Type::getInt8PtrTy(VMContext);

    DstPtr = Builder.CreateBitCast(DstPtr, Type);
    SrcPtr = Builder.CreateBitCast(SrcPtr, Type);
    return RValue::get(Builder.CreateCall2(CGM.getIntrinsic(Intrinsic::vacopy),
                                           DstPtr, SrcPtr));
  }
  case Builtin::BI__builtin_abs: {
    // abs(x) -> x >= 0 ? x : -x, via a compare+select.
    Value *ArgValue = EmitScalarExpr(E->getArg(0));

    Value *NegOp = Builder.CreateNeg(ArgValue, "neg");
    Value *CmpResult =
    Builder.CreateICmpSGE(ArgValue,
                          llvm::Constant::getNullValue(ArgValue->getType()),
                                                            "abscond");
    Value *Result =
      Builder.CreateSelect(CmpResult, ArgValue, NegOp, "abs");

    return RValue::get(Result);
  }
  case Builtin::BI__builtin_ctz:
  case Builtin::BI__builtin_ctzl:
  case Builtin::BI__builtin_ctzll: {
    // ctz(x) -> llvm.cttz, with the result cast to the builtin's return
    // type (always int) if the argument width differs.
    Value *ArgValue = EmitScalarExpr(E->getArg(0));

    const llvm::Type *ArgType = ArgValue->getType();
    Value *F = CGM.getIntrinsic(Intrinsic::cttz, &ArgType, 1);

    const llvm::Type *ResultType = ConvertType(E->getType());
    Value *Result = Builder.CreateCall(F, ArgValue, "tmp");
    if (Result->getType() != ResultType)
      Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
                                     "cast");
    return RValue::get(Result);
  }
  case Builtin::BI__builtin_clz:
  case Builtin::BI__builtin_clzl:
  case Builtin::BI__builtin_clzll: {
    // clz(x) -> llvm.ctlz, narrowed/widened to the return type as needed.
    Value *ArgValue = EmitScalarExpr(E->getArg(0));

    const llvm::Type *ArgType = ArgValue->getType();
    Value *F = CGM.getIntrinsic(Intrinsic::ctlz, &ArgType, 1);

    const llvm::Type *ResultType = ConvertType(E->getType());
    Value *Result = Builder.CreateCall(F, ArgValue, "tmp");
    if (Result->getType() != ResultType)
      Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
                                     "cast");
    return RValue::get(Result);
  }
  case Builtin::BI__builtin_ffs:
  case Builtin::BI__builtin_ffsl:
  case Builtin::BI__builtin_ffsll: {
    // ffs(x) -> x ? cttz(x) + 1 : 0
    Value *ArgValue = EmitScalarExpr(E->getArg(0));

    const llvm::Type *ArgType = ArgValue->getType();
    Value *F = CGM.getIntrinsic(Intrinsic::cttz, &ArgType, 1);

    const llvm::Type *ResultType = ConvertType(E->getType());
    Value *Tmp = Builder.CreateAdd(Builder.CreateCall(F, ArgValue, "tmp"),
                                   llvm::ConstantInt::get(ArgType, 1), "tmp");
    Value *Zero = llvm::Constant::getNullValue(ArgType);
    Value *IsZero = Builder.CreateICmpEQ(ArgValue, Zero, "iszero");
    Value *Result = Builder.CreateSelect(IsZero, Zero, Tmp, "ffs");
    if (Result->getType() != ResultType)
      Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
                                     "cast");
    return RValue::get(Result);
  }
  case Builtin::BI__builtin_parity:
  case Builtin::BI__builtin_parityl:
  case Builtin::BI__builtin_parityll: {
    // parity(x) -> ctpop(x) & 1
    Value *ArgValue = EmitScalarExpr(E->getArg(0));

    const llvm::Type *ArgType = ArgValue->getType();
    Value *F = CGM.getIntrinsic(Intrinsic::ctpop, &ArgType, 1);

    const llvm::Type *ResultType = ConvertType(E->getType());
    Value *Tmp = Builder.CreateCall(F, ArgValue, "tmp");
    Value *Result = Builder.CreateAnd(Tmp, llvm::ConstantInt::get(ArgType, 1),
                                      "tmp");
    if (Result->getType() != ResultType)
      Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
                                     "cast");
    return RValue::get(Result);
  }
  case Builtin::BI__builtin_popcount:
  case Builtin::BI__builtin_popcountl:
  case Builtin::BI__builtin_popcountll: {
    // popcount(x) -> llvm.ctpop, cast to the return type if needed.
    Value *ArgValue = EmitScalarExpr(E->getArg(0));

    const llvm::Type *ArgType = ArgValue->getType();
    Value *F = CGM.getIntrinsic(Intrinsic::ctpop, &ArgType, 1);

    const llvm::Type *ResultType = ConvertType(E->getType());
    Value *Result = Builder.CreateCall(F, ArgValue, "tmp");
    if (Result->getType() != ResultType)
      Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
                                     "cast");
    return RValue::get(Result);
  }
  case Builtin::BI__builtin_expect:
    // FIXME: pass expect through to LLVM
    // For now the hint is dropped and only the value is emitted.
    return RValue::get(EmitScalarExpr(E->getArg(0)));
  case Builtin::BI__builtin_bswap32:
  case Builtin::BI__builtin_bswap64: {
    Value *ArgValue = EmitScalarExpr(E->getArg(0));
    const llvm::Type *ArgType = ArgValue->getType();
    Value *F = CGM.getIntrinsic(Intrinsic::bswap, &ArgType, 1);
    return RValue::get(Builder.CreateCall(F, ArgValue, "tmp"));
  }
  case Builtin::BI__builtin_object_size: {
#if 1
    // We pass this builtin onto the optimizer so that it can
    // figure out the object size in more complex cases.
    const llvm::Type *ResType[] = {
      ConvertType(E->getType())
    };
    Value *F = CGM.getIntrinsic(Intrinsic::objectsize, ResType, 1);
    return RValue::get(Builder.CreateCall2(F,
                                           EmitScalarExpr(E->getArg(0)),
                                           EmitScalarExpr(E->getArg(1))));
#else
    // FIXME: Remove after testing.
    // Dead fallback path: fold directly to 0 or -1 based on the type arg.
    llvm::APSInt TypeArg = E->getArg(1)->EvaluateAsInt(CGM.getContext());
    const llvm::Type *ResType = ConvertType(E->getType());
    //    bool UseSubObject = TypeArg.getZExtValue() & 1;
    bool UseMinimum = TypeArg.getZExtValue() & 2;
    return RValue::get(
      llvm::ConstantInt::get(ResType, UseMinimum ? 0 : -1LL));
#endif
  }
  case Builtin::BI__builtin_prefetch: {
    // The rw (default 0 = read) and locality (default 3) arguments are
    // optional in the source; supply the defaults when absent.
    Value *Locality, *RW, *Address = EmitScalarExpr(E->getArg(0));
    // FIXME: Technically these constants should be of type 'int', yes?
    RW = (E->getNumArgs() > 1) ? EmitScalarExpr(E->getArg(1)) :
      llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 0);
    Locality = (E->getNumArgs() > 2) ? EmitScalarExpr(E->getArg(2)) :
      llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 3);
    Value *F = CGM.getIntrinsic(Intrinsic::prefetch, 0, 0);
    return RValue::get(Builder.CreateCall3(F, Address, RW, Locality));
  }
  case Builtin::BI__builtin_trap: {
    Value *F = CGM.getIntrinsic(Intrinsic::trap, 0, 0);
    return RValue::get(Builder.CreateCall(F));
  }
  case Builtin::BI__builtin_unreachable: {
    // Emit 'unreachable' and stop emitting into this block; any code
    // following the builtin is dead.
    Value *V = Builder.CreateUnreachable();
    Builder.ClearInsertionPoint();
    return RValue::get(V);
  }

  case Builtin::BI__builtin_powi:
  case Builtin::BI__builtin_powif:
  case Builtin::BI__builtin_powil: {
    Value *Base = EmitScalarExpr(E->getArg(0));
    Value *Exponent = EmitScalarExpr(E->getArg(1));
    const llvm::Type *ArgType = Base->getType();
    Value *F = CGM.getIntrinsic(Intrinsic::powi, &ArgType, 1);
    return RValue::get(Builder.CreateCall2(F, Base, Exponent, "tmp"));
  }

  case Builtin::BI__builtin_isgreater:
  case Builtin::BI__builtin_isgreaterequal:
  case Builtin::BI__builtin_isless:
  case Builtin::BI__builtin_islessequal:
  case Builtin::BI__builtin_islessgreater:
  case Builtin::BI__builtin_isunordered: {
    // Ordered comparisons: we know the arguments to these are matching scalar
    // floating point values.
    Value *LHS = EmitScalarExpr(E->getArg(0));
    Value *RHS = EmitScalarExpr(E->getArg(1));

    switch (BuiltinID) {
    default: assert(0 && "Unknown ordered comparison");
    case Builtin::BI__builtin_isgreater:
      LHS = Builder.CreateFCmpOGT(LHS, RHS, "cmp");
      break;
    case Builtin::BI__builtin_isgreaterequal:
      LHS = Builder.CreateFCmpOGE(LHS, RHS, "cmp");
      break;
    case Builtin::BI__builtin_isless:
      LHS = Builder.CreateFCmpOLT(LHS, RHS, "cmp");
      break;
    case Builtin::BI__builtin_islessequal:
      LHS = Builder.CreateFCmpOLE(LHS, RHS, "cmp");
      break;
    case Builtin::BI__builtin_islessgreater:
      LHS = Builder.CreateFCmpONE(LHS, RHS, "cmp");
      break;
    case Builtin::BI__builtin_isunordered:
      LHS = Builder.CreateFCmpUNO(LHS, RHS, "cmp");
      break;
    }
    // ZExt bool to int type.
    return RValue::get(Builder.CreateZExt(LHS, ConvertType(E->getType()),
                                          "tmp"));
  }
  case Builtin::BI__builtin_isnan: {
    // isnan(x) -> x unordered-not-equal x (only NaN compares unordered
    // with itself).
    Value *V = EmitScalarExpr(E->getArg(0));
    V = Builder.CreateFCmpUNO(V, V, "cmp");
    return RValue::get(Builder.CreateZExt(V, ConvertType(E->getType()), "tmp"));
  }
  case Builtin::BIalloca:
  case Builtin::BI__builtin_alloca: {
    // FIXME: LLVM IR Should allow alloca with an i64 size!
    Value *Size = EmitScalarExpr(E->getArg(0));
    Size = Builder.CreateIntCast(Size, llvm::Type::getInt32Ty(VMContext), false, "tmp");
    return RValue::get(Builder.CreateAlloca(llvm::Type::getInt8Ty(VMContext), Size, "tmp"));
  }
  case Builtin::BI__builtin_bzero: {
    // bzero(p, n) -> memset(p, 0, n) with align 1.
    Value *Address = EmitScalarExpr(E->getArg(0));
    Builder.CreateCall4(CGM.getMemSetFn(), Address,
                        llvm::ConstantInt::get(llvm::Type::getInt8Ty(VMContext), 0),
                        EmitScalarExpr(E->getArg(1)),
                        llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 1));
    return RValue::get(Address);
  }
  case Builtin::BI__builtin_memcpy: {
    Value *Address = EmitScalarExpr(E->getArg(0));
    Builder.CreateCall4(CGM.getMemCpyFn(), Address,
                        EmitScalarExpr(E->getArg(1)),
                        EmitScalarExpr(E->getArg(2)),
                        llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 1));
    return RValue::get(Address);
  }
  case Builtin::BI__builtin_memmove: {
    Value *Address = EmitScalarExpr(E->getArg(0));
    Builder.CreateCall4(CGM.getMemMoveFn(), Address,
                        EmitScalarExpr(E->getArg(1)),
                        EmitScalarExpr(E->getArg(2)),
                        llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 1));
    return RValue::get(Address);
  }
  case Builtin::BI__builtin_memset: {
    // The fill value is an int in the source; truncate it to i8 for the
    // memset intrinsic.
    Value *Address = EmitScalarExpr(E->getArg(0));
    Builder.CreateCall4(CGM.getMemSetFn(), Address,
                        Builder.CreateTrunc(EmitScalarExpr(E->getArg(1)),
                                            llvm::Type::getInt8Ty(VMContext)),
                        EmitScalarExpr(E->getArg(2)),
                        llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 1));
    return RValue::get(Address);
  }
  case Builtin::BI__builtin_return_address: {
    Value *F = CGM.getIntrinsic(Intrinsic::returnaddress, 0, 0);
    return RValue::get(Builder.CreateCall(F, EmitScalarExpr(E->getArg(0))));
  }
  case Builtin::BI__builtin_frame_address: {
    Value *F = CGM.getIntrinsic(Intrinsic::frameaddress, 0, 0);
    return RValue::get(Builder.CreateCall(F, EmitScalarExpr(E->getArg(0))));
  }
  case Builtin::BI__builtin_extract_return_addr: {
    // FIXME: There should be a target hook for this
    return RValue::get(EmitScalarExpr(E->getArg(0)));
  }
  case Builtin::BI__builtin_unwind_init: {
    Value *F = CGM.getIntrinsic(Intrinsic::eh_unwind_init, 0, 0);
    return RValue::get(Builder.CreateCall(F));
  }
#if 0
  // FIXME: Finish/enable when LLVM backend support stabilizes
  case Builtin::BI__builtin_setjmp: {
    Value *Buf = EmitScalarExpr(E->getArg(0));
    // Store the frame pointer to the buffer
    Value *FrameAddrF = CGM.getIntrinsic(Intrinsic::frameaddress, 0, 0);
    Value *FrameAddr =
        Builder.CreateCall(FrameAddrF,
                           Constant::getNullValue(llvm::Type::getInt32Ty(VMContext)));
    Builder.CreateStore(FrameAddr, Buf);
    // Call the setjmp intrinsic
    Value *F = CGM.getIntrinsic(Intrinsic::eh_sjlj_setjmp, 0, 0);
    const llvm::Type *DestType = llvm::Type::getInt8PtrTy(VMContext);
    Buf = Builder.CreateBitCast(Buf, DestType);
    return RValue::get(Builder.CreateCall(F, Buf));
  }
  case Builtin::BI__builtin_longjmp: {
    Value *F = CGM.getIntrinsic(Intrinsic::eh_sjlj_longjmp, 0, 0);
    Value *Buf = EmitScalarExpr(E->getArg(0));
    const llvm::Type *DestType = llvm::Type::getInt8PtrTy(VMContext);
    Buf = Builder.CreateBitCast(Buf, DestType);
    return RValue::get(Builder.CreateCall(F, Buf));
  }
#endif
  // The generic (un-suffixed) __sync_* forms are resolved by Sema to the
  // size-suffixed variants below, so they should never reach CodeGen.
  case Builtin::BI__sync_fetch_and_add:
  case Builtin::BI__sync_fetch_and_sub:
  case Builtin::BI__sync_fetch_and_or:
  case Builtin::BI__sync_fetch_and_and:
  case Builtin::BI__sync_fetch_and_xor:
  case Builtin::BI__sync_add_and_fetch:
  case Builtin::BI__sync_sub_and_fetch:
  case Builtin::BI__sync_and_and_fetch:
  case Builtin::BI__sync_or_and_fetch:
  case Builtin::BI__sync_xor_and_fetch:
  case Builtin::BI__sync_val_compare_and_swap:
  case Builtin::BI__sync_bool_compare_and_swap:
  case Builtin::BI__sync_lock_test_and_set:
  case Builtin::BI__sync_lock_release:
    assert(0 && "Shouldn't make it through sema");
  case Builtin::BI__sync_fetch_and_add_1:
  case Builtin::BI__sync_fetch_and_add_2:
  case Builtin::BI__sync_fetch_and_add_4:
  case Builtin::BI__sync_fetch_and_add_8:
  case Builtin::BI__sync_fetch_and_add_16:
    return EmitBinaryAtomic(*this, Intrinsic::atomic_load_add, E);
  case Builtin::BI__sync_fetch_and_sub_1:
  case Builtin::BI__sync_fetch_and_sub_2:
  case Builtin::BI__sync_fetch_and_sub_4:
  case Builtin::BI__sync_fetch_and_sub_8:
  case Builtin::BI__sync_fetch_and_sub_16:
    return EmitBinaryAtomic(*this, Intrinsic::atomic_load_sub, E);
  case Builtin::BI__sync_fetch_and_or_1:
  case Builtin::BI__sync_fetch_and_or_2:
  case Builtin::BI__sync_fetch_and_or_4:
  case Builtin::BI__sync_fetch_and_or_8:
  case Builtin::BI__sync_fetch_and_or_16:
    return EmitBinaryAtomic(*this, Intrinsic::atomic_load_or, E);
  case Builtin::BI__sync_fetch_and_and_1:
  case Builtin::BI__sync_fetch_and_and_2:
  case Builtin::BI__sync_fetch_and_and_4:
  case Builtin::BI__sync_fetch_and_and_8:
  case Builtin::BI__sync_fetch_and_and_16:
    return EmitBinaryAtomic(*this, Intrinsic::atomic_load_and, E);
  case Builtin::BI__sync_fetch_and_xor_1:
  case Builtin::BI__sync_fetch_and_xor_2:
  case Builtin::BI__sync_fetch_and_xor_4:
  case Builtin::BI__sync_fetch_and_xor_8:
  case Builtin::BI__sync_fetch_and_xor_16:
    return EmitBinaryAtomic(*this, Intrinsic::atomic_load_xor, E);
  case Builtin::BI__sync_fetch_and_nand_1:
  case Builtin::BI__sync_fetch_and_nand_2:
  case Builtin::BI__sync_fetch_and_nand_4:
  case Builtin::BI__sync_fetch_and_nand_8:
  case Builtin::BI__sync_fetch_and_nand_16:
    return EmitBinaryAtomic(*this, Intrinsic::atomic_load_nand, E);

  // Clang extensions: not overloaded yet.
  case Builtin::BI__sync_fetch_and_min:
    return EmitBinaryAtomic(*this, Intrinsic::atomic_load_min, E);
  case Builtin::BI__sync_fetch_and_max:
    return EmitBinaryAtomic(*this, Intrinsic::atomic_load_max, E);
  case Builtin::BI__sync_fetch_and_umin:
    return EmitBinaryAtomic(*this, Intrinsic::atomic_load_umin, E);
  case Builtin::BI__sync_fetch_and_umax:
    return EmitBinaryAtomic(*this, Intrinsic::atomic_load_umax, E);

  // The op-and-fetch forms recompute the post-operation value from the
  // old value the atomic intrinsic returns.
  case Builtin::BI__sync_add_and_fetch_1:
  case Builtin::BI__sync_add_and_fetch_2:
  case Builtin::BI__sync_add_and_fetch_4:
  case Builtin::BI__sync_add_and_fetch_8:
  case Builtin::BI__sync_add_and_fetch_16:
    return EmitBinaryAtomicPost(*this, Intrinsic::atomic_load_add, E,
                                llvm::Instruction::Add);
  case Builtin::BI__sync_sub_and_fetch_1:
  case Builtin::BI__sync_sub_and_fetch_2:
  case Builtin::BI__sync_sub_and_fetch_4:
  case Builtin::BI__sync_sub_and_fetch_8:
  case Builtin::BI__sync_sub_and_fetch_16:
    return EmitBinaryAtomicPost(*this, Intrinsic::atomic_load_sub, E,
                                llvm::Instruction::Sub);
  case Builtin::BI__sync_and_and_fetch_1:
  case Builtin::BI__sync_and_and_fetch_2:
  case Builtin::BI__sync_and_and_fetch_4:
  case Builtin::BI__sync_and_and_fetch_8:
  case Builtin::BI__sync_and_and_fetch_16:
    return EmitBinaryAtomicPost(*this, Intrinsic::atomic_load_and, E,
                                llvm::Instruction::And);
  case Builtin::BI__sync_or_and_fetch_1:
  case Builtin::BI__sync_or_and_fetch_2:
  case Builtin::BI__sync_or_and_fetch_4:
  case Builtin::BI__sync_or_and_fetch_8:
  case Builtin::BI__sync_or_and_fetch_16:
    return EmitBinaryAtomicPost(*this, Intrinsic::atomic_load_or, E,
                                llvm::Instruction::Or);
  case Builtin::BI__sync_xor_and_fetch_1:
  case Builtin::BI__sync_xor_and_fetch_2:
  case Builtin::BI__sync_xor_and_fetch_4:
  case Builtin::BI__sync_xor_and_fetch_8:
  case Builtin::BI__sync_xor_and_fetch_16:
    return EmitBinaryAtomicPost(*this, Intrinsic::atomic_load_xor, E,
                                llvm::Instruction::Xor);
  case Builtin::BI__sync_nand_and_fetch_1:
  case Builtin::BI__sync_nand_and_fetch_2:
  case Builtin::BI__sync_nand_and_fetch_4:
  case Builtin::BI__sync_nand_and_fetch_8:
  case Builtin::BI__sync_nand_and_fetch_16:
    // nand: the helper inverts the returned value before applying And.
    return EmitBinaryAtomicPost(*this, Intrinsic::atomic_load_nand, E,
                                llvm::Instruction::And);

  case Builtin::BI__sync_val_compare_and_swap_1:
  case Builtin::BI__sync_val_compare_and_swap_2:
  case Builtin::BI__sync_val_compare_and_swap_4:
  case Builtin::BI__sync_val_compare_and_swap_8:
  case Builtin::BI__sync_val_compare_and_swap_16:
  {
    const llvm::Type *ResType[2];
    ResType[0]= ConvertType(E->getType());
    ResType[1] = ConvertType(E->getArg(0)->getType());
    Value *AtomF = CGM.getIntrinsic(Intrinsic::atomic_cmp_swap, ResType, 2);
    return RValue::get(Builder.CreateCall3(AtomF,
                                           EmitScalarExpr(E->getArg(0)),
                                           EmitScalarExpr(E->getArg(1)),
                                           EmitScalarExpr(E->getArg(2))));
  }

  case Builtin::BI__sync_bool_compare_and_swap_1:
  case Builtin::BI__sync_bool_compare_and_swap_2:
  case Builtin::BI__sync_bool_compare_and_swap_4:
  case Builtin::BI__sync_bool_compare_and_swap_8:
  case Builtin::BI__sync_bool_compare_and_swap_16:
  {
    // bool CAS: do the cmp_swap, then compare the previous value against
    // the expected old value to produce the success flag.
    const llvm::Type *ResType[2];
    ResType[0]= ConvertType(E->getArg(1)->getType());
    ResType[1] = llvm::PointerType::getUnqual(ResType[0]);
    Value *AtomF = CGM.getIntrinsic(Intrinsic::atomic_cmp_swap, ResType, 2);
    Value *OldVal = EmitScalarExpr(E->getArg(1));
    Value *PrevVal = Builder.CreateCall3(AtomF,
                                        EmitScalarExpr(E->getArg(0)),
                                        OldVal,
                                        EmitScalarExpr(E->getArg(2)));
    Value *Result = Builder.CreateICmpEQ(PrevVal, OldVal);
    // zext bool to int.
    return RValue::get(Builder.CreateZExt(Result, ConvertType(E->getType())));
  }

  case Builtin::BI__sync_lock_test_and_set_1:
  case Builtin::BI__sync_lock_test_and_set_2:
  case Builtin::BI__sync_lock_test_and_set_4:
  case Builtin::BI__sync_lock_test_and_set_8:
  case Builtin::BI__sync_lock_test_and_set_16:
    return EmitBinaryAtomic(*this, Intrinsic::atomic_swap, E);
  case Builtin::BI__sync_lock_release_1:
  case Builtin::BI__sync_lock_release_2:
  case Builtin::BI__sync_lock_release_4:
  case Builtin::BI__sync_lock_release_8:
  case Builtin::BI__sync_lock_release_16: {
    // Lowered as a volatile store of zero through the pointer argument.
    Value *Ptr = EmitScalarExpr(E->getArg(0));
    const llvm::Type *ElTy =
      cast<llvm::PointerType>(Ptr->getType())->getElementType();
    llvm::StoreInst *Store =
      Builder.CreateStore(llvm::Constant::getNullValue(ElTy), Ptr);
    Store->setVolatile(true);
    return RValue::get(0);
  }

  case Builtin::BI__sync_synchronize: {
    // llvm.memory.barrier takes five i1 flags; the first four are set to
    // true (full barrier), the last to false -- see the old
    // llvm.memory.barrier intrinsic's documentation for their meaning.
    Value *C[5];
    C[0] = C[1] = C[2] = C[3] = llvm::ConstantInt::get(llvm::Type::getInt1Ty(VMContext), 1);
    C[4] = llvm::ConstantInt::get(llvm::Type::getInt1Ty(VMContext), 0);
    Builder.CreateCall(CGM.getIntrinsic(Intrinsic::memory_barrier), C, C + 5);
    return RValue::get(0);
  }

    // Library functions with special handling.
  case Builtin::BIsqrt:
  case Builtin::BIsqrtf:
  case Builtin::BIsqrtl: {
    // Rewrite sqrt to intrinsic if allowed.
    if (!FD->hasAttr<ConstAttr>())
      break;
    Value *Arg0 = EmitScalarExpr(E->getArg(0));
    const llvm::Type *ArgType = Arg0->getType();
    Value *F = CGM.getIntrinsic(Intrinsic::sqrt, &ArgType, 1);
    return RValue::get(Builder.CreateCall(F, Arg0, "tmp"));
  }

  case Builtin::BIpow:
  case Builtin::BIpowf:
  case Builtin::BIpowl: {
    // Rewrite pow to intrinsic if allowed.
    if (!FD->hasAttr<ConstAttr>())
      break;
    Value *Base = EmitScalarExpr(E->getArg(0));
    Value *Exponent = EmitScalarExpr(E->getArg(1));
    const llvm::Type *ArgType = Base->getType();
    Value *F = CGM.getIntrinsic(Intrinsic::pow, &ArgType, 1);
    return RValue::get(Builder.CreateCall2(F, Base, Exponent, "tmp"));
  }
  }

  // If this is an alias for a libm function (e.g. __builtin_sin) turn it into
  // that function.
  if (getContext().BuiltinInfo.isLibFunction(BuiltinID) ||
      getContext().BuiltinInfo.isPredefinedLibFunction(BuiltinID))
    return EmitCall(CGM.getBuiltinLibFunction(FD, BuiltinID),
                    E->getCallee()->getType(), E->arg_begin(),
                    E->arg_end());

  // See if we have a target specific intrinsic.
  const char *Name = getContext().BuiltinInfo.GetName(BuiltinID);
  Intrinsic::ID IntrinsicID = Intrinsic::not_intrinsic;
  if (const char *Prefix =
      llvm::Triple::getArchTypePrefix(Target.getTriple().getArch()))
    IntrinsicID = Intrinsic::getIntrinsicForGCCBuiltin(Prefix, Name);

  if (IntrinsicID != Intrinsic::not_intrinsic) {
    SmallVector<Value*, 16> Args;

    Function *F = CGM.getIntrinsic(IntrinsicID);
    const llvm::FunctionType *FTy = F->getFunctionType();

    for (unsigned i = 0, e = E->getNumArgs(); i != e; ++i) {
      Value *ArgValue = EmitScalarExpr(E->getArg(i));

      // If the intrinsic arg type is different from the builtin arg type
      // we need to do a bit cast.
      const llvm::Type *PTy = FTy->getParamType(i);
      if (PTy != ArgValue->getType()) {
        assert(PTy->canLosslesslyBitCastTo(FTy->getParamType(i)) &&
               "Must be able to losslessly bit cast to param");
        ArgValue = Builder.CreateBitCast(ArgValue, PTy);
      }

      Args.push_back(ArgValue);
    }

    Value *V = Builder.CreateCall(F, Args.data(), Args.data() + Args.size());
    QualType BuiltinRetType = E->getType();

    const llvm::Type *RetTy = llvm::Type::getVoidTy(VMContext);
    if (!BuiltinRetType->isVoidType()) RetTy = ConvertType(BuiltinRetType);

    // Bitcast the intrinsic's result back to the builtin's return type
    // when they differ.
    if (RetTy != V->getType()) {
      assert(V->getType()->canLosslesslyBitCastTo(RetTy) &&
             "Must be able to losslessly bit cast result type");
      V = Builder.CreateBitCast(V, RetTy);
    }

    return RValue::get(V);
  }

  // See if we have a target specific builtin that needs to be lowered.
  if (Value *V = EmitTargetBuiltinExpr(BuiltinID, E))
    return RValue::get(V);

  ErrorUnsupported(E, "builtin function");

  // Unknown builtin, for now just dump it out and return undef.
  if (hasAggregateLLVMType(E->getType()))
    return RValue::getAggregate(CreateTempAlloca(ConvertType(E->getType())));
  return RValue::get(llvm::UndefValue::get(ConvertType(E->getType())));
}
636
637Value *CodeGenFunction::EmitTargetBuiltinExpr(unsigned BuiltinID,
638                                              const CallExpr *E) {
639  switch (Target.getTriple().getArch()) {
640  case llvm::Triple::x86:
641  case llvm::Triple::x86_64:
642    return EmitX86BuiltinExpr(BuiltinID, E);
643  case llvm::Triple::ppc:
644  case llvm::Triple::ppc64:
645    return EmitPPCBuiltinExpr(BuiltinID, E);
646  default:
647    return 0;
648  }
649}
650
651Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
652                                           const CallExpr *E) {
653
654  llvm::SmallVector<Value*, 4> Ops;
655
656  for (unsigned i = 0, e = E->getNumArgs(); i != e; i++)
657    Ops.push_back(EmitScalarExpr(E->getArg(i)));
658
659  switch (BuiltinID) {
660  default: return 0;
661  case X86::BI__builtin_ia32_pslldi128:
662  case X86::BI__builtin_ia32_psllqi128:
663  case X86::BI__builtin_ia32_psllwi128:
664  case X86::BI__builtin_ia32_psradi128:
665  case X86::BI__builtin_ia32_psrawi128:
666  case X86::BI__builtin_ia32_psrldi128:
667  case X86::BI__builtin_ia32_psrlqi128:
668  case X86::BI__builtin_ia32_psrlwi128: {
669    Ops[1] = Builder.CreateZExt(Ops[1], llvm::Type::getInt64Ty(VMContext), "zext");
670    const llvm::Type *Ty = llvm::VectorType::get(llvm::Type::getInt64Ty(VMContext), 2);
671    llvm::Value *Zero = llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 0);
672    Ops[1] = Builder.CreateInsertElement(llvm::UndefValue::get(Ty),
673                                         Ops[1], Zero, "insert");
674    Ops[1] = Builder.CreateBitCast(Ops[1], Ops[0]->getType(), "bitcast");
675    const char *name = 0;
676    Intrinsic::ID ID = Intrinsic::not_intrinsic;
677
678    switch (BuiltinID) {
679    default: assert(0 && "Unsupported shift intrinsic!");
680    case X86::BI__builtin_ia32_pslldi128:
681      name = "pslldi";
682      ID = Intrinsic::x86_sse2_psll_d;
683      break;
684    case X86::BI__builtin_ia32_psllqi128:
685      name = "psllqi";
686      ID = Intrinsic::x86_sse2_psll_q;
687      break;
688    case X86::BI__builtin_ia32_psllwi128:
689      name = "psllwi";
690      ID = Intrinsic::x86_sse2_psll_w;
691      break;
692    case X86::BI__builtin_ia32_psradi128:
693      name = "psradi";
694      ID = Intrinsic::x86_sse2_psra_d;
695      break;
696    case X86::BI__builtin_ia32_psrawi128:
697      name = "psrawi";
698      ID = Intrinsic::x86_sse2_psra_w;
699      break;
700    case X86::BI__builtin_ia32_psrldi128:
701      name = "psrldi";
702      ID = Intrinsic::x86_sse2_psrl_d;
703      break;
704    case X86::BI__builtin_ia32_psrlqi128:
705      name = "psrlqi";
706      ID = Intrinsic::x86_sse2_psrl_q;
707      break;
708    case X86::BI__builtin_ia32_psrlwi128:
709      name = "psrlwi";
710      ID = Intrinsic::x86_sse2_psrl_w;
711      break;
712    }
713    llvm::Function *F = CGM.getIntrinsic(ID);
714    return Builder.CreateCall(F, &Ops[0], &Ops[0] + Ops.size(), name);
715  }
716  case X86::BI__builtin_ia32_pslldi:
717  case X86::BI__builtin_ia32_psllqi:
718  case X86::BI__builtin_ia32_psllwi:
719  case X86::BI__builtin_ia32_psradi:
720  case X86::BI__builtin_ia32_psrawi:
721  case X86::BI__builtin_ia32_psrldi:
722  case X86::BI__builtin_ia32_psrlqi:
723  case X86::BI__builtin_ia32_psrlwi: {
724    Ops[1] = Builder.CreateZExt(Ops[1], llvm::Type::getInt64Ty(VMContext), "zext");
725    const llvm::Type *Ty = llvm::VectorType::get(llvm::Type::getInt64Ty(VMContext), 1);
726    Ops[1] = Builder.CreateBitCast(Ops[1], Ty, "bitcast");
727    const char *name = 0;
728    Intrinsic::ID ID = Intrinsic::not_intrinsic;
729
730    switch (BuiltinID) {
731    default: assert(0 && "Unsupported shift intrinsic!");
732    case X86::BI__builtin_ia32_pslldi:
733      name = "pslldi";
734      ID = Intrinsic::x86_mmx_psll_d;
735      break;
736    case X86::BI__builtin_ia32_psllqi:
737      name = "psllqi";
738      ID = Intrinsic::x86_mmx_psll_q;
739      break;
740    case X86::BI__builtin_ia32_psllwi:
741      name = "psllwi";
742      ID = Intrinsic::x86_mmx_psll_w;
743      break;
744    case X86::BI__builtin_ia32_psradi:
745      name = "psradi";
746      ID = Intrinsic::x86_mmx_psra_d;
747      break;
748    case X86::BI__builtin_ia32_psrawi:
749      name = "psrawi";
750      ID = Intrinsic::x86_mmx_psra_w;
751      break;
752    case X86::BI__builtin_ia32_psrldi:
753      name = "psrldi";
754      ID = Intrinsic::x86_mmx_psrl_d;
755      break;
756    case X86::BI__builtin_ia32_psrlqi:
757      name = "psrlqi";
758      ID = Intrinsic::x86_mmx_psrl_q;
759      break;
760    case X86::BI__builtin_ia32_psrlwi:
761      name = "psrlwi";
762      ID = Intrinsic::x86_mmx_psrl_w;
763      break;
764    }
765    llvm::Function *F = CGM.getIntrinsic(ID);
766    return Builder.CreateCall(F, &Ops[0], &Ops[0] + Ops.size(), name);
767  }
768  case X86::BI__builtin_ia32_cmpps: {
769    llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_sse_cmp_ps);
770    return Builder.CreateCall(F, &Ops[0], &Ops[0] + Ops.size(), "cmpps");
771  }
772  case X86::BI__builtin_ia32_cmpss: {
773    llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_sse_cmp_ss);
774    return Builder.CreateCall(F, &Ops[0], &Ops[0] + Ops.size(), "cmpss");
775  }
776  case X86::BI__builtin_ia32_ldmxcsr: {
777    const llvm::Type *PtrTy = llvm::Type::getInt8PtrTy(VMContext);
778    Value *One = llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 1);
779    Value *Tmp = Builder.CreateAlloca(llvm::Type::getInt32Ty(VMContext), One, "tmp");
780    Builder.CreateStore(Ops[0], Tmp);
781    return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse_ldmxcsr),
782                              Builder.CreateBitCast(Tmp, PtrTy));
783  }
784  case X86::BI__builtin_ia32_stmxcsr: {
785    const llvm::Type *PtrTy = llvm::Type::getInt8PtrTy(VMContext);
786    Value *One = llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 1);
787    Value *Tmp = Builder.CreateAlloca(llvm::Type::getInt32Ty(VMContext), One, "tmp");
788    One = Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse_stmxcsr),
789                             Builder.CreateBitCast(Tmp, PtrTy));
790    return Builder.CreateLoad(Tmp, "stmxcsr");
791  }
792  case X86::BI__builtin_ia32_cmppd: {
793    llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_sse2_cmp_pd);
794    return Builder.CreateCall(F, &Ops[0], &Ops[0] + Ops.size(), "cmppd");
795  }
796  case X86::BI__builtin_ia32_cmpsd: {
797    llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_sse2_cmp_sd);
798    return Builder.CreateCall(F, &Ops[0], &Ops[0] + Ops.size(), "cmpsd");
799  }
800  case X86::BI__builtin_ia32_storehps:
801  case X86::BI__builtin_ia32_storelps: {
802    const llvm::Type *EltTy = llvm::Type::getInt64Ty(VMContext);
803    llvm::Type *PtrTy = llvm::PointerType::getUnqual(EltTy);
804    llvm::Type *VecTy = llvm::VectorType::get(EltTy, 2);
805
806    // cast val v2i64
807    Ops[1] = Builder.CreateBitCast(Ops[1], VecTy, "cast");
808
809    // extract (0, 1)
810    unsigned Index = BuiltinID == X86::BI__builtin_ia32_storelps ? 0 : 1;
811    llvm::Value *Idx = llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), Index);
812    Ops[1] = Builder.CreateExtractElement(Ops[1], Idx, "extract");
813
814    // cast pointer to i64 & store
815    Ops[0] = Builder.CreateBitCast(Ops[0], PtrTy);
816    return Builder.CreateStore(Ops[1], Ops[0]);
817  }
818  case X86::BI__builtin_ia32_palignr128:
819  case X86::BI__builtin_ia32_palignr: {
820    Function *F = CGM.getIntrinsic(BuiltinID == X86::BI__builtin_ia32_palignr128 ?
821				   Intrinsic::x86_ssse3_palign_r_128 :
822				   Intrinsic::x86_ssse3_palign_r);
823    return Builder.CreateCall(F, &Ops[0], &Ops[0] + Ops.size());
824  }
825  }
826}
827
828Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
829                                           const CallExpr *E) {
830  switch (BuiltinID) {
831  default: return 0;
832  }
833}
834