CGBuiltin.cpp revision b0b84385f0cb0ea4036579f5f384f1c19b917c7e
//===---- CGBuiltin.cpp - Emit LLVM Code for builtins ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Builtin calls as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/AST/APValue.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Decl.h"
#include "clang/Basic/TargetBuiltins.h"
#include "llvm/Intrinsics.h"
using namespace clang;
using namespace CodeGen;
using namespace llvm;

/// Utility to insert an atomic instruction based on Intrinsic::ID
/// and the expression node.
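/// For example, __sync_fetch_and_add(p, v) is lowered through here to a call
/// to the llvm.atomic.load.add intrinsic, whose result is the value that was
/// stored at p before the addition.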
static RValue EmitBinaryAtomic(CodeGenFunction& CGF,
                               Intrinsic::ID Id, const CallExpr *E) {
  const llvm::Type *ResType[2];
  ResType[0] = CGF.ConvertType(E->getType());
  ResType[1] = CGF.ConvertType(E->getArg(0)->getType());
  Value *AtomF = CGF.CGM.getIntrinsic(Id, ResType, 2);
  return RValue::get(CGF.Builder.CreateCall2(AtomF,
                                             CGF.EmitScalarExpr(E->getArg(0)),
                                             CGF.EmitScalarExpr(E->getArg(1))));
}

/// Utility to insert an atomic instruction based on Intrinsic::ID and
/// the expression node, where the return value is the result of the
/// operation.
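/// For example, __sync_add_and_fetch(p, v) becomes an atomic fetch-add
/// followed by a second, non-atomic add of v to the returned old value,
/// yielding the post-operation result.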
static RValue EmitBinaryAtomicPost(CodeGenFunction& CGF,
                                   Intrinsic::ID Id, const CallExpr *E,
                                   Instruction::BinaryOps Op) {
  const llvm::Type *ResType[2];
  ResType[0] = CGF.ConvertType(E->getType());
  ResType[1] = CGF.ConvertType(E->getArg(0)->getType());
  Value *AtomF = CGF.CGM.getIntrinsic(Id, ResType, 2);
  Value *Ptr = CGF.EmitScalarExpr(E->getArg(0));
  Value *Operand = CGF.EmitScalarExpr(E->getArg(1));
  Value *Result = CGF.Builder.CreateCall2(AtomF, Ptr, Operand);

  if (Id == Intrinsic::atomic_load_nand)
    Result = CGF.Builder.CreateNot(Result);

  return RValue::get(CGF.Builder.CreateBinOp(Op, Result, Operand));
}

RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
                                        unsigned BuiltinID, const CallExpr *E) {
  // See if we can constant fold this builtin.  If so, don't emit it at all.
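  // (For example, a call such as __builtin_inf() evaluates to a floating
  // constant here and never reaches the switch below.)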
  Expr::EvalResult Result;
  if (E->Evaluate(Result, CGM.getContext())) {
    if (Result.Val.isInt())
      return RValue::get(llvm::ConstantInt::get(VMContext,
                                                Result.Val.getInt()));
    else if (Result.Val.isFloat())
      return RValue::get(ConstantFP::get(VMContext, Result.Val.getFloat()));
  }

  switch (BuiltinID) {
  default: break;  // Handle intrinsics and libm functions below.
  case Builtin::BI__builtin___CFStringMakeConstantString:
    return RValue::get(CGM.EmitConstantExpr(E, E->getType(), 0));
  case Builtin::BI__builtin_stdarg_start:
  case Builtin::BI__builtin_va_start:
  case Builtin::BI__builtin_va_end: {
    Value *ArgValue = EmitVAListRef(E->getArg(0));
    const llvm::Type *DestType = llvm::Type::getInt8PtrTy(VMContext);
    if (ArgValue->getType() != DestType)
      ArgValue = Builder.CreateBitCast(ArgValue, DestType,
                                       ArgValue->getName().data());

    Intrinsic::ID inst = (BuiltinID == Builtin::BI__builtin_va_end) ?
      Intrinsic::vaend : Intrinsic::vastart;
    return RValue::get(Builder.CreateCall(CGM.getIntrinsic(inst), ArgValue));
  }
  case Builtin::BI__builtin_va_copy: {
    Value *DstPtr = EmitVAListRef(E->getArg(0));
    Value *SrcPtr = EmitVAListRef(E->getArg(1));

    const llvm::Type *Type = llvm::Type::getInt8PtrTy(VMContext);

    DstPtr = Builder.CreateBitCast(DstPtr, Type);
    SrcPtr = Builder.CreateBitCast(SrcPtr, Type);
    return RValue::get(Builder.CreateCall2(CGM.getIntrinsic(Intrinsic::vacopy),
                                           DstPtr, SrcPtr));
  }
  case Builtin::BI__builtin_abs: {
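    // abs(x) -> (x >= 0) ? x : -x, emitted as a compare and select.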
    Value *ArgValue = EmitScalarExpr(E->getArg(0));

    Value *NegOp = Builder.CreateNeg(ArgValue, "neg");
    Value *CmpResult =
      Builder.CreateICmpSGE(ArgValue,
                            llvm::Constant::getNullValue(ArgValue->getType()),
                            "abscond");
    Value *Result =
      Builder.CreateSelect(CmpResult, ArgValue, NegOp, "abs");

    return RValue::get(Result);
  }
  case Builtin::BI__builtin_ctz:
  case Builtin::BI__builtin_ctzl:
  case Builtin::BI__builtin_ctzll: {
    Value *ArgValue = EmitScalarExpr(E->getArg(0));

    const llvm::Type *ArgType = ArgValue->getType();
    Value *F = CGM.getIntrinsic(Intrinsic::cttz, &ArgType, 1);

    const llvm::Type *ResultType = ConvertType(E->getType());
    Value *Result = Builder.CreateCall(F, ArgValue, "tmp");
    if (Result->getType() != ResultType)
      Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
                                     "cast");
    return RValue::get(Result);
  }
  case Builtin::BI__builtin_clz:
  case Builtin::BI__builtin_clzl:
  case Builtin::BI__builtin_clzll: {
    Value *ArgValue = EmitScalarExpr(E->getArg(0));

    const llvm::Type *ArgType = ArgValue->getType();
    Value *F = CGM.getIntrinsic(Intrinsic::ctlz, &ArgType, 1);

    const llvm::Type *ResultType = ConvertType(E->getType());
    Value *Result = Builder.CreateCall(F, ArgValue, "tmp");
    if (Result->getType() != ResultType)
      Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
                                     "cast");
    return RValue::get(Result);
  }
  case Builtin::BI__builtin_ffs:
  case Builtin::BI__builtin_ffsl:
  case Builtin::BI__builtin_ffsll: {
    // ffs(x) -> x ? cttz(x) + 1 : 0
    Value *ArgValue = EmitScalarExpr(E->getArg(0));

    const llvm::Type *ArgType = ArgValue->getType();
    Value *F = CGM.getIntrinsic(Intrinsic::cttz, &ArgType, 1);

    const llvm::Type *ResultType = ConvertType(E->getType());
    Value *Tmp = Builder.CreateAdd(Builder.CreateCall(F, ArgValue, "tmp"),
                                   llvm::ConstantInt::get(ArgType, 1), "tmp");
    Value *Zero = llvm::Constant::getNullValue(ArgType);
    Value *IsZero = Builder.CreateICmpEQ(ArgValue, Zero, "iszero");
    Value *Result = Builder.CreateSelect(IsZero, Zero, Tmp, "ffs");
    if (Result->getType() != ResultType)
      Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
                                     "cast");
    return RValue::get(Result);
  }
  case Builtin::BI__builtin_parity:
  case Builtin::BI__builtin_parityl:
  case Builtin::BI__builtin_parityll: {
    // parity(x) -> ctpop(x) & 1
    Value *ArgValue = EmitScalarExpr(E->getArg(0));

    const llvm::Type *ArgType = ArgValue->getType();
    Value *F = CGM.getIntrinsic(Intrinsic::ctpop, &ArgType, 1);

    const llvm::Type *ResultType = ConvertType(E->getType());
    Value *Tmp = Builder.CreateCall(F, ArgValue, "tmp");
    Value *Result = Builder.CreateAnd(Tmp, llvm::ConstantInt::get(ArgType, 1),
                                      "tmp");
    if (Result->getType() != ResultType)
      Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
                                     "cast");
    return RValue::get(Result);
  }
  case Builtin::BI__builtin_popcount:
  case Builtin::BI__builtin_popcountl:
  case Builtin::BI__builtin_popcountll: {
    Value *ArgValue = EmitScalarExpr(E->getArg(0));

    const llvm::Type *ArgType = ArgValue->getType();
    Value *F = CGM.getIntrinsic(Intrinsic::ctpop, &ArgType, 1);

    const llvm::Type *ResultType = ConvertType(E->getType());
    Value *Result = Builder.CreateCall(F, ArgValue, "tmp");
    if (Result->getType() != ResultType)
      Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
                                     "cast");
    return RValue::get(Result);
  }
  case Builtin::BI__builtin_expect:
    // FIXME: pass expect through to LLVM
    return RValue::get(EmitScalarExpr(E->getArg(0)));
  case Builtin::BI__builtin_bswap32:
  case Builtin::BI__builtin_bswap64: {
    Value *ArgValue = EmitScalarExpr(E->getArg(0));
    const llvm::Type *ArgType = ArgValue->getType();
    Value *F = CGM.getIntrinsic(Intrinsic::bswap, &ArgType, 1);
    return RValue::get(Builder.CreateCall(F, ArgValue, "tmp"));
  }
  case Builtin::BI__builtin_object_size: {
    // We pass this builtin on to the optimizer so that it can
    // figure out the object size in more complex cases.
    const llvm::Type *ResType[] = {
      ConvertType(E->getType())
    };
    Value *F = CGM.getIntrinsic(Intrinsic::objectsize, ResType, 1);
    return RValue::get(Builder.CreateCall2(F,
                                           EmitScalarExpr(E->getArg(0)),
                                           EmitScalarExpr(E->getArg(1))));
  }
  case Builtin::BI__builtin_prefetch: {
    Value *Locality, *RW, *Address = EmitScalarExpr(E->getArg(0));
    // FIXME: Technically these constants should be of type 'int', yes?
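    // The defaults follow the GCC documentation for __builtin_prefetch:
    // rw = 0 (prepare for a read) and locality = 3 (high temporal locality).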
    RW = (E->getNumArgs() > 1) ? EmitScalarExpr(E->getArg(1)) :
      llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 0);
    Locality = (E->getNumArgs() > 2) ? EmitScalarExpr(E->getArg(2)) :
      llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 3);
    Value *F = CGM.getIntrinsic(Intrinsic::prefetch, 0, 0);
    return RValue::get(Builder.CreateCall3(F, Address, RW, Locality));
  }
  case Builtin::BI__builtin_trap: {
    Value *F = CGM.getIntrinsic(Intrinsic::trap, 0, 0);
    return RValue::get(Builder.CreateCall(F));
  }
  case Builtin::BI__builtin_unreachable: {
    if (CatchUndefined && HaveInsertPoint())
      EmitBranch(getTrapBB());
    Value *V = Builder.CreateUnreachable();
    Builder.ClearInsertionPoint();
    return RValue::get(V);
  }

  case Builtin::BI__builtin_powi:
  case Builtin::BI__builtin_powif:
  case Builtin::BI__builtin_powil: {
    Value *Base = EmitScalarExpr(E->getArg(0));
    Value *Exponent = EmitScalarExpr(E->getArg(1));
    const llvm::Type *ArgType = Base->getType();
    Value *F = CGM.getIntrinsic(Intrinsic::powi, &ArgType, 1);
    return RValue::get(Builder.CreateCall2(F, Base, Exponent, "tmp"));
  }

  case Builtin::BI__builtin_isgreater:
  case Builtin::BI__builtin_isgreaterequal:
  case Builtin::BI__builtin_isless:
  case Builtin::BI__builtin_islessequal:
  case Builtin::BI__builtin_islessgreater:
  case Builtin::BI__builtin_isunordered: {
    // Ordered comparisons: we know the arguments to these are matching scalar
    // floating point values.
    Value *LHS = EmitScalarExpr(E->getArg(0));
    Value *RHS = EmitScalarExpr(E->getArg(1));

    switch (BuiltinID) {
    default: assert(0 && "Unknown ordered comparison");
    case Builtin::BI__builtin_isgreater:
      LHS = Builder.CreateFCmpOGT(LHS, RHS, "cmp");
      break;
    case Builtin::BI__builtin_isgreaterequal:
      LHS = Builder.CreateFCmpOGE(LHS, RHS, "cmp");
      break;
    case Builtin::BI__builtin_isless:
      LHS = Builder.CreateFCmpOLT(LHS, RHS, "cmp");
      break;
    case Builtin::BI__builtin_islessequal:
      LHS = Builder.CreateFCmpOLE(LHS, RHS, "cmp");
      break;
    case Builtin::BI__builtin_islessgreater:
      LHS = Builder.CreateFCmpONE(LHS, RHS, "cmp");
      break;
    case Builtin::BI__builtin_isunordered:
      LHS = Builder.CreateFCmpUNO(LHS, RHS, "cmp");
      break;
    }
    // ZExt bool to int type.
    return RValue::get(Builder.CreateZExt(LHS, ConvertType(E->getType()),
                                          "tmp"));
  }
  case Builtin::BI__builtin_isnan: {
    Value *V = EmitScalarExpr(E->getArg(0));
    V = Builder.CreateFCmpUNO(V, V, "cmp");
    return RValue::get(Builder.CreateZExt(V, ConvertType(E->getType()), "tmp"));
  }
  case Builtin::BIalloca:
  case Builtin::BI__builtin_alloca: {
    // FIXME: LLVM IR should allow alloca with an i64 size!
    Value *Size = EmitScalarExpr(E->getArg(0));
    Size = Builder.CreateIntCast(Size, llvm::Type::getInt32Ty(VMContext),
                                 false, "tmp");
    return RValue::get(Builder.CreateAlloca(llvm::Type::getInt8Ty(VMContext),
                                            Size, "tmp"));
  }
  case Builtin::BI__builtin_bzero: {
    Value *Address = EmitScalarExpr(E->getArg(0));
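    // getMemSetFn() returns llvm.memset, which takes (dest, byte value,
    // length, alignment) in this revision; the trailing i32 1 asserts no
    // particular alignment of the destination.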
    Builder.CreateCall4(CGM.getMemSetFn(), Address,
                        llvm::ConstantInt::get(llvm::Type::getInt8Ty(VMContext), 0),
                        EmitScalarExpr(E->getArg(1)),
                        llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 1));
    return RValue::get(Address);
  }
  case Builtin::BImemcpy:
  case Builtin::BI__builtin_memcpy: {
    Value *Address = EmitScalarExpr(E->getArg(0));
    Builder.CreateCall4(CGM.getMemCpyFn(), Address,
                        EmitScalarExpr(E->getArg(1)),
                        EmitScalarExpr(E->getArg(2)),
                        llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 1));
    return RValue::get(Address);
  }
  case Builtin::BImemmove:
  case Builtin::BI__builtin_memmove: {
    Value *Address = EmitScalarExpr(E->getArg(0));
    Builder.CreateCall4(CGM.getMemMoveFn(), Address,
                        EmitScalarExpr(E->getArg(1)),
                        EmitScalarExpr(E->getArg(2)),
                        llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 1));
    return RValue::get(Address);
  }
  case Builtin::BImemset:
  case Builtin::BI__builtin_memset: {
    Value *Address = EmitScalarExpr(E->getArg(0));
    Builder.CreateCall4(CGM.getMemSetFn(), Address,
                        Builder.CreateTrunc(EmitScalarExpr(E->getArg(1)),
                                            llvm::Type::getInt8Ty(VMContext)),
                        EmitScalarExpr(E->getArg(2)),
                        llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 1));
    return RValue::get(Address);
  }
  case Builtin::BI__builtin_return_address: {
    Value *F = CGM.getIntrinsic(Intrinsic::returnaddress, 0, 0);
    return RValue::get(Builder.CreateCall(F, EmitScalarExpr(E->getArg(0))));
  }
  case Builtin::BI__builtin_frame_address: {
    Value *F = CGM.getIntrinsic(Intrinsic::frameaddress, 0, 0);
    return RValue::get(Builder.CreateCall(F, EmitScalarExpr(E->getArg(0))));
  }
  case Builtin::BI__builtin_extract_return_addr: {
    // FIXME: There should be a target hook for this
    return RValue::get(EmitScalarExpr(E->getArg(0)));
  }
  case Builtin::BI__builtin_unwind_init: {
    Value *F = CGM.getIntrinsic(Intrinsic::eh_unwind_init, 0, 0);
    return RValue::get(Builder.CreateCall(F));
  }
#if 0
  // FIXME: Finish/enable when LLVM backend support stabilizes
  case Builtin::BI__builtin_setjmp: {
    Value *Buf = EmitScalarExpr(E->getArg(0));
    // Store the frame pointer to the buffer
    Value *FrameAddrF = CGM.getIntrinsic(Intrinsic::frameaddress, 0, 0);
    Value *FrameAddr =
        Builder.CreateCall(FrameAddrF,
                           Constant::getNullValue(llvm::Type::getInt32Ty(VMContext)));
    Builder.CreateStore(FrameAddr, Buf);
    // Call the setjmp intrinsic
    Value *F = CGM.getIntrinsic(Intrinsic::eh_sjlj_setjmp, 0, 0);
    const llvm::Type *DestType = llvm::Type::getInt8PtrTy(VMContext);
    Buf = Builder.CreateBitCast(Buf, DestType);
    return RValue::get(Builder.CreateCall(F, Buf));
  }
  case Builtin::BI__builtin_longjmp: {
    Value *F = CGM.getIntrinsic(Intrinsic::eh_sjlj_longjmp, 0, 0);
    Value *Buf = EmitScalarExpr(E->getArg(0));
    const llvm::Type *DestType = llvm::Type::getInt8PtrTy(VMContext);
    Buf = Builder.CreateBitCast(Buf, DestType);
    return RValue::get(Builder.CreateCall(F, Buf));
  }
#endif
  case Builtin::BI__sync_fetch_and_add:
  case Builtin::BI__sync_fetch_and_sub:
  case Builtin::BI__sync_fetch_and_or:
  case Builtin::BI__sync_fetch_and_and:
  case Builtin::BI__sync_fetch_and_xor:
  case Builtin::BI__sync_add_and_fetch:
  case Builtin::BI__sync_sub_and_fetch:
  case Builtin::BI__sync_and_and_fetch:
  case Builtin::BI__sync_or_and_fetch:
  case Builtin::BI__sync_xor_and_fetch:
  case Builtin::BI__sync_val_compare_and_swap:
  case Builtin::BI__sync_bool_compare_and_swap:
  case Builtin::BI__sync_lock_test_and_set:
  case Builtin::BI__sync_lock_release:
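    // Sema rewrites the unsized __sync_* forms to the sized _N variants
    // handled below, so none of these should survive to IR generation.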
    assert(0 && "Shouldn't make it through sema");
  case Builtin::BI__sync_fetch_and_add_1:
  case Builtin::BI__sync_fetch_and_add_2:
  case Builtin::BI__sync_fetch_and_add_4:
  case Builtin::BI__sync_fetch_and_add_8:
  case Builtin::BI__sync_fetch_and_add_16:
    return EmitBinaryAtomic(*this, Intrinsic::atomic_load_add, E);
  case Builtin::BI__sync_fetch_and_sub_1:
  case Builtin::BI__sync_fetch_and_sub_2:
  case Builtin::BI__sync_fetch_and_sub_4:
  case Builtin::BI__sync_fetch_and_sub_8:
  case Builtin::BI__sync_fetch_and_sub_16:
    return EmitBinaryAtomic(*this, Intrinsic::atomic_load_sub, E);
  case Builtin::BI__sync_fetch_and_or_1:
  case Builtin::BI__sync_fetch_and_or_2:
  case Builtin::BI__sync_fetch_and_or_4:
  case Builtin::BI__sync_fetch_and_or_8:
  case Builtin::BI__sync_fetch_and_or_16:
    return EmitBinaryAtomic(*this, Intrinsic::atomic_load_or, E);
  case Builtin::BI__sync_fetch_and_and_1:
  case Builtin::BI__sync_fetch_and_and_2:
  case Builtin::BI__sync_fetch_and_and_4:
  case Builtin::BI__sync_fetch_and_and_8:
  case Builtin::BI__sync_fetch_and_and_16:
    return EmitBinaryAtomic(*this, Intrinsic::atomic_load_and, E);
  case Builtin::BI__sync_fetch_and_xor_1:
  case Builtin::BI__sync_fetch_and_xor_2:
  case Builtin::BI__sync_fetch_and_xor_4:
  case Builtin::BI__sync_fetch_and_xor_8:
  case Builtin::BI__sync_fetch_and_xor_16:
    return EmitBinaryAtomic(*this, Intrinsic::atomic_load_xor, E);
  case Builtin::BI__sync_fetch_and_nand_1:
  case Builtin::BI__sync_fetch_and_nand_2:
  case Builtin::BI__sync_fetch_and_nand_4:
  case Builtin::BI__sync_fetch_and_nand_8:
  case Builtin::BI__sync_fetch_and_nand_16:
    return EmitBinaryAtomic(*this, Intrinsic::atomic_load_nand, E);

  // Clang extensions: not overloaded yet.
  case Builtin::BI__sync_fetch_and_min:
    return EmitBinaryAtomic(*this, Intrinsic::atomic_load_min, E);
  case Builtin::BI__sync_fetch_and_max:
    return EmitBinaryAtomic(*this, Intrinsic::atomic_load_max, E);
  case Builtin::BI__sync_fetch_and_umin:
    return EmitBinaryAtomic(*this, Intrinsic::atomic_load_umin, E);
  case Builtin::BI__sync_fetch_and_umax:
    return EmitBinaryAtomic(*this, Intrinsic::atomic_load_umax, E);

  case Builtin::BI__sync_add_and_fetch_1:
  case Builtin::BI__sync_add_and_fetch_2:
  case Builtin::BI__sync_add_and_fetch_4:
  case Builtin::BI__sync_add_and_fetch_8:
  case Builtin::BI__sync_add_and_fetch_16:
    return EmitBinaryAtomicPost(*this, Intrinsic::atomic_load_add, E,
                                llvm::Instruction::Add);
  case Builtin::BI__sync_sub_and_fetch_1:
  case Builtin::BI__sync_sub_and_fetch_2:
  case Builtin::BI__sync_sub_and_fetch_4:
  case Builtin::BI__sync_sub_and_fetch_8:
  case Builtin::BI__sync_sub_and_fetch_16:
    return EmitBinaryAtomicPost(*this, Intrinsic::atomic_load_sub, E,
                                llvm::Instruction::Sub);
  case Builtin::BI__sync_and_and_fetch_1:
  case Builtin::BI__sync_and_and_fetch_2:
  case Builtin::BI__sync_and_and_fetch_4:
  case Builtin::BI__sync_and_and_fetch_8:
  case Builtin::BI__sync_and_and_fetch_16:
    return EmitBinaryAtomicPost(*this, Intrinsic::atomic_load_and, E,
                                llvm::Instruction::And);
  case Builtin::BI__sync_or_and_fetch_1:
  case Builtin::BI__sync_or_and_fetch_2:
  case Builtin::BI__sync_or_and_fetch_4:
  case Builtin::BI__sync_or_and_fetch_8:
  case Builtin::BI__sync_or_and_fetch_16:
    return EmitBinaryAtomicPost(*this, Intrinsic::atomic_load_or, E,
                                llvm::Instruction::Or);
  case Builtin::BI__sync_xor_and_fetch_1:
  case Builtin::BI__sync_xor_and_fetch_2:
  case Builtin::BI__sync_xor_and_fetch_4:
  case Builtin::BI__sync_xor_and_fetch_8:
  case Builtin::BI__sync_xor_and_fetch_16:
    return EmitBinaryAtomicPost(*this, Intrinsic::atomic_load_xor, E,
                                llvm::Instruction::Xor);
  case Builtin::BI__sync_nand_and_fetch_1:
  case Builtin::BI__sync_nand_and_fetch_2:
  case Builtin::BI__sync_nand_and_fetch_4:
  case Builtin::BI__sync_nand_and_fetch_8:
  case Builtin::BI__sync_nand_and_fetch_16:
    return EmitBinaryAtomicPost(*this, Intrinsic::atomic_load_nand, E,
                                llvm::Instruction::And);

  case Builtin::BI__sync_val_compare_and_swap_1:
  case Builtin::BI__sync_val_compare_and_swap_2:
  case Builtin::BI__sync_val_compare_and_swap_4:
  case Builtin::BI__sync_val_compare_and_swap_8:
  case Builtin::BI__sync_val_compare_and_swap_16:
  {
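    // __sync_val_compare_and_swap(ptr, oldval, newval) returns the value that
    // was stored at ptr before the operation, whether or not the swap
    // happened.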
    const llvm::Type *ResType[2];
    ResType[0] = ConvertType(E->getType());
    ResType[1] = ConvertType(E->getArg(0)->getType());
    Value *AtomF = CGM.getIntrinsic(Intrinsic::atomic_cmp_swap, ResType, 2);
    return RValue::get(Builder.CreateCall3(AtomF,
                                           EmitScalarExpr(E->getArg(0)),
                                           EmitScalarExpr(E->getArg(1)),
                                           EmitScalarExpr(E->getArg(2))));
  }

  case Builtin::BI__sync_bool_compare_and_swap_1:
  case Builtin::BI__sync_bool_compare_and_swap_2:
  case Builtin::BI__sync_bool_compare_and_swap_4:
  case Builtin::BI__sync_bool_compare_and_swap_8:
  case Builtin::BI__sync_bool_compare_and_swap_16:
  {
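    // __sync_bool_compare_and_swap returns true iff the value previously at
    // ptr matched OldVal, i.e. iff the swap was performed.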
    const llvm::Type *ResType[2];
    ResType[0] = ConvertType(E->getArg(1)->getType());
    ResType[1] = llvm::PointerType::getUnqual(ResType[0]);
    Value *AtomF = CGM.getIntrinsic(Intrinsic::atomic_cmp_swap, ResType, 2);
    Value *OldVal = EmitScalarExpr(E->getArg(1));
    Value *PrevVal = Builder.CreateCall3(AtomF,
                                         EmitScalarExpr(E->getArg(0)),
                                         OldVal,
                                         EmitScalarExpr(E->getArg(2)));
    Value *Result = Builder.CreateICmpEQ(PrevVal, OldVal);
    // zext bool to int.
    return RValue::get(Builder.CreateZExt(Result, ConvertType(E->getType())));
  }

  case Builtin::BI__sync_lock_test_and_set_1:
  case Builtin::BI__sync_lock_test_and_set_2:
  case Builtin::BI__sync_lock_test_and_set_4:
  case Builtin::BI__sync_lock_test_and_set_8:
  case Builtin::BI__sync_lock_test_and_set_16:
    return EmitBinaryAtomic(*this, Intrinsic::atomic_swap, E);
  case Builtin::BI__sync_lock_release_1:
  case Builtin::BI__sync_lock_release_2:
  case Builtin::BI__sync_lock_release_4:
  case Builtin::BI__sync_lock_release_8:
  case Builtin::BI__sync_lock_release_16: {
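    // GCC documents __sync_lock_release as writing the constant 0 to the
    // lock; emit that as a volatile store.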
    Value *Ptr = EmitScalarExpr(E->getArg(0));
    const llvm::Type *ElTy =
      cast<llvm::PointerType>(Ptr->getType())->getElementType();
    llvm::StoreInst *Store =
      Builder.CreateStore(llvm::Constant::getNullValue(ElTy), Ptr);
    Store->setVolatile(true);
    return RValue::get(0);
  }

  case Builtin::BI__sync_synchronize: {
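    // llvm.memory.barrier takes five i1 flags: four selecting which
    // load/store orderings to enforce, plus a device flag. Setting the four
    // ordering flags and clearing the device flag requests a full barrier.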
    Value *C[5];
    C[0] = C[1] = C[2] = C[3] =
      llvm::ConstantInt::get(llvm::Type::getInt1Ty(VMContext), 1);
    C[4] = llvm::ConstantInt::get(llvm::Type::getInt1Ty(VMContext), 0);
    Builder.CreateCall(CGM.getIntrinsic(Intrinsic::memory_barrier), C, C + 5);
    return RValue::get(0);
  }

  // Library functions with special handling.
  case Builtin::BIsqrt:
  case Builtin::BIsqrtf:
  case Builtin::BIsqrtl: {
    // Rewrite sqrt to intrinsic if allowed.
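    // (llvm.sqrt does not model the errno side effect of the libm call; the
    // const attribute tells us the program does not rely on it, making the
    // rewrite safe.)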
    if (!FD->hasAttr<ConstAttr>())
      break;
    Value *Arg0 = EmitScalarExpr(E->getArg(0));
    const llvm::Type *ArgType = Arg0->getType();
    Value *F = CGM.getIntrinsic(Intrinsic::sqrt, &ArgType, 1);
    return RValue::get(Builder.CreateCall(F, Arg0, "tmp"));
  }

  case Builtin::BIpow:
  case Builtin::BIpowf:
  case Builtin::BIpowl: {
    // Rewrite pow to intrinsic if allowed.
    if (!FD->hasAttr<ConstAttr>())
      break;
    Value *Base = EmitScalarExpr(E->getArg(0));
    Value *Exponent = EmitScalarExpr(E->getArg(1));
    const llvm::Type *ArgType = Base->getType();
    Value *F = CGM.getIntrinsic(Intrinsic::pow, &ArgType, 1);
    return RValue::get(Builder.CreateCall2(F, Base, Exponent, "tmp"));
  }
  }

  // If this is an alias for a libm function (e.g. __builtin_sin) turn it into
  // that function.
  if (getContext().BuiltinInfo.isLibFunction(BuiltinID) ||
      getContext().BuiltinInfo.isPredefinedLibFunction(BuiltinID))
    return EmitCall(CGM.getBuiltinLibFunction(FD, BuiltinID),
                    E->getCallee()->getType(), E->arg_begin(),
                    E->arg_end());

  // See if we have a target specific intrinsic.
  const char *Name = getContext().BuiltinInfo.GetName(BuiltinID);
  Intrinsic::ID IntrinsicID = Intrinsic::not_intrinsic;
  if (const char *Prefix =
      llvm::Triple::getArchTypePrefix(Target.getTriple().getArch()))
    IntrinsicID = Intrinsic::getIntrinsicForGCCBuiltin(Prefix, Name);

  if (IntrinsicID != Intrinsic::not_intrinsic) {
    SmallVector<Value*, 16> Args;

    Function *F = CGM.getIntrinsic(IntrinsicID);
    const llvm::FunctionType *FTy = F->getFunctionType();

    for (unsigned i = 0, e = E->getNumArgs(); i != e; ++i) {
      Value *ArgValue = EmitScalarExpr(E->getArg(i));

      // If the intrinsic arg type is different from the builtin arg type
      // we need to do a bit cast.
      const llvm::Type *PTy = FTy->getParamType(i);
      if (PTy != ArgValue->getType()) {
        assert(ArgValue->getType()->canLosslesslyBitCastTo(PTy) &&
               "Must be able to losslessly bit cast to param");
        ArgValue = Builder.CreateBitCast(ArgValue, PTy);
      }

      Args.push_back(ArgValue);
    }

    Value *V = Builder.CreateCall(F, Args.data(), Args.data() + Args.size());
    QualType BuiltinRetType = E->getType();

    const llvm::Type *RetTy = llvm::Type::getVoidTy(VMContext);
    if (!BuiltinRetType->isVoidType()) RetTy = ConvertType(BuiltinRetType);

    if (RetTy != V->getType()) {
      assert(V->getType()->canLosslesslyBitCastTo(RetTy) &&
             "Must be able to losslessly bit cast result type");
      V = Builder.CreateBitCast(V, RetTy);
    }

    return RValue::get(V);
  }

  // See if we have a target specific builtin that needs to be lowered.
  if (Value *V = EmitTargetBuiltinExpr(BuiltinID, E))
    return RValue::get(V);

  ErrorUnsupported(E, "builtin function");

  // Unknown builtin, for now just dump it out and return undef.
  if (hasAggregateLLVMType(E->getType()))
    return RValue::getAggregate(CreateTempAlloca(ConvertType(E->getType())));
  return RValue::get(llvm::UndefValue::get(ConvertType(E->getType())));
}

Value *CodeGenFunction::EmitTargetBuiltinExpr(unsigned BuiltinID,
                                              const CallExpr *E) {
  switch (Target.getTriple().getArch()) {
  case llvm::Triple::x86:
  case llvm::Triple::x86_64:
    return EmitX86BuiltinExpr(BuiltinID, E);
  case llvm::Triple::ppc:
  case llvm::Triple::ppc64:
    return EmitPPCBuiltinExpr(BuiltinID, E);
  default:
    return 0;
  }
}

Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
                                           const CallExpr *E) {
  llvm::SmallVector<Value*, 4> Ops;

  for (unsigned i = 0, e = E->getNumArgs(); i != e; i++)
    Ops.push_back(EmitScalarExpr(E->getArg(i)));

  switch (BuiltinID) {
  default: return 0;
  case X86::BI__builtin_ia32_pslldi128:
  case X86::BI__builtin_ia32_psllqi128:
  case X86::BI__builtin_ia32_psllwi128:
  case X86::BI__builtin_ia32_psradi128:
  case X86::BI__builtin_ia32_psrawi128:
  case X86::BI__builtin_ia32_psrldi128:
  case X86::BI__builtin_ia32_psrlqi128:
  case X86::BI__builtin_ia32_psrlwi128: {
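    // The SSE2 shift intrinsics take the count in an XMM register: widen the
    // scalar count to i64, insert it into element 0 of a v2i64, and bitcast
    // to the vector type of the value being shifted.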
    Ops[1] = Builder.CreateZExt(Ops[1], llvm::Type::getInt64Ty(VMContext),
                                "zext");
    const llvm::Type *Ty =
      llvm::VectorType::get(llvm::Type::getInt64Ty(VMContext), 2);
    llvm::Value *Zero =
      llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 0);
    Ops[1] = Builder.CreateInsertElement(llvm::UndefValue::get(Ty),
                                         Ops[1], Zero, "insert");
    Ops[1] = Builder.CreateBitCast(Ops[1], Ops[0]->getType(), "bitcast");
    const char *name = 0;
    Intrinsic::ID ID = Intrinsic::not_intrinsic;

    switch (BuiltinID) {
    default: assert(0 && "Unsupported shift intrinsic!");
    case X86::BI__builtin_ia32_pslldi128:
      name = "pslldi";
      ID = Intrinsic::x86_sse2_psll_d;
      break;
    case X86::BI__builtin_ia32_psllqi128:
      name = "psllqi";
      ID = Intrinsic::x86_sse2_psll_q;
      break;
    case X86::BI__builtin_ia32_psllwi128:
      name = "psllwi";
      ID = Intrinsic::x86_sse2_psll_w;
      break;
    case X86::BI__builtin_ia32_psradi128:
      name = "psradi";
      ID = Intrinsic::x86_sse2_psra_d;
      break;
    case X86::BI__builtin_ia32_psrawi128:
      name = "psrawi";
      ID = Intrinsic::x86_sse2_psra_w;
      break;
    case X86::BI__builtin_ia32_psrldi128:
      name = "psrldi";
      ID = Intrinsic::x86_sse2_psrl_d;
      break;
    case X86::BI__builtin_ia32_psrlqi128:
      name = "psrlqi";
      ID = Intrinsic::x86_sse2_psrl_q;
      break;
    case X86::BI__builtin_ia32_psrlwi128:
      name = "psrlwi";
      ID = Intrinsic::x86_sse2_psrl_w;
      break;
    }
    llvm::Function *F = CGM.getIntrinsic(ID);
    return Builder.CreateCall(F, &Ops[0], &Ops[0] + Ops.size(), name);
  }
  case X86::BI__builtin_ia32_pslldi:
  case X86::BI__builtin_ia32_psllqi:
  case X86::BI__builtin_ia32_psllwi:
  case X86::BI__builtin_ia32_psradi:
  case X86::BI__builtin_ia32_psrawi:
  case X86::BI__builtin_ia32_psrldi:
  case X86::BI__builtin_ia32_psrlqi:
  case X86::BI__builtin_ia32_psrlwi: {
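    // Likewise for MMX: the shift intrinsics take the count in an MMX
    // register, so widen the scalar count to i64 and reinterpret it as v1i64.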
    Ops[1] = Builder.CreateZExt(Ops[1], llvm::Type::getInt64Ty(VMContext),
                                "zext");
    const llvm::Type *Ty =
      llvm::VectorType::get(llvm::Type::getInt64Ty(VMContext), 1);
    Ops[1] = Builder.CreateBitCast(Ops[1], Ty, "bitcast");
    const char *name = 0;
    Intrinsic::ID ID = Intrinsic::not_intrinsic;

    switch (BuiltinID) {
    default: assert(0 && "Unsupported shift intrinsic!");
    case X86::BI__builtin_ia32_pslldi:
      name = "pslldi";
      ID = Intrinsic::x86_mmx_psll_d;
      break;
    case X86::BI__builtin_ia32_psllqi:
      name = "psllqi";
      ID = Intrinsic::x86_mmx_psll_q;
      break;
    case X86::BI__builtin_ia32_psllwi:
      name = "psllwi";
      ID = Intrinsic::x86_mmx_psll_w;
      break;
    case X86::BI__builtin_ia32_psradi:
      name = "psradi";
      ID = Intrinsic::x86_mmx_psra_d;
      break;
    case X86::BI__builtin_ia32_psrawi:
      name = "psrawi";
      ID = Intrinsic::x86_mmx_psra_w;
      break;
    case X86::BI__builtin_ia32_psrldi:
      name = "psrldi";
      ID = Intrinsic::x86_mmx_psrl_d;
      break;
    case X86::BI__builtin_ia32_psrlqi:
      name = "psrlqi";
      ID = Intrinsic::x86_mmx_psrl_q;
      break;
    case X86::BI__builtin_ia32_psrlwi:
      name = "psrlwi";
      ID = Intrinsic::x86_mmx_psrl_w;
      break;
    }
    llvm::Function *F = CGM.getIntrinsic(ID);
    return Builder.CreateCall(F, &Ops[0], &Ops[0] + Ops.size(), name);
  }
  case X86::BI__builtin_ia32_cmpps: {
    llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_sse_cmp_ps);
    return Builder.CreateCall(F, &Ops[0], &Ops[0] + Ops.size(), "cmpps");
  }
  case X86::BI__builtin_ia32_cmpss: {
    llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_sse_cmp_ss);
    return Builder.CreateCall(F, &Ops[0], &Ops[0] + Ops.size(), "cmpss");
  }
  case X86::BI__builtin_ia32_ldmxcsr: {
    const llvm::Type *PtrTy = llvm::Type::getInt8PtrTy(VMContext);
    Value *One = llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 1);
    Value *Tmp = Builder.CreateAlloca(llvm::Type::getInt32Ty(VMContext), One,
                                      "tmp");
    Builder.CreateStore(Ops[0], Tmp);
    return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse_ldmxcsr),
                              Builder.CreateBitCast(Tmp, PtrTy));
  }
  case X86::BI__builtin_ia32_stmxcsr: {
    const llvm::Type *PtrTy = llvm::Type::getInt8PtrTy(VMContext);
    Value *One = llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 1);
    Value *Tmp = Builder.CreateAlloca(llvm::Type::getInt32Ty(VMContext), One,
                                      "tmp");
    One = Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse_stmxcsr),
                             Builder.CreateBitCast(Tmp, PtrTy));
    return Builder.CreateLoad(Tmp, "stmxcsr");
  }
  case X86::BI__builtin_ia32_cmppd: {
    llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_sse2_cmp_pd);
    return Builder.CreateCall(F, &Ops[0], &Ops[0] + Ops.size(), "cmppd");
  }
  case X86::BI__builtin_ia32_cmpsd: {
    llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_sse2_cmp_sd);
    return Builder.CreateCall(F, &Ops[0], &Ops[0] + Ops.size(), "cmpsd");
  }
  case X86::BI__builtin_ia32_storehps:
  case X86::BI__builtin_ia32_storelps: {
    const llvm::Type *EltTy = llvm::Type::getInt64Ty(VMContext);
    llvm::Type *PtrTy = llvm::PointerType::getUnqual(EltTy);
    llvm::Type *VecTy = llvm::VectorType::get(EltTy, 2);

    // cast val to v2i64
    Ops[1] = Builder.CreateBitCast(Ops[1], VecTy, "cast");

    // extract element 0 (storelps) or 1 (storehps)
    unsigned Index = BuiltinID == X86::BI__builtin_ia32_storelps ? 0 : 1;
    llvm::Value *Idx =
      llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), Index);
    Ops[1] = Builder.CreateExtractElement(Ops[1], Idx, "extract");

    // cast pointer to i64 & store
    Ops[0] = Builder.CreateBitCast(Ops[0], PtrTy);
    return Builder.CreateStore(Ops[1], Ops[0]);
  }
  case X86::BI__builtin_ia32_palignr: {
    Function *F = CGM.getIntrinsic(Intrinsic::x86_ssse3_palign_r);
    return Builder.CreateCall(F, &Ops[0], &Ops[0] + Ops.size());
  }
  case X86::BI__builtin_ia32_palignr128: {
    unsigned shiftVal = cast<llvm::ConstantInt>(Ops[2])->getZExtValue();

    // If palignr is shifting the pair of input vectors less than 17 bytes,
    // emit a shuffle instruction.
    if (shiftVal <= 16) {
      const llvm::Type *IntTy = llvm::Type::getInt32Ty(VMContext);

      llvm::SmallVector<llvm::Constant*, 16> Indices;
      for (unsigned i = 0; i != 16; ++i)
        Indices.push_back(llvm::ConstantInt::get(IntTy, shiftVal + i));

      Value* SV = llvm::ConstantVector::get(Indices.begin(), Indices.size());
      return Builder.CreateShuffleVector(Ops[1], Ops[0], SV, "palignr");
    }

    // If palignr is shifting the pair of input vectors more than 16 but less
    // than 32 bytes, emit a logical right shift of the destination.
    if (shiftVal < 32) {
      const llvm::Type *EltTy = llvm::Type::getInt64Ty(VMContext);
      const llvm::Type *VecTy = llvm::VectorType::get(EltTy, 2);
      const llvm::Type *IntTy = llvm::Type::getInt32Ty(VMContext);

      Ops[0] = Builder.CreateBitCast(Ops[0], VecTy, "cast");
      // The psrl.dq shift count is expressed in bits, as an i32 constant.
      Ops[1] = llvm::ConstantInt::get(IntTy, (shiftVal-16) * 8);

      llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_sse2_psrl_dq);
      return Builder.CreateCall(F, &Ops[0], &Ops[0] + 2, "palignr");
    }

    // If palignr is shifting the pair of vectors 32 bytes or more, emit zero.
    return llvm::Constant::getNullValue(ConvertType(E->getType()));
  }
  }
}

Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
                                           const CallExpr *E) {
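  // No PPC-specific builtins are lowered yet; returning 0 routes the call to
  // the generic unsupported-builtin diagnostic in EmitBuiltinExpr.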
  return 0;
}

858