// CGBuiltin.cpp revision fba565d044a8979cfd916ce52655a6847bfaa601
//===---- CGBuiltin.cpp - Emit LLVM Code for builtins ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Builtin calls as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/AST/APValue.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Decl.h"
#include "clang/Basic/TargetBuiltins.h"
#include "llvm/Intrinsics.h"
using namespace clang;
using namespace CodeGen;
using namespace llvm;

/// Utility to insert an atomic instruction based on Intrinsic::ID
/// and the expression node.
static RValue EmitBinaryAtomic(CodeGenFunction &CGF,
                               Intrinsic::ID Id, const CallExpr *E) {
  const llvm::Type *ResType[2];
  ResType[0] = CGF.ConvertType(E->getType());
  ResType[1] = CGF.ConvertType(E->getArg(0)->getType());
  Value *AtomF = CGF.CGM.getIntrinsic(Id, ResType, 2);
  return RValue::get(CGF.Builder.CreateCall2(AtomF,
                                             CGF.EmitScalarExpr(E->getArg(0)),
                                             CGF.EmitScalarExpr(E->getArg(1))));
}
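// As an illustration (not emitted verbatim), a call such as
// __sync_fetch_and_add(&i, 1) with 'int i' should lower through the helper
// above to roughly:
//   %old = call i32 @llvm.atomic.load.add.i32.p0i32(i32* %i, i32 1)
// where the overloaded intrinsic name is derived from the two types placed
// in ResType.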

/// Utility to insert an atomic instruction based on Intrinsic::ID and
/// the expression node, where the return value is the result of the
/// operation.
static RValue EmitBinaryAtomicPost(CodeGenFunction &CGF,
                                   Intrinsic::ID Id, const CallExpr *E,
                                   Instruction::BinaryOps Op) {
  const llvm::Type *ResType[2];
  ResType[0] = CGF.ConvertType(E->getType());
  ResType[1] = CGF.ConvertType(E->getArg(0)->getType());
  Value *AtomF = CGF.CGM.getIntrinsic(Id, ResType, 2);
  Value *Ptr = CGF.EmitScalarExpr(E->getArg(0));
  Value *Operand = CGF.EmitScalarExpr(E->getArg(1));
  Value *Result = CGF.Builder.CreateCall2(AtomF, Ptr, Operand);

  if (Id == Intrinsic::atomic_load_nand)
    Result = CGF.Builder.CreateNot(Result);

  return RValue::get(CGF.Builder.CreateBinOp(Op, Result, Operand));
}
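// In other words, the *_and_fetch forms recompute the new value from the old
// value the intrinsic returns: e.g. __sync_add_and_fetch(&i, v) becomes
// "atomic.load.add(&i, v) + v". For nand the intrinsic's result is
// complemented first, so the value recomputed here is (~old & v).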

RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
                                        unsigned BuiltinID, const CallExpr *E) {
  // See if we can constant fold this builtin.  If so, don't emit it at all.
  Expr::EvalResult Result;
  if (E->Evaluate(Result, CGM.getContext())) {
    if (Result.Val.isInt())
      return RValue::get(llvm::ConstantInt::get(VMContext,
                                                Result.Val.getInt()));
    else if (Result.Val.isFloat())
      return RValue::get(ConstantFP::get(VMContext, Result.Val.getFloat()));
  }

  switch (BuiltinID) {
  default: break;  // Handle intrinsics and libm functions below.
  case Builtin::BI__builtin___CFStringMakeConstantString:
    return RValue::get(CGM.EmitConstantExpr(E, E->getType(), 0));
  case Builtin::BI__builtin_stdarg_start:
  case Builtin::BI__builtin_va_start:
  case Builtin::BI__builtin_va_end: {
    Value *ArgValue = EmitVAListRef(E->getArg(0));
    const llvm::Type *DestType = llvm::Type::getInt8PtrTy(VMContext);
    if (ArgValue->getType() != DestType)
      ArgValue = Builder.CreateBitCast(ArgValue, DestType,
                                       ArgValue->getName().data());

    Intrinsic::ID inst = (BuiltinID == Builtin::BI__builtin_va_end) ?
      Intrinsic::vaend : Intrinsic::vastart;
    return RValue::get(Builder.CreateCall(CGM.getIntrinsic(inst), ArgValue));
  }
  case Builtin::BI__builtin_va_copy: {
    Value *DstPtr = EmitVAListRef(E->getArg(0));
    Value *SrcPtr = EmitVAListRef(E->getArg(1));

    const llvm::Type *Type = llvm::Type::getInt8PtrTy(VMContext);

    DstPtr = Builder.CreateBitCast(DstPtr, Type);
    SrcPtr = Builder.CreateBitCast(SrcPtr, Type);
    return RValue::get(Builder.CreateCall2(CGM.getIntrinsic(Intrinsic::vacopy),
                                           DstPtr, SrcPtr));
  }
  case Builtin::BI__builtin_abs: {
    Value *ArgValue = EmitScalarExpr(E->getArg(0));

    Value *NegOp = Builder.CreateNeg(ArgValue, "neg");
    Value *CmpResult =
      Builder.CreateICmpSGE(ArgValue,
                            llvm::Constant::getNullValue(ArgValue->getType()),
                            "abscond");
    Value *Result =
      Builder.CreateSelect(CmpResult, ArgValue, NegOp, "abs");

    return RValue::get(Result);
  }
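  // e.g. __builtin_abs(-5) selects between -5 and its negation based on the
  // sign check above, yielding 5. As with library abs(), the negation
  // overflows for INT_MIN, so that case remains undefined.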
  case Builtin::BI__builtin_ctz:
  case Builtin::BI__builtin_ctzl:
  case Builtin::BI__builtin_ctzll: {
    Value *ArgValue = EmitScalarExpr(E->getArg(0));

    const llvm::Type *ArgType = ArgValue->getType();
    Value *F = CGM.getIntrinsic(Intrinsic::cttz, &ArgType, 1);

    const llvm::Type *ResultType = ConvertType(E->getType());
    Value *Result = Builder.CreateCall(F, ArgValue, "tmp");
    if (Result->getType() != ResultType)
      Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
                                     "cast");
    return RValue::get(Result);
  }
  case Builtin::BI__builtin_clz:
  case Builtin::BI__builtin_clzl:
  case Builtin::BI__builtin_clzll: {
    Value *ArgValue = EmitScalarExpr(E->getArg(0));

    const llvm::Type *ArgType = ArgValue->getType();
    Value *F = CGM.getIntrinsic(Intrinsic::ctlz, &ArgType, 1);

    const llvm::Type *ResultType = ConvertType(E->getType());
    Value *Result = Builder.CreateCall(F, ArgValue, "tmp");
    if (Result->getType() != ResultType)
      Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
                                     "cast");
    return RValue::get(Result);
  }
  case Builtin::BI__builtin_ffs:
  case Builtin::BI__builtin_ffsl:
  case Builtin::BI__builtin_ffsll: {
    // ffs(x) -> x ? cttz(x) + 1 : 0
    Value *ArgValue = EmitScalarExpr(E->getArg(0));

    const llvm::Type *ArgType = ArgValue->getType();
    Value *F = CGM.getIntrinsic(Intrinsic::cttz, &ArgType, 1);

    const llvm::Type *ResultType = ConvertType(E->getType());
    Value *Tmp = Builder.CreateAdd(Builder.CreateCall(F, ArgValue, "tmp"),
                                   llvm::ConstantInt::get(ArgType, 1), "tmp");
    Value *Zero = llvm::Constant::getNullValue(ArgType);
    Value *IsZero = Builder.CreateICmpEQ(ArgValue, Zero, "iszero");
    Value *Result = Builder.CreateSelect(IsZero, Zero, Tmp, "ffs");
    if (Result->getType() != ResultType)
      Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
                                     "cast");
    return RValue::get(Result);
  }
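  // Worked example: for x = 8 (0b1000), cttz gives 3 and the add yields 4,
  // matching ffs(8) == 4; the select maps x == 0 to the required result 0.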
  case Builtin::BI__builtin_parity:
  case Builtin::BI__builtin_parityl:
  case Builtin::BI__builtin_parityll: {
    // parity(x) -> ctpop(x) & 1
    Value *ArgValue = EmitScalarExpr(E->getArg(0));

    const llvm::Type *ArgType = ArgValue->getType();
    Value *F = CGM.getIntrinsic(Intrinsic::ctpop, &ArgType, 1);

    const llvm::Type *ResultType = ConvertType(E->getType());
    Value *Tmp = Builder.CreateCall(F, ArgValue, "tmp");
    Value *Result = Builder.CreateAnd(Tmp, llvm::ConstantInt::get(ArgType, 1),
                                      "tmp");
    if (Result->getType() != ResultType)
      Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
                                     "cast");
    return RValue::get(Result);
  }
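  // e.g. x = 0b1011: ctpop gives 3, and 3 & 1 == 1, i.e. odd parity.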
  case Builtin::BI__builtin_popcount:
  case Builtin::BI__builtin_popcountl:
  case Builtin::BI__builtin_popcountll: {
    Value *ArgValue = EmitScalarExpr(E->getArg(0));

    const llvm::Type *ArgType = ArgValue->getType();
    Value *F = CGM.getIntrinsic(Intrinsic::ctpop, &ArgType, 1);

    const llvm::Type *ResultType = ConvertType(E->getType());
    Value *Result = Builder.CreateCall(F, ArgValue, "tmp");
    if (Result->getType() != ResultType)
      Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
                                     "cast");
    return RValue::get(Result);
  }
  case Builtin::BI__builtin_expect:
    // FIXME: pass expect through to LLVM
    return RValue::get(EmitScalarExpr(E->getArg(0)));
  case Builtin::BI__builtin_bswap32:
  case Builtin::BI__builtin_bswap64: {
    Value *ArgValue = EmitScalarExpr(E->getArg(0));
    const llvm::Type *ArgType = ArgValue->getType();
    Value *F = CGM.getIntrinsic(Intrinsic::bswap, &ArgType, 1);
    return RValue::get(Builder.CreateCall(F, ArgValue, "tmp"));
  }
  case Builtin::BI__builtin_object_size: {
    // We pass this builtin onto the optimizer so that it can
    // figure out the object size in more complex cases.
    const llvm::Type *ResType[] = {
      ConvertType(E->getType())
    };
    Value *F = CGM.getIntrinsic(Intrinsic::objectsize, ResType, 1);
    return RValue::get(Builder.CreateCall2(F,
                                           EmitScalarExpr(E->getArg(0)),
                                           EmitScalarExpr(E->getArg(1))));
  }
  case Builtin::BI__builtin_prefetch: {
    Value *Locality, *RW, *Address = EmitScalarExpr(E->getArg(0));
    // FIXME: Technically these constants should be of type 'int', yes?
    RW = (E->getNumArgs() > 1) ? EmitScalarExpr(E->getArg(1)) :
      llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 0);
    Locality = (E->getNumArgs() > 2) ? EmitScalarExpr(E->getArg(2)) :
      llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 3);
    Value *F = CGM.getIntrinsic(Intrinsic::prefetch, 0, 0);
    return RValue::get(Builder.CreateCall3(F, Address, RW, Locality));
  }
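  // The defaulted constants match the GCC documentation for
  // __builtin_prefetch: rw = 0 (prefetch for read) and locality = 3 (high
  // temporal locality), so __builtin_prefetch(p) behaves like
  // __builtin_prefetch(p, 0, 3).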
  case Builtin::BI__builtin_trap: {
    Value *F = CGM.getIntrinsic(Intrinsic::trap, 0, 0);
    return RValue::get(Builder.CreateCall(F));
  }
  case Builtin::BI__builtin_unreachable: {
    if (CatchUndefined && HaveInsertPoint())
      EmitBranch(getTrapBB());
    Value *V = Builder.CreateUnreachable();
    Builder.ClearInsertionPoint();
    return RValue::get(V);
  }

  case Builtin::BI__builtin_powi:
  case Builtin::BI__builtin_powif:
  case Builtin::BI__builtin_powil: {
    Value *Base = EmitScalarExpr(E->getArg(0));
    Value *Exponent = EmitScalarExpr(E->getArg(1));
    const llvm::Type *ArgType = Base->getType();
    Value *F = CGM.getIntrinsic(Intrinsic::powi, &ArgType, 1);
    return RValue::get(Builder.CreateCall2(F, Base, Exponent, "tmp"));
  }

  case Builtin::BI__builtin_isgreater:
  case Builtin::BI__builtin_isgreaterequal:
  case Builtin::BI__builtin_isless:
  case Builtin::BI__builtin_islessequal:
  case Builtin::BI__builtin_islessgreater:
  case Builtin::BI__builtin_isunordered: {
    // Ordered comparisons: we know the arguments to these are matching scalar
    // floating point values.
    Value *LHS = EmitScalarExpr(E->getArg(0));
    Value *RHS = EmitScalarExpr(E->getArg(1));

    switch (BuiltinID) {
    default: assert(0 && "Unknown ordered comparison");
    case Builtin::BI__builtin_isgreater:
      LHS = Builder.CreateFCmpOGT(LHS, RHS, "cmp");
      break;
    case Builtin::BI__builtin_isgreaterequal:
      LHS = Builder.CreateFCmpOGE(LHS, RHS, "cmp");
      break;
    case Builtin::BI__builtin_isless:
      LHS = Builder.CreateFCmpOLT(LHS, RHS, "cmp");
      break;
    case Builtin::BI__builtin_islessequal:
      LHS = Builder.CreateFCmpOLE(LHS, RHS, "cmp");
      break;
    case Builtin::BI__builtin_islessgreater:
      LHS = Builder.CreateFCmpONE(LHS, RHS, "cmp");
      break;
    case Builtin::BI__builtin_isunordered:
      LHS = Builder.CreateFCmpUNO(LHS, RHS, "cmp");
      break;
    }
    // ZExt bool to int type.
    return RValue::get(Builder.CreateZExt(LHS, ConvertType(E->getType()),
                                          "tmp"));
  }
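  // Note that the ordered (O-prefixed) predicates above evaluate to false
  // when either operand is NaN, which is exactly the behavior C99 requires of
  // isgreater() and friends; only isunordered() maps to an unordered
  // predicate.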
  case Builtin::BI__builtin_isnan: {
    Value *V = EmitScalarExpr(E->getArg(0));
    V = Builder.CreateFCmpUNO(V, V, "cmp");
    return RValue::get(Builder.CreateZExt(V, ConvertType(E->getType()), "tmp"));
  }
  case Builtin::BIalloca:
  case Builtin::BI__builtin_alloca: {
    // FIXME: LLVM IR should allow alloca with an i64 size!
    Value *Size = EmitScalarExpr(E->getArg(0));
    Size = Builder.CreateIntCast(Size, llvm::Type::getInt32Ty(VMContext),
                                 false, "tmp");
    return RValue::get(Builder.CreateAlloca(llvm::Type::getInt8Ty(VMContext),
                                            Size, "tmp"));
  }
  case Builtin::BI__builtin_bzero: {
    Value *Address = EmitScalarExpr(E->getArg(0));
    Builder.CreateCall4(CGM.getMemSetFn(), Address,
                        llvm::ConstantInt::get(llvm::Type::getInt8Ty(VMContext), 0),
                        EmitScalarExpr(E->getArg(1)),
                        llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 1));
    return RValue::get(Address);
  }
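  // Note: in this IR dialect the memset/memcpy/memmove intrinsics take
  // (dest, value-or-src, length, alignment); the trailing i32 1 in these
  // calls is a conservative alignment of one byte.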
  case Builtin::BI__builtin_memcpy: {
    Value *Address = EmitScalarExpr(E->getArg(0));
    Builder.CreateCall4(CGM.getMemCpyFn(), Address,
                        EmitScalarExpr(E->getArg(1)),
                        EmitScalarExpr(E->getArg(2)),
                        llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 1));
    return RValue::get(Address);
  }
  case Builtin::BI__builtin_memmove: {
    Value *Address = EmitScalarExpr(E->getArg(0));
    Builder.CreateCall4(CGM.getMemMoveFn(), Address,
                        EmitScalarExpr(E->getArg(1)),
                        EmitScalarExpr(E->getArg(2)),
                        llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 1));
    return RValue::get(Address);
  }
  case Builtin::BI__builtin_memset: {
    Value *Address = EmitScalarExpr(E->getArg(0));
    Builder.CreateCall4(CGM.getMemSetFn(), Address,
                        Builder.CreateTrunc(EmitScalarExpr(E->getArg(1)),
                                            llvm::Type::getInt8Ty(VMContext)),
                        EmitScalarExpr(E->getArg(2)),
                        llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 1));
    return RValue::get(Address);
  }
  case Builtin::BI__builtin_return_address: {
    Value *F = CGM.getIntrinsic(Intrinsic::returnaddress, 0, 0);
    return RValue::get(Builder.CreateCall(F, EmitScalarExpr(E->getArg(0))));
  }
  case Builtin::BI__builtin_frame_address: {
    Value *F = CGM.getIntrinsic(Intrinsic::frameaddress, 0, 0);
    return RValue::get(Builder.CreateCall(F, EmitScalarExpr(E->getArg(0))));
  }
  case Builtin::BI__builtin_extract_return_addr: {
    // FIXME: There should be a target hook for this
    return RValue::get(EmitScalarExpr(E->getArg(0)));
  }
  case Builtin::BI__builtin_unwind_init: {
    Value *F = CGM.getIntrinsic(Intrinsic::eh_unwind_init, 0, 0);
    return RValue::get(Builder.CreateCall(F));
  }
#if 0
  // FIXME: Finish/enable when LLVM backend support stabilizes
  case Builtin::BI__builtin_setjmp: {
    Value *Buf = EmitScalarExpr(E->getArg(0));
    // Store the frame pointer to the buffer.
    Value *FrameAddrF = CGM.getIntrinsic(Intrinsic::frameaddress, 0, 0);
    Value *FrameAddr =
        Builder.CreateCall(FrameAddrF,
                           Constant::getNullValue(llvm::Type::getInt32Ty(VMContext)));
    Builder.CreateStore(FrameAddr, Buf);
    // Call the setjmp intrinsic.
    Value *F = CGM.getIntrinsic(Intrinsic::eh_sjlj_setjmp, 0, 0);
    const llvm::Type *DestType = llvm::Type::getInt8PtrTy(VMContext);
    Buf = Builder.CreateBitCast(Buf, DestType);
    return RValue::get(Builder.CreateCall(F, Buf));
  }
  case Builtin::BI__builtin_longjmp: {
    Value *F = CGM.getIntrinsic(Intrinsic::eh_sjlj_longjmp, 0, 0);
    Value *Buf = EmitScalarExpr(E->getArg(0));
    const llvm::Type *DestType = llvm::Type::getInt8PtrTy(VMContext);
    Buf = Builder.CreateBitCast(Buf, DestType);
    return RValue::get(Builder.CreateCall(F, Buf));
  }
#endif
  case Builtin::BI__sync_fetch_and_add:
  case Builtin::BI__sync_fetch_and_sub:
  case Builtin::BI__sync_fetch_and_or:
  case Builtin::BI__sync_fetch_and_and:
  case Builtin::BI__sync_fetch_and_xor:
  case Builtin::BI__sync_add_and_fetch:
  case Builtin::BI__sync_sub_and_fetch:
  case Builtin::BI__sync_and_and_fetch:
  case Builtin::BI__sync_or_and_fetch:
  case Builtin::BI__sync_xor_and_fetch:
  case Builtin::BI__sync_val_compare_and_swap:
  case Builtin::BI__sync_bool_compare_and_swap:
  case Builtin::BI__sync_lock_test_and_set:
  case Builtin::BI__sync_lock_release:
    assert(0 && "Shouldn't make it through sema");
  case Builtin::BI__sync_fetch_and_add_1:
  case Builtin::BI__sync_fetch_and_add_2:
  case Builtin::BI__sync_fetch_and_add_4:
  case Builtin::BI__sync_fetch_and_add_8:
  case Builtin::BI__sync_fetch_and_add_16:
    return EmitBinaryAtomic(*this, Intrinsic::atomic_load_add, E);
  case Builtin::BI__sync_fetch_and_sub_1:
  case Builtin::BI__sync_fetch_and_sub_2:
  case Builtin::BI__sync_fetch_and_sub_4:
  case Builtin::BI__sync_fetch_and_sub_8:
  case Builtin::BI__sync_fetch_and_sub_16:
    return EmitBinaryAtomic(*this, Intrinsic::atomic_load_sub, E);
  case Builtin::BI__sync_fetch_and_or_1:
  case Builtin::BI__sync_fetch_and_or_2:
  case Builtin::BI__sync_fetch_and_or_4:
  case Builtin::BI__sync_fetch_and_or_8:
  case Builtin::BI__sync_fetch_and_or_16:
    return EmitBinaryAtomic(*this, Intrinsic::atomic_load_or, E);
  case Builtin::BI__sync_fetch_and_and_1:
  case Builtin::BI__sync_fetch_and_and_2:
  case Builtin::BI__sync_fetch_and_and_4:
  case Builtin::BI__sync_fetch_and_and_8:
  case Builtin::BI__sync_fetch_and_and_16:
    return EmitBinaryAtomic(*this, Intrinsic::atomic_load_and, E);
  case Builtin::BI__sync_fetch_and_xor_1:
  case Builtin::BI__sync_fetch_and_xor_2:
  case Builtin::BI__sync_fetch_and_xor_4:
  case Builtin::BI__sync_fetch_and_xor_8:
  case Builtin::BI__sync_fetch_and_xor_16:
    return EmitBinaryAtomic(*this, Intrinsic::atomic_load_xor, E);
  case Builtin::BI__sync_fetch_and_nand_1:
  case Builtin::BI__sync_fetch_and_nand_2:
  case Builtin::BI__sync_fetch_and_nand_4:
  case Builtin::BI__sync_fetch_and_nand_8:
  case Builtin::BI__sync_fetch_and_nand_16:
    return EmitBinaryAtomic(*this, Intrinsic::atomic_load_nand, E);

  // Clang extensions: not overloaded yet.
  case Builtin::BI__sync_fetch_and_min:
    return EmitBinaryAtomic(*this, Intrinsic::atomic_load_min, E);
  case Builtin::BI__sync_fetch_and_max:
    return EmitBinaryAtomic(*this, Intrinsic::atomic_load_max, E);
  case Builtin::BI__sync_fetch_and_umin:
    return EmitBinaryAtomic(*this, Intrinsic::atomic_load_umin, E);
  case Builtin::BI__sync_fetch_and_umax:
    return EmitBinaryAtomic(*this, Intrinsic::atomic_load_umax, E);

  case Builtin::BI__sync_add_and_fetch_1:
  case Builtin::BI__sync_add_and_fetch_2:
  case Builtin::BI__sync_add_and_fetch_4:
  case Builtin::BI__sync_add_and_fetch_8:
  case Builtin::BI__sync_add_and_fetch_16:
    return EmitBinaryAtomicPost(*this, Intrinsic::atomic_load_add, E,
                                llvm::Instruction::Add);
  case Builtin::BI__sync_sub_and_fetch_1:
  case Builtin::BI__sync_sub_and_fetch_2:
  case Builtin::BI__sync_sub_and_fetch_4:
  case Builtin::BI__sync_sub_and_fetch_8:
  case Builtin::BI__sync_sub_and_fetch_16:
    return EmitBinaryAtomicPost(*this, Intrinsic::atomic_load_sub, E,
                                llvm::Instruction::Sub);
  case Builtin::BI__sync_and_and_fetch_1:
  case Builtin::BI__sync_and_and_fetch_2:
  case Builtin::BI__sync_and_and_fetch_4:
  case Builtin::BI__sync_and_and_fetch_8:
  case Builtin::BI__sync_and_and_fetch_16:
    return EmitBinaryAtomicPost(*this, Intrinsic::atomic_load_and, E,
                                llvm::Instruction::And);
  case Builtin::BI__sync_or_and_fetch_1:
  case Builtin::BI__sync_or_and_fetch_2:
  case Builtin::BI__sync_or_and_fetch_4:
  case Builtin::BI__sync_or_and_fetch_8:
  case Builtin::BI__sync_or_and_fetch_16:
    return EmitBinaryAtomicPost(*this, Intrinsic::atomic_load_or, E,
                                llvm::Instruction::Or);
  case Builtin::BI__sync_xor_and_fetch_1:
  case Builtin::BI__sync_xor_and_fetch_2:
  case Builtin::BI__sync_xor_and_fetch_4:
  case Builtin::BI__sync_xor_and_fetch_8:
  case Builtin::BI__sync_xor_and_fetch_16:
    return EmitBinaryAtomicPost(*this, Intrinsic::atomic_load_xor, E,
                                llvm::Instruction::Xor);
  case Builtin::BI__sync_nand_and_fetch_1:
  case Builtin::BI__sync_nand_and_fetch_2:
  case Builtin::BI__sync_nand_and_fetch_4:
  case Builtin::BI__sync_nand_and_fetch_8:
  case Builtin::BI__sync_nand_and_fetch_16:
    return EmitBinaryAtomicPost(*this, Intrinsic::atomic_load_nand, E,
                                llvm::Instruction::And);

  case Builtin::BI__sync_val_compare_and_swap_1:
  case Builtin::BI__sync_val_compare_and_swap_2:
  case Builtin::BI__sync_val_compare_and_swap_4:
  case Builtin::BI__sync_val_compare_and_swap_8:
  case Builtin::BI__sync_val_compare_and_swap_16: {
    const llvm::Type *ResType[2];
    ResType[0] = ConvertType(E->getType());
    ResType[1] = ConvertType(E->getArg(0)->getType());
    Value *AtomF = CGM.getIntrinsic(Intrinsic::atomic_cmp_swap, ResType, 2);
    return RValue::get(Builder.CreateCall3(AtomF,
                                           EmitScalarExpr(E->getArg(0)),
                                           EmitScalarExpr(E->getArg(1)),
                                           EmitScalarExpr(E->getArg(2))));
  }

  case Builtin::BI__sync_bool_compare_and_swap_1:
  case Builtin::BI__sync_bool_compare_and_swap_2:
  case Builtin::BI__sync_bool_compare_and_swap_4:
  case Builtin::BI__sync_bool_compare_and_swap_8:
  case Builtin::BI__sync_bool_compare_and_swap_16: {
    const llvm::Type *ResType[2];
    ResType[0] = ConvertType(E->getArg(1)->getType());
    ResType[1] = llvm::PointerType::getUnqual(ResType[0]);
    Value *AtomF = CGM.getIntrinsic(Intrinsic::atomic_cmp_swap, ResType, 2);
    Value *OldVal = EmitScalarExpr(E->getArg(1));
    Value *PrevVal = Builder.CreateCall3(AtomF,
                                         EmitScalarExpr(E->getArg(0)),
                                         OldVal,
                                         EmitScalarExpr(E->getArg(2)));
    Value *Result = Builder.CreateICmpEQ(PrevVal, OldVal);
    // ZExt bool to int.
    return RValue::get(Builder.CreateZExt(Result, ConvertType(E->getType())));
  }
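  // i.e. the boolean variant reports success by comparing the previously
  // stored value (returned by cmp_swap) against the expected value: equality
  // means the exchange took place.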

  case Builtin::BI__sync_lock_test_and_set_1:
  case Builtin::BI__sync_lock_test_and_set_2:
  case Builtin::BI__sync_lock_test_and_set_4:
  case Builtin::BI__sync_lock_test_and_set_8:
  case Builtin::BI__sync_lock_test_and_set_16:
    return EmitBinaryAtomic(*this, Intrinsic::atomic_swap, E);
  case Builtin::BI__sync_lock_release_1:
  case Builtin::BI__sync_lock_release_2:
  case Builtin::BI__sync_lock_release_4:
  case Builtin::BI__sync_lock_release_8:
  case Builtin::BI__sync_lock_release_16: {
    Value *Ptr = EmitScalarExpr(E->getArg(0));
    const llvm::Type *ElTy =
      cast<llvm::PointerType>(Ptr->getType())->getElementType();
    llvm::StoreInst *Store =
      Builder.CreateStore(llvm::Constant::getNullValue(ElTy), Ptr);
    Store->setVolatile(true);
    return RValue::get(0);
  }
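  // Per the GCC builtin documentation, __sync_lock_release writes 0 to the
  // location with release semantics; the volatile store above is this
  // implementation's approximation of that.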

  case Builtin::BI__sync_synchronize: {
    Value *C[5];
    C[0] = C[1] = C[2] = C[3] =
      llvm::ConstantInt::get(llvm::Type::getInt1Ty(VMContext), 1);
    C[4] = llvm::ConstantInt::get(llvm::Type::getInt1Ty(VMContext), 0);
    Builder.CreateCall(CGM.getIntrinsic(Intrinsic::memory_barrier), C, C + 5);
    return RValue::get(0);
  }
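  // The five i1 flags select which orderings llvm.memory.barrier enforces;
  // per the LangRef of this era they are (in order) load-load, load-store,
  // store-load, store-store, and device, so this requests a full barrier
  // that excludes device memory.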

  // Library functions with special handling.
  case Builtin::BIsqrt:
  case Builtin::BIsqrtf:
  case Builtin::BIsqrtl: {
    // Rewrite sqrt to intrinsic if allowed.
    if (!FD->hasAttr<ConstAttr>())
      break;
    Value *Arg0 = EmitScalarExpr(E->getArg(0));
    const llvm::Type *ArgType = Arg0->getType();
    Value *F = CGM.getIntrinsic(Intrinsic::sqrt, &ArgType, 1);
    return RValue::get(Builder.CreateCall(F, Arg0, "tmp"));
  }

  case Builtin::BIpow:
  case Builtin::BIpowf:
  case Builtin::BIpowl: {
    // Rewrite pow to intrinsic if allowed.
    if (!FD->hasAttr<ConstAttr>())
      break;
    Value *Base = EmitScalarExpr(E->getArg(0));
    Value *Exponent = EmitScalarExpr(E->getArg(1));
    const llvm::Type *ArgType = Base->getType();
    Value *F = CGM.getIntrinsic(Intrinsic::pow, &ArgType, 1);
    return RValue::get(Builder.CreateCall2(F, Base, Exponent, "tmp"));
  }
  }

  // If this is an alias for a libm function (e.g. __builtin_sin), turn it
  // into a call to that function.
  if (getContext().BuiltinInfo.isLibFunction(BuiltinID) ||
      getContext().BuiltinInfo.isPredefinedLibFunction(BuiltinID))
    return EmitCall(CGM.getBuiltinLibFunction(FD, BuiltinID),
                    E->getCallee()->getType(), E->arg_begin(),
                    E->arg_end());

  // See if we have a target specific intrinsic.
  const char *Name = getContext().BuiltinInfo.GetName(BuiltinID);
  Intrinsic::ID IntrinsicID = Intrinsic::not_intrinsic;
  if (const char *Prefix =
      llvm::Triple::getArchTypePrefix(Target.getTriple().getArch()))
    IntrinsicID = Intrinsic::getIntrinsicForGCCBuiltin(Prefix, Name);

  if (IntrinsicID != Intrinsic::not_intrinsic) {
    SmallVector<Value*, 16> Args;

    Function *F = CGM.getIntrinsic(IntrinsicID);
    const llvm::FunctionType *FTy = F->getFunctionType();

    for (unsigned i = 0, e = E->getNumArgs(); i != e; ++i) {
      Value *ArgValue = EmitScalarExpr(E->getArg(i));

      // If the intrinsic arg type is different from the builtin arg type
      // we need to do a bit cast.
      const llvm::Type *PTy = FTy->getParamType(i);
      if (PTy != ArgValue->getType()) {
        assert(PTy->canLosslesslyBitCastTo(FTy->getParamType(i)) &&
               "Must be able to losslessly bit cast to param");
        ArgValue = Builder.CreateBitCast(ArgValue, PTy);
      }

      Args.push_back(ArgValue);
    }

    Value *V = Builder.CreateCall(F, Args.data(), Args.data() + Args.size());
    QualType BuiltinRetType = E->getType();

    const llvm::Type *RetTy = llvm::Type::getVoidTy(VMContext);
    if (!BuiltinRetType->isVoidType()) RetTy = ConvertType(BuiltinRetType);

    if (RetTy != V->getType()) {
      assert(V->getType()->canLosslesslyBitCastTo(RetTy) &&
             "Must be able to losslessly bit cast result type");
      V = Builder.CreateBitCast(V, RetTy);
    }

    return RValue::get(V);
  }

  // See if we have a target specific builtin that needs to be lowered.
  if (Value *V = EmitTargetBuiltinExpr(BuiltinID, E))
    return RValue::get(V);

  ErrorUnsupported(E, "builtin function");

  // Unknown builtin, for now just dump it out and return undef.
  if (hasAggregateLLVMType(E->getType()))
    return RValue::getAggregate(CreateTempAlloca(ConvertType(E->getType())));
  return RValue::get(llvm::UndefValue::get(ConvertType(E->getType())));
}

Value *CodeGenFunction::EmitTargetBuiltinExpr(unsigned BuiltinID,
                                              const CallExpr *E) {
  switch (Target.getTriple().getArch()) {
  case llvm::Triple::x86:
  case llvm::Triple::x86_64:
    return EmitX86BuiltinExpr(BuiltinID, E);
  case llvm::Triple::ppc:
  case llvm::Triple::ppc64:
    return EmitPPCBuiltinExpr(BuiltinID, E);
  default:
    return 0;
  }
}

Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
                                           const CallExpr *E) {

  llvm::SmallVector<Value*, 4> Ops;

  for (unsigned i = 0, e = E->getNumArgs(); i != e; i++)
    Ops.push_back(EmitScalarExpr(E->getArg(i)));

  switch (BuiltinID) {
  default: return 0;
  case X86::BI__builtin_ia32_pslldi128:
  case X86::BI__builtin_ia32_psllqi128:
  case X86::BI__builtin_ia32_psllwi128:
  case X86::BI__builtin_ia32_psradi128:
  case X86::BI__builtin_ia32_psrawi128:
  case X86::BI__builtin_ia32_psrldi128:
  case X86::BI__builtin_ia32_psrlqi128:
  case X86::BI__builtin_ia32_psrlwi128: {
    Ops[1] = Builder.CreateZExt(Ops[1], llvm::Type::getInt64Ty(VMContext),
                                "zext");
    const llvm::Type *Ty =
      llvm::VectorType::get(llvm::Type::getInt64Ty(VMContext), 2);
    llvm::Value *Zero =
      llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 0);
    Ops[1] = Builder.CreateInsertElement(llvm::UndefValue::get(Ty),
                                         Ops[1], Zero, "insert");
    Ops[1] = Builder.CreateBitCast(Ops[1], Ops[0]->getType(), "bitcast");
    const char *name = 0;
    Intrinsic::ID ID = Intrinsic::not_intrinsic;

    switch (BuiltinID) {
    default: assert(0 && "Unsupported shift intrinsic!");
    case X86::BI__builtin_ia32_pslldi128:
      name = "pslldi";
      ID = Intrinsic::x86_sse2_psll_d;
      break;
    case X86::BI__builtin_ia32_psllqi128:
      name = "psllqi";
      ID = Intrinsic::x86_sse2_psll_q;
      break;
    case X86::BI__builtin_ia32_psllwi128:
      name = "psllwi";
      ID = Intrinsic::x86_sse2_psll_w;
      break;
    case X86::BI__builtin_ia32_psradi128:
      name = "psradi";
      ID = Intrinsic::x86_sse2_psra_d;
      break;
    case X86::BI__builtin_ia32_psrawi128:
      name = "psrawi";
      ID = Intrinsic::x86_sse2_psra_w;
      break;
    case X86::BI__builtin_ia32_psrldi128:
      name = "psrldi";
      ID = Intrinsic::x86_sse2_psrl_d;
      break;
    case X86::BI__builtin_ia32_psrlqi128:
      name = "psrlqi";
      ID = Intrinsic::x86_sse2_psrl_q;
      break;
    case X86::BI__builtin_ia32_psrlwi128:
      name = "psrlwi";
      ID = Intrinsic::x86_sse2_psrl_w;
      break;
    }
    llvm::Function *F = CGM.getIntrinsic(ID);
    return Builder.CreateCall(F, &Ops[0], &Ops[0] + Ops.size(), name);
  }
  case X86::BI__builtin_ia32_pslldi:
  case X86::BI__builtin_ia32_psllqi:
  case X86::BI__builtin_ia32_psllwi:
  case X86::BI__builtin_ia32_psradi:
  case X86::BI__builtin_ia32_psrawi:
  case X86::BI__builtin_ia32_psrldi:
  case X86::BI__builtin_ia32_psrlqi:
  case X86::BI__builtin_ia32_psrlwi: {
    Ops[1] = Builder.CreateZExt(Ops[1], llvm::Type::getInt64Ty(VMContext),
                                "zext");
    const llvm::Type *Ty =
      llvm::VectorType::get(llvm::Type::getInt64Ty(VMContext), 1);
    Ops[1] = Builder.CreateBitCast(Ops[1], Ty, "bitcast");
    const char *name = 0;
    Intrinsic::ID ID = Intrinsic::not_intrinsic;

    switch (BuiltinID) {
    default: assert(0 && "Unsupported shift intrinsic!");
    case X86::BI__builtin_ia32_pslldi:
      name = "pslldi";
      ID = Intrinsic::x86_mmx_psll_d;
      break;
    case X86::BI__builtin_ia32_psllqi:
      name = "psllqi";
      ID = Intrinsic::x86_mmx_psll_q;
      break;
    case X86::BI__builtin_ia32_psllwi:
      name = "psllwi";
      ID = Intrinsic::x86_mmx_psll_w;
      break;
    case X86::BI__builtin_ia32_psradi:
      name = "psradi";
      ID = Intrinsic::x86_mmx_psra_d;
      break;
    case X86::BI__builtin_ia32_psrawi:
      name = "psrawi";
      ID = Intrinsic::x86_mmx_psra_w;
      break;
    case X86::BI__builtin_ia32_psrldi:
      name = "psrldi";
      ID = Intrinsic::x86_mmx_psrl_d;
      break;
    case X86::BI__builtin_ia32_psrlqi:
      name = "psrlqi";
      ID = Intrinsic::x86_mmx_psrl_q;
      break;
    case X86::BI__builtin_ia32_psrlwi:
      name = "psrlwi";
      ID = Intrinsic::x86_mmx_psrl_w;
      break;
    }
    llvm::Function *F = CGM.getIntrinsic(ID);
    return Builder.CreateCall(F, &Ops[0], &Ops[0] + Ops.size(), name);
  }
  case X86::BI__builtin_ia32_cmpps: {
    llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_sse_cmp_ps);
    return Builder.CreateCall(F, &Ops[0], &Ops[0] + Ops.size(), "cmpps");
  }
  case X86::BI__builtin_ia32_cmpss: {
    llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_sse_cmp_ss);
    return Builder.CreateCall(F, &Ops[0], &Ops[0] + Ops.size(), "cmpss");
  }
  case X86::BI__builtin_ia32_ldmxcsr: {
    const llvm::Type *PtrTy = llvm::Type::getInt8PtrTy(VMContext);
    Value *One = llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 1);
    Value *Tmp = Builder.CreateAlloca(llvm::Type::getInt32Ty(VMContext), One,
                                      "tmp");
    Builder.CreateStore(Ops[0], Tmp);
    return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse_ldmxcsr),
                              Builder.CreateBitCast(Tmp, PtrTy));
  }
  case X86::BI__builtin_ia32_stmxcsr: {
    const llvm::Type *PtrTy = llvm::Type::getInt8PtrTy(VMContext);
    Value *One = llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 1);
    Value *Tmp = Builder.CreateAlloca(llvm::Type::getInt32Ty(VMContext), One,
                                      "tmp");
    One = Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse_stmxcsr),
                             Builder.CreateBitCast(Tmp, PtrTy));
    return Builder.CreateLoad(Tmp, "stmxcsr");
  }
  case X86::BI__builtin_ia32_cmppd: {
    llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_sse2_cmp_pd);
    return Builder.CreateCall(F, &Ops[0], &Ops[0] + Ops.size(), "cmppd");
  }
  case X86::BI__builtin_ia32_cmpsd: {
    llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_sse2_cmp_sd);
    return Builder.CreateCall(F, &Ops[0], &Ops[0] + Ops.size(), "cmpsd");
  }
  case X86::BI__builtin_ia32_storehps:
  case X86::BI__builtin_ia32_storelps: {
    const llvm::Type *EltTy = llvm::Type::getInt64Ty(VMContext);
    llvm::Type *PtrTy = llvm::PointerType::getUnqual(EltTy);
    llvm::Type *VecTy = llvm::VectorType::get(EltTy, 2);

    // Cast the value to v2i64.
    Ops[1] = Builder.CreateBitCast(Ops[1], VecTy, "cast");

    // Extract element 0 (storelps) or element 1 (storehps).
    unsigned Index = BuiltinID == X86::BI__builtin_ia32_storelps ? 0 : 1;
    llvm::Value *Idx =
      llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), Index);
    Ops[1] = Builder.CreateExtractElement(Ops[1], Idx, "extract");

    // Cast the pointer to i64* and store.
    Ops[0] = Builder.CreateBitCast(Ops[0], PtrTy);
    return Builder.CreateStore(Ops[1], Ops[0]);
  }
  case X86::BI__builtin_ia32_palignr: {
    Function *F = CGM.getIntrinsic(Intrinsic::x86_ssse3_palign_r);
    return Builder.CreateCall(F, &Ops[0], &Ops[0] + Ops.size());
  }
  case X86::BI__builtin_ia32_palignr128: {
    unsigned shiftVal = cast<llvm::ConstantInt>(Ops[2])->getZExtValue();

    // If palignr is shifting the pair of input vectors less than 17 bytes,
    // emit a shuffle instruction.
    if (shiftVal <= 16) {
      const llvm::Type *IntTy = llvm::Type::getInt32Ty(VMContext);

      llvm::SmallVector<llvm::Constant*, 16> Indices;
      for (unsigned i = 0; i != 16; ++i)
        Indices.push_back(llvm::ConstantInt::get(IntTy, shiftVal + i));

      Value *SV = llvm::ConstantVector::get(Indices.begin(), Indices.size());
      return Builder.CreateShuffleVector(Ops[1], Ops[0], SV, "palignr");
    }
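    // e.g. shiftVal == 4 yields indices 4..19 into the 32-byte concatenation
    // (Ops[1] supplies bytes 0-15, Ops[0] bytes 16-31), i.e. the pair shifted
    // right by 4 bytes, which is what palignr defines for in-range shifts.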

    // If palignr is shifting the pair of input vectors more than 16 but less
    // than 32 bytes, emit a logical right shift of the destination.
    if (shiftVal < 32) {
      const llvm::Type *EltTy = llvm::Type::getInt64Ty(VMContext);
      const llvm::Type *VecTy = llvm::VectorType::get(EltTy, 2);
      const llvm::Type *IntTy = llvm::Type::getInt32Ty(VMContext);

      Ops[0] = Builder.CreateBitCast(Ops[0], VecTy, "cast");
      // The psrl.dq intrinsic takes its shift amount in bits, hence the * 8.
      Ops[1] = llvm::ConstantInt::get(IntTy, (shiftVal - 16) * 8);

      llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_sse2_psrl_dq);
      return Builder.CreateCall(F, &Ops[0], &Ops[0] + 2, "palignr");
    }

    // If palignr is shifting the pair of vectors 32 bytes or more, emit zero.
    return llvm::Constant::getNullValue(ConvertType(E->getType()));
  }
  }
}

Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
                                           const CallExpr *E) {
  switch (BuiltinID) {
  default: return 0;
  }
}