// CGAtomic.cpp, revision e469249726840b8baa2df008ca891e213cc5c661
//===--- CGAtomic.cpp - Emit LLVM IR for atomic operations ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the code for emitting atomic operations.
//
//===----------------------------------------------------------------------===//

#include "CodeGenFunction.h"
#include "CGCall.h"
#include "CodeGenModule.h"
#include "clang/AST/ASTContext.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Operator.h"

using namespace clang;
using namespace CodeGen;

// The ABI values for various atomic memory orderings.
enum AtomicOrderingKind {
  AO_ABI_memory_order_relaxed = 0,
  AO_ABI_memory_order_consume = 1,
  AO_ABI_memory_order_acquire = 2,
  AO_ABI_memory_order_release = 3,
  AO_ABI_memory_order_acq_rel = 4,
  AO_ABI_memory_order_seq_cst = 5
};
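
// Illustrative note (not part of the original source): these ABI values match
// the C11/C++11 memory_order enumerators that the __c11_atomic_* and
// __atomic_* builtins receive as their ordering argument, e.g.
//
//   _Atomic(int) counter;
//   __c11_atomic_store(&counter, 1, __ATOMIC_SEQ_CST);  // ordering constant == 5
//
// EmitAtomicExpr below switches on these constants whenever the ordering
// operand folds to a compile-time constant.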

namespace {
  class AtomicInfo {
    CodeGenFunction &CGF;
    QualType AtomicTy;
    QualType ValueTy;
    uint64_t AtomicSizeInBits;
    uint64_t ValueSizeInBits;
    CharUnits AtomicAlign;
    CharUnits ValueAlign;
    CharUnits LValueAlign;
    TypeEvaluationKind EvaluationKind;
    bool UseLibcall;
  public:
    AtomicInfo(CodeGenFunction &CGF, LValue &lvalue) : CGF(CGF) {
      assert(lvalue.isSimple());

      AtomicTy = lvalue.getType();
      ValueTy = AtomicTy->castAs<AtomicType>()->getValueType();
      EvaluationKind = CGF.getEvaluationKind(ValueTy);

      ASTContext &C = CGF.getContext();

      uint64_t valueAlignInBits;
      llvm::tie(ValueSizeInBits, valueAlignInBits) = C.getTypeInfo(ValueTy);

      uint64_t atomicAlignInBits;
      llvm::tie(AtomicSizeInBits, atomicAlignInBits) = C.getTypeInfo(AtomicTy);

      assert(ValueSizeInBits <= AtomicSizeInBits);
      assert(valueAlignInBits <= atomicAlignInBits);

      AtomicAlign = C.toCharUnitsFromBits(atomicAlignInBits);
      ValueAlign = C.toCharUnitsFromBits(valueAlignInBits);
      if (lvalue.getAlignment().isZero())
        lvalue.setAlignment(AtomicAlign);

      UseLibcall =
        (AtomicSizeInBits > uint64_t(C.toBits(lvalue.getAlignment())) ||
         AtomicSizeInBits > C.getTargetInfo().getMaxAtomicInlineWidth());
    }
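
    // Illustrative example (an assumption about a typical 64-bit target, not
    // from the original source): for _Atomic(int) the atomic size is 32 bits,
    // well under getMaxAtomicInlineWidth(), so UseLibcall stays false and
    // inline atomic instructions are emitted; for a 32-byte _Atomic(struct)
    // the size exceeds the inline width, so every load, store, and RMW on it
    // goes through the __atomic_* library routines instead.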

    QualType getAtomicType() const { return AtomicTy; }
    QualType getValueType() const { return ValueTy; }
    CharUnits getAtomicAlignment() const { return AtomicAlign; }
    CharUnits getValueAlignment() const { return ValueAlign; }
    uint64_t getAtomicSizeInBits() const { return AtomicSizeInBits; }
    uint64_t getValueSizeInBits() const { return ValueSizeInBits; }
    TypeEvaluationKind getEvaluationKind() const { return EvaluationKind; }
    bool shouldUseLibcall() const { return UseLibcall; }

    /// Is the atomic size larger than the underlying value type?
    ///
    /// Note that the absence of padding does not mean that atomic
    /// objects are completely interchangeable with non-atomic
    /// objects: we might have promoted the alignment of a type
    /// without making it bigger.
    bool hasPadding() const {
      return (ValueSizeInBits != AtomicSizeInBits);
    }
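
    // Illustrative example (an assumption, not from the original source): a
    // three-char struct has a 24-bit value size, but its _Atomic version is
    // typically rounded up to a 32-bit, 4-byte-aligned object, so hasPadding()
    // returns true and the extra byte must be zeroed before bitwise atomic
    // operations (e.g. compare-exchange) can compare values reliably.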

    void emitMemSetZeroIfNecessary(LValue dest) const;

    llvm::Value *getAtomicSizeValue() const {
      CharUnits size = CGF.getContext().toCharUnitsFromBits(AtomicSizeInBits);
      return CGF.CGM.getSize(size);
    }

    /// Cast the given pointer to an integer pointer suitable for
    /// atomic operations.
    llvm::Value *emitCastToAtomicIntPointer(llvm::Value *addr) const;

    /// Turn an atomic-layout object into an r-value.
    RValue convertTempToRValue(llvm::Value *addr,
                               AggValueSlot resultSlot) const;

    /// Copy an atomic r-value into atomic-layout memory.
    void emitCopyIntoMemory(RValue rvalue, LValue lvalue) const;

    /// Project an l-value down to the value field.
    LValue projectValue(LValue lvalue) const {
      llvm::Value *addr = lvalue.getAddress();
      if (hasPadding())
        addr = CGF.Builder.CreateStructGEP(addr, 0);

      return LValue::MakeAddr(addr, getValueType(), lvalue.getAlignment(),
                              CGF.getContext(), lvalue.getTBAAInfo());
    }

    /// Materialize an atomic r-value in atomic-layout memory.
    llvm::Value *materializeRValue(RValue rvalue) const;

  private:
    bool requiresMemSetZero(llvm::Type *type) const;
  };
}

static RValue emitAtomicLibcall(CodeGenFunction &CGF,
                                StringRef fnName,
                                QualType resultType,
                                CallArgList &args) {
  const CGFunctionInfo &fnInfo =
    CGF.CGM.getTypes().arrangeFreeFunctionCall(resultType, args,
            FunctionType::ExtInfo(), RequiredArgs::All);
  llvm::FunctionType *fnTy = CGF.CGM.getTypes().GetFunctionType(fnInfo);
  llvm::Constant *fn = CGF.CGM.CreateRuntimeFunction(fnTy, fnName);
  return CGF.EmitCall(fnInfo, fn, ReturnValueSlot(), args);
}
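
// Illustrative sketch (an assumption about typical output, not from the
// original source): for a 16-byte atomic object, EmitAtomicLoad below uses
// this helper to emit roughly
//
//   call void @__atomic_load(i64 16, i8* %obj, i8* %tmp, i32 5)
//
// where the runtime function is declared on demand by CreateRuntimeFunction
// and the ordering argument uses the AO_ABI_* constants above.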

/// Does a store of the given IR type modify the full expected width?
static bool isFullSizeType(CodeGenModule &CGM, llvm::Type *type,
                           uint64_t expectedSize) {
  return (CGM.getDataLayout().getTypeStoreSize(type) * 8 == expectedSize);
}

/// Does the atomic type require memsetting to zero before initialization?
///
/// The IR type is provided as a way of making certain queries faster.
bool AtomicInfo::requiresMemSetZero(llvm::Type *type) const {
  // If the atomic type has size padding, we definitely need a memset.
  if (hasPadding()) return true;

  // Otherwise, do some simple heuristics to try to avoid it:
  switch (getEvaluationKind()) {
  // For scalars and complexes, check whether the store size of the
  // type uses the full size.
  case TEK_Scalar:
    return !isFullSizeType(CGF.CGM, type, AtomicSizeInBits);
  case TEK_Complex:
    return !isFullSizeType(CGF.CGM, type->getStructElementType(0),
                           AtomicSizeInBits / 2);

  // Just be pessimistic about aggregates.
  case TEK_Aggregate:
    return true;
  }
  llvm_unreachable("bad evaluation kind");
}
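
// Illustrative example (an assumption, not from the original source): an
// _Atomic(bool) stored as i8 writes all 8 bits of its 8-bit atomic slot, so
// no memset is needed; an x86-64 long double stored as x86_fp80 only writes
// 10 of the 16 bytes of its padded atomic slot, so the buffer is zeroed
// first to give operations like compare-exchange a deterministic bit-pattern.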

void AtomicInfo::emitMemSetZeroIfNecessary(LValue dest) const {
  llvm::Value *addr = dest.getAddress();
  if (!requiresMemSetZero(addr->getType()->getPointerElementType()))
    return;

  CGF.Builder.CreateMemSet(addr, llvm::ConstantInt::get(CGF.Int8Ty, 0),
                           AtomicSizeInBits / 8,
                           dest.getAlignment().getQuantity());
}

static void
EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, llvm::Value *Dest,
             llvm::Value *Ptr, llvm::Value *Val1, llvm::Value *Val2,
             uint64_t Size, unsigned Align, llvm::AtomicOrdering Order) {
  llvm::AtomicRMWInst::BinOp Op = llvm::AtomicRMWInst::Add;
  llvm::Instruction::BinaryOps PostOp = (llvm::Instruction::BinaryOps)0;

  switch (E->getOp()) {
  case AtomicExpr::AO__c11_atomic_init:
    llvm_unreachable("Already handled!");

  case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
  case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
  case AtomicExpr::AO__atomic_compare_exchange:
  case AtomicExpr::AO__atomic_compare_exchange_n: {
    // Note that cmpxchg only supports specifying one ordering and
    // doesn't support weak cmpxchg, at least at the moment.
    llvm::LoadInst *LoadVal1 = CGF.Builder.CreateLoad(Val1);
    LoadVal1->setAlignment(Align);
    llvm::LoadInst *LoadVal2 = CGF.Builder.CreateLoad(Val2);
    LoadVal2->setAlignment(Align);
    llvm::AtomicCmpXchgInst *CXI =
        CGF.Builder.CreateAtomicCmpXchg(Ptr, LoadVal1, LoadVal2, Order);
    CXI->setVolatile(E->isVolatile());
    llvm::StoreInst *StoreVal1 = CGF.Builder.CreateStore(CXI, Val1);
    StoreVal1->setAlignment(Align);
    llvm::Value *Cmp = CGF.Builder.CreateICmpEQ(CXI, LoadVal1);
    CGF.EmitStoreOfScalar(Cmp, CGF.MakeAddrLValue(Dest, E->getType()));
    return;
  }

  case AtomicExpr::AO__c11_atomic_load:
  case AtomicExpr::AO__atomic_load_n:
  case AtomicExpr::AO__atomic_load: {
    llvm::LoadInst *Load = CGF.Builder.CreateLoad(Ptr);
    Load->setAtomic(Order);
    Load->setAlignment(Size);
    Load->setVolatile(E->isVolatile());
    llvm::StoreInst *StoreDest = CGF.Builder.CreateStore(Load, Dest);
    StoreDest->setAlignment(Align);
    return;
  }

  case AtomicExpr::AO__c11_atomic_store:
  case AtomicExpr::AO__atomic_store:
  case AtomicExpr::AO__atomic_store_n: {
    assert(!Dest && "Store does not return a value");
    llvm::LoadInst *LoadVal1 = CGF.Builder.CreateLoad(Val1);
    LoadVal1->setAlignment(Align);
    llvm::StoreInst *Store = CGF.Builder.CreateStore(LoadVal1, Ptr);
    Store->setAtomic(Order);
    Store->setAlignment(Size);
    Store->setVolatile(E->isVolatile());
    return;
  }

  case AtomicExpr::AO__c11_atomic_exchange:
  case AtomicExpr::AO__atomic_exchange_n:
  case AtomicExpr::AO__atomic_exchange:
    Op = llvm::AtomicRMWInst::Xchg;
    break;

  case AtomicExpr::AO__atomic_add_fetch:
    PostOp = llvm::Instruction::Add;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_add:
  case AtomicExpr::AO__atomic_fetch_add:
    Op = llvm::AtomicRMWInst::Add;
    break;

  case AtomicExpr::AO__atomic_sub_fetch:
    PostOp = llvm::Instruction::Sub;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_sub:
  case AtomicExpr::AO__atomic_fetch_sub:
    Op = llvm::AtomicRMWInst::Sub;
    break;

  case AtomicExpr::AO__atomic_and_fetch:
    PostOp = llvm::Instruction::And;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_and:
  case AtomicExpr::AO__atomic_fetch_and:
    Op = llvm::AtomicRMWInst::And;
    break;

  case AtomicExpr::AO__atomic_or_fetch:
    PostOp = llvm::Instruction::Or;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_or:
  case AtomicExpr::AO__atomic_fetch_or:
    Op = llvm::AtomicRMWInst::Or;
    break;

  case AtomicExpr::AO__atomic_xor_fetch:
    PostOp = llvm::Instruction::Xor;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_xor:
  case AtomicExpr::AO__atomic_fetch_xor:
    Op = llvm::AtomicRMWInst::Xor;
    break;

  case AtomicExpr::AO__atomic_nand_fetch:
    PostOp = llvm::Instruction::And;
    // Fall through.
  case AtomicExpr::AO__atomic_fetch_nand:
    Op = llvm::AtomicRMWInst::Nand;
    break;
  }

  llvm::LoadInst *LoadVal1 = CGF.Builder.CreateLoad(Val1);
  LoadVal1->setAlignment(Align);
  llvm::AtomicRMWInst *RMWI =
      CGF.Builder.CreateAtomicRMW(Op, Ptr, LoadVal1, Order);
  RMWI->setVolatile(E->isVolatile());

  // For __atomic_*_fetch operations, perform the operation again to
  // determine the value which was written.
  llvm::Value *Result = RMWI;
  if (PostOp)
    Result = CGF.Builder.CreateBinOp(PostOp, RMWI, LoadVal1);
  if (E->getOp() == AtomicExpr::AO__atomic_nand_fetch)
    Result = CGF.Builder.CreateNot(Result);
  llvm::StoreInst *StoreDest = CGF.Builder.CreateStore(Result, Dest);
  StoreDest->setAlignment(Align);
}
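
// Illustrative sketch (an assumption about typical output, not from the
// original source): for __atomic_add_fetch(&i, 2, __ATOMIC_SEQ_CST) on a
// 32-bit int, the RMW path at the end of EmitAtomicOp emits roughly
//
//   %old = atomicrmw add i32* %ptr, i32 2 seq_cst
//   %new = add i32 %old, 2
//   store i32 %new, i32* %dest
//
// i.e. atomicrmw returns the prior value, and the PostOp re-applies the
// operation to recover the value that was actually written.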

// This function emits any expression (scalar, complex, or aggregate)
// into a temporary alloca.
static llvm::Value *
EmitValToTemp(CodeGenFunction &CGF, Expr *E) {
  llvm::Value *DeclPtr = CGF.CreateMemTemp(E->getType(), ".atomictmp");
  CGF.EmitAnyExprToMem(E, DeclPtr, E->getType().getQualifiers(),
                       /*Init*/ true);
  return DeclPtr;
}

static void
AddDirectArgument(CodeGenFunction &CGF, CallArgList &Args,
                       bool UseOptimizedLibcall, llvm::Value *Val,
                       QualType ValTy) {
  if (UseOptimizedLibcall) {
    // Load value and pass it to the function directly.
    unsigned Align = CGF.getContext().getTypeAlignInChars(ValTy).getQuantity();
    Val = CGF.EmitLoadOfScalar(Val, false, Align, ValTy);
    Args.add(RValue::get(Val), ValTy);
  } else {
    // Non-optimized functions always take a reference.
    Args.add(RValue::get(CGF.EmitCastToVoidPtr(Val)),
                         CGF.getContext().VoidPtrTy);
  }
}
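
// Illustrative contrast (an assumption, not from the original source): for a
// 4-byte int, the size-suffixed entry point takes the operand by value,
//
//   i32 @__atomic_fetch_add_4(i8* %mem, i32 %val, i32 %order)
//
// whereas the generic, size-parameterized form only ever sees pointers,
//
//   void @__atomic_exchange(i64 %size, i8* %mem, i8* %val, i8* %ret, i32 %order)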

RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
  QualType AtomicTy = E->getPtr()->getType()->getPointeeType();
  QualType MemTy = AtomicTy;
  if (const AtomicType *AT = AtomicTy->getAs<AtomicType>())
    MemTy = AT->getValueType();
  CharUnits sizeChars = getContext().getTypeSizeInChars(AtomicTy);
  uint64_t Size = sizeChars.getQuantity();
  CharUnits alignChars = getContext().getTypeAlignInChars(AtomicTy);
  unsigned Align = alignChars.getQuantity();
  unsigned MaxInlineWidthInBits =
    getTarget().getMaxAtomicInlineWidth();
  bool UseLibcall = (Size != Align ||
                     getContext().toBits(sizeChars) > MaxInlineWidthInBits);

  llvm::Value *Ptr, *Order, *OrderFail = 0, *Val1 = 0, *Val2 = 0;
  Ptr = EmitScalarExpr(E->getPtr());

  if (E->getOp() == AtomicExpr::AO__c11_atomic_init) {
    assert(!Dest && "Init does not return a value");
    LValue lvalue = LValue::MakeAddr(Ptr, AtomicTy, alignChars, getContext());
    EmitAtomicInit(E->getVal1(), lvalue);
    return RValue::get(0);
  }

  Order = EmitScalarExpr(E->getOrder());

  switch (E->getOp()) {
  case AtomicExpr::AO__c11_atomic_init:
    llvm_unreachable("Already handled!");

  case AtomicExpr::AO__c11_atomic_load:
  case AtomicExpr::AO__atomic_load_n:
    break;

  case AtomicExpr::AO__atomic_load:
    Dest = EmitScalarExpr(E->getVal1());
    break;

  case AtomicExpr::AO__atomic_store:
    Val1 = EmitScalarExpr(E->getVal1());
    break;

  case AtomicExpr::AO__atomic_exchange:
    Val1 = EmitScalarExpr(E->getVal1());
    Dest = EmitScalarExpr(E->getVal2());
    break;

  case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
  case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
  case AtomicExpr::AO__atomic_compare_exchange_n:
  case AtomicExpr::AO__atomic_compare_exchange:
    Val1 = EmitScalarExpr(E->getVal1());
    if (E->getOp() == AtomicExpr::AO__atomic_compare_exchange)
      Val2 = EmitScalarExpr(E->getVal2());
    else
      Val2 = EmitValToTemp(*this, E->getVal2());
    OrderFail = EmitScalarExpr(E->getOrderFail());
    // Evaluate and discard the 'weak' argument.
    if (E->getNumSubExprs() == 6)
      EmitScalarExpr(E->getWeak());
    break;

  case AtomicExpr::AO__c11_atomic_fetch_add:
  case AtomicExpr::AO__c11_atomic_fetch_sub:
    if (MemTy->isPointerType()) {
      // For pointer arithmetic, we're required to do a bit of math:
      // adding 1 to an int* is not the same as adding 1 to a uintptr_t.
      // ... but only for the C11 builtins. The GNU builtins expect the
      // user to multiply by sizeof(T).
      QualType Val1Ty = E->getVal1()->getType();
      llvm::Value *Val1Scalar = EmitScalarExpr(E->getVal1());
      CharUnits PointeeIncAmt =
          getContext().getTypeSizeInChars(MemTy->getPointeeType());
      Val1Scalar = Builder.CreateMul(Val1Scalar, CGM.getSize(PointeeIncAmt));
      Val1 = CreateMemTemp(Val1Ty, ".atomictmp");
      EmitStoreOfScalar(Val1Scalar, MakeAddrLValue(Val1, Val1Ty));
      break;
    }
    // Fall through.
  case AtomicExpr::AO__atomic_fetch_add:
  case AtomicExpr::AO__atomic_fetch_sub:
  case AtomicExpr::AO__atomic_add_fetch:
  case AtomicExpr::AO__atomic_sub_fetch:
  case AtomicExpr::AO__c11_atomic_store:
  case AtomicExpr::AO__c11_atomic_exchange:
  case AtomicExpr::AO__atomic_store_n:
  case AtomicExpr::AO__atomic_exchange_n:
  case AtomicExpr::AO__c11_atomic_fetch_and:
  case AtomicExpr::AO__c11_atomic_fetch_or:
  case AtomicExpr::AO__c11_atomic_fetch_xor:
  case AtomicExpr::AO__atomic_fetch_and:
  case AtomicExpr::AO__atomic_fetch_or:
  case AtomicExpr::AO__atomic_fetch_xor:
  case AtomicExpr::AO__atomic_fetch_nand:
  case AtomicExpr::AO__atomic_and_fetch:
  case AtomicExpr::AO__atomic_or_fetch:
  case AtomicExpr::AO__atomic_xor_fetch:
  case AtomicExpr::AO__atomic_nand_fetch:
    Val1 = EmitValToTemp(*this, E->getVal1());
    break;
  }

  if (!E->getType()->isVoidType() && !Dest)
    Dest = CreateMemTemp(E->getType(), ".atomicdst");

  // Use a library call.  See: http://gcc.gnu.org/wiki/Atomic/GCCMM/LIbrary .
  if (UseLibcall) {
    bool UseOptimizedLibcall = false;
    switch (E->getOp()) {
    case AtomicExpr::AO__c11_atomic_fetch_add:
    case AtomicExpr::AO__atomic_fetch_add:
    case AtomicExpr::AO__c11_atomic_fetch_and:
    case AtomicExpr::AO__atomic_fetch_and:
    case AtomicExpr::AO__c11_atomic_fetch_or:
    case AtomicExpr::AO__atomic_fetch_or:
    case AtomicExpr::AO__c11_atomic_fetch_sub:
    case AtomicExpr::AO__atomic_fetch_sub:
    case AtomicExpr::AO__c11_atomic_fetch_xor:
    case AtomicExpr::AO__atomic_fetch_xor:
      // For these, only library calls for certain sizes exist.
      UseOptimizedLibcall = true;
      break;
    default:
      // Only use optimized library calls for sizes for which they exist.
      if (Size == 1 || Size == 2 || Size == 4 || Size == 8)
        UseOptimizedLibcall = true;
      break;
    }
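
    // Illustrative example (an assumption, not from the original source): an
    // 8-byte __atomic_exchange_n on a target without 64-bit inline atomics
    // becomes a call to __atomic_exchange_8, while a 3-byte __atomic_exchange
    // has no size-suffixed variant and must fall back to the generic,
    // size-parameterized __atomic_exchange entry point.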

    CallArgList Args;
    if (!UseOptimizedLibcall) {
      // For non-optimized library calls, the size is the first parameter
      Args.add(RValue::get(llvm::ConstantInt::get(SizeTy, Size)),
               getContext().getSizeType());
    }
    // Atomic address is the first or second parameter
    Args.add(RValue::get(EmitCastToVoidPtr(Ptr)),
             getContext().VoidPtrTy);

    std::string LibCallName;
    QualType RetTy;
    bool HaveRetTy = false;
    switch (E->getOp()) {
    // There is only one libcall for compare and exchange, because there is no
    // optimisation benefit possible from a libcall version of a weak compare
    // and exchange.
    // bool __atomic_compare_exchange(size_t size, void *mem, void *expected,
    //                                void *desired, int success, int failure)
    // bool __atomic_compare_exchange_N(T *mem, T *expected, T desired,
    //                                  int success, int failure)
    case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
    case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
    case AtomicExpr::AO__atomic_compare_exchange:
    case AtomicExpr::AO__atomic_compare_exchange_n:
      LibCallName = "__atomic_compare_exchange";
      RetTy = getContext().BoolTy;
      HaveRetTy = true;
      Args.add(RValue::get(EmitCastToVoidPtr(Val1)),
               getContext().VoidPtrTy);
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val2, MemTy);
      Args.add(RValue::get(Order),
               getContext().IntTy);
      Order = OrderFail;
      break;
    // void __atomic_exchange(size_t size, void *mem, void *val, void *return,
    //                        int order)
    // T __atomic_exchange_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_exchange:
    case AtomicExpr::AO__atomic_exchange_n:
    case AtomicExpr::AO__atomic_exchange:
      LibCallName = "__atomic_exchange";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy);
      break;
    // void __atomic_store(size_t size, void *mem, void *val, int order)
    // void __atomic_store_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_store:
    case AtomicExpr::AO__atomic_store:
    case AtomicExpr::AO__atomic_store_n:
      LibCallName = "__atomic_store";
      RetTy = getContext().VoidTy;
      HaveRetTy = true;
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy);
      break;
    // void __atomic_load(size_t size, void *mem, void *return, int order)
    // T __atomic_load_N(T *mem, int order)
    case AtomicExpr::AO__c11_atomic_load:
    case AtomicExpr::AO__atomic_load:
    case AtomicExpr::AO__atomic_load_n:
      LibCallName = "__atomic_load";
      break;
    // T __atomic_fetch_add_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_fetch_add:
    case AtomicExpr::AO__atomic_fetch_add:
      LibCallName = "__atomic_fetch_add";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy);
      break;
    // T __atomic_fetch_and_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_fetch_and:
    case AtomicExpr::AO__atomic_fetch_and:
      LibCallName = "__atomic_fetch_and";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy);
      break;
    // T __atomic_fetch_or_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_fetch_or:
    case AtomicExpr::AO__atomic_fetch_or:
      LibCallName = "__atomic_fetch_or";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy);
      break;
    // T __atomic_fetch_sub_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_fetch_sub:
    case AtomicExpr::AO__atomic_fetch_sub:
      LibCallName = "__atomic_fetch_sub";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy);
      break;
    // T __atomic_fetch_xor_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_fetch_xor:
    case AtomicExpr::AO__atomic_fetch_xor:
      LibCallName = "__atomic_fetch_xor";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy);
      break;
    default: return EmitUnsupportedRValue(E, "atomic library call");
    }

    // Optimized functions have the size in their name.
    if (UseOptimizedLibcall)
      LibCallName += "_" + llvm::utostr(Size);
    // By default, assume we return a value of the atomic type.
    if (!HaveRetTy) {
      if (UseOptimizedLibcall) {
        // Value is returned directly.
        RetTy = MemTy;
      } else {
        // Value is returned through parameter before the order.
        RetTy = getContext().VoidTy;
        Args.add(RValue::get(EmitCastToVoidPtr(Dest)),
                 getContext().VoidPtrTy);
      }
    }
    // order is always the last parameter
    Args.add(RValue::get(Order),
             getContext().IntTy);

    const CGFunctionInfo &FuncInfo =
        CGM.getTypes().arrangeFreeFunctionCall(RetTy, Args,
            FunctionType::ExtInfo(), RequiredArgs::All);
    llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FuncInfo);
    llvm::Constant *Func = CGM.CreateRuntimeFunction(FTy, LibCallName);
    RValue Res = EmitCall(FuncInfo, Func, ReturnValueSlot(), Args);
    if (!RetTy->isVoidType())
      return Res;
    if (E->getType()->isVoidType())
      return RValue::get(0);
    return convertTempToRValue(Dest, E->getType());
  }

  bool IsStore = E->getOp() == AtomicExpr::AO__c11_atomic_store ||
                 E->getOp() == AtomicExpr::AO__atomic_store ||
                 E->getOp() == AtomicExpr::AO__atomic_store_n;
  bool IsLoad = E->getOp() == AtomicExpr::AO__c11_atomic_load ||
                E->getOp() == AtomicExpr::AO__atomic_load ||
                E->getOp() == AtomicExpr::AO__atomic_load_n;

  llvm::Type *IPtrTy =
      llvm::IntegerType::get(getLLVMContext(), Size * 8)->getPointerTo();
  llvm::Value *OrigDest = Dest;
  Ptr = Builder.CreateBitCast(Ptr, IPtrTy);
  if (Val1) Val1 = Builder.CreateBitCast(Val1, IPtrTy);
  if (Val2) Val2 = Builder.CreateBitCast(Val2, IPtrTy);
  if (Dest && !E->isCmpXChg()) Dest = Builder.CreateBitCast(Dest, IPtrTy);

  if (isa<llvm::ConstantInt>(Order)) {
    int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
    switch (ord) {
    case AO_ABI_memory_order_relaxed:
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
                   llvm::Monotonic);
      break;
    case AO_ABI_memory_order_consume:
    case AO_ABI_memory_order_acquire:
      if (IsStore)
        break; // Avoid crashing on code with undefined behavior
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
                   llvm::Acquire);
      break;
    case AO_ABI_memory_order_release:
      if (IsLoad)
        break; // Avoid crashing on code with undefined behavior
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
                   llvm::Release);
      break;
    case AO_ABI_memory_order_acq_rel:
      if (IsLoad || IsStore)
        break; // Avoid crashing on code with undefined behavior
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
                   llvm::AcquireRelease);
      break;
    case AO_ABI_memory_order_seq_cst:
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
                   llvm::SequentiallyConsistent);
      break;
    default: // invalid order
      // We should never get here normally, but it's hard to
      // enforce that in general.
      break;
    }
    if (E->getType()->isVoidType())
      return RValue::get(0);
    return convertTempToRValue(OrigDest, E->getType());
  }

  // Long case, when Order isn't obviously constant.

  // Create all the relevant BB's
  llvm::BasicBlock *MonotonicBB = 0, *AcquireBB = 0, *ReleaseBB = 0,
                   *AcqRelBB = 0, *SeqCstBB = 0;
  MonotonicBB = createBasicBlock("monotonic", CurFn);
  if (!IsStore)
    AcquireBB = createBasicBlock("acquire", CurFn);
  if (!IsLoad)
    ReleaseBB = createBasicBlock("release", CurFn);
  if (!IsLoad && !IsStore)
    AcqRelBB = createBasicBlock("acqrel", CurFn);
  SeqCstBB = createBasicBlock("seqcst", CurFn);
  llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);

  // Create the switch for the split
  // MonotonicBB is arbitrarily chosen as the default case; in practice, this
  // doesn't matter unless someone is crazy enough to use something that
  // doesn't fold to a constant for the ordering.
  Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
  llvm::SwitchInst *SI = Builder.CreateSwitch(Order, MonotonicBB);

  // Emit all the different atomics
  Builder.SetInsertPoint(MonotonicBB);
  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
               llvm::Monotonic);
  Builder.CreateBr(ContBB);
  if (!IsStore) {
    Builder.SetInsertPoint(AcquireBB);
    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
                 llvm::Acquire);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32(1), AcquireBB);
    SI->addCase(Builder.getInt32(2), AcquireBB);
  }
  if (!IsLoad) {
    Builder.SetInsertPoint(ReleaseBB);
    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
                 llvm::Release);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32(3), ReleaseBB);
  }
  if (!IsLoad && !IsStore) {
    Builder.SetInsertPoint(AcqRelBB);
    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
                 llvm::AcquireRelease);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32(4), AcqRelBB);
  }
  Builder.SetInsertPoint(SeqCstBB);
  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
               llvm::SequentiallyConsistent);
  Builder.CreateBr(ContBB);
  SI->addCase(Builder.getInt32(5), SeqCstBB);

  // Cleanup and return
  Builder.SetInsertPoint(ContBB);
  if (E->getType()->isVoidType())
    return RValue::get(0);
  return convertTempToRValue(OrigDest, E->getType());
}

llvm::Value *AtomicInfo::emitCastToAtomicIntPointer(llvm::Value *addr) const {
  unsigned addrspace =
    cast<llvm::PointerType>(addr->getType())->getAddressSpace();
  llvm::IntegerType *ty =
    llvm::IntegerType::get(CGF.getLLVMContext(), AtomicSizeInBits);
  return CGF.Builder.CreateBitCast(addr, ty->getPointerTo(addrspace));
}
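
// Illustrative example (an assumption, not from the original source): for an
// _Atomic(float) in address space 0, emitCastToAtomicIntPointer rewrites a
// float* into an i32*, so the value can be loaded, stored, and compared as a
// 32-bit integer bit-pattern, which is what the LLVM atomic instructions
// operate on.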

RValue AtomicInfo::convertTempToRValue(llvm::Value *addr,
                                       AggValueSlot resultSlot) const {
  if (EvaluationKind == TEK_Aggregate) {
    // Nothing to do if the result is ignored.
    if (resultSlot.isIgnored()) return resultSlot.asRValue();

    assert(resultSlot.getAddr() == addr || hasPadding());

    // In these cases, we should have emitted directly into the result slot.
    if (!hasPadding() || resultSlot.isValueOfAtomic())
      return resultSlot.asRValue();

    // Otherwise, fall into the common path.
  }

  // Drill into the padding structure if we have one.
  if (hasPadding())
    addr = CGF.Builder.CreateStructGEP(addr, 0);

  // If we're emitting to an aggregate, copy into the result slot.
  if (EvaluationKind == TEK_Aggregate) {
    CGF.EmitAggregateCopy(resultSlot.getAddr(), addr, getValueType(),
                          resultSlot.isVolatile());
    return resultSlot.asRValue();
  }

  // Otherwise, just convert the temporary to an r-value using the
  // normal conversion routine.
  return CGF.convertTempToRValue(addr, getValueType());
}

/// Emit a load from an l-value of atomic type.  Note that the r-value
/// we produce is an r-value of the atomic *value* type.
RValue CodeGenFunction::EmitAtomicLoad(LValue src, AggValueSlot resultSlot) {
  AtomicInfo atomics(*this, src);

  // Check whether we should use a library call.
  if (atomics.shouldUseLibcall()) {
    llvm::Value *tempAddr;
    if (resultSlot.isValueOfAtomic()) {
      assert(atomics.getEvaluationKind() == TEK_Aggregate);
      tempAddr = resultSlot.getPaddedAtomicAddr();
    } else if (!resultSlot.isIgnored() && !atomics.hasPadding()) {
      assert(atomics.getEvaluationKind() == TEK_Aggregate);
      tempAddr = resultSlot.getAddr();
    } else {
      tempAddr = CreateMemTemp(atomics.getAtomicType(), "atomic-load-temp");
    }

    // void __atomic_load(size_t size, void *mem, void *return, int order);
    CallArgList args;
    args.add(RValue::get(atomics.getAtomicSizeValue()),
             getContext().getSizeType());
    args.add(RValue::get(EmitCastToVoidPtr(src.getAddress())),
             getContext().VoidPtrTy);
    args.add(RValue::get(EmitCastToVoidPtr(tempAddr)),
             getContext().VoidPtrTy);
    args.add(RValue::get(llvm::ConstantInt::get(IntTy,
                                                AO_ABI_memory_order_seq_cst)),
             getContext().IntTy);
    emitAtomicLibcall(*this, "__atomic_load", getContext().VoidTy, args);

    // Produce the r-value.
    return atomics.convertTempToRValue(tempAddr, resultSlot);
  }

  // Okay, we're doing this natively.
  llvm::Value *addr = atomics.emitCastToAtomicIntPointer(src.getAddress());
  llvm::LoadInst *load = Builder.CreateLoad(addr, "atomic-load");
  load->setAtomic(llvm::SequentiallyConsistent);

  // Other decoration.
  load->setAlignment(src.getAlignment().getQuantity());
  if (src.isVolatileQualified())
    load->setVolatile(true);
  if (src.getTBAAInfo())
    CGM.DecorateInstruction(load, src.getTBAAInfo());
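
  // Illustrative result (an assumption about typical output, not from the
  // original source): for _Atomic(int), the native path above usually boils
  // down to
  //
  //   %0 = load atomic i32* %addr seq_cst, align 4
  //
  // possibly marked volatile and annotated with TBAA metadata.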

  // Okay, turn that back into the original value type.
  QualType valueType = atomics.getValueType();
  llvm::Value *result = load;

  // If we're ignoring an aggregate return, don't do anything.
  if (atomics.getEvaluationKind() == TEK_Aggregate && resultSlot.isIgnored())
    return RValue::getAggregate(0, false);

  // The easiest way to do this is to go through memory, but we
  // try not to in some easy cases.
  if (atomics.getEvaluationKind() == TEK_Scalar && !atomics.hasPadding()) {
    llvm::Type *resultTy = CGM.getTypes().ConvertTypeForMem(valueType);
    if (isa<llvm::IntegerType>(resultTy)) {
      assert(result->getType() == resultTy);
      result = EmitFromMemory(result, valueType);
    } else if (isa<llvm::PointerType>(resultTy)) {
      result = Builder.CreateIntToPtr(result, resultTy);
    } else {
      result = Builder.CreateBitCast(result, resultTy);
    }
    return RValue::get(result);
  }

  // Create a temporary.  This needs to be big enough to hold the
  // atomic integer.
  llvm::Value *temp;
  bool tempIsVolatile = false;
  CharUnits tempAlignment;
  if (atomics.getEvaluationKind() == TEK_Aggregate &&
      (!atomics.hasPadding() || resultSlot.isValueOfAtomic())) {
    assert(!resultSlot.isIgnored());
    if (resultSlot.isValueOfAtomic()) {
      temp = resultSlot.getPaddedAtomicAddr();
      tempAlignment = atomics.getAtomicAlignment();
    } else {
      temp = resultSlot.getAddr();
      tempAlignment = atomics.getValueAlignment();
    }
    tempIsVolatile = resultSlot.isVolatile();
  } else {
    temp = CreateMemTemp(atomics.getAtomicType(), "atomic-load-temp");
    tempAlignment = atomics.getAtomicAlignment();
  }

  // Slam the integer into the temporary.
  llvm::Value *castTemp = atomics.emitCastToAtomicIntPointer(temp);
  Builder.CreateAlignedStore(result, castTemp, tempAlignment.getQuantity())
    ->setVolatile(tempIsVolatile);

  return atomics.convertTempToRValue(temp, resultSlot);
}


/// Copy an r-value into memory as part of storing to an atomic type.
/// This needs to create a bit-pattern suitable for atomic operations.
void AtomicInfo::emitCopyIntoMemory(RValue rvalue, LValue dest) const {
  // If we have an r-value, the rvalue should be of the atomic type,
  // which means that the caller is responsible for having zeroed
  // any padding.  Just do an aggregate copy of that type.
  if (rvalue.isAggregate()) {
    CGF.EmitAggregateCopy(dest.getAddress(),
                          rvalue.getAggregateAddr(),
                          getAtomicType(),
                          (rvalue.isVolatileQualified()
                           || dest.isVolatileQualified()),
                          dest.getAlignment());
    return;
  }

  // Okay, otherwise we're copying stuff.

  // Zero out the buffer if necessary.
  emitMemSetZeroIfNecessary(dest);

  // Drill past the padding if present.
  dest = projectValue(dest);

  // Okay, store the rvalue in.
  if (rvalue.isScalar()) {
    CGF.EmitStoreOfScalar(rvalue.getScalarVal(), dest, /*init*/ true);
  } else {
    CGF.EmitStoreOfComplex(rvalue.getComplexVal(), dest, /*init*/ true);
  }
}


/// Materialize an r-value into memory for the purposes of storing it
/// to an atomic type.
llvm::Value *AtomicInfo::materializeRValue(RValue rvalue) const {
  // Aggregate r-values are already in memory, and EmitAtomicStore
  // requires them to be values of the atomic type.
  if (rvalue.isAggregate())
    return rvalue.getAggregateAddr();

  // Otherwise, make a temporary and materialize into it.
  llvm::Value *temp = CGF.CreateMemTemp(getAtomicType(), "atomic-store-temp");
  LValue tempLV = CGF.MakeAddrLValue(temp, getAtomicType(), getAtomicAlignment());
  emitCopyIntoMemory(rvalue, tempLV);
  return temp;
}

/// Emit a store to an l-value of atomic type.
///
/// Note that the r-value is expected to be an r-value *of the atomic
/// type*; this means that for aggregate r-values, it should include
/// storage for any padding that was necessary.
void CodeGenFunction::EmitAtomicStore(RValue rvalue, LValue dest,
                                      bool isInit) {
  // If this is an aggregate r-value, it should agree in type except
  // maybe for address-space qualification.
  assert(!rvalue.isAggregate() ||
         rvalue.getAggregateAddr()->getType()->getPointerElementType()
           == dest.getAddress()->getType()->getPointerElementType());

  AtomicInfo atomics(*this, dest);

  // If this is an initialization, just put the value there normally.
  if (isInit) {
    atomics.emitCopyIntoMemory(rvalue, dest);
    return;
  }

  // Check whether we should use a library call.
  if (atomics.shouldUseLibcall()) {
    // Produce a source address.
    llvm::Value *srcAddr = atomics.materializeRValue(rvalue);

    // void __atomic_store(size_t size, void *mem, void *val, int order)
    CallArgList args;
    args.add(RValue::get(atomics.getAtomicSizeValue()),
             getContext().getSizeType());
    args.add(RValue::get(EmitCastToVoidPtr(dest.getAddress())),
             getContext().VoidPtrTy);
    args.add(RValue::get(EmitCastToVoidPtr(srcAddr)),
             getContext().VoidPtrTy);
    args.add(RValue::get(llvm::ConstantInt::get(IntTy,
                                                AO_ABI_memory_order_seq_cst)),
             getContext().IntTy);
    emitAtomicLibcall(*this, "__atomic_store", getContext().VoidTy, args);
    return;
  }
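
  // Illustrative call (an assumption about typical output, not from the
  // original source): the library-call path above, storing to a 32-byte
  // _Atomic struct, emits roughly
  //
  //   call void @__atomic_store(i64 32, i8* %obj, i8* %src.tmp, i32 5)
  //
  // with the source bit-pattern first materialized into a temporary by
  // materializeRValue().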

  // Okay, we're doing this natively.
  llvm::Value *intValue;

  // If we've got a scalar value of the right size, try to avoid going
  // through memory.
  if (rvalue.isScalar() && !atomics.hasPadding()) {
    llvm::Value *value = rvalue.getScalarVal();
    if (isa<llvm::IntegerType>(value->getType())) {
      intValue = value;
    } else {
      llvm::IntegerType *inputIntTy =
        llvm::IntegerType::get(getLLVMContext(), atomics.getValueSizeInBits());
      if (isa<llvm::PointerType>(value->getType())) {
        intValue = Builder.CreatePtrToInt(value, inputIntTy);
      } else {
        intValue = Builder.CreateBitCast(value, inputIntTy);
      }
    }

  // Otherwise, we need to go through memory.
  } else {
    // Put the r-value in memory.
    llvm::Value *addr = atomics.materializeRValue(rvalue);

    // Cast the temporary to the atomic int type and pull a value out.
    addr = atomics.emitCastToAtomicIntPointer(addr);
    intValue = Builder.CreateAlignedLoad(addr,
                                 atomics.getAtomicAlignment().getQuantity());
  }

  // Do the atomic store.
  llvm::Value *addr = atomics.emitCastToAtomicIntPointer(dest.getAddress());
  llvm::StoreInst *store = Builder.CreateStore(intValue, addr);

  // Initializations don't need to be atomic.
  if (!isInit) store->setAtomic(llvm::SequentiallyConsistent);
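
  // Illustrative result (an assumption about typical output, not from the
  // original source): for _Atomic(float), the non-init path ends up as
  //
  //   %bits = bitcast float %v to i32
  //   store atomic i32 %bits, i32* %addr seq_cst, align 4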

  // Other decoration.
  store->setAlignment(dest.getAlignment().getQuantity());
  if (dest.isVolatileQualified())
    store->setVolatile(true);
  if (dest.getTBAAInfo())
    CGM.DecorateInstruction(store, dest.getTBAAInfo());
}

void CodeGenFunction::EmitAtomicInit(Expr *init, LValue dest) {
  AtomicInfo atomics(*this, dest);

  switch (atomics.getEvaluationKind()) {
  case TEK_Scalar: {
    llvm::Value *value = EmitScalarExpr(init);
    atomics.emitCopyIntoMemory(RValue::get(value), dest);
    return;
  }

  case TEK_Complex: {
    ComplexPairTy value = EmitComplexExpr(init);
    atomics.emitCopyIntoMemory(RValue::getComplex(value), dest);
    return;
  }

  case TEK_Aggregate: {
    // Memset the buffer first if there's any possibility of
    // uninitialized internal bits.
    atomics.emitMemSetZeroIfNecessary(dest);

    // HACK: whether the initializer actually has an atomic type
    // doesn't really seem reliable right now.
    if (!init->getType()->isAtomicType()) {
      dest = atomics.projectValue(dest);
    }

    // Evaluate the expression directly into the destination.
    AggValueSlot slot = AggValueSlot::forLValue(dest,
                                        AggValueSlot::IsNotDestructed,
                                        AggValueSlot::DoesNotNeedGCBarriers,
                                        AggValueSlot::IsNotAliased);
    EmitAggExpr(init, slot);
    return;
  }
  }
  llvm_unreachable("bad evaluation kind");
}