//===--- CGAtomic.cpp - Emit LLVM IR for atomic operations ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the code for emitting atomic operations.
//
//===----------------------------------------------------------------------===//

#include "CodeGenFunction.h"
#include "CGCall.h"
#include "CodeGenModule.h"
#include "clang/AST/ASTContext.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Operator.h"

using namespace clang;
using namespace CodeGen;

namespace {
  class AtomicInfo {
    CodeGenFunction &CGF;
    QualType AtomicTy;
    QualType ValueTy;
    uint64_t AtomicSizeInBits;
    uint64_t ValueSizeInBits;
    CharUnits AtomicAlign;
    CharUnits ValueAlign;
    CharUnits LValueAlign;
    TypeEvaluationKind EvaluationKind;
    bool UseLibcall;
  public:
    AtomicInfo(CodeGenFunction &CGF, LValue &lvalue) : CGF(CGF) {
      assert(lvalue.isSimple());

      AtomicTy = lvalue.getType();
      ValueTy = AtomicTy->castAs<AtomicType>()->getValueType();
      EvaluationKind = CGF.getEvaluationKind(ValueTy);

      ASTContext &C = CGF.getContext();

      uint64_t valueAlignInBits;
      std::tie(ValueSizeInBits, valueAlignInBits) = C.getTypeInfo(ValueTy);

      uint64_t atomicAlignInBits;
      std::tie(AtomicSizeInBits, atomicAlignInBits) = C.getTypeInfo(AtomicTy);

      assert(ValueSizeInBits <= AtomicSizeInBits);
      assert(valueAlignInBits <= atomicAlignInBits);

      AtomicAlign = C.toCharUnitsFromBits(atomicAlignInBits);
      ValueAlign = C.toCharUnitsFromBits(valueAlignInBits);
      if (lvalue.getAlignment().isZero())
        lvalue.setAlignment(AtomicAlign);

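      // Use a library call if the atomic object is bigger than the guaranteed
      // alignment of the l-value, or wider than the largest atomic operation
      // the target can inline.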
      UseLibcall =
        (AtomicSizeInBits > uint64_t(C.toBits(lvalue.getAlignment())) ||
         AtomicSizeInBits > C.getTargetInfo().getMaxAtomicInlineWidth());
    }

    QualType getAtomicType() const { return AtomicTy; }
    QualType getValueType() const { return ValueTy; }
    CharUnits getAtomicAlignment() const { return AtomicAlign; }
    CharUnits getValueAlignment() const { return ValueAlign; }
    uint64_t getAtomicSizeInBits() const { return AtomicSizeInBits; }
    uint64_t getValueSizeInBits() const { return ValueSizeInBits; }
    TypeEvaluationKind getEvaluationKind() const { return EvaluationKind; }
    bool shouldUseLibcall() const { return UseLibcall; }

    /// Is the atomic size larger than the underlying value type?
    ///
    /// Note that the absence of padding does not mean that atomic
    /// objects are completely interchangeable with non-atomic
    /// objects: we might have promoted the alignment of a type
    /// without making it bigger.
    bool hasPadding() const {
      return (ValueSizeInBits != AtomicSizeInBits);
    }

    bool emitMemSetZeroIfNecessary(LValue dest) const;

    llvm::Value *getAtomicSizeValue() const {
      CharUnits size = CGF.getContext().toCharUnitsFromBits(AtomicSizeInBits);
      return CGF.CGM.getSize(size);
    }

    /// Cast the given pointer to an integer pointer suitable for
    /// atomic operations.
    llvm::Value *emitCastToAtomicIntPointer(llvm::Value *addr) const;

    /// Turn an atomic-layout object into an r-value.
    RValue convertTempToRValue(llvm::Value *addr,
                               AggValueSlot resultSlot,
                               SourceLocation loc) const;

    /// Copy an atomic r-value into atomic-layout memory.
    void emitCopyIntoMemory(RValue rvalue, LValue lvalue) const;

    /// Project an l-value down to the value field.
    LValue projectValue(LValue lvalue) const {
      llvm::Value *addr = lvalue.getAddress();
      if (hasPadding())
        addr = CGF.Builder.CreateStructGEP(addr, 0);

      return LValue::MakeAddr(addr, getValueType(), lvalue.getAlignment(),
                              CGF.getContext(), lvalue.getTBAAInfo());
    }

    /// Materialize an atomic r-value in atomic-layout memory.
    llvm::Value *materializeRValue(RValue rvalue) const;

  private:
    bool requiresMemSetZero(llvm::Type *type) const;
  };
}

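/// Emit a call to a runtime support routine (one of the __atomic_* library
/// functions) with the given name, result type, and argument list.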
static RValue emitAtomicLibcall(CodeGenFunction &CGF,
                                StringRef fnName,
                                QualType resultType,
                                CallArgList &args) {
  const CGFunctionInfo &fnInfo =
    CGF.CGM.getTypes().arrangeFreeFunctionCall(resultType, args,
            FunctionType::ExtInfo(), RequiredArgs::All);
  llvm::FunctionType *fnTy = CGF.CGM.getTypes().GetFunctionType(fnInfo);
  llvm::Constant *fn = CGF.CGM.CreateRuntimeFunction(fnTy, fnName);
  return CGF.EmitCall(fnInfo, fn, ReturnValueSlot(), args);
}

/// Does a store of the given IR type modify the full expected width?
static bool isFullSizeType(CodeGenModule &CGM, llvm::Type *type,
                           uint64_t expectedSize) {
  return (CGM.getDataLayout().getTypeStoreSize(type) * 8 == expectedSize);
}

/// Does the atomic type require memsetting to zero before initialization?
///
/// The IR type is provided as a way of making certain queries faster.
bool AtomicInfo::requiresMemSetZero(llvm::Type *type) const {
  // If the atomic type has size padding, we definitely need a memset.
  if (hasPadding()) return true;

  // Otherwise, do some simple heuristics to try to avoid it:
  switch (getEvaluationKind()) {
  // For scalars and complexes, check whether the store size of the
  // type uses the full size.
  case TEK_Scalar:
    return !isFullSizeType(CGF.CGM, type, AtomicSizeInBits);
  case TEK_Complex:
    return !isFullSizeType(CGF.CGM, type->getStructElementType(0),
                           AtomicSizeInBits / 2);

  // Padding in structs has an undefined bit pattern.  User beware.
  case TEK_Aggregate:
    return false;
  }
  llvm_unreachable("bad evaluation kind");
}

bool AtomicInfo::emitMemSetZeroIfNecessary(LValue dest) const {
  llvm::Value *addr = dest.getAddress();
  if (!requiresMemSetZero(addr->getType()->getPointerElementType()))
    return false;

  CGF.Builder.CreateMemSet(addr, llvm::ConstantInt::get(CGF.Int8Ty, 0),
                           AtomicSizeInBits / 8,
                           dest.getAlignment().getQuantity());
  return true;
}

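/// Emit a single strong or weak cmpxchg with the given success and failure
/// orderings.  On failure the old value is stored back into the 'expected'
/// temporary (Val1); the boolean success result is stored to Dest.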
static void emitAtomicCmpXchg(CodeGenFunction &CGF, AtomicExpr *E, bool IsWeak,
                              llvm::Value *Dest, llvm::Value *Ptr,
                              llvm::Value *Val1, llvm::Value *Val2,
                              uint64_t Size, unsigned Align,
                              llvm::AtomicOrdering SuccessOrder,
                              llvm::AtomicOrdering FailureOrder) {
  // Load the expected and desired values into registers for the cmpxchg.
  llvm::LoadInst *Expected = CGF.Builder.CreateLoad(Val1);
  Expected->setAlignment(Align);
  llvm::LoadInst *Desired = CGF.Builder.CreateLoad(Val2);
  Desired->setAlignment(Align);

  llvm::AtomicCmpXchgInst *Pair = CGF.Builder.CreateAtomicCmpXchg(
      Ptr, Expected, Desired, SuccessOrder, FailureOrder);
  Pair->setVolatile(E->isVolatile());
  Pair->setWeak(IsWeak);

  // Cmp holds the result of the compare-exchange operation: true on success,
  // false on failure.
  llvm::Value *Old = CGF.Builder.CreateExtractValue(Pair, 0);
  llvm::Value *Cmp = CGF.Builder.CreateExtractValue(Pair, 1);

  // This basic block is used to hold the store instruction if the operation
  // failed.
  llvm::BasicBlock *StoreExpectedBB =
      CGF.createBasicBlock("cmpxchg.store_expected", CGF.CurFn);

  // This basic block is the exit point of the operation; we should end up
  // here regardless of whether or not the operation succeeded.
  llvm::BasicBlock *ContinueBB =
      CGF.createBasicBlock("cmpxchg.continue", CGF.CurFn);

  // If the comparison succeeded, branch straight to the exit point; otherwise
  // fall through to the block that stores the old value back into Expected.
  CGF.Builder.CreateCondBr(Cmp, ContinueBB, StoreExpectedBB);

  CGF.Builder.SetInsertPoint(StoreExpectedBB);
  // Update the memory at Expected with Old's value.
  llvm::StoreInst *StoreExpected = CGF.Builder.CreateStore(Old, Val1);
  StoreExpected->setAlignment(Align);
  // Finally, branch to the exit point.
  CGF.Builder.CreateBr(ContinueBB);

  CGF.Builder.SetInsertPoint(ContinueBB);
  // Update the memory at Dest with Cmp's value.
  CGF.EmitStoreOfScalar(Cmp, CGF.MakeAddrLValue(Dest, E->getType()));
  return;
}

/// Given an ordering required on success, emit all possible cmpxchg
/// instructions to cope with the provided (but possibly only dynamically
/// known) FailureOrder.
static void emitAtomicCmpXchgFailureSet(CodeGenFunction &CGF, AtomicExpr *E,
                                        bool IsWeak, llvm::Value *Dest,
                                        llvm::Value *Ptr, llvm::Value *Val1,
                                        llvm::Value *Val2,
                                        llvm::Value *FailureOrderVal,
                                        uint64_t Size, unsigned Align,
                                        llvm::AtomicOrdering SuccessOrder) {
  llvm::AtomicOrdering FailureOrder;
  if (llvm::ConstantInt *FO = dyn_cast<llvm::ConstantInt>(FailureOrderVal)) {
    switch (FO->getSExtValue()) {
    default:
      FailureOrder = llvm::Monotonic;
      break;
    case AtomicExpr::AO_ABI_memory_order_consume:
    case AtomicExpr::AO_ABI_memory_order_acquire:
      FailureOrder = llvm::Acquire;
      break;
    case AtomicExpr::AO_ABI_memory_order_seq_cst:
      FailureOrder = llvm::SequentiallyConsistent;
      break;
    }
    if (FailureOrder >= SuccessOrder) {
      // Don't assert on undefined behaviour.
      FailureOrder =
        llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(SuccessOrder);
    }
    emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size, Align,
                      SuccessOrder, FailureOrder);
    return;
  }

  // Create all the relevant BB's
  llvm::BasicBlock *MonotonicBB = nullptr, *AcquireBB = nullptr,
                   *SeqCstBB = nullptr;
  MonotonicBB = CGF.createBasicBlock("monotonic_fail", CGF.CurFn);
  if (SuccessOrder != llvm::Monotonic && SuccessOrder != llvm::Release)
    AcquireBB = CGF.createBasicBlock("acquire_fail", CGF.CurFn);
  if (SuccessOrder == llvm::SequentiallyConsistent)
    SeqCstBB = CGF.createBasicBlock("seqcst_fail", CGF.CurFn);

  llvm::BasicBlock *ContBB = CGF.createBasicBlock("atomic.continue", CGF.CurFn);

  llvm::SwitchInst *SI = CGF.Builder.CreateSwitch(FailureOrderVal, MonotonicBB);

  // Emit all the different atomics

  // MonotonicBB is arbitrarily chosen as the default case; in practice, this
  // doesn't matter unless someone is crazy enough to use something that
  // doesn't fold to a constant for the ordering.
  CGF.Builder.SetInsertPoint(MonotonicBB);
  emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2,
                    Size, Align, SuccessOrder, llvm::Monotonic);
  CGF.Builder.CreateBr(ContBB);

  if (AcquireBB) {
    CGF.Builder.SetInsertPoint(AcquireBB);
    emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2,
                      Size, Align, SuccessOrder, llvm::Acquire);
    CGF.Builder.CreateBr(ContBB);
    SI->addCase(CGF.Builder.getInt32(AtomicExpr::AO_ABI_memory_order_consume),
                AcquireBB);
    SI->addCase(CGF.Builder.getInt32(AtomicExpr::AO_ABI_memory_order_acquire),
                AcquireBB);
  }
  if (SeqCstBB) {
    CGF.Builder.SetInsertPoint(SeqCstBB);
    emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2,
                      Size, Align, SuccessOrder, llvm::SequentiallyConsistent);
    CGF.Builder.CreateBr(ContBB);
    SI->addCase(CGF.Builder.getInt32(AtomicExpr::AO_ABI_memory_order_seq_cst),
                SeqCstBB);
  }

  CGF.Builder.SetInsertPoint(ContBB);
}

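/// Emit the LLVM instruction(s) for one atomic operation whose memory
/// ordering is already known, as a cmpxchg, atomic load, atomic store, or
/// atomicrmw as appropriate.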
static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, llvm::Value *Dest,
                         llvm::Value *Ptr, llvm::Value *Val1, llvm::Value *Val2,
                         llvm::Value *IsWeak, llvm::Value *FailureOrder,
                         uint64_t Size, unsigned Align,
                         llvm::AtomicOrdering Order) {
  llvm::AtomicRMWInst::BinOp Op = llvm::AtomicRMWInst::Add;
  llvm::Instruction::BinaryOps PostOp = (llvm::Instruction::BinaryOps)0;

  switch (E->getOp()) {
  case AtomicExpr::AO__c11_atomic_init:
    llvm_unreachable("Already handled!");

  case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
    emitAtomicCmpXchgFailureSet(CGF, E, false, Dest, Ptr, Val1, Val2,
                                FailureOrder, Size, Align, Order);
    return;
  case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
    emitAtomicCmpXchgFailureSet(CGF, E, true, Dest, Ptr, Val1, Val2,
                                FailureOrder, Size, Align, Order);
    return;
  case AtomicExpr::AO__atomic_compare_exchange:
  case AtomicExpr::AO__atomic_compare_exchange_n: {
    if (llvm::ConstantInt *IsWeakC = dyn_cast<llvm::ConstantInt>(IsWeak)) {
      emitAtomicCmpXchgFailureSet(CGF, E, IsWeakC->getZExtValue(), Dest, Ptr,
                                  Val1, Val2, FailureOrder, Size, Align, Order);
    } else {
      // Create all the relevant BB's
      llvm::BasicBlock *StrongBB =
          CGF.createBasicBlock("cmpxchg.strong", CGF.CurFn);
      llvm::BasicBlock *WeakBB =
          CGF.createBasicBlock("cmpxchg.weak", CGF.CurFn);
      llvm::BasicBlock *ContBB =
          CGF.createBasicBlock("cmpxchg.continue", CGF.CurFn);

      llvm::SwitchInst *SI = CGF.Builder.CreateSwitch(IsWeak, WeakBB);
      SI->addCase(CGF.Builder.getInt1(false), StrongBB);

      CGF.Builder.SetInsertPoint(StrongBB);
      emitAtomicCmpXchgFailureSet(CGF, E, false, Dest, Ptr, Val1, Val2,
                                  FailureOrder, Size, Align, Order);
      CGF.Builder.CreateBr(ContBB);

      CGF.Builder.SetInsertPoint(WeakBB);
      emitAtomicCmpXchgFailureSet(CGF, E, true, Dest, Ptr, Val1, Val2,
                                  FailureOrder, Size, Align, Order);
      CGF.Builder.CreateBr(ContBB);

      CGF.Builder.SetInsertPoint(ContBB);
    }
    return;
  }
  case AtomicExpr::AO__c11_atomic_load:
  case AtomicExpr::AO__atomic_load_n:
  case AtomicExpr::AO__atomic_load: {
    llvm::LoadInst *Load = CGF.Builder.CreateLoad(Ptr);
    Load->setAtomic(Order);
    Load->setAlignment(Size);
    Load->setVolatile(E->isVolatile());
    llvm::StoreInst *StoreDest = CGF.Builder.CreateStore(Load, Dest);
    StoreDest->setAlignment(Align);
    return;
  }

  case AtomicExpr::AO__c11_atomic_store:
  case AtomicExpr::AO__atomic_store:
  case AtomicExpr::AO__atomic_store_n: {
    assert(!Dest && "Store does not return a value");
    llvm::LoadInst *LoadVal1 = CGF.Builder.CreateLoad(Val1);
    LoadVal1->setAlignment(Align);
    llvm::StoreInst *Store = CGF.Builder.CreateStore(LoadVal1, Ptr);
    Store->setAtomic(Order);
    Store->setAlignment(Size);
    Store->setVolatile(E->isVolatile());
    return;
  }

  case AtomicExpr::AO__c11_atomic_exchange:
  case AtomicExpr::AO__atomic_exchange_n:
  case AtomicExpr::AO__atomic_exchange:
    Op = llvm::AtomicRMWInst::Xchg;
    break;

  case AtomicExpr::AO__atomic_add_fetch:
    PostOp = llvm::Instruction::Add;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_add:
  case AtomicExpr::AO__atomic_fetch_add:
    Op = llvm::AtomicRMWInst::Add;
    break;

  case AtomicExpr::AO__atomic_sub_fetch:
    PostOp = llvm::Instruction::Sub;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_sub:
  case AtomicExpr::AO__atomic_fetch_sub:
    Op = llvm::AtomicRMWInst::Sub;
    break;

  case AtomicExpr::AO__atomic_and_fetch:
    PostOp = llvm::Instruction::And;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_and:
  case AtomicExpr::AO__atomic_fetch_and:
    Op = llvm::AtomicRMWInst::And;
    break;

  case AtomicExpr::AO__atomic_or_fetch:
    PostOp = llvm::Instruction::Or;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_or:
  case AtomicExpr::AO__atomic_fetch_or:
    Op = llvm::AtomicRMWInst::Or;
    break;

  case AtomicExpr::AO__atomic_xor_fetch:
    PostOp = llvm::Instruction::Xor;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_xor:
  case AtomicExpr::AO__atomic_fetch_xor:
    Op = llvm::AtomicRMWInst::Xor;
    break;

  case AtomicExpr::AO__atomic_nand_fetch:
    PostOp = llvm::Instruction::And;
    // Fall through.
  case AtomicExpr::AO__atomic_fetch_nand:
    Op = llvm::AtomicRMWInst::Nand;
    break;
  }

  llvm::LoadInst *LoadVal1 = CGF.Builder.CreateLoad(Val1);
  LoadVal1->setAlignment(Align);
  llvm::AtomicRMWInst *RMWI =
      CGF.Builder.CreateAtomicRMW(Op, Ptr, LoadVal1, Order);
  RMWI->setVolatile(E->isVolatile());

  // For __atomic_*_fetch operations, perform the operation again to
  // determine the value which was written.
  llvm::Value *Result = RMWI;
  if (PostOp)
    Result = CGF.Builder.CreateBinOp(PostOp, RMWI, LoadVal1);
  if (E->getOp() == AtomicExpr::AO__atomic_nand_fetch)
    Result = CGF.Builder.CreateNot(Result);
  llvm::StoreInst *StoreDest = CGF.Builder.CreateStore(Result, Dest);
  StoreDest->setAlignment(Align);
}

// This function emits any expression (scalar, complex, or aggregate)
// into a temporary alloca.
static llvm::Value *
EmitValToTemp(CodeGenFunction &CGF, Expr *E) {
  llvm::Value *DeclPtr = CGF.CreateMemTemp(E->getType(), ".atomictmp");
  CGF.EmitAnyExprToMem(E, DeclPtr, E->getType().getQualifiers(),
                       /*Init*/ true);
  return DeclPtr;
}

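/// Add the value operand to an atomic libcall's argument list: by value for
/// the size-suffixed (optimized) libcalls, by opaque pointer for the generic
/// ones.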
static void
AddDirectArgument(CodeGenFunction &CGF, CallArgList &Args,
                  bool UseOptimizedLibcall, llvm::Value *Val, QualType ValTy,
                  SourceLocation Loc) {
  if (UseOptimizedLibcall) {
    // Load value and pass it to the function directly.
    unsigned Align = CGF.getContext().getTypeAlignInChars(ValTy).getQuantity();
    Val = CGF.EmitLoadOfScalar(Val, false, Align, ValTy, Loc);
    Args.add(RValue::get(Val), ValTy);
  } else {
    // Non-optimized functions always take a reference.
    Args.add(RValue::get(CGF.EmitCastToVoidPtr(Val)),
                         CGF.getContext().VoidPtrTy);
  }
}

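/// Emit an atomic expression (__c11_atomic_* or __atomic_* builtin), either
/// inline as native LLVM atomic instructions or as a call to the __atomic_*
/// runtime library.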
RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
  QualType AtomicTy = E->getPtr()->getType()->getPointeeType();
  QualType MemTy = AtomicTy;
  if (const AtomicType *AT = AtomicTy->getAs<AtomicType>())
    MemTy = AT->getValueType();
  CharUnits sizeChars = getContext().getTypeSizeInChars(AtomicTy);
  uint64_t Size = sizeChars.getQuantity();
  CharUnits alignChars = getContext().getTypeAlignInChars(AtomicTy);
  unsigned Align = alignChars.getQuantity();
  unsigned MaxInlineWidthInBits =
    getTarget().getMaxAtomicInlineWidth();
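  // Fall back to the __atomic_* library routines if the type's size and
  // alignment disagree, or if it is wider than the largest inline atomic the
  // target supports.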
  bool UseLibcall = (Size != Align ||
                     getContext().toBits(sizeChars) > MaxInlineWidthInBits);

  llvm::Value *IsWeak = nullptr, *OrderFail = nullptr, *Val1 = nullptr,
              *Val2 = nullptr;
  llvm::Value *Ptr = EmitScalarExpr(E->getPtr());

  if (E->getOp() == AtomicExpr::AO__c11_atomic_init) {
    assert(!Dest && "Init does not return a value");
    LValue lvalue = LValue::MakeAddr(Ptr, AtomicTy, alignChars, getContext());
    EmitAtomicInit(E->getVal1(), lvalue);
    return RValue::get(nullptr);
  }

  llvm::Value *Order = EmitScalarExpr(E->getOrder());

  switch (E->getOp()) {
  case AtomicExpr::AO__c11_atomic_init:
    llvm_unreachable("Already handled!");

  case AtomicExpr::AO__c11_atomic_load:
  case AtomicExpr::AO__atomic_load_n:
    break;

  case AtomicExpr::AO__atomic_load:
    Dest = EmitScalarExpr(E->getVal1());
    break;

  case AtomicExpr::AO__atomic_store:
    Val1 = EmitScalarExpr(E->getVal1());
    break;

  case AtomicExpr::AO__atomic_exchange:
    Val1 = EmitScalarExpr(E->getVal1());
    Dest = EmitScalarExpr(E->getVal2());
    break;

  case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
  case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
  case AtomicExpr::AO__atomic_compare_exchange_n:
  case AtomicExpr::AO__atomic_compare_exchange:
    Val1 = EmitScalarExpr(E->getVal1());
    if (E->getOp() == AtomicExpr::AO__atomic_compare_exchange)
      Val2 = EmitScalarExpr(E->getVal2());
    else
      Val2 = EmitValToTemp(*this, E->getVal2());
    OrderFail = EmitScalarExpr(E->getOrderFail());
    if (E->getNumSubExprs() == 6)
      IsWeak = EmitScalarExpr(E->getWeak());
    break;

  case AtomicExpr::AO__c11_atomic_fetch_add:
  case AtomicExpr::AO__c11_atomic_fetch_sub:
    if (MemTy->isPointerType()) {
      // For pointer arithmetic, we're required to do a bit of math:
      // adding 1 to an int* is not the same as adding 1 to a uintptr_t.
      // ... but only for the C11 builtins. The GNU builtins expect the
      // user to multiply by sizeof(T).
      QualType Val1Ty = E->getVal1()->getType();
      llvm::Value *Val1Scalar = EmitScalarExpr(E->getVal1());
      CharUnits PointeeIncAmt =
          getContext().getTypeSizeInChars(MemTy->getPointeeType());
      Val1Scalar = Builder.CreateMul(Val1Scalar, CGM.getSize(PointeeIncAmt));
      Val1 = CreateMemTemp(Val1Ty, ".atomictmp");
      EmitStoreOfScalar(Val1Scalar, MakeAddrLValue(Val1, Val1Ty));
      break;
    }
    // Fall through.
  case AtomicExpr::AO__atomic_fetch_add:
  case AtomicExpr::AO__atomic_fetch_sub:
  case AtomicExpr::AO__atomic_add_fetch:
  case AtomicExpr::AO__atomic_sub_fetch:
  case AtomicExpr::AO__c11_atomic_store:
  case AtomicExpr::AO__c11_atomic_exchange:
  case AtomicExpr::AO__atomic_store_n:
  case AtomicExpr::AO__atomic_exchange_n:
  case AtomicExpr::AO__c11_atomic_fetch_and:
  case AtomicExpr::AO__c11_atomic_fetch_or:
  case AtomicExpr::AO__c11_atomic_fetch_xor:
  case AtomicExpr::AO__atomic_fetch_and:
  case AtomicExpr::AO__atomic_fetch_or:
  case AtomicExpr::AO__atomic_fetch_xor:
  case AtomicExpr::AO__atomic_fetch_nand:
  case AtomicExpr::AO__atomic_and_fetch:
  case AtomicExpr::AO__atomic_or_fetch:
  case AtomicExpr::AO__atomic_xor_fetch:
  case AtomicExpr::AO__atomic_nand_fetch:
    Val1 = EmitValToTemp(*this, E->getVal1());
    break;
  }

  if (!E->getType()->isVoidType() && !Dest)
    Dest = CreateMemTemp(E->getType(), ".atomicdst");

  // Use a library call.  See: http://gcc.gnu.org/wiki/Atomic/GCCMM/LIbrary .
  if (UseLibcall) {
    bool UseOptimizedLibcall = false;
    switch (E->getOp()) {
    case AtomicExpr::AO__c11_atomic_fetch_add:
    case AtomicExpr::AO__atomic_fetch_add:
    case AtomicExpr::AO__c11_atomic_fetch_and:
    case AtomicExpr::AO__atomic_fetch_and:
    case AtomicExpr::AO__c11_atomic_fetch_or:
    case AtomicExpr::AO__atomic_fetch_or:
    case AtomicExpr::AO__c11_atomic_fetch_sub:
    case AtomicExpr::AO__atomic_fetch_sub:
    case AtomicExpr::AO__c11_atomic_fetch_xor:
    case AtomicExpr::AO__atomic_fetch_xor:
      // For these, only library calls for certain sizes exist.
      UseOptimizedLibcall = true;
      break;
    default:
      // Only use optimized library calls for sizes for which they exist.
      if (Size == 1 || Size == 2 || Size == 4 || Size == 8)
        UseOptimizedLibcall = true;
      break;
    }

    CallArgList Args;
    if (!UseOptimizedLibcall) {
      // For non-optimized library calls, the size is the first parameter
      Args.add(RValue::get(llvm::ConstantInt::get(SizeTy, Size)),
               getContext().getSizeType());
    }
    // Atomic address is the first or second parameter
    Args.add(RValue::get(EmitCastToVoidPtr(Ptr)), getContext().VoidPtrTy);

    std::string LibCallName;
    QualType LoweredMemTy =
      MemTy->isPointerType() ? getContext().getIntPtrType() : MemTy;
    QualType RetTy;
    bool HaveRetTy = false;
    switch (E->getOp()) {
    // There is only one libcall for compare and exchange, because there is no
    // optimisation benefit possible from a libcall version of a weak compare
    // and exchange.
    // bool __atomic_compare_exchange(size_t size, void *mem, void *expected,
    //                                void *desired, int success, int failure)
    // bool __atomic_compare_exchange_N(T *mem, T *expected, T desired,
    //                                  int success, int failure)
    case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
    case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
    case AtomicExpr::AO__atomic_compare_exchange:
    case AtomicExpr::AO__atomic_compare_exchange_n:
      LibCallName = "__atomic_compare_exchange";
      RetTy = getContext().BoolTy;
      HaveRetTy = true;
      Args.add(RValue::get(EmitCastToVoidPtr(Val1)), getContext().VoidPtrTy);
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val2, MemTy,
                        E->getExprLoc());
      Args.add(RValue::get(Order), getContext().IntTy);
      Order = OrderFail;
      break;
    // void __atomic_exchange(size_t size, void *mem, void *val, void *return,
    //                        int order)
    // T __atomic_exchange_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_exchange:
    case AtomicExpr::AO__atomic_exchange_n:
    case AtomicExpr::AO__atomic_exchange:
      LibCallName = "__atomic_exchange";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
                        E->getExprLoc());
      break;
    // void __atomic_store(size_t size, void *mem, void *val, int order)
    // void __atomic_store_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_store:
    case AtomicExpr::AO__atomic_store:
    case AtomicExpr::AO__atomic_store_n:
      LibCallName = "__atomic_store";
      RetTy = getContext().VoidTy;
      HaveRetTy = true;
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
                        E->getExprLoc());
      break;
    // void __atomic_load(size_t size, void *mem, void *return, int order)
    // T __atomic_load_N(T *mem, int order)
    case AtomicExpr::AO__c11_atomic_load:
    case AtomicExpr::AO__atomic_load:
    case AtomicExpr::AO__atomic_load_n:
      LibCallName = "__atomic_load";
      break;
    // T __atomic_fetch_add_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_fetch_add:
    case AtomicExpr::AO__atomic_fetch_add:
      LibCallName = "__atomic_fetch_add";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, LoweredMemTy,
                        E->getExprLoc());
      break;
    // T __atomic_fetch_and_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_fetch_and:
    case AtomicExpr::AO__atomic_fetch_and:
      LibCallName = "__atomic_fetch_and";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
                        E->getExprLoc());
      break;
    // T __atomic_fetch_or_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_fetch_or:
    case AtomicExpr::AO__atomic_fetch_or:
      LibCallName = "__atomic_fetch_or";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
                        E->getExprLoc());
      break;
    // T __atomic_fetch_sub_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_fetch_sub:
    case AtomicExpr::AO__atomic_fetch_sub:
      LibCallName = "__atomic_fetch_sub";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, LoweredMemTy,
                        E->getExprLoc());
      break;
    // T __atomic_fetch_xor_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_fetch_xor:
    case AtomicExpr::AO__atomic_fetch_xor:
      LibCallName = "__atomic_fetch_xor";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
                        E->getExprLoc());
      break;
    default: return EmitUnsupportedRValue(E, "atomic library call");
    }

    // Optimized functions have the size in their name.
    if (UseOptimizedLibcall)
      LibCallName += "_" + llvm::utostr(Size);
    // By default, assume we return a value of the atomic type.
    if (!HaveRetTy) {
      if (UseOptimizedLibcall) {
        // Value is returned directly.
        RetTy = MemTy;
      } else {
        // Value is returned through parameter before the order.
        RetTy = getContext().VoidTy;
        Args.add(RValue::get(EmitCastToVoidPtr(Dest)),
                 getContext().VoidPtrTy);
      }
    }
    // order is always the last parameter
    Args.add(RValue::get(Order),
             getContext().IntTy);

    const CGFunctionInfo &FuncInfo =
        CGM.getTypes().arrangeFreeFunctionCall(RetTy, Args,
            FunctionType::ExtInfo(), RequiredArgs::All);
    llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FuncInfo);
    llvm::Constant *Func = CGM.CreateRuntimeFunction(FTy, LibCallName);
    RValue Res = EmitCall(FuncInfo, Func, ReturnValueSlot(), Args);
    if (!RetTy->isVoidType())
      return Res;
    if (E->getType()->isVoidType())
      return RValue::get(nullptr);
    return convertTempToRValue(Dest, E->getType(), E->getExprLoc());
  }

  bool IsStore = E->getOp() == AtomicExpr::AO__c11_atomic_store ||
                 E->getOp() == AtomicExpr::AO__atomic_store ||
                 E->getOp() == AtomicExpr::AO__atomic_store_n;
  bool IsLoad = E->getOp() == AtomicExpr::AO__c11_atomic_load ||
                E->getOp() == AtomicExpr::AO__atomic_load ||
                E->getOp() == AtomicExpr::AO__atomic_load_n;

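  // Inline atomics are lowered on an integer type of the full operation
  // width; cast the pointers involved accordingly.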
  llvm::Type *IPtrTy =
      llvm::IntegerType::get(getLLVMContext(), Size * 8)->getPointerTo();
  llvm::Value *OrigDest = Dest;
  Ptr = Builder.CreateBitCast(Ptr, IPtrTy);
  if (Val1) Val1 = Builder.CreateBitCast(Val1, IPtrTy);
  if (Val2) Val2 = Builder.CreateBitCast(Val2, IPtrTy);
  if (Dest && !E->isCmpXChg()) Dest = Builder.CreateBitCast(Dest, IPtrTy);

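  // If the ordering is a compile-time constant, emit just the one required
  // form of the operation; otherwise switch over the dynamic ordering value.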
  if (isa<llvm::ConstantInt>(Order)) {
    int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
    switch (ord) {
    case AtomicExpr::AO_ABI_memory_order_relaxed:
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
                   Size, Align, llvm::Monotonic);
      break;
    case AtomicExpr::AO_ABI_memory_order_consume:
    case AtomicExpr::AO_ABI_memory_order_acquire:
      if (IsStore)
        break; // Avoid crashing on code with undefined behavior
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
                   Size, Align, llvm::Acquire);
      break;
    case AtomicExpr::AO_ABI_memory_order_release:
      if (IsLoad)
        break; // Avoid crashing on code with undefined behavior
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
                   Size, Align, llvm::Release);
      break;
    case AtomicExpr::AO_ABI_memory_order_acq_rel:
      if (IsLoad || IsStore)
        break; // Avoid crashing on code with undefined behavior
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
                   Size, Align, llvm::AcquireRelease);
      break;
    case AtomicExpr::AO_ABI_memory_order_seq_cst:
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
                   Size, Align, llvm::SequentiallyConsistent);
      break;
    default: // invalid order
      // We should not ever get here normally, but it's hard to
      // enforce that in general.
      break;
    }
    if (E->getType()->isVoidType())
      return RValue::get(nullptr);
    return convertTempToRValue(OrigDest, E->getType(), E->getExprLoc());
  }

  // Long case, when Order isn't obviously constant.

  // Create all the relevant BB's
  llvm::BasicBlock *MonotonicBB = nullptr, *AcquireBB = nullptr,
                   *ReleaseBB = nullptr, *AcqRelBB = nullptr,
                   *SeqCstBB = nullptr;
  MonotonicBB = createBasicBlock("monotonic", CurFn);
  if (!IsStore)
    AcquireBB = createBasicBlock("acquire", CurFn);
  if (!IsLoad)
    ReleaseBB = createBasicBlock("release", CurFn);
  if (!IsLoad && !IsStore)
    AcqRelBB = createBasicBlock("acqrel", CurFn);
  SeqCstBB = createBasicBlock("seqcst", CurFn);
  llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);

  // Create the switch for the split
  // MonotonicBB is arbitrarily chosen as the default case; in practice, this
  // doesn't matter unless someone is crazy enough to use something that
  // doesn't fold to a constant for the ordering.
  Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
  llvm::SwitchInst *SI = Builder.CreateSwitch(Order, MonotonicBB);

  // Emit all the different atomics
  Builder.SetInsertPoint(MonotonicBB);
  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
               Size, Align, llvm::Monotonic);
  Builder.CreateBr(ContBB);
  if (!IsStore) {
    Builder.SetInsertPoint(AcquireBB);
    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
                 Size, Align, llvm::Acquire);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32(AtomicExpr::AO_ABI_memory_order_consume),
                AcquireBB);
    SI->addCase(Builder.getInt32(AtomicExpr::AO_ABI_memory_order_acquire),
                AcquireBB);
  }
  if (!IsLoad) {
    Builder.SetInsertPoint(ReleaseBB);
    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
                 Size, Align, llvm::Release);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32(AtomicExpr::AO_ABI_memory_order_release),
                ReleaseBB);
  }
  if (!IsLoad && !IsStore) {
    Builder.SetInsertPoint(AcqRelBB);
    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
                 Size, Align, llvm::AcquireRelease);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32(AtomicExpr::AO_ABI_memory_order_acq_rel),
                AcqRelBB);
  }
  Builder.SetInsertPoint(SeqCstBB);
  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
               Size, Align, llvm::SequentiallyConsistent);
  Builder.CreateBr(ContBB);
  SI->addCase(Builder.getInt32(AtomicExpr::AO_ABI_memory_order_seq_cst),
              SeqCstBB);

  // Cleanup and return
  Builder.SetInsertPoint(ContBB);
  if (E->getType()->isVoidType())
    return RValue::get(nullptr);
  return convertTempToRValue(OrigDest, E->getType(), E->getExprLoc());
}

llvm::Value *AtomicInfo::emitCastToAtomicIntPointer(llvm::Value *addr) const {
  unsigned addrspace =
    cast<llvm::PointerType>(addr->getType())->getAddressSpace();
  llvm::IntegerType *ty =
    llvm::IntegerType::get(CGF.getLLVMContext(), AtomicSizeInBits);
  return CGF.Builder.CreateBitCast(addr, ty->getPointerTo(addrspace));
}

RValue AtomicInfo::convertTempToRValue(llvm::Value *addr,
                                       AggValueSlot resultSlot,
                                       SourceLocation loc) const {
  if (EvaluationKind == TEK_Aggregate)
    return resultSlot.asRValue();

  // Drill into the padding structure if we have one.
  if (hasPadding())
    addr = CGF.Builder.CreateStructGEP(addr, 0);

  // Otherwise, just convert the temporary to an r-value using the
  // normal conversion routine.
  return CGF.convertTempToRValue(addr, getValueType(), loc);
}

/// Emit a load from an l-value of atomic type.  Note that the r-value
/// we produce is an r-value of the atomic *value* type.
RValue CodeGenFunction::EmitAtomicLoad(LValue src, SourceLocation loc,
                                       AggValueSlot resultSlot) {
  AtomicInfo atomics(*this, src);

  // Check whether we should use a library call.
  if (atomics.shouldUseLibcall()) {
    llvm::Value *tempAddr;
    if (!resultSlot.isIgnored()) {
      assert(atomics.getEvaluationKind() == TEK_Aggregate);
      tempAddr = resultSlot.getAddr();
    } else {
      tempAddr = CreateMemTemp(atomics.getAtomicType(), "atomic-load-temp");
    }

    // void __atomic_load(size_t size, void *mem, void *return, int order);
    CallArgList args;
    args.add(RValue::get(atomics.getAtomicSizeValue()),
             getContext().getSizeType());
    args.add(RValue::get(EmitCastToVoidPtr(src.getAddress())),
             getContext().VoidPtrTy);
    args.add(RValue::get(EmitCastToVoidPtr(tempAddr)),
             getContext().VoidPtrTy);
    args.add(RValue::get(llvm::ConstantInt::get(
                 IntTy, AtomicExpr::AO_ABI_memory_order_seq_cst)),
             getContext().IntTy);
    emitAtomicLibcall(*this, "__atomic_load", getContext().VoidTy, args);

    // Produce the r-value.
    return atomics.convertTempToRValue(tempAddr, resultSlot, loc);
  }

  // Okay, we're doing this natively.
  llvm::Value *addr = atomics.emitCastToAtomicIntPointer(src.getAddress());
  llvm::LoadInst *load = Builder.CreateLoad(addr, "atomic-load");
  load->setAtomic(llvm::SequentiallyConsistent);

  // Other decoration.
  load->setAlignment(src.getAlignment().getQuantity());
  if (src.isVolatileQualified())
    load->setVolatile(true);
  if (src.getTBAAInfo())
    CGM.DecorateInstruction(load, src.getTBAAInfo());

  // Okay, turn that back into the original value type.
  QualType valueType = atomics.getValueType();
  llvm::Value *result = load;

  // If we're ignoring an aggregate return, don't do anything.
  if (atomics.getEvaluationKind() == TEK_Aggregate && resultSlot.isIgnored())
    return RValue::getAggregate(nullptr, false);

  // The easiest way to do this is to go through memory, but we
  // try not to in some easy cases.
  if (atomics.getEvaluationKind() == TEK_Scalar && !atomics.hasPadding()) {
    llvm::Type *resultTy = CGM.getTypes().ConvertTypeForMem(valueType);
    if (isa<llvm::IntegerType>(resultTy)) {
      assert(result->getType() == resultTy);
      result = EmitFromMemory(result, valueType);
    } else if (isa<llvm::PointerType>(resultTy)) {
      result = Builder.CreateIntToPtr(result, resultTy);
    } else {
      result = Builder.CreateBitCast(result, resultTy);
    }
    return RValue::get(result);
  }

  // Create a temporary.  This needs to be big enough to hold the
  // atomic integer.
  llvm::Value *temp;
  bool tempIsVolatile = false;
  CharUnits tempAlignment;
  if (atomics.getEvaluationKind() == TEK_Aggregate) {
    assert(!resultSlot.isIgnored());
    temp = resultSlot.getAddr();
    tempAlignment = atomics.getValueAlignment();
    tempIsVolatile = resultSlot.isVolatile();
  } else {
    temp = CreateMemTemp(atomics.getAtomicType(), "atomic-load-temp");
    tempAlignment = atomics.getAtomicAlignment();
  }

  // Slam the integer into the temporary.
  llvm::Value *castTemp = atomics.emitCastToAtomicIntPointer(temp);
  Builder.CreateAlignedStore(result, castTemp, tempAlignment.getQuantity())
    ->setVolatile(tempIsVolatile);

  return atomics.convertTempToRValue(temp, resultSlot, loc);
}

/// Copy an r-value into memory as part of storing to an atomic type.
/// This needs to create a bit-pattern suitable for atomic operations.
void AtomicInfo::emitCopyIntoMemory(RValue rvalue, LValue dest) const {
  // If we have an r-value, the rvalue should be of the atomic type,
  // which means that the caller is responsible for having zeroed
  // any padding.  Just do an aggregate copy of that type.
  if (rvalue.isAggregate()) {
    CGF.EmitAggregateCopy(dest.getAddress(),
                          rvalue.getAggregateAddr(),
                          getAtomicType(),
                          (rvalue.isVolatileQualified()
                           || dest.isVolatileQualified()),
                          dest.getAlignment());
    return;
  }

  // Okay, otherwise we're copying stuff.

  // Zero out the buffer if necessary.
  emitMemSetZeroIfNecessary(dest);

  // Drill past the padding if present.
  dest = projectValue(dest);

  // Okay, store the rvalue in.
  if (rvalue.isScalar()) {
    CGF.EmitStoreOfScalar(rvalue.getScalarVal(), dest, /*init*/ true);
  } else {
    CGF.EmitStoreOfComplex(rvalue.getComplexVal(), dest, /*init*/ true);
  }
}

/// Materialize an r-value into memory for the purposes of storing it
/// to an atomic type.
llvm::Value *AtomicInfo::materializeRValue(RValue rvalue) const {
  // Aggregate r-values are already in memory, and EmitAtomicStore
  // requires them to be values of the atomic type.
  if (rvalue.isAggregate())
    return rvalue.getAggregateAddr();

  // Otherwise, make a temporary and materialize into it.
  llvm::Value *temp = CGF.CreateMemTemp(getAtomicType(), "atomic-store-temp");
  LValue tempLV = CGF.MakeAddrLValue(temp, getAtomicType(),
                                     getAtomicAlignment());
  emitCopyIntoMemory(rvalue, tempLV);
  return temp;
}

/// Emit a store to an l-value of atomic type.
///
/// Note that the r-value is expected to be an r-value *of the atomic
/// type*; this means that for aggregate r-values, it should include
/// storage for any padding that was necessary.
void CodeGenFunction::EmitAtomicStore(RValue rvalue, LValue dest, bool isInit) {
  // If this is an aggregate r-value, it should agree in type except
  // maybe for address-space qualification.
  assert(!rvalue.isAggregate() ||
         rvalue.getAggregateAddr()->getType()->getPointerElementType()
           == dest.getAddress()->getType()->getPointerElementType());

  AtomicInfo atomics(*this, dest);

  // If this is an initialization, just put the value there normally.
  if (isInit) {
    atomics.emitCopyIntoMemory(rvalue, dest);
    return;
  }

  // Check whether we should use a library call.
  if (atomics.shouldUseLibcall()) {
    // Produce a source address.
    llvm::Value *srcAddr = atomics.materializeRValue(rvalue);

    // void __atomic_store(size_t size, void *mem, void *val, int order)
    CallArgList args;
    args.add(RValue::get(atomics.getAtomicSizeValue()),
             getContext().getSizeType());
    args.add(RValue::get(EmitCastToVoidPtr(dest.getAddress())),
             getContext().VoidPtrTy);
    args.add(RValue::get(EmitCastToVoidPtr(srcAddr)),
             getContext().VoidPtrTy);
    args.add(RValue::get(llvm::ConstantInt::get(
                 IntTy, AtomicExpr::AO_ABI_memory_order_seq_cst)),
             getContext().IntTy);
    emitAtomicLibcall(*this, "__atomic_store", getContext().VoidTy, args);
    return;
  }

  // Okay, we're doing this natively.
  llvm::Value *intValue;

  // If we've got a scalar value of the right size, try to avoid going
  // through memory.
  if (rvalue.isScalar() && !atomics.hasPadding()) {
    llvm::Value *value = rvalue.getScalarVal();
    if (isa<llvm::IntegerType>(value->getType())) {
      intValue = value;
    } else {
      llvm::IntegerType *inputIntTy =
        llvm::IntegerType::get(getLLVMContext(), atomics.getValueSizeInBits());
      if (isa<llvm::PointerType>(value->getType())) {
        intValue = Builder.CreatePtrToInt(value, inputIntTy);
      } else {
        intValue = Builder.CreateBitCast(value, inputIntTy);
      }
    }

  // Otherwise, we need to go through memory.
  } else {
    // Put the r-value in memory.
    llvm::Value *addr = atomics.materializeRValue(rvalue);

    // Cast the temporary to the atomic int type and pull a value out.
    addr = atomics.emitCastToAtomicIntPointer(addr);
    intValue = Builder.CreateAlignedLoad(addr,
                                 atomics.getAtomicAlignment().getQuantity());
  }

  // Do the atomic store.
  llvm::Value *addr = atomics.emitCastToAtomicIntPointer(dest.getAddress());
  llvm::StoreInst *store = Builder.CreateStore(intValue, addr);

  // Initializations don't need to be atomic.
  if (!isInit) store->setAtomic(llvm::SequentiallyConsistent);

  // Other decoration.
  store->setAlignment(dest.getAlignment().getQuantity());
  if (dest.isVolatileQualified())
    store->setVolatile(true);
  if (dest.getTBAAInfo())
    CGM.DecorateInstruction(store, dest.getTBAAInfo());
}

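/// Emit the (non-atomic) initialization of an object of atomic type from the
/// given initializer expression.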
void CodeGenFunction::EmitAtomicInit(Expr *init, LValue dest) {
  AtomicInfo atomics(*this, dest);

  switch (atomics.getEvaluationKind()) {
  case TEK_Scalar: {
    llvm::Value *value = EmitScalarExpr(init);
    atomics.emitCopyIntoMemory(RValue::get(value), dest);
    return;
  }

  case TEK_Complex: {
    ComplexPairTy value = EmitComplexExpr(init);
    atomics.emitCopyIntoMemory(RValue::getComplex(value), dest);
    return;
  }

  case TEK_Aggregate: {
    // Fix up the destination if the initializer isn't an expression
    // of atomic type.
    bool Zeroed = false;
    if (!init->getType()->isAtomicType()) {
      Zeroed = atomics.emitMemSetZeroIfNecessary(dest);
      dest = atomics.projectValue(dest);
    }

    // Evaluate the expression directly into the destination.
    AggValueSlot slot = AggValueSlot::forLValue(dest,
                                        AggValueSlot::IsNotDestructed,
                                        AggValueSlot::DoesNotNeedGCBarriers,
                                        AggValueSlot::IsNotAliased,
                                        Zeroed ? AggValueSlot::IsZeroed :
                                                 AggValueSlot::IsNotZeroed);

    EmitAggExpr(init, slot);
    return;
  }
  }
  llvm_unreachable("bad evaluation kind");
}