//===-- AtomicExpandLoadLinkedPass.cpp - Expand atomic instructions -------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains a pass (at IR level) to replace atomic instructions with
// appropriate (intrinsic-based) ldrex/strex loops.
//
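// As an illustration, assuming a target that exposes load-linked and
// store-conditional as the ARM ldrex/strex intrinsics and that asks for
// explicit fences (the intrinsic names are target-specific):
//
//     %old = atomicrmw add i32* %ptr, i32 1 seq_cst
//
// becomes roughly:
//
//     fence release
//     br label %atomicrmw.start
//   atomicrmw.start:
//     %loaded = call i32 @llvm.arm.ldrex.p0i32(i32* %ptr)
//     %new = add i32 %loaded, 1
//     %stored = call i32 @llvm.arm.strex.p0i32(i32 %new, i32* %ptr)
//     %tryagain = icmp ne i32 %stored, 0
//     br i1 %tryagain, label %atomicrmw.start, label %atomicrmw.end
//   atomicrmw.end:
//     fence seq_cst
//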
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/Passes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/Debug.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetSubtargetInfo.h"

using namespace llvm;

#define DEBUG_TYPE "arm-atomic-expand"

namespace {
  class AtomicExpandLoadLinked : public FunctionPass {
    const TargetMachine *TM;
  public:
    static char ID; // Pass identification, replacement for typeid
    explicit AtomicExpandLoadLinked(const TargetMachine *TM = nullptr)
      : FunctionPass(ID), TM(TM) {
      initializeAtomicExpandLoadLinkedPass(*PassRegistry::getPassRegistry());
    }

    bool runOnFunction(Function &F) override;
    bool expandAtomicInsts(Function &F);

    bool expandAtomicLoad(LoadInst *LI);
    bool expandAtomicStore(StoreInst *SI);
    bool expandAtomicRMW(AtomicRMWInst *AI);
    bool expandAtomicCmpXchg(AtomicCmpXchgInst *CI);

    AtomicOrdering insertLeadingFence(IRBuilder<> &Builder, AtomicOrdering Ord);
    void insertTrailingFence(IRBuilder<> &Builder, AtomicOrdering Ord);
  };
}

char AtomicExpandLoadLinked::ID = 0;
char &llvm::AtomicExpandLoadLinkedID = AtomicExpandLoadLinked::ID;
INITIALIZE_TM_PASS(AtomicExpandLoadLinked, "atomic-ll-sc",
    "Expand Atomic calls in terms of load-linked & store-conditional",
    false, false)

FunctionPass *llvm::createAtomicExpandLoadLinkedPass(const TargetMachine *TM) {
  return new AtomicExpandLoadLinked(TM);
}

bool AtomicExpandLoadLinked::runOnFunction(Function &F) {
  if (!TM || !TM->getSubtargetImpl()->enableAtomicExpandLoadLinked())
    return false;

  SmallVector<Instruction *, 1> AtomicInsts;

  // Changing control-flow while iterating through it is a bad idea, so gather a
  // list of all atomic instructions before we start.
  for (BasicBlock &BB : F)
    for (Instruction &Inst : BB) {
      if (isa<AtomicRMWInst>(&Inst) || isa<AtomicCmpXchgInst>(&Inst) ||
          (isa<LoadInst>(&Inst) && cast<LoadInst>(&Inst)->isAtomic()) ||
          (isa<StoreInst>(&Inst) && cast<StoreInst>(&Inst)->isAtomic()))
        AtomicInsts.push_back(&Inst);
    }

  bool MadeChange = false;
  for (Instruction *Inst : AtomicInsts) {
    if (!TM->getTargetLowering()->shouldExpandAtomicInIR(Inst))
      continue;

    if (AtomicRMWInst *AI = dyn_cast<AtomicRMWInst>(Inst))
      MadeChange |= expandAtomicRMW(AI);
    else if (AtomicCmpXchgInst *CI = dyn_cast<AtomicCmpXchgInst>(Inst))
      MadeChange |= expandAtomicCmpXchg(CI);
    else if (LoadInst *LI = dyn_cast<LoadInst>(Inst))
      MadeChange |= expandAtomicLoad(LI);
    else if (StoreInst *SI = dyn_cast<StoreInst>(Inst))
      MadeChange |= expandAtomicStore(SI);
    else
      llvm_unreachable("Unknown atomic instruction");
  }

  return MadeChange;
}

bool AtomicExpandLoadLinked::expandAtomicLoad(LoadInst *LI) {
  // Load instructions don't actually need a leading fence, even in the
  // SequentiallyConsistent case.
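  // (Roughly speaking, on targets that insert explicit fences, anything the
  // load must be ordered after will already have ended with its own trailing
  // barrier, so a leading one here would be redundant.)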
  AtomicOrdering MemOpOrder =
      TM->getTargetLowering()->getInsertFencesForAtomic() ? Monotonic
                                                          : LI->getOrdering();

  // The only 64-bit load guaranteed to be single-copy atomic by the ARM ARM is
  // an ldrexd (A3.5.3).
  IRBuilder<> Builder(LI);
  Value *Val = TM->getTargetLowering()->emitLoadLinked(
      Builder, LI->getPointerOperand(), MemOpOrder);

  insertTrailingFence(Builder, LI->getOrdering());

  LI->replaceAllUsesWith(Val);
  LI->eraseFromParent();

  return true;
}

bool AtomicExpandLoadLinked::expandAtomicStore(StoreInst *SI) {
  // The only atomic 64-bit store on ARM is an strexd that succeeds, which means
  // we need a loop and the entire instruction is essentially an "atomicrmw
  // xchg" that ignores the value loaded.
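  //
  // For example, an IR-level
  //     store atomic i64 %val, i64* %ptr release, align 8
  // is first rewritten here to the equivalent (result-unused)
  //     atomicrmw xchg i64* %ptr, i64 %val release
  // and then lowered by expandAtomicRMW below.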
  IRBuilder<> Builder(SI);
  AtomicRMWInst *AI =
      Builder.CreateAtomicRMW(AtomicRMWInst::Xchg, SI->getPointerOperand(),
                              SI->getValueOperand(), SI->getOrdering());
  SI->eraseFromParent();

  // Now we have an appropriate swap instruction, lower it as usual.
  return expandAtomicRMW(AI);
}

bool AtomicExpandLoadLinked::expandAtomicRMW(AtomicRMWInst *AI) {
  AtomicOrdering Order = AI->getOrdering();
  Value *Addr = AI->getPointerOperand();
  BasicBlock *BB = AI->getParent();
  Function *F = BB->getParent();
  LLVMContext &Ctx = F->getContext();

  // Given: atomicrmw some_op iN* %addr, iN %incr ordering
  //
  // The standard expansion we produce is:
  //     [...]
  //     fence?
  // atomicrmw.start:
  //     %loaded = @load.linked(%addr)
  //     %new = some_op iN %loaded, %incr
  //     %stored = @store_conditional(%new, %addr)
  //     %try_again = icmp ne i32 %stored, 0
  //     br i1 %try_again, label %atomicrmw.start, label %atomicrmw.end
  // atomicrmw.end:
  //     fence?
  //     [...]
  BasicBlock *ExitBB = BB->splitBasicBlock(AI, "atomicrmw.end");
  BasicBlock *LoopBB = BasicBlock::Create(Ctx, "atomicrmw.start", F, ExitBB);

  // This grabs the DebugLoc from AI.
  IRBuilder<> Builder(AI);

  // The split call above "helpfully" added a branch at the end of BB (to the
  // wrong place), but we might want a fence too. It's easiest to just remove
  // the branch entirely.
  std::prev(BB->end())->eraseFromParent();
  Builder.SetInsertPoint(BB);
  AtomicOrdering MemOpOrder = insertLeadingFence(Builder, Order);
  Builder.CreateBr(LoopBB);

  // Start the main loop block now that we've taken care of the preliminaries.
  Builder.SetInsertPoint(LoopBB);
  Value *Loaded =
      TM->getTargetLowering()->emitLoadLinked(Builder, Addr, MemOpOrder);

  Value *NewVal;
  switch (AI->getOperation()) {
  case AtomicRMWInst::Xchg:
    NewVal = AI->getValOperand();
    break;
  case AtomicRMWInst::Add:
    NewVal = Builder.CreateAdd(Loaded, AI->getValOperand(), "new");
    break;
  case AtomicRMWInst::Sub:
    NewVal = Builder.CreateSub(Loaded, AI->getValOperand(), "new");
    break;
  case AtomicRMWInst::And:
    NewVal = Builder.CreateAnd(Loaded, AI->getValOperand(), "new");
    break;
  case AtomicRMWInst::Nand:
    NewVal = Builder.CreateNot(Builder.CreateAnd(Loaded, AI->getValOperand()),
                               "new");
    break;
  case AtomicRMWInst::Or:
    NewVal = Builder.CreateOr(Loaded, AI->getValOperand(), "new");
    break;
  case AtomicRMWInst::Xor:
    NewVal = Builder.CreateXor(Loaded, AI->getValOperand(), "new");
    break;
  case AtomicRMWInst::Max:
    NewVal = Builder.CreateICmpSGT(Loaded, AI->getValOperand());
    NewVal = Builder.CreateSelect(NewVal, Loaded, AI->getValOperand(), "new");
    break;
  case AtomicRMWInst::Min:
    NewVal = Builder.CreateICmpSLE(Loaded, AI->getValOperand());
    NewVal = Builder.CreateSelect(NewVal, Loaded, AI->getValOperand(), "new");
    break;
  case AtomicRMWInst::UMax:
    NewVal = Builder.CreateICmpUGT(Loaded, AI->getValOperand());
    NewVal = Builder.CreateSelect(NewVal, Loaded, AI->getValOperand(), "new");
    break;
  case AtomicRMWInst::UMin:
    NewVal = Builder.CreateICmpULE(Loaded, AI->getValOperand());
    NewVal = Builder.CreateSelect(NewVal, Loaded, AI->getValOperand(), "new");
    break;
  default:
    llvm_unreachable("Unknown atomic op");
  }

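  // By convention, the store-conditional returns 0 if the store succeeded, so
  // loop back around while the returned value is non-zero.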
  Value *StoreSuccess = TM->getTargetLowering()->emitStoreConditional(
      Builder, NewVal, Addr, MemOpOrder);
  Value *TryAgain = Builder.CreateICmpNE(
      StoreSuccess, ConstantInt::get(IntegerType::get(Ctx, 32), 0), "tryagain");
  Builder.CreateCondBr(TryAgain, LoopBB, ExitBB);

  Builder.SetInsertPoint(ExitBB, ExitBB->begin());
  insertTrailingFence(Builder, Order);

  AI->replaceAllUsesWith(Loaded);
  AI->eraseFromParent();

  return true;
}

bool AtomicExpandLoadLinked::expandAtomicCmpXchg(AtomicCmpXchgInst *CI) {
  AtomicOrdering SuccessOrder = CI->getSuccessOrdering();
  AtomicOrdering FailureOrder = CI->getFailureOrdering();
  Value *Addr = CI->getPointerOperand();
  BasicBlock *BB = CI->getParent();
  Function *F = BB->getParent();
  LLVMContext &Ctx = F->getContext();

  // Given: cmpxchg some_op iN* %addr, iN %desired, iN %new success_ord fail_ord
  //
  // The full expansion we produce is:
  //     [...]
  //     fence?
  // cmpxchg.start:
  //     %loaded = @load.linked(%addr)
  //     %should_store = icmp eq %loaded, %desired
  //     br i1 %should_store, label %cmpxchg.trystore,
  //                          label %cmpxchg.failure
  // cmpxchg.trystore:
  //     %stored = @store_conditional(%new, %addr)
  //     %success = icmp eq i32 %stored, 0
  //     br i1 %success, label %cmpxchg.success,
  //                     label %cmpxchg.start/%cmpxchg.failure
  // cmpxchg.success:
  //     fence?
  //     br label %cmpxchg.end
  // cmpxchg.failure:
  //     fence?
  //     br label %cmpxchg.end
  // cmpxchg.end:
  //     %success = phi i1 [true, %cmpxchg.success], [false, %cmpxchg.failure]
  //     %restmp = insertvalue { iN, i1 } undef, iN %loaded, 0
  //     %res = insertvalue { iN, i1 } %restmp, i1 %success, 1
  //     [...]
  BasicBlock *ExitBB = BB->splitBasicBlock(CI, "cmpxchg.end");
  auto FailureBB = BasicBlock::Create(Ctx, "cmpxchg.failure", F, ExitBB);
  auto SuccessBB = BasicBlock::Create(Ctx, "cmpxchg.success", F, FailureBB);
  auto TryStoreBB = BasicBlock::Create(Ctx, "cmpxchg.trystore", F, SuccessBB);
  auto LoopBB = BasicBlock::Create(Ctx, "cmpxchg.start", F, TryStoreBB);

  // This grabs the DebugLoc from CI
  IRBuilder<> Builder(CI);

  // The split call above "helpfully" added a branch at the end of BB (to the
  // wrong place), but we might want a fence too. It's easiest to just remove
  // the branch entirely.
  std::prev(BB->end())->eraseFromParent();
  Builder.SetInsertPoint(BB);
  AtomicOrdering MemOpOrder = insertLeadingFence(Builder, SuccessOrder);
  Builder.CreateBr(LoopBB);

  // Start the main loop block now that we've taken care of the preliminaries.
  Builder.SetInsertPoint(LoopBB);
  Value *Loaded =
      TM->getTargetLowering()->emitLoadLinked(Builder, Addr, MemOpOrder);
  Value *ShouldStore =
      Builder.CreateICmpEQ(Loaded, CI->getCompareOperand(), "should_store");

  // If the cmpxchg doesn't actually need any ordering when it fails, we can
  // jump straight past that fence instruction (if it exists).
  Builder.CreateCondBr(ShouldStore, TryStoreBB, FailureBB);

  Builder.SetInsertPoint(TryStoreBB);
  Value *StoreSuccess = TM->getTargetLowering()->emitStoreConditional(
      Builder, CI->getNewValOperand(), Addr, MemOpOrder);
  StoreSuccess = Builder.CreateICmpEQ(
      StoreSuccess, ConstantInt::get(Type::getInt32Ty(Ctx), 0), "success");
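  // A weak cmpxchg is permitted to fail spuriously, so if the store-conditional
  // fails we can branch straight to the failure block instead of retrying.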
  Builder.CreateCondBr(StoreSuccess, SuccessBB,
                       CI->isWeak() ? FailureBB : LoopBB);

  // Insert a trailing fence, if necessary, so that later instructions can't be
  // reordered before the successful exchange.
  Builder.SetInsertPoint(SuccessBB);
  insertTrailingFence(Builder, SuccessOrder);
  Builder.CreateBr(ExitBB);

  Builder.SetInsertPoint(FailureBB);
  insertTrailingFence(Builder, FailureOrder);
  Builder.CreateBr(ExitBB);

  // Finally, we have control-flow based knowledge of whether the cmpxchg
  // succeeded or not. We expose this to later passes by rewriting uses of the
  // cmpxchg's { iN, i1 } result in terms of values we already have.

  // Set up the builder so we can create any PHIs we need.
  Builder.SetInsertPoint(ExitBB, ExitBB->begin());
  PHINode *Success = Builder.CreatePHI(Type::getInt1Ty(Ctx), 2);
  Success->addIncoming(ConstantInt::getTrue(Ctx), SuccessBB);
  Success->addIncoming(ConstantInt::getFalse(Ctx), FailureBB);

  // Look for any users of the cmpxchg that just extract the loaded value or
  // the success bit, and replace them with the values we already have.
  SmallVector<ExtractValueInst *, 2> PrunedInsts;
  for (auto User : CI->users()) {
    ExtractValueInst *EV = dyn_cast<ExtractValueInst>(User);
    if (!EV)
      continue;

    assert(EV->getNumIndices() == 1 && EV->getIndices()[0] <= 1 &&
           "weird extraction from { iN, i1 }");

    if (EV->getIndices()[0] == 0)
      EV->replaceAllUsesWith(Loaded);
    else
      EV->replaceAllUsesWith(Success);

    PrunedInsts.push_back(EV);
  }

  // We can remove the instructions now that we're no longer iterating through
  // them.
  for (auto EV : PrunedInsts)
    EV->eraseFromParent();

  if (!CI->use_empty()) {
    // Some use of the full struct return that we don't understand has happened,
    // so we've got to reconstruct it properly.
    Value *Res;
    Res = Builder.CreateInsertValue(UndefValue::get(CI->getType()), Loaded, 0);
    Res = Builder.CreateInsertValue(Res, Success, 1);

    CI->replaceAllUsesWith(Res);
  }

  CI->eraseFromParent();
  return true;
}

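// If the target wants explicit fences (getInsertFencesForAtomic()), emit the
// release barrier required before the LL/SC sequence and return Monotonic so
// the memory operations themselves carry no extra ordering; otherwise return
// the original ordering unchanged.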
AtomicOrdering AtomicExpandLoadLinked::insertLeadingFence(IRBuilder<> &Builder,
                                                          AtomicOrdering Ord) {
  if (!TM->getTargetLowering()->getInsertFencesForAtomic())
    return Ord;

  if (Ord == Release || Ord == AcquireRelease || Ord == SequentiallyConsistent)
    Builder.CreateFence(Release);

  // The exclusive operations don't need any barrier if we're adding separate
  // fences.
  return Monotonic;
}

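// Counterpart of insertLeadingFence: on fence-inserting targets, emit the
// acquire or seq_cst barrier that must follow the LL/SC sequence.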
void AtomicExpandLoadLinked::insertTrailingFence(IRBuilder<> &Builder,
                                                 AtomicOrdering Ord) {
  if (!TM->getTargetLowering()->getInsertFencesForAtomic())
    return;

  if (Ord == Acquire || Ord == AcquireRelease)
    Builder.CreateFence(Acquire);
  else if (Ord == SequentiallyConsistent)
    Builder.CreateFence(SequentiallyConsistent);
}