/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#include "tensorflow/compiler/xla/service/llvm_ir/kernel_support_library.h"

#include "tensorflow/compiler/xla/service/llvm_ir/llvm_loop.h"
#include "tensorflow/compiler/xla/service/llvm_ir/llvm_util.h"

namespace xla {
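// Emits a loop over [`start`, `end`) with stride `step` whose first
// iteration is peeled: the body generator is invoked once with
// is_first_iteration=true for `start`, and the remaining iterations are
// emitted by recursing into the single-induction-variable overload.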
void KernelSupportLibrary::For(
    tensorflow::StringPiece name, llvm::Value* start, llvm::Value* end,
    llvm::Value* step,
    const std::function<void(llvm::Value*, bool)>& for_body_generator) {
  If(ir_builder_->CreateICmpSLT(start, end), [&]() {
    for_body_generator(start, /*is_first_iteration=*/true);
    For(name, ir_builder_->CreateAdd(start, step), end, step,
        [&](llvm::Value* iv) { for_body_generator(iv, false); });
  });
}

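// Overload that exposes is_first_iteration to the body as an llvm::Value*.
// With peel_first_iteration it forwards to the peeling overload above and
// materializes the flag via getInt1; otherwise it emits a single loop and
// computes the flag as an equality comparison of the induction variable
// against `start`.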
void KernelSupportLibrary::For(
    tensorflow::StringPiece name, llvm::Value* start, llvm::Value* end,
    llvm::Value* step, bool peel_first_iteration,
    const std::function<void(llvm::Value*, llvm::Value*)>& for_body_generator) {
  if (peel_first_iteration) {
    For(name, start, end, step, true,
        [&](llvm::Value* indvar, bool is_first_iteration) {
          for_body_generator(indvar, ir_builder_->getInt1(is_first_iteration));
        });
  } else {
    std::unique_ptr<llvm_ir::ForLoop> loop = llvm_ir::ForLoop::EmitForLoop(
        name, start, end, step, ir_builder_,
        /*prevent_unrolling=*/prevent_unrolling_,
        /*prevent_vectorization=*/prevent_vectorization_);
    ir_builder_->SetInsertPoint(&loop->GetBodyBasicBlock()->back());
    for_body_generator(loop->GetIndVarValue(),
                       /*is_first_iteration=*/ir_builder_->CreateICmpEQ(
                           loop->GetIndVarValue(), start));
    llvm_ir::SetToLastInsertPoint(loop->GetExitBasicBlock(), ir_builder_);
  }
}

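// Emits an if/then/else diamond, fills both branches with the supplied
// generators, and leaves the insert point at the merge ("after") block.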
void KernelSupportLibrary::If(
    llvm::Value* condition, const std::function<void()>& true_block_generator,
    const std::function<void()>& false_block_generator) {
  llvm_ir::LlvmIfData if_data =
      llvm_ir::EmitIfThenElse(condition, "", ir_builder_);
  ir_builder_->SetInsertPoint(&if_data.true_block->back());
  true_block_generator();
  ir_builder_->SetInsertPoint(&if_data.false_block->back());
  false_block_generator();
  llvm_ir::SetToLastInsertPoint(if_data.after_block, ir_builder_);
}

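// Outlines `kernel_body_generator` into a void function named `kernel_name`
// (reusing an existing function of that name if present) and emits a call
// to it with `arguments`.  At most one argument may be null; a null argument
// is omitted from the call signature and re-inserted as nullptr at the same
// index before the body generator runs.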
void KernelSupportLibrary::EmitAndCallOutlinedKernel(
    bool enable_fast_math, bool optimize_for_size,
    llvm::IRBuilder<>* ir_builder, tensorflow::StringPiece kernel_name,
    KernelSupportLibrary::ArgumentVector arguments,
    const std::function<void(KernelSupportLibrary::ArgumentVector)>&
        kernel_body_generator) {
  llvm::Module* module = ir_builder->GetInsertBlock()->getModule();
  llvm::Function* function =
      module->getFunction(llvm_ir::AsStringRef(kernel_name));

  int64 null_arg_idx = -1;
  std::vector<llvm::Value*> sanitized_args;
  sanitized_args.reserve(arguments.size());
  for (int64 i = 0, e = arguments.size(); i < e; i++) {
    if (arguments[i]) {
      sanitized_args.push_back(arguments[i]);
    } else {
      CHECK_EQ(null_arg_idx, -1);
      null_arg_idx = i;
    }
  }

  if (!function) {
    VLOG(2) << "Generating kernel for " << kernel_name;
    std::vector<llvm::Type*> arg_types;
    std::transform(sanitized_args.begin(), sanitized_args.end(),
                   std::back_inserter(arg_types),
                   [](llvm::Value* arg) { return arg->getType(); });

    auto* function_type = llvm::FunctionType::get(
        ir_builder->getVoidTy(), arg_types, /*isVarArg=*/false);

    function = llvm_ir::CreateFunction(
        function_type, llvm::GlobalValue::InternalLinkage,
        /*enable_fast_math=*/enable_fast_math,
        /*optimize_for_size=*/optimize_for_size, kernel_name, module);

    llvm::IRBuilder<>::InsertPointGuard guard(*ir_builder);

    auto* entry_bb =
        llvm::BasicBlock::Create(ir_builder->getContext(), "entry", function);
    auto* return_inst = llvm::ReturnInst::Create(ir_builder->getContext(),
                                                 /*retVal=*/nullptr, entry_bb);
    // Set the insert point to before return_inst.
    ir_builder->SetInsertPoint(return_inst);

    std::vector<llvm::Value*> arg_values;
    /*
     * clang on OSX doesn't like std::transform or range for loop here.
     * See https://github.com/tensorflow/tensorflow/issues/15196
     */
    for (llvm::Function::arg_iterator arg = function->arg_begin(),
                                      arg_e = function->arg_end();
         arg != arg_e; ++arg) {
      arg_values.push_back(arg);
    }
    if (null_arg_idx != -1) {
      arg_values.insert(arg_values.begin() + null_arg_idx, nullptr);
    }
    kernel_body_generator(arg_values);
  } else {
    VLOG(3) << "Re-using kernel for " << kernel_name;
  }

  ir_builder->CreateCall(function, llvm_ir::AsArrayRef(sanitized_args));
}

}  // namespace xla