dequantize.model.cpp revision 45bf79e5b9fee354fde7c1f64417d9ca4a1da7da
// Generated file (from: dequantize.mod.py). Do not edit
void CreateModel(Model *model) {
  OperandType type1(Type::TENSOR_FLOAT32, {1, 2, 2, 1});
  OperandType type0(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 1}, 1.f, 0);
  // Phase 1, operands
  auto op1 = model->addOperand(&type0);
  auto op2 = model->addOperand(&type1);
  // Phase 2, operations
  model->addOperation(ANEURALNETWORKS_DEQUANTIZE, {op1}, {op2});
  // Phase 3, inputs and outputs
  model->setInputsAndOutputs(
    {op1},
    {op2});
  assert(model->isValid());
}

bool is_ignored(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}