// Copyright 2015 The Gemmlowp Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// output_stages.h: public definitions of the output stages that can
// be assembled into an output pipeline, to control how internal
// 32-bit accumulators are transformed to obtain the final uint8
// result matrix entries.

#ifndef GEMMLOWP_PUBLIC_OUTPUT_STAGES_H_
#define GEMMLOWP_PUBLIC_OUTPUT_STAGES_H_

#include <tuple>

#include "../internal/common.h"

namespace gemmlowp {

// This output stage takes int32 values and returns still int32 values,
// but "quantized down" to the uint8 scale; in other words, its output
// is typically what one would then clamp to [0..255] and cast to uint8
// (see OutputStageSaturatingCastToUint8).
//
// This "quantization down" process depends on 3 parameters,
//   result_offset, result_mult_int, result_shift,
// and the result is:
//   ((input + result_offset) * result_mult_int + rounding) >> result_shift
// where
//   rounding = (result_shift < 1) ? 0 : (1 << (result_shift - 1));
struct OutputStageQuantizeDownInt32ToUint8Scale {
  std::int32_t result_offset;
  std::int32_t result_mult_int;
  std::int32_t result_shift;
};
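
// As an illustration, with made-up numbers that are not part of the original
// documentation: for result_offset = 127, result_mult_int = 3,
// result_shift = 8 and an input accumulator of 1000, one gets
//   rounding = 1 << 7 = 128
//   ((1000 + 127) * 3 + 128) >> 8 = 3509 >> 8 = 13.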

// This output stage takes int32 values and returns still int32 values,
// but "quantized down" to the uint8 scale; in other words, its output
// is typically what one would then clamp to [0..255] and cast to uint8
// (see OutputStageSaturatingCastToUint8).
//
// This "quantization down" process depends on 3 parameters,
//   result_offset, result_mult_int, result_shift,
// and the result is:
//   ((input + result_offset) * result_mult_int + rounding) >> result_shift
// where
//   rounding = (result_shift < 1) ? 0 : (1 << (result_shift - 1));
//
// The difference from OutputStageQuantizeDownInt32ToUint8Scale is that here,
// each row or column of the output (depending on tShape) has its own
// result_offset and result_mult_int numbers.
template <VectorShape tShape>
struct OutputStageQuantizeDownInt32ToUint8ScalePC {
  VectorMap<const std::int32_t, tShape> result_offset;
  VectorMap<const std::int32_t, tShape> result_mult_int;
  std::int32_t result_shift;
};
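
// A minimal usage sketch with hypothetical variable names, assuming the
// VectorMap(pointer, size) constructor from the internal map types: a
// VectorShape::Row vector has one entry per column of the result matrix, so
// per-column quantization parameters could be set up as
//
//   OutputStageQuantizeDownInt32ToUint8ScalePC<VectorShape::Row> stage;
//   stage.result_offset = VectorMap<const std::int32_t, VectorShape::Row>(
//       per_column_offsets, result_cols);
//   stage.result_mult_int = VectorMap<const std::int32_t, VectorShape::Row>(
//       per_column_multipliers, result_cols);
//   stage.result_shift = 8;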

// This output stage takes int32 values and returns still int32 values,
// but "quantized down" to a different scale; for example, in a pipeline
// that outputs uint8 values in [0..255], the output of this stage would be
// int32 values ready to be clamped to [0..255] and cast to uint8
// (see OutputStageSaturatingCastToUint8).
//
// This "quantization down" process depends on 3 parameters,
//   result_fixedpoint_multiplier, result_shift, result_offset_after_shift,
// and the result is:
//   ((FixedPointMul(input, result_fixedpoint_multiplier) +
//   rounding) >> result_shift) + result_offset_after_shift
// where
//   rounding = (result_shift < 1) ? 0 : (1 << (result_shift - 1));
// and where FixedPointMul(x, y) is the nearest integer to the following
// mathematical expression, evaluated without overflow or intermediate
// rounding:
//   (x * y) / 2^31
// In practice, it is expected that FixedPointMul will be implemented
// using hardware "rounding doubling int32 multiply high" instructions,
// such as VQRDMULH on ARM. See in fixedpoint.h the generic function,
// SaturatingRoundingDoublingHighMul.
//
// Notice that the other difference from
// OutputStageQuantizeDownInt32ToUint8Scale is that the result offset
// is applied after the multiplier and shift, not before. This ensures
// that no matter what the multiplier and shift are, the result offset
// is effectively integral: offsetting the final result by an integer.
// The motivation for this is to faithfully support quantization schemes
// where the formula linking quantized values to the real mathematical
// values that they represent is of the form
//
//   real_value = scale * (quantized_value - zero_point)
//
// where scale is a real number (represented in quantized form by
// result_fixedpoint_multiplier and result_shift) and zero_point
// is an integer telling which quantized value corresponds to the
// real value 0, and is represented here by (the opposite of)
// result_offset_after_shift.
// The motivation for such a quantization scheme, designed to
// ensure that 0 is always a representable value, is that in
// many applications we need to 0-pad arrays, and that can only be
// done for quantized arrays if 0 is a representable value in
// quantized form. In particular, convolution-like operations
// are often implemented using 0-padding, or "im2col"-like
// expansions that implicitly rely on 0-padding. If 0 were not
// a representable value, such operations would have to pad
// using a nonzero value, introducing bias in the computation.
struct OutputStageQuantizeDownInt32ByFixedPoint {
  std::int32_t result_fixedpoint_multiplier;
  std::int32_t result_shift;
  std::int32_t result_offset_after_shift;
};
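
// As an illustration, with made-up numbers that are not part of the original
// documentation: an overall real multiplier of roughly 0.1 can be factored as
// 0.8 * 2^-3, giving result_fixedpoint_multiplier = round(0.8 * 2^31) =
// 1717986918 and result_shift = 3. For an input accumulator of 1000 and
// result_offset_after_shift = 128:
//   FixedPointMul(1000, 1717986918) = 800 (i.e. about 1000 * 0.8)
//   rounding = 1 << 2 = 4
//   ((800 + 4) >> 3) + 128 = 100 + 128 = 228.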

// OutputStageQuantizeDownInt32ToUint8ScaleByFixedPoint is the old deprecated
// name of OutputStageQuantizeDownInt32ByFixedPoint, before we noticed that
// there really wasn't anything Uint8-specific about it.
using OutputStageQuantizeDownInt32ToUint8ScaleByFixedPoint =
    OutputStageQuantizeDownInt32ByFixedPoint;

// Variant of OutputStageQuantizeDownInt32ByFixedPoint where the 'shift'
// is not necessarily just a right shift, so we can represent multipliers
// greater than 1. This takes a result_exponent parameter; when it is
// <= 0, this is equivalent to OutputStageQuantizeDownInt32ByFixedPoint
// with result_shift = -result_exponent.
// In the general case, this consists of first left-shifting by
// std::max(result_exponent, 0), then doing the same as
// OutputStageQuantizeDownInt32ByFixedPoint with
// result_shift = std::max(-result_exponent, 0).
struct OutputStageScaleInt32ByFixedPointAndExponent {
  std::int32_t result_fixedpoint_multiplier;
  std::int32_t result_exponent;
  std::int32_t result_offset_after_shift;
};
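
// The fields above are typically derived from a real-valued multiplier.
// A minimal sketch of such a conversion (this helper is not part of gemmlowp;
// the name is hypothetical and it would require <cmath>), using std::frexp to
// split the real multiplier into a mantissa in [0.5, 1) and a power of two:
//
//   void QuantizeRealMultiplier(double real_multiplier,
//                               std::int32_t* fixedpoint_multiplier,
//                               std::int32_t* exponent) {
//     int exp = 0;
//     const double mantissa = std::frexp(real_multiplier, &exp);
//     // mantissa is in [0.5, 1), so mantissa * 2^31 fits in an int32 after
//     // rounding, except when it rounds up to exactly 2^31.
//     std::int64_t q =
//         static_cast<std::int64_t>(std::round(mantissa * (1ll << 31)));
//     if (q == (1ll << 31)) {
//       q /= 2;
//       ++exp;
//     }
//     *fixedpoint_multiplier = static_cast<std::int32_t>(q);
//     *exponent = exp;
//   }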

// This output stage takes int32 values that are expected to be already
// on the final uint8 scale, but not necessarily in the [0..255] range.
// It clamps them to the [0..255] range and returns them cast to uint8.
struct OutputStageSaturatingCastToUint8 {};
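
// For example (illustrative values): an input of -5 becomes 0, an input of
// 128 stays 128, and an input of 300 becomes 255.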

// This output stage takes int32 values that are expected to be already
// on the final int16 scale, but not necessarily in the [-32768..32767] range.
// It clamps them to the [-32768..32767] range and returns them cast to int16.
struct OutputStageSaturatingCastToInt16 {};

// This output stage depends on a "bias vector" that should contain int32
// entries, and be either a row-vector of the same number of columns as the
// result matrix, or a column-vector of the same number of rows as the
// result matrix. This output stage takes int32 values and adds to them
// the corresponding entry of the bias vector (broadcasted in the other
// direction to fit the matrix's shape), outputting int32 values.
template <typename VectorType>
struct OutputStageBiasAddition {
  VectorType bias_vector;
};
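
// A minimal usage sketch with hypothetical names, following the same
// VectorMap-based convention as the per-channel stages above: adding one bias
// per row of the result matrix could look like
//
//   typedef VectorMap<const std::int32_t, VectorShape::Col> ColVectorMap;
//   OutputStageBiasAddition<ColVectorMap> bias_addition_stage;
//   bias_addition_stage.bias_vector = ColVectorMap(bias_data, result_rows);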

// This output stage clamps values between the specified min and max bounds.
// It can be used to implement "rectified linear unit" activation functions
// in neural networks.
struct OutputStageClamp {
  std::int32_t min;
  std::int32_t max;
};
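
// For example (illustrative values): min = 0 and max = 255 restricts values
// to the representable uint8 range; choosing min equal to the quantized zero
// point implements a plain ReLU.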
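
// This output stage applies an integer approximation of a tanh-shaped
// activation to int32 values (see the implementation of this output stage
// for the exact fixed-point formula). real_zero_as_int32 is the int32 value
// representing the real value 0, and real_amplitude_as_int32 is the int32
// value representing a real amplitude of 1, so outputs saturate towards
// real_zero_as_int32 +/- real_amplitude_as_int32.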
struct OutputStageTanh {
  std::int32_t real_zero_as_int32;
  std::int32_t real_amplitude_as_int32;
};

// An output pipeline is just a std::tuple of output stages.
// This function generates a standard output pipeline consisting of two stages:
// OutputStageQuantizeDownInt32ToUint8Scale, OutputStageSaturatingCastToUint8.
inline std::tuple<OutputStageQuantizeDownInt32ToUint8Scale,
                  OutputStageSaturatingCastToUint8>
MakeStandardOutputPipeline(std::int32_t result_offset,
                           std::int32_t result_mult_int,
                           std::int32_t result_shift) {
  OutputStageQuantizeDownInt32ToUint8Scale quantize_down_stage;
  quantize_down_stage.result_offset = result_offset;
  quantize_down_stage.result_mult_int = result_mult_int;
  quantize_down_stage.result_shift = result_shift;
  OutputStageSaturatingCastToUint8 saturating_cast_stage;
  return std::make_tuple(quantize_down_stage, saturating_cast_stage);
}
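
// A usage sketch with hypothetical matrix maps and offsets, assuming the
// GemmWithOutputPipeline entry point declared in public/gemmlowp.h:
//
//   const auto output_pipeline =
//       MakeStandardOutputPipeline(result_offset, result_mult_int,
//                                  result_shift);
//   GemmWithOutputPipeline<std::uint8_t, std::uint8_t,
//                          DefaultL8R8BitDepthParams>(
//       &gemm_context, lhs, rhs, &result, lhs_offset, rhs_offset,
//       output_pipeline);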

// An output pipeline is just a std::tuple of output stages.
// This function generates a standard output pipeline consisting of two stages:
// OutputStageQuantizeDownInt32ToUint8ScalePC, OutputStageSaturatingCastToUint8.
template <VectorShape tShape>
inline std::tuple<OutputStageQuantizeDownInt32ToUint8ScalePC<tShape>,
                  OutputStageSaturatingCastToUint8>
MakeStandardOutputPipeline(
    const VectorMap<const std::int32_t, tShape>& result_offset,
    const VectorMap<const std::int32_t, tShape>& result_mult_int,
    std::int32_t result_shift) {
  OutputStageQuantizeDownInt32ToUint8ScalePC<tShape> quantize_down_stage;
  quantize_down_stage.result_offset = result_offset;
  quantize_down_stage.result_mult_int = result_mult_int;
  quantize_down_stage.result_shift = result_shift;
  OutputStageSaturatingCastToUint8 saturating_cast_stage;
  return std::make_tuple(quantize_down_stage, saturating_cast_stage);
}
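
// A usage sketch for this per-channel overload, with hypothetical names (a
// VectorShape::Col vector has one entry per row of the result matrix):
//
//   VectorMap<const std::int32_t, VectorShape::Col> per_row_offsets(
//       offset_data, result_rows);
//   VectorMap<const std::int32_t, VectorShape::Col> per_row_multipliers(
//       multiplier_data, result_rows);
//   const auto output_pipeline =
//       MakeStandardOutputPipeline<VectorShape::Col>(
//           per_row_offsets, per_row_multipliers, result_shift);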

}  // namespace gemmlowp

#endif  // GEMMLOWP_PUBLIC_OUTPUT_STAGES_H_