/external/eigen/unsupported/Eigen/CXX11/src/Tensor/

  TensorReductionCuda.h
      25:  __device__ EIGEN_ALWAYS_INLINE void atomicReduce(T* output, T accum, R& reducer) {  (argument)
      31:  reducer.reduce(accum, reinterpret_cast<T*>(&newval));
      39:  reducer.reduce(accum, reinterpret_cast<T*>(&newval));
      48:  reducer.reduce(accum, reinterpret_cast<T*>(&newval));
      56:  reducer.reduce(accum, reinterpret_cast<T*>(&newval));
      84:  __device__ inline void atomicReduce(half2* output, half2 accum, R<half>& reducer) {  (argument)
      87:  reducer.reducePacket(accum, reinterpret_cast<half2*>(&newval));
      95:  reducer.reducePacket(accum, reinterpret_cast<half2*>(&newval));
     125:  __global__ void FullReductionKernel(Reducer reducer, const Self input, Index num_coeffs,  (argument)
     132:  *output = reducer ...
     191:  ReductionInitFullReduxKernelHalfFloat(Reducer reducer, const Self input, Index num_coeffs, half2* scratch)  (argument)
     204:  ReductionInitKernelHalfFloat(Reducer reducer, const Self input, Index num_coeffs, half* output)  (argument)
     219:  FullReductionKernelHalfFloat(Reducer reducer, const Self input, Index num_coeffs, half* output, half2* scratch)  (argument)
     264:  ReductionCleanupKernelHalfFloat(Op& reducer, half* output, half2* scratch)  (argument)
     288:  run(const Self& self, Op& reducer, const GpuDevice& device, OutputType* output, typename Self::Index num_coeffs)  (argument)
     315:  run(const Self& self, Op& reducer, const GpuDevice& device, half* output, typename Self::Index num_coeffs)  (argument)
     359:  run(const Self& self, Op& reducer, const GpuDevice& device, OutputType* output)  (argument)
     374:  InnerReductionKernel(Reducer reducer, const Self input, Index num_coeffs_to_reduce, Index num_preserved_coeffs, typename Self::CoeffReturnType* output)  (argument)
     446:  InnerReductionKernelHalfFloat(Reducer reducer, const Self input, Index num_coeffs_to_reduce, Index num_preserved_coeffs, half* output)  (argument)
     555:  run(const Self& self, Op& reducer, const GpuDevice& device, OutputType* output, typename Self::Index num_coeffs_to_reduce, typename Self::Index num_preserved_vals)  (argument)
     596:  run(const Self& self, Op& reducer, const GpuDevice& device, half* output, typename Self::Index num_coeffs_to_reduce, typename Self::Index num_preserved_vals)  (argument)
     649:  run(const Self& self, Op& reducer, const GpuDevice& device, OutputType* output, typename Self::Index num_coeffs_to_reduce, typename Self::Index num_preserved_vals)  (argument)
     667:  OuterReductionKernel(Reducer reducer, const Self input, Index num_coeffs_to_reduce, Index num_preserved_coeffs, typename Self::CoeffReturnType* output)  (argument)
     709:  run(const Self& self, Op& reducer, const GpuDevice& device, float* output, typename Self::Index num_coeffs_to_reduce, typename Self::Index num_preserved_vals)  (argument)
     [all ...]

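The atomicReduce occurrences above follow a compare-and-swap retry pattern: read the current value at output, fold the thread's partial accumulator into it via reducer.reduce, and publish the result only if output has not changed in the meantime. A minimal host-side sketch of the same pattern in plain C++; std::atomic stands in for the CUDA atomics, and MaxReducer is a made-up functor for the illustration, not one of Eigen's:

    #include <algorithm>
    #include <atomic>
    #include <iostream>

    // Made-up reducer functor exposing a reduce(value, accum) hook like the ones listed above.
    struct MaxReducer {
      void reduce(float value, float* accum) const { *accum = std::max(*accum, value); }
    };

    // CAS retry loop: keep retrying until the value we publish was computed from the value we actually read.
    template <typename Reducer>
    void atomicReduce(std::atomic<float>* output, float accum, const Reducer& reducer) {
      float oldval = output->load();
      for (;;) {
        float newval = oldval;           // start from the value currently stored
        reducer.reduce(accum, &newval);  // fold our partial result into it
        if (newval == oldval) return;    // nothing to update
        if (output->compare_exchange_weak(oldval, newval)) return;  // else oldval was refreshed; retry
      }
    }

    int main() {
      std::atomic<float> result{0.0f};
      MaxReducer reducer;
      atomicReduce(&result, 3.5f, reducer);
      atomicReduce(&result, 1.25f, reducer);
      std::cout << result.load() << "\n";  // prints 3.5
    }
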
  TensorReduction.h
     131:  static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void reduce(const Self& self, typename Self::Index firstIndex, Op& reducer, typename Self::CoeffReturnType* accum) {  (argument)
     135:  GenericDimReducer<DimIndex-1, Self, Op>::reduce(self, input, reducer, accum);
     141:  static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void reduce(const Self& self, typename Self::Index firstIndex, Op& reducer, typename Self::CoeffReturnType* accum) {  (argument)
     144:  reducer.reduce(self.m_impl.coeff(input), accum);
     150:  static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void reduce(const Self& self, typename Self::Index index, Op& reducer, typename Self::CoeffReturnType* accum) {  (argument)
     151:  reducer.reduce(self.m_impl.coeff(index), accum);
     157:  static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE typename Self::CoeffReturnType reduce(const Self& self, typename Self::Index firstIndex, typename Self::Index numValuesToReduce, Op& reducer) {  (argument)
     158:  typename Self::CoeffReturnType accum = reducer.initialize();
     160:  reducer.reduce(self.m_impl.coeff(firstIndex + j), &accum);
     162:  return reducer ...
     168:  reduce(const Self& self, typename Self::Index firstIndex, typename Self::Index numValuesToReduce, Op& reducer)  (argument)
     192:  reduce(const Self& self, typename Self::Index firstIndex, Op& reducer, typename Self::PacketReturnType* accum)  (argument)
     203:  reduce(const Self& self, typename Self::Index firstIndex, Op& reducer, typename Self::PacketReturnType* accum)  (argument)
     222:  run(const Self& self, Op& reducer, const Device&, typename Self::CoeffReturnType* output)  (argument)
     234:  run(const Self& self, typename Self::Index firstIndex, typename Self::Index numValuesToReduce, Op& reducer, typename Self::CoeffReturnType* output)  (argument)
     250:  run(const Self& self, Op& reducer, const ThreadPoolDevice& device, typename Self::CoeffReturnType* output)  (argument)
     363:  TensorReductionOp(const XprType& expr, const Dims& dims, const Op& reducer)  (argument)
     371:  const Op& reducer() const { return m_reducer; }  (function in class Eigen::TensorReductionOp)
     [all ...]

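The InnerMostDimReducer entries show the basic calling convention for a reducer: ask it for an initial accumulator, fold coefficients into it one at a time, then finalize. A stripped-down sketch of that loop in plain C++ (no Eigen templates; the SumReducer here is written for the illustration, not Eigen's internal one):

    #include <cstddef>
    #include <iostream>
    #include <vector>

    // Illustrative reducer with the initialize/reduce/finalize contract used by the entries above.
    struct SumReducer {
      float initialize() const { return 0.0f; }
      void reduce(float value, float* accum) const { *accum += value; }
      float finalize(float accum) const { return accum; }
    };

    // Mirrors the shape of InnerMostDimReducer::reduce: fold numValuesToReduce
    // coefficients starting at firstIndex into one accumulator.
    template <typename Reducer>
    float reduceRange(const std::vector<float>& coeffs, std::size_t firstIndex,
                      std::size_t numValuesToReduce, const Reducer& reducer) {
      float accum = reducer.initialize();
      for (std::size_t j = 0; j < numValuesToReduce; ++j) {
        reducer.reduce(coeffs[firstIndex + j], &accum);
      }
      return reducer.finalize(accum);
    }

    int main() {
      std::vector<float> data{1, 2, 3, 4, 5};
      std::cout << reduceRange(data, 1, 3, SumReducer{}) << "\n";  // 2 + 3 + 4 = 9
    }
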
  TensorReductionSycl.h
      97:  /// For now let's start with a full reducer
      99:  /// we want to take reduction child and then build a construction and apply the full reducer function on it. Fullreducre applies the ...
     108:  static void run(const Self& self, Op& reducer, const Eigen::SyclDevice& dev, CoeffReturnType* output) {  (argument)
     144:  Op functor = reducer;
     187:  static bool run(const Self& self, Op& reducer, const Eigen::SyclDevice& dev, CoeffReturnType* output, typename Self::Index , typename Self::Index num_coeffs_to_preserve) {  (argument)
     206:  Op functor = reducer;

  TensorBase.h
     519:  scan(const Index& axis, const Reducer& reducer, bool exclusive = false) const {  (argument)
     520:  return TensorScanOp<Reducer, const Derived>(derived(), axis, exclusive, reducer);
     664:  reduce(const Dims& dims, const Reducer& reducer) const {
     665:  return TensorReductionOp<Reducer, const Dims, const Derived>(derived(), dims, reducer);

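TensorBase::reduce and TensorBase::scan are the user-facing entry points for the machinery above; convenience methods such as sum() and cumsum() forward to them with a built-in reducer. A small usage sketch, assuming the unsupported CXX11 Tensor module is on the include path and C++11 or later:

    #include <unsupported/Eigen/CXX11/Tensor>
    #include <iostream>

    int main() {
      Eigen::Tensor<float, 2> t(3, 4);
      t.setConstant(1.0f);

      // Reduce over dimension 1: each of the 3 rows collapses to a single sum.
      Eigen::array<int, 1> dims{{1}};
      Eigen::Tensor<float, 1> row_sums = t.sum(dims);
      std::cout << row_sums(0) << "\n";  // 4

      // Inclusive prefix sum along dimension 0; cumsum() is the scan() convenience wrapper.
      Eigen::Tensor<float, 2> prefix = t.cumsum(0);
      std::cout << prefix(2, 0) << "\n";  // 3
    }
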
/external/skia/tests/

  PathOpsQuadReduceOrderTest.cpp
      25:  SkReduceOrder reducer;  (local)
      26:  SkDEBUGCODE(int result = ) reducer.reduce(quad);
      33:  SkReduceOrder reducer;  (local)
      55:  order = reducer.reduce(quad);
      64:  order = reducer.reduce(quad);

  PathOpsCubicReduceOrderTest.cpp
      63:  SkReduceOrder reducer;  (local)
     112:  order = reducer.reduce(cubic, SkReduceOrder::kAllow_Quadratics);
     123:  order = reducer.reduce(cubic, SkReduceOrder::kAllow_Quadratics);
     126:  order = reducer.reduce(cubic, SkReduceOrder::kAllow_Quadratics);
     135:  order = reducer.reduce(cubic, SkReduceOrder::kAllow_Quadratics);
     146:  order = reducer.reduce(cubic, SkReduceOrder::kAllow_Quadratics);
     157:  order = reducer.reduce(cubic, SkReduceOrder::kAllow_Quadratics);
     168:  order = reducer.reduce(cubic, SkReduceOrder::kAllow_Quadratics);
     171:  order = reducer.reduce(cubic, SkReduceOrder::kAllow_Quadratics);
     180:  order = reducer ...
     [all ...]

  PathOpsQuadLineIntersectionThreadedTest.cpp
      87:  SkReduceOrder reducer;  (local)
      88:  int order = reducer.reduce(quad);

  PathOpsTestCommon.cpp
      64:  SkReduceOrder reducer;  (local)
      65:  int order = reducer.reduce(*cubic, SkReduceOrder::kAllow_Quadratics);
      99:  int orderP1 = reducer.reduce(pair.first(), SkReduceOrder::kNo_Quadratics);
     103:  int orderP2 = reducer.reduce(pair.second(), SkReduceOrder::kNo_Quadratics);

  PathOpsConicLineIntersectionTest.cpp
     107:  SkReduceOrder reducer;  (local)
     118:  int order2 = reducer.reduce(line);

/external/skia/src/pathops/

  SkReduceOrder.cpp
     249:  SkReduceOrder reducer;  (local)
     250:  int order = reducer.reduce(quad);
     253:  *reducePts++ = reducer.fLine[index].asSkPoint();
     275:  SkReduceOrder reducer;  (local)
     276:  int order = reducer.reduce(cubic, kAllow_Quadratics);
     279:  *reducePts++ = reducer.fQuad[index].asSkPoint();

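Across the test and implementation entries above the call shape is the same: construct an SkReduceOrder, call reduce() on a quad or cubic, and read back the order of the simplified curve, with the reduced geometry left in members such as fLine and fQuad. A rough sketch of that pattern based only on the signatures visible here; the include path, the control points, and the expected order are illustrative, not taken from the Skia sources:

    #include "SkReduceOrder.h"  // include path depends on how the Skia tree is set up

    // A quad whose middle control point lies on the chord degenerates to a line.
    static int reduceCollinearQuad() {
      SkDQuad quad = {{{0, 0}, {1, 1}, {2, 2}}};  // aggregate init, as in the pathops tests
      SkReduceOrder reducer;
      int order = reducer.reduce(quad);           // expected to report order 2 (a line)
      return order;
    }

    // kAllow_Quadratics lets a degree-elevated cubic come back as a quad.
    static int reduceCubic(const SkDCubic& cubic) {
      SkReduceOrder reducer;
      return reducer.reduce(cubic, SkReduceOrder::kAllow_Quadratics);
    }
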
/external/v8/src/compiler/

  graph-reducer.cc
       9:  #include "src/compiler/graph-reducer.h"
      41:  void GraphReducer::AddReducer(Reducer* reducer) {  (argument)
      42:  reducers_.push_back(reducer);
      65:  for (Reducer* const reducer : reducers_) reducer->Finalize();
      85:  // No change from this reducer.
     101:  // No change from any reducer.
     104:  // At least one reducer did some in-place reduction.

  graph-reducer.h
      40:  // A reducer can reduce or simplify a given node based on its operator and
      41:  // inputs. This class functions as an extension point for the graph reducer for
      64:  // An advanced reducer can also edit the graphs by changing and replacing nodes
      68:  // Observe the actions of this reducer.
     132:  void AddReducer(Reducer* reducer);

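Per the header comments, Reducer is the extension point: each reducer inspects one node and reports either no change, an in-place change, or a replacement, and GraphReducer drives all registered reducers to a fixed point. A schematic sketch only: the IsAddWithZeroConstant helper is hypothetical, and details such as the dead-node constructor argument or a required reducer_name() override vary between V8 revisions.

    #include "src/compiler/graph-reducer.h"

    namespace v8 {
    namespace internal {
    namespace compiler {

    // Hypothetical predicate, assumed to be defined elsewhere for this illustration.
    bool IsAddWithZeroConstant(Node* node);

    class AddZeroReducer final : public Reducer {
     public:
      Reduction Reduce(Node* node) override {
        if (!IsAddWithZeroConstant(node)) return NoChange();  // this reducer has nothing to say
        return Replace(node->InputAt(0));                     // rewrite x + 0 to x
      }
    };

    // The GraphReducer applies every registered reducer to every node until nothing changes.
    void RunAddZero(Zone* zone, Graph* graph, Node* dead) {
      GraphReducer graph_reducer(zone, graph, dead);
      AddZeroReducer add_zero;
      graph_reducer.AddReducer(&add_zero);
      graph_reducer.ReduceGraph();
    }

    }  // namespace compiler
    }  // namespace internal
    }  // namespace v8

In the real pipeline (see the pipeline.cc entry below) many reducers are registered on one GraphReducer, so a single traversal applies all of them.
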
  pipeline.cc
      22:  #include "src/compiler/common-operator-reducer.h"
      26:  #include "src/compiler/escape-analysis-reducer.h"
      34:  #include "src/compiler/js-builtin-reducer.h"
      35:  #include "src/compiler/js-call-reducer.h"
      52:  #include "src/compiler/machine-operator-reducer.h"
      64:  #include "src/compiler/simplified-operator-reducer.h"
      71:  #include "src/compiler/value-numbering-reducer.h"
     432:  SourcePositionWrapper(Reducer* reducer, SourcePositionTable* table)  (argument)
     433:  : reducer_(reducer), table_(table) {}
     460:  AddReducer(PipelineData* data, GraphReducer* graph_reducer, Reducer* reducer)  (argument)
     461:  Reducer* reducer) {
     [all ...]

/external/libconstrainedcrypto/

  p256.c
     164:  p256_digit reducer[P256_NDIGITS] = { 0 };  (local)
     168:  // Guestimate reducer as top * MOD, since msw of MOD is -1.
     169:  top_reducer = mulAdd(MOD, top, 0, reducer);
     171:  // Subtract reducer from top | tmp.
     172:  top = subTop(top_reducer, reducer, top, tmp + i);

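For context on the "guestimate" comment: written in 32-bit digits, the moduli handled in this file (the P-256 field prime and the group order) both have 0xffffffff, i.e. -1 mod 2^32, as their most significant digit, so MOD sits just below 2^256. A rough justification of why the extra top digit t itself works as the quotient estimate (my reading of the comment, not text from the source):

    x = t \cdot 2^{256} + r, \quad 0 \le r < 2^{256}, \quad 2^{256} - 2^{224} < \mathrm{MOD} < 2^{256}
    \implies 0 \le x - t \cdot \mathrm{MOD} = r + t\,(2^{256} - \mathrm{MOD}) < r + t \cdot 2^{224}

Subtracting reducer = top * MOD therefore clears the extra top digit, leaving a value that at most a couple of further conditional subtractions of MOD bring back into [0, MOD).
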
/external/v8/

  Android.v8.mk
     107:  src/compiler/common-operator-reducer.cc \
     115:  src/compiler/escape-analysis-reducer.cc \
     121:  src/compiler/graph-reducer.cc \
     130:  src/compiler/js-builtin-reducer.cc \
     131:  src/compiler/js-call-reducer.cc \
     153:  src/compiler/machine-operator-reducer.cc \
     179:  src/compiler/simplified-operator-reducer.cc \
     189:  src/compiler/value-numbering-reducer.cc \
     269:  src/heap/memory-reducer.cc \

/external/eigen/unsupported/test/

  cxx11_tensor_reduction.cpp
     318:  UserReducer reducer(10.0f);
     319:  Tensor<float, 1, DataLayout> result = tensor.reduce(reduction_axis, reducer);

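The test passes a user-defined reducer straight to Tensor::reduce. A sketch of what such a functor can look like: the sum-of-squares-plus-offset behaviour is invented for the example, the initialize/reduce/finalize hooks follow the TensorReduction.h entries above, and the PacketAccess flag keeps Eigen on the scalar (non-vectorized) path in the revisions I have seen.

    #include <unsupported/Eigen/CXX11/Tensor>
    #include <iostream>

    // Invented example reducer: accumulates the sum of squares, then adds a fixed offset.
    struct SquareSumReducer {
      static const bool PacketAccess = false;

      explicit SquareSumReducer(float offset) : offset_(offset) {}

      float initialize() const { return 0.0f; }
      void reduce(float value, float* accum) const { *accum += value * value; }
      float finalize(float accum) const { return accum + offset_; }

      float offset_;
    };

    int main() {
      Eigen::Tensor<float, 2> t(2, 3);
      t.setValues({{1, 2, 3}, {4, 5, 6}});

      Eigen::array<int, 1> axis{{0}};  // reduce down the columns
      SquareSumReducer reducer(10.0f);
      Eigen::Tensor<float, 1> result = t.reduce(axis, reducer);

      std::cout << result(0) << "\n";  // 1*1 + 4*4 + 10 = 27
    }
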
/external/v8/src/inspector/build/closure-compiler/

  closure-compiler.jar  (binary archive; the listing shows only META-INF/ and com/google/... directory entries)