Searched refs:alphas (Results 1 - 25 of 43) sorted by relevance


/external/skia/src/opts/
SkXfermode_opts.h
    26  XFERMODE(SrcIn)    { return s.approxMulDiv255(d.alphas()); }
    27  XFERMODE(SrcOut)   { return s.approxMulDiv255(d.alphas().inv()); }
    28  XFERMODE(SrcOver)  { return s + d.approxMulDiv255(s.alphas().inv()); }
    34  XFERMODE(SrcATop)  { return (s * d.alphas() + d * s.alphas().inv()).div255(); }
    37  XFERMODE(Xor)      { return (s * d.alphas().inv() + d * s.alphas().inv()).div255(); }
    48  XFERMODE(Multiply) { return (s * d.alphas().inv() + d * s.alphas().inv() + s*d).div255(); }
    51  auto m = Sk4px::Wide::Min(s * d.alphas(),
    81  auto alphas = srcover;
    96  auto alphas = srcover,
   109  auto alphas = srcover,
   124  static inline Sk4f alphas(const Sk4f& f) {
   [all...]
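Note: in these transfer-mode kernels, s and d are premultiplied 8-bit pixels and alphas() broadcasts each pixel's alpha channel across all four lanes (see the Sk4px_*.h definitions below), so SrcOver reduces to the familiar s + d*(255 - sa)/255 per channel. A minimal scalar sketch of that one mode follows, with a stand-in Px struct and div255 helper rather than Skia's actual Sk4px API, and simple rounding in place of approxMulDiv255:

    #include <cstdint>

    struct Px { uint8_t r, g, b, a; };   // premultiplied RGBA, illustrative only

    static inline uint8_t div255(unsigned v) { return static_cast<uint8_t>((v + 127) / 255); }

    static inline Px SrcOver(Px s, Px d) {
        const unsigned inv_sa = 255u - s.a;   // scalar analogue of s.alphas().inv()
        return { static_cast<uint8_t>(s.r + div255(d.r * inv_sa)),
                 static_cast<uint8_t>(s.g + div255(d.g * inv_sa)),
                 static_cast<uint8_t>(s.b + div255(d.b * inv_sa)),
                 static_cast<uint8_t>(s.a + div255(d.a * inv_sa)) };
    }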
Sk4px_SSE2.h
    61  inline Sk4px Sk4px::alphas() const {
    73  inline Sk4px Sk4px::alphas() const {
SkBlitRow_opts.h
   197  // We now detect 2 special cases: the first occurs when all alphas are zero (the 8 pixels
   198  // are all transparent), the second when all alphas are fully set (they are all opaque).
   199  uint8x8_t alphas = src_col.val[3];
   200  uint64_t alphas_u64 = vget_lane_u64(vreinterpret_u64_u8(alphas), 0);
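Note: the comment above describes packing the eight source alphas into a single 64-bit lane so the all-transparent and all-opaque fast paths each cost one comparison. A scalar sketch of that classification, assuming alphas_u64 is the packed value produced by the vreinterpret_u64_u8/vget_lane_u64 pair:

    #include <cstdint>

    enum class RowCase { AllTransparent, AllOpaque, Mixed };

    static inline RowCase Classify(uint64_t alphas_u64) {
        if (alphas_u64 == 0)     return RowCase::AllTransparent;  // all eight alphas are 0x00
        if (alphas_u64 == ~0ULL) return RowCase::AllOpaque;       // all eight alphas are 0xFF
        return RowCase::Mixed;                                     // per-pixel blending needed
    }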
Sk4px_NEON.h
    66  inline Sk4px Sk4px::alphas() const {
Sk4px_none.h
    71  inline Sk4px Sk4px::alphas() const {
/external/skqp/src/opts/
SkXfermode_opts.h
    26  XFERMODE(SrcIn)    { return s.approxMulDiv255(d.alphas()); }
    27  XFERMODE(SrcOut)   { return s.approxMulDiv255(d.alphas().inv()); }
    28  XFERMODE(SrcOver)  { return s + d.approxMulDiv255(s.alphas().inv()); }
    34  XFERMODE(SrcATop)  { return (s * d.alphas() + d * s.alphas().inv()).div255(); }
    37  XFERMODE(Xor)      { return (s * d.alphas().inv() + d * s.alphas().inv()).div255(); }
    48  XFERMODE(Multiply) { return (s * d.alphas().inv() + d * s.alphas().inv() + s*d).div255(); }
    51  auto m = Sk4px::Wide::Min(s * d.alphas(),
    81  auto alphas = srcover;
    96  auto alphas = srcover,
   109  auto alphas = srcover,
   124  static inline Sk4f alphas(const Sk4f& f) {
   [all...]
Sk4px_SSE2.h
    61  inline Sk4px Sk4px::alphas() const {
    73  inline Sk4px Sk4px::alphas() const {
SkBlitRow_opts.h
   197  // We now detect 2 special cases: the first occurs when all alphas are zero (the 8 pixels
   198  // are all transparent), the second when all alphas are fully set (they are all opaque).
   199  uint8x8_t alphas = src_col.val[3];
   200  uint64_t alphas_u64 = vget_lane_u64(vreinterpret_u64_u8(alphas), 0);
Sk4px_NEON.h
    66  inline Sk4px Sk4px::alphas() const {
Sk4px_none.h
    71  inline Sk4px Sk4px::alphas() const {
/external/eigen/doc/snippets/
GeneralizedEigenSolver.cpp
     5  cout << "The (complex) numerators of the generalized eigenvalues are: " << ges.alphas().transpose() << endl;
     7  cout << "The (complex) generalized eigenvalues are (alphas./beta): " << ges.eigenvalues().transpose() << endl;
/external/tensorflow/tensorflow/contrib/factorization/python/ops/
gmm_ops_test.py
   159  alphas = sess.run(gmm_tool.alphas())
   160  self.assertGreater(alphas[1], 0.6)
   175  alphas = sess.run(gmm_tool.alphas())
   176  self.assertAlmostEqual(alphas[0], alphas[1])
   194  alphas = sess.run(gmm_tool.alphas())
   195  self.assertAlmostEqual(alphas[
   [all...]
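Note: in these GMM tests, alphas() appears to return the learned mixture weights. In a standard Gaussian mixture,

    p(x) = \sum_{k=1}^{K} \alpha_k \, \mathcal{N}(x \mid \mu_k, \Sigma_k), \qquad \alpha_k \ge 0, \quad \sum_{k=1}^{K} \alpha_k = 1,

so assertions such as alphas[1] > 0.6 or alphas[0] ≈ alphas[1] are checks on the recovered cluster proportions.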
/external/webp/src/dsp/
alpha_processing_neon.c
    96  const uint8x8_t alphas = vld1_u8(alpha + i);
    97  rgbX.val[0] = alphas;
    99  mask8 = vand_u8(mask8, alphas);
   147  const uint8x8_t alphas = rgbX.val[0];
   148  vst1_u8((uint8_t*)(alpha + i), alphas);
   149  mask8 = vand_u8(mask8, alphas);
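Note: the vand_u8 lines fold each batch of eight alphas into a running mask, which can stay 0xFF only if every alpha encountered was 0xFF; that is how the routine can report whether any non-opaque pixel was seen. A scalar sketch of the same idea (my simplification, not WebP's API):

    #include <cstddef>
    #include <cstdint>

    static bool AllAlphasOpaque(const uint8_t* alpha, size_t n) {
        uint8_t mask = 0xFF;
        for (size_t i = 0; i < n; ++i) mask &= alpha[i];   // scalar analogue of vand_u8(mask8, alphas)
        return mask == 0xFF;                               // true only if every alpha was 0xFF
    }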
/external/webp/src/enc/
analysis_enc.c
   143  const int alphas[MAX_ALPHA + 1]) {
   161  for (n = 0; n <= MAX_ALPHA && alphas[n] == 0; ++n) {}
   163  for (n = MAX_ALPHA; n > min_a && alphas[n] == 0; --n) {}
   184  if (alphas[a]) {
   190  dist_accum[n] += a * alphas[a];
   191  accum[n] += alphas[a];
   225  SetSegmentAlphas(enc, centers, weighted_average);  // pick some alphas.
   361  int alphas[MAX_ALPHA + 1],
   387  alphas[best_alpha]++;
   406  // distribution in alphas[]
   430  int alphas[MAX_ALPHA + 1];
   [all...]
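Note: here alphas[MAX_ALPHA + 1] is a histogram indexed by per-macroblock alpha value; lines 190-191 accumulate sum(a * alphas[a]) and sum(alphas[a]) per segment, i.e. a weighted average over the histogram bins. A sketch of that pattern, with counts[] standing in for the alphas[] histogram:

    static int WeightedAverage(const int counts[], int lo, int hi) {
        long long dist_accum = 0;   // running sum of a * counts[a]  (cf. line 190)
        long long accum = 0;        // running sum of counts[a]      (cf. line 191)
        for (int a = lo; a <= hi; ++a) {
            dist_accum += static_cast<long long>(a) * counts[a];
            accum      += counts[a];
        }
        return accum != 0 ? static_cast<int>(dist_accum / accum) : 0;
    }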
/external/skia/gm/
modecolorfilters.cpp
   101  SkColor alphas[] = {0xFFFFFFFF, 0x80808080};
   129  int paintColorCnt = hasShader ? SK_ARRAY_COUNT(alphas) : SK_ARRAY_COUNT(colors);
   130  SkColor* paintColors = hasShader ? alphas : colors;
/external/skqp/gm/
modecolorfilters.cpp
   101  SkColor alphas[] = {0xFFFFFFFF, 0x80808080};
   129  int paintColorCnt = hasShader ? SK_ARRAY_COUNT(alphas) : SK_ARRAY_COUNT(colors);
   130  SkColor* paintColors = hasShader ? alphas : colors;
/external/tensorflow/tensorflow/core/kernels/
random_op_test.cc
    77  Tensor alphas(DT_DOUBLE, TensorShape({n}));
    81  alphas.vec<double>()(i) =
    84  return alphas;
/external/javaparser/javaparser-symbol-solver-core/src/main/java/com/github/javaparser/symbolsolver/resolution/typeinference/
TypeInference.java
    75  List<InferenceVariable> alphas = InferenceVariable.instantiate(Ps);
    78  theta = theta.withPair(Ps.get(0), alphas.get(0));
    83  BoundSet B0 = boundSetup(Ps, alphas);
    92  B1 = B1.withBound(new ThrowsBound(alphas.get(i)));
   140  Optional<InstantiationSet> instantiation = B2.performResolution(alphas, typeSolver);
   347  List<InferenceVariable> alphas = InferenceVariable.instantiate(interfaceDeclaration.getTypeParameters());
   349  TypeInferenceCache.recordInferenceVariables(typeSolver, lambdaExpr, alphas);
BoundSet.java
   470  private boolean thereIsSomeJSuchThatβequalAlphaJ(Set<InferenceVariable> alphas, InferenceVariable beta) {
   471  for (InferenceVariable alphaJ : alphas) {
   526  private boolean hasProperty(Set<InferenceVariable> alphas, List<VariableDependency> dependencies) {
   527  for (InferenceVariable alphaI: alphas) {
   533  if (!hasInstantiationFor(beta) && !thereIsSomeJSuchThatβequalAlphaJ(alphas, beta)) {
   647  for (Set<InferenceVariable> alphas: allSetsWithProperty(uninstantiatedPortionOfV, dependencies)) {
   652  boolean hasSomeCaptureForAlphas = alphas.stream().anyMatch(
   661  for (InferenceVariable alphaI : alphas) {
/external/skia/src/core/
SkScan_AAAPath.cpp
   229  SK_ABORT("Don't use this; directly add alphas to the mask.");
   344  // Blitting 0xFF and 0 is much faster so we snap alphas close to them
   588  // Here we always send in l < SK_Fixed1, and the first alpha we want to compute is alphas[0]
   589  static inline void computeAlphaAboveLine(SkAlpha* alphas, SkFixed l, SkFixed r,
   597  alphas[0] = getPartialAlpha(((R << 17) - l - r) >> 9, fullAlpha);
   602  alphas[0] = SkFixedMul(first, firstH) >> 9;  // triangle alpha
   605  alphas[i] = alpha16 >> 8;
   608  alphas[R - 1] = fullAlpha - partialTriangleToAlpha(last, dY);
   612  // Here we always send in l < SK_Fixed1, and the first alpha we want to compute is alphas[0]
   614  SkAlpha* alphas, SkFixe
   [all...]
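Note: computeAlphaAboveLine fills alphas[] with anti-aliasing coverage for the part of each pixel lying above an edge crossing the scanline; the "triangle alpha" comment reflects the usual area rule, which (roughly, my paraphrase of the fixed-point code, not its exact rounding) is

    \texttt{alphas}[i] \approx \frac{A_i^{\text{above}}}{A_{\text{pixel}}} \cdot \texttt{fullAlpha}, \qquad A_{\text{triangle}} \approx \tfrac{1}{2}\, w\, h,

with the first and last partially covered pixels contributing triangular areas and the interior pixels trapezoidal ones.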
SkBlitter.cpp
    57  SkAlpha* alphas = reinterpret_cast<SkAlpha*>(runs + runSize);
    69  alphas[0] = ScalarToAlpha(partialL * partialT);
    70  alphas[1] = ScalarToAlpha(partialT);
    71  alphas[bounds.width() - 1] = ScalarToAlpha(partialR * partialT);
    72  this->blitAntiH(bounds.fLeft, bounds.fTop, alphas, runs);
    77  alphas[0] = ScalarToAlpha(partialL * partialB);
    78  alphas[1] = ScalarToAlpha(partialB);
    79  alphas[bounds.width() - 1] = ScalarToAlpha(partialR * partialB);
    80  this->blitAntiH(bounds.fLeft, bounds.fBottom - 1, alphas, runs);
    90  SkAlpha* alphas
    [all...]
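Note: blitAntiH consumes two parallel arrays; as I understand Skia's convention, runs[i] pixels starting at column x + i share coverage alphas[i], the next run begins at i + runs[i], and a zero run terminates the row. A sketch of a consumer of that encoding (the blit callback is hypothetical):

    #include <cstdint>

    // Walk the (alphas, runs) run-length encoding handed to blitAntiH and
    // invoke blit(x, count, coverage) once per run.
    static void ForEachRun(int x, const uint8_t alphas[], const int16_t runs[],
                           void (*blit)(int x, int count, uint8_t coverage)) {
        for (int i = 0; runs[i] > 0; i += runs[i]) {
            blit(x + i, runs[i], alphas[i]);
        }
    }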
/external/skqp/src/core/
SkScan_AAAPath.cpp
   229  SK_ABORT("Don't use this; directly add alphas to the mask.");
   344  // Blitting 0xFF and 0 is much faster so we snap alphas close to them
   588  // Here we always send in l < SK_Fixed1, and the first alpha we want to compute is alphas[0]
   589  static inline void computeAlphaAboveLine(SkAlpha* alphas, SkFixed l, SkFixed r,
   597  alphas[0] = getPartialAlpha(((R << 17) - l - r) >> 9, fullAlpha);
   602  alphas[0] = SkFixedMul(first, firstH) >> 9;  // triangle alpha
   605  alphas[i] = alpha16 >> 8;
   608  alphas[R - 1] = fullAlpha - partialTriangleToAlpha(last, dY);
   612  // Here we always send in l < SK_Fixed1, and the first alpha we want to compute is alphas[0]
   614  SkAlpha* alphas, SkFixe
   [all...]
SkBlitter.cpp
    56  SkAlpha* alphas = reinterpret_cast<SkAlpha*>(runs + runSize);
    68  alphas[0] = ScalarToAlpha(partialL * partialT);
    69  alphas[1] = ScalarToAlpha(partialT);
    70  alphas[bounds.width() - 1] = ScalarToAlpha(partialR * partialT);
    71  this->blitAntiH(bounds.fLeft, bounds.fTop, alphas, runs);
    76  alphas[0] = ScalarToAlpha(partialL * partialB);
    77  alphas[1] = ScalarToAlpha(partialB);
    78  alphas[bounds.width() - 1] = ScalarToAlpha(partialR * partialB);
    79  this->blitAntiH(bounds.fLeft, bounds.fBottom - 1, alphas, runs);
    87  SkAlpha* alphas
    [all...]
/external/tensorflow/tensorflow/python/kernel_tests/random/
random_gamma_test.py
    85  alphas = [0.2, 1.0, 3.0]
    87  alphas = [0.01] + alphas
    88  for alpha in alphas:
/external/eigen/test/
eigensolver_generalized_real.cpp
    62  Matrix<ComplexScalar,Dynamic,Dynamic> tmp = (eig.betas()(k)*a).template cast<ComplexScalar>() - eig.alphas()(k)*b;
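Note: this test forms \beta_k A - \alpha_k B for each reported pair; if (\alpha_k, \beta_k) really is a generalized eigenvalue of the pencil (A, B), that matrix is singular, which is presumably what the surrounding assertions verify:

    \det(\beta_k A - \alpha_k B) = 0 \;\Longleftrightarrow\; \exists\, v_k \neq 0:\ \beta_k A v_k = \alpha_k B v_k .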

Completed in 651 milliseconds
