Lines Matching refs:alphas

229     SK_ABORT("Don't use this; directly add alphas to the mask.");
344 // Blitting 0xFF and 0 is much faster, so we snap alphas close to them
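
The comment above describes an optimization: blitters have fast paths for fully opaque (0xFF) and fully transparent (0) pixels, so coverage values near those extremes are rounded to them. A minimal sketch of what such snapping can look like; the margin here is an assumed value, not the constant the file actually uses:

    #include <cstdint>

    using SkAlpha = uint8_t;

    // Illustrative only; kSnapMargin is an assumption, not Skia's constant.
    static inline SkAlpha snapAlpha(SkAlpha alpha) {
        constexpr SkAlpha kSnapMargin = 8;   // assumed threshold
        if (alpha >= 0xFF - kSnapMargin) {
            return 0xFF;                     // take the fast fully-opaque path
        }
        if (alpha <= kSnapMargin) {
            return 0;                        // skip blitting entirely
        }
        return alpha;
    }
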
588 // Here we always send in l < SK_Fixed1, and the first alpha we want to compute is alphas[0]
589 static inline void computeAlphaAboveLine(SkAlpha* alphas, SkFixed l, SkFixed r,
597 alphas[0] = getPartialAlpha(((R << 17) - l - r) >> 9, fullAlpha);
602 alphas[0] = SkFixedMul(first, firstH) >> 9; // triangle alpha
605 alphas[i] = alpha16 >> 8;
608 alphas[R - 1] = fullAlpha - partialTriangleToAlpha(last, dY);
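
The shifts in the computeAlphaAboveLine fragment come from measuring covered area in 16.16 fixed point (SkFixed) and mapping area in [0, SK_Fixed1] onto alpha in [0, 255]. A hedged reconstruction of the two area helpers these lines rely on, reusing the SkAlpha alias from the sketch above; the bodies are inferred from the >> 9 pattern, not copied from the file:

    using SkFixed = int32_t;   // 16.16 fixed point; SK_Fixed1 == 0x10000

    // A pixel column cut by a shallow edge is a trapezoid whose parallel
    // sides have lengths l1, l2 (each <= SK_Fixed1). Its area is
    // (l1 + l2) / 2 in 16.16; scaling onto [0, 255] is roughly >> 8, so
    // the two steps combine into >> 9. Full coverage is assumed to be
    // handled by a separate fast path.
    static inline SkAlpha trapezoidToAlpha(SkFixed l1, SkFixed l2) {
        return static_cast<SkAlpha>((l1 + l2) >> 9);
    }

    // A corner triangle has area = base * height / 2: one fixed-point
    // multiply (>> 16), then the same combined >> 9, matching the
    // "SkFixedMul(first, firstH) >> 9  // triangle alpha" line above.
    static inline SkAlpha triangleToAlpha(SkFixed base, SkFixed height) {
        SkFixed area = static_cast<SkFixed>((int64_t)base * height >> 16);
        return static_cast<SkAlpha>(area >> 9);
    }
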
612 // Here we always send in l < SK_Fixed1, and the first alpha we want to compute is alphas[0]
614 SkAlpha* alphas, SkFixed l, SkFixed r, SkFixed dY, SkAlpha fullAlpha) {
621 alphas[0] = getPartialAlpha(trapezoidToAlpha(l, r), fullAlpha);
626 alphas[R - 1] = SkFixedMul(last, lastH) >> 9; // triangle alpha
629 alphas[i] = alpha16 >> 8;
632 alphas[0] = fullAlpha - partialTriangleToAlpha(first, dY);
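
getPartialAlpha(alpha, fullAlpha), called at lines 597 and 621, scales a horizontal coverage value by the row's vertical coverage fullAlpha. A hedged guess at its body, using the standard round-to-nearest division by 255; this is consistent with the call sites but is an assumption:

    // alpha * fullAlpha / 255 with rounding; (prod + (prod >> 8)) >> 8
    // is the classic cheap approximation of dividing by 255.
    static inline SkAlpha getPartialAlpha(SkAlpha alpha, SkAlpha fullAlpha) {
        unsigned prod = alpha * fullAlpha + 128;
        return static_cast<SkAlpha>((prod + (prod >> 8)) >> 8);
    }
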
719 SkAlpha* alphas;
722 alphas = (SkAlpha*)quickMemory;
724 alphas = new SkAlpha[(len + 1) * (sizeof(SkAlpha) * 2 + sizeof(int16_t))];
727 SkAlpha* tempAlphas = alphas + len + 1;
728 int16_t* runs = (int16_t*)(alphas + (len + 1) * 2);
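
Lines 719-728 carve three parallel arrays out of one block: alphas and tempAlphas (SkAlpha each) plus a runs array (int16_t), all sized len + 1, served from a stack buffer for short spans and from a single heap allocation otherwise. A sketch of the pattern with the surrounding glue filled in; kQuickLen and the function shell are assumptions:

    #include <cstdint>

    using SkAlpha = uint8_t;

    void blitRowSketch(int len) {
        constexpr int kQuickLen = 32;    // assumed stack capacity
        // Per pixel: two SkAlpha slots plus one int16_t run entry; the
        // "+ 1" leaves room for a terminator element in each array.
        alignas(int16_t) SkAlpha quickMemory[
                (sizeof(SkAlpha) * 2 + sizeof(int16_t)) * (kQuickLen + 1)];

        SkAlpha* alphas = (len <= kQuickLen)
                ? quickMemory
                : new SkAlpha[(len + 1) * (sizeof(SkAlpha) * 2 + sizeof(int16_t))];

        SkAlpha* tempAlphas = alphas + len + 1;              // second array
        int16_t* runs = (int16_t*)(alphas + (len + 1) * 2);  // third array

        // ... compute coverage into alphas/tempAlphas, fill runs, blit ...

        if (len > kQuickLen) {
            delete[] alphas;             // only the heap path owns its memory
        }
    }
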
732 alphas[i] = fullAlpha;
743 alphas[0] = alphas[0] > a1 ? alphas[0] - a1 : 0;
744 alphas[1] = alphas[1] > a2 ? alphas[1] - a2 : 0;
749 if (alphas[i - L] > tempAlphas[i - L]) {
750 alphas[i - L] -= tempAlphas[i - L];
752 alphas[i - L] = 0;
764 alphas[len-2] = alphas[len-2] > a1 ? alphas[len-2] - a1 : 0;
765 alphas[len-1] = alphas[len-1] > a2 ? alphas[len-1] - a2 : 0;
770 if (alphas[i - L] > tempAlphas[i - L]) {
771 alphas[i - L] -= tempAlphas[i - L];
773 alphas[i - L] = 0;
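
The guarded updates at lines 743-773 are a saturating subtraction: SkAlpha is unsigned 8-bit, so a bare alphas[i] -= tempAlphas[i] would wrap past zero and leave a large bogus coverage value. The same clamp expressed as a helper (the name is illustrative, not from the file):

    // Subtract sub from *dst, clamping at 0 instead of wrapping around.
    static inline void subtractAlphaSaturating(SkAlpha* dst, SkAlpha sub) {
        *dst = (*dst > sub) ? static_cast<SkAlpha>(*dst - sub) : 0;
    }
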
781 safelyAddAlpha(&maskRow[L + i], alphas[i]);
783 addAlpha(&maskRow[L + i], alphas[i]);
789 blitter->getRealBlitter()->blitAntiH(L, y, alphas, runs);
791 blitter->blitAntiH(L, y, alphas, len);
796 delete [] alphas;
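
The runs array passed to blitAntiH at line 789 uses Skia's run-length convention: runs[i] holds how many consecutive pixels share alphas[i], and a zero-length run terminates the list, which is what the extra len + 1 slot in the allocation above is for. When every pixel carries a distinct alpha the runs degenerate to all ones; a sketch of that filling, continuing the allocation sketch above (assumed, not shown in this listing):

    // One run of length 1 per pixel, then the zero terminator, before
    // handing off to the real blitter as at line 789.
    for (int i = 0; i < len; ++i) {
        runs[i] = 1;
    }
    runs[len] = 0;
    blitter->getRealBlitter()->blitAntiH(L, y, alphas, runs);
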
1710 // aaa_walk_convex_edges won't generate alphas above 255. Hence we don't need
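
This truncated comment pairs with the safelyAddAlpha / addAlpha split at lines 781-783: when the edge walker can prove the accumulated coverage never exceeds 255, the clamped add is unnecessary. Hedged sketches of the two variants, reusing the SkAlpha alias from the sketches above; the bodies are inferred from the names and the comment, not copied from the file:

    #include <algorithm>

    // Unclamped: the caller guarantees *alpha + delta fits in 8 bits.
    static inline void addAlpha(SkAlpha* alpha, SkAlpha delta) {
        *alpha = static_cast<SkAlpha>(*alpha + delta);
    }

    // Clamped: safe when overlapping edges may over-accumulate coverage.
    static inline void safelyAddAlpha(SkAlpha* alpha, SkAlpha delta) {
        *alpha = static_cast<SkAlpha>(std::min(255, *alpha + delta));
    }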