/*
 * Copyright 2006 The Android Open Source Project
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef SkColorPriv_DEFINED
#define SkColorPriv_DEFINED

#include "SkColor.h"
#include "SkMath.h"

/** Turn 0..255 into 0..256 by adding 1 at the half-way point. Used to turn a
    byte into a scale value, so that we can say scale * value >> 8 instead of
    alpha * value / 255.

    In debug builds, asserts that alpha is 0..255.
*/
static inline unsigned SkAlpha255To256(U8CPU alpha) {
    SkASSERT(SkToU8(alpha) == alpha);
    // this one assumes that blending on top of an opaque dst keeps it that way
    // even though it is less accurate than a + (a >> 7) for non-opaque dsts
    return alpha + 1;
}

/** Multiply value by 0..256, and shift the result down 8
    (i.e. return (value * alpha256) >> 8)
 */
#define SkAlphaMul(value, alpha256)     (((value) * (alpha256)) >> 8)
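
// A minimal usage sketch, not part of the original header (the helper name
// and its arguments are hypothetical): SkAlpha255To256 and SkAlphaMul
// together approximate (value * alpha) / 255, here as a single-channel lerp.
static inline U8CPU SkBlendChannel_Sketch(U8CPU src, U8CPU dst, U8CPU alpha) {
    unsigned scale = SkAlpha255To256(alpha);    // 0..256
    return SkAlphaMul(src, scale) + SkAlphaMul(dst, 256 - scale);
}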

static inline U8CPU SkUnitScalarClampToByte(SkScalar x) {
    return static_cast<U8CPU>(SkScalarPin(x, 0, 1) * 255 + 0.5);
}

#define SK_A32_BITS     8
#define SK_R32_BITS     8
#define SK_G32_BITS     8
#define SK_B32_BITS     8

#define SK_A32_MASK     ((1 << SK_A32_BITS) - 1)
#define SK_R32_MASK     ((1 << SK_R32_BITS) - 1)
#define SK_G32_MASK     ((1 << SK_G32_BITS) - 1)
#define SK_B32_MASK     ((1 << SK_B32_BITS) - 1)

/*
 *  Skia's 32bit backend only supports one swizzle order at a time (chosen at
 *  compile time). This is specified by four defines: SK_A32_SHIFT,
 *  SK_R32_SHIFT, and likewise for G and B.
 *
 *  For easier compatibility with Skia's GPU backend, we further restrict these
 *  to either (in memory-byte-order) RGBA or BGRA. Note that this "order" does
 *  not directly correspond to the same shift-order, since we have to take
 *  endianness into account.
 *
 *  Here we enforce this constraint.
 */

#ifdef SK_CPU_BENDIAN
    #define SK_RGBA_R32_SHIFT   24
    #define SK_RGBA_G32_SHIFT   16
    #define SK_RGBA_B32_SHIFT   8
    #define SK_RGBA_A32_SHIFT   0
#else
    #define SK_RGBA_R32_SHIFT   0
    #define SK_RGBA_G32_SHIFT   8
    #define SK_RGBA_B32_SHIFT   16
    #define SK_RGBA_A32_SHIFT   24
#endif
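
// Illustrative sketch, not part of this header (the function name is
// hypothetical): packing with the SK_RGBA_*_SHIFT values above stores the
// bytes in R,G,B,A memory order on both little- and big-endian CPUs.
static inline uint32_t SkPackRGBA_Sketch(U8CPU r, U8CPU g, U8CPU b, U8CPU a) {
    return (r << SK_RGBA_R32_SHIFT) | (g << SK_RGBA_G32_SHIFT) |
           (b << SK_RGBA_B32_SHIFT) | (a << SK_RGBA_A32_SHIFT);
}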
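// Each accessor shifts the desired lane into the top byte and then shifts
// right by 24, which extracts and masks the 8-bit component in one step.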
#define SkGetPackedA32(packed)      ((uint32_t)((packed) << (24 - SK_A32_SHIFT)) >> 24)
#define SkGetPackedR32(packed)      ((uint32_t)((packed) << (24 - SK_R32_SHIFT)) >> 24)
#define SkGetPackedG32(packed)      ((uint32_t)((packed) << (24 - SK_G32_SHIFT)) >> 24)
#define SkGetPackedB32(packed)      ((uint32_t)((packed) << (24 - SK_B32_SHIFT)) >> 24)

#define SkA32Assert(a)  SkASSERT((unsigned)(a) <= SK_A32_MASK)
#define SkR32Assert(r)  SkASSERT((unsigned)(r) <= SK_R32_MASK)
#define SkG32Assert(g)  SkASSERT((unsigned)(g) <= SK_G32_MASK)
#define SkB32Assert(b)  SkASSERT((unsigned)(b) <= SK_B32_MASK)

/**
 *  Pack the components into a SkPMColor, checking (in the debug version) that
 *  the components are 0..255, and are already premultiplied (i.e. alpha >= color)
 */
static inline SkPMColor SkPackARGB32(U8CPU a, U8CPU r, U8CPU g, U8CPU b) {
    SkA32Assert(a);
    SkASSERT(r <= a);
    SkASSERT(g <= a);
    SkASSERT(b <= a);

    return (a << SK_A32_SHIFT) | (r << SK_R32_SHIFT) |
           (g << SK_G32_SHIFT) | (b << SK_B32_SHIFT);
}

/**
 *  Same as SkPackARGB32, but this version is guaranteed not to check, even in
 *  debug builds, that the values are premultiplied.
 */
static inline SkPMColor SkPackARGB32NoCheck(U8CPU a, U8CPU r, U8CPU g, U8CPU b) {
    return (a << SK_A32_SHIFT) | (r << SK_R32_SHIFT) |
           (g << SK_G32_SHIFT) | (b << SK_B32_SHIFT);
}

static inline
SkPMColor SkPremultiplyARGBInline(U8CPU a, U8CPU r, U8CPU g, U8CPU b) {
    SkA32Assert(a);
    SkR32Assert(r);
    SkG32Assert(g);
    SkB32Assert(b);

    if (a != 255) {
        // scale each color channel by alpha, with rounding; fully opaque
        // colors skip the multiplies entirely
        r = SkMulDiv255Round(r, a);
        g = SkMulDiv255Round(g, a);
        b = SkMulDiv255Round(b, a);
    }
    return SkPackARGB32(a, r, g, b);
}
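
// A minimal usage sketch, not part of the original header (the helper name is
// hypothetical): premultiplying an unpremultiplied SkColor before storing it
// as an SkPMColor. SkColorGetA/R/G/B are the accessors from SkColor.h.
static inline SkPMColor SkPremultiplyColor_Sketch(SkColor c) {
    return SkPremultiplyARGBInline(SkColorGetA(c), SkColorGetR(c),
                                   SkColorGetG(c), SkColorGetB(c));
}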

// When Android is compiled optimizing for size, SkAlphaMulQ doesn't get
// inlined; forcing inlining significantly improves performance.
static SK_ALWAYS_INLINE uint32_t SkAlphaMulQ(uint32_t c, unsigned scale) {
    uint32_t mask = 0xFF00FF;

    // Scale two channels at a time: the mask isolates bytes 0 and 2, and
    // byte * scale (scale <= 256) fits in 16 bits, so the two products
    // cannot collide.
    uint32_t rb = ((c & mask) * scale) >> 8;
    uint32_t ag = ((c >> 8) & mask) * scale;
    // rb holds its results in bytes 0 and 2; ag's multiply already left its
    // results shifted up into bytes 1 and 3.
    return (rb & mask) | (ag & ~mask);
}

// Porter-Duff "source over" for premultiplied pixels:
// result = src + dst * (1 - src.alpha), using the 0..256 scale from
// SkAlpha255To256.
static inline SkPMColor SkPMSrcOver(SkPMColor src, SkPMColor dst) {
    return src + SkAlphaMulQ(dst, SkAlpha255To256(255 - SkGetPackedA32(src)));
}

////////////////////////////////////////////////////////////////////////////////////////////
// Convert a 16bit pixel to a 32bit pixel

#define SK_R16_BITS     5
#define SK_G16_BITS     6
#define SK_B16_BITS     5

#define SK_R16_SHIFT    (SK_B16_BITS + SK_G16_BITS)
#define SK_G16_SHIFT    (SK_B16_BITS)
#define SK_B16_SHIFT    0

#define SK_R16_MASK     ((1 << SK_R16_BITS) - 1)
#define SK_G16_MASK     ((1 << SK_G16_BITS) - 1)
#define SK_B16_MASK     ((1 << SK_B16_BITS) - 1)

#define SkGetPackedR16(color)   (((unsigned)(color) >> SK_R16_SHIFT) & SK_R16_MASK)
#define SkGetPackedG16(color)   (((unsigned)(color) >> SK_G16_SHIFT) & SK_G16_MASK)
#define SkGetPackedB16(color)   (((unsigned)(color) >> SK_B16_SHIFT) & SK_B16_MASK)
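// Expand a 5- or 6-bit channel to 8 bits by replicating its high bits into
// the low bits, so 0 stays 0 and the channel's maximum maps to 255.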
static inline unsigned SkR16ToR32(unsigned r) {
    return (r << (8 - SK_R16_BITS)) | (r >> (2 * SK_R16_BITS - 8));
}

static inline unsigned SkG16ToG32(unsigned g) {
    return (g << (8 - SK_G16_BITS)) | (g >> (2 * SK_G16_BITS - 8));
}

static inline unsigned SkB16ToB32(unsigned b) {
    return (b << (8 - SK_B16_BITS)) | (b >> (2 * SK_B16_BITS - 8));
}

#define SkPacked16ToR32(c)      SkR16ToR32(SkGetPackedR16(c))
#define SkPacked16ToG32(c)      SkG16ToG32(SkGetPackedG16(c))
#define SkPacked16ToB32(c)      SkB16ToB32(SkGetPackedB16(c))
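
// Illustrative sketch, not part of the original header (the function name is
// hypothetical): expanding an RGB565 pixel to an opaque 32-bit SkPMColor
// using the helpers above.
static inline SkPMColor SkPixel16ToPixel32_Sketch(uint16_t src) {
    return SkPackARGB32(0xFF, SkPacked16ToR32(src),
                        SkPacked16ToG32(src), SkPacked16ToB32(src));
}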

#endif