// SkGradientShader.cpp — revision 1c6d64b78b24083ee9fd7411dac8a4a7e2d03a3c
1 2/* 3 * Copyright 2006 The Android Open Source Project 4 * 5 * Use of this source code is governed by a BSD-style license that can be 6 * found in the LICENSE file. 7 */ 8 9#include "SkGradientShaderPriv.h" 10#include "SkLinearGradient.h" 11#include "SkRadialGradient.h" 12#include "SkTwoPointRadialGradient.h" 13#include "SkTwoPointConicalGradient.h" 14#include "SkSweepGradient.h" 15 16SkGradientShaderBase::SkGradientShaderBase(const SkColor colors[], const SkScalar pos[], 17 int colorCount, SkShader::TileMode mode, SkUnitMapper* mapper) { 18 SkASSERT(colorCount > 1); 19 20 fCacheAlpha = 256; // init to a value that paint.getAlpha() can't return 21 22 fMapper = mapper; 23 SkSafeRef(mapper); 24 25 SkASSERT((unsigned)mode < SkShader::kTileModeCount); 26 SkASSERT(SkShader::kTileModeCount == SK_ARRAY_COUNT(gTileProcs)); 27 fTileMode = mode; 28 fTileProc = gTileProcs[mode]; 29 30 fCache16 = fCache16Storage = NULL; 31 fCache32 = NULL; 32 fCache32PixelRef = NULL; 33 34 /* Note: we let the caller skip the first and/or last position. 35 i.e. pos[0] = 0.3, pos[1] = 0.7 36 In these cases, we insert dummy entries to ensure that the final data 37 will be bracketed by [0, 1]. 38 i.e. our_pos[0] = 0, our_pos[1] = 0.3, our_pos[2] = 0.7, our_pos[3] = 1 39 40 Thus colorCount (the caller's value, and fColorCount (our value) may 41 differ by up to 2. 
In the above example: 42 colorCount = 2 43 fColorCount = 4 44 */ 45 fColorCount = colorCount; 46 // check if we need to add in dummy start and/or end position/colors 47 bool dummyFirst = false; 48 bool dummyLast = false; 49 if (pos) { 50 dummyFirst = pos[0] != 0; 51 dummyLast = pos[colorCount - 1] != SK_Scalar1; 52 fColorCount += dummyFirst + dummyLast; 53 } 54 55 if (fColorCount > kColorStorageCount) { 56 size_t size = sizeof(SkColor) + sizeof(Rec); 57 fOrigColors = reinterpret_cast<SkColor*>( 58 sk_malloc_throw(size * fColorCount)); 59 } 60 else { 61 fOrigColors = fStorage; 62 } 63 64 // Now copy over the colors, adding the dummies as needed 65 { 66 SkColor* origColors = fOrigColors; 67 if (dummyFirst) { 68 *origColors++ = colors[0]; 69 } 70 memcpy(origColors, colors, colorCount * sizeof(SkColor)); 71 if (dummyLast) { 72 origColors += colorCount; 73 *origColors = colors[colorCount - 1]; 74 } 75 } 76 77 fRecs = (Rec*)(fOrigColors + fColorCount); 78 if (fColorCount > 2) { 79 Rec* recs = fRecs; 80 recs->fPos = 0; 81 // recs->fScale = 0; // unused; 82 recs += 1; 83 if (pos) { 84 /* We need to convert the user's array of relative positions into 85 fixed-point positions and scale factors. We need these results 86 to be strictly monotonic (no two values equal or out of order). 87 Hence this complex loop that just jams a zero for the scale 88 value if it sees a segment out of order, and it assures that 89 we start at 0 and end at 1.0 90 */ 91 SkFixed prev = 0; 92 int startIndex = dummyFirst ? 
0 : 1; 93 int count = colorCount + dummyLast; 94 for (int i = startIndex; i < count; i++) { 95 // force the last value to be 1.0 96 SkFixed curr; 97 if (i == colorCount) { // we're really at the dummyLast 98 curr = SK_Fixed1; 99 } else { 100 curr = SkScalarToFixed(pos[i]); 101 } 102 // pin curr withing range 103 if (curr < 0) { 104 curr = 0; 105 } else if (curr > SK_Fixed1) { 106 curr = SK_Fixed1; 107 } 108 recs->fPos = curr; 109 if (curr > prev) { 110 recs->fScale = (1 << 24) / (curr - prev); 111 } else { 112 recs->fScale = 0; // ignore this segment 113 } 114 // get ready for the next value 115 prev = curr; 116 recs += 1; 117 } 118 } else { // assume even distribution 119 SkFixed dp = SK_Fixed1 / (colorCount - 1); 120 SkFixed p = dp; 121 SkFixed scale = (colorCount - 1) << 8; // (1 << 24) / dp 122 for (int i = 1; i < colorCount; i++) { 123 recs->fPos = p; 124 recs->fScale = scale; 125 recs += 1; 126 p += dp; 127 } 128 } 129 } 130 this->initCommon(); 131} 132 133SkGradientShaderBase::SkGradientShaderBase(SkFlattenableReadBuffer& buffer) : 134 INHERITED(buffer) { 135 fCacheAlpha = 256; 136 137 fMapper = static_cast<SkUnitMapper*>(buffer.readFlattenable()); 138 139 fCache16 = fCache16Storage = NULL; 140 fCache32 = NULL; 141 fCache32PixelRef = NULL; 142 143 int colorCount = fColorCount = buffer.readU32(); 144 if (colorCount > kColorStorageCount) { 145 size_t size = sizeof(SkColor) + sizeof(SkPMColor) + sizeof(Rec); 146 fOrigColors = (SkColor*)sk_malloc_throw(size * colorCount); 147 } else { 148 fOrigColors = fStorage; 149 } 150 buffer.read(fOrigColors, colorCount * sizeof(SkColor)); 151 152 fTileMode = (TileMode)buffer.readU8(); 153 fTileProc = gTileProcs[fTileMode]; 154 fRecs = (Rec*)(fOrigColors + colorCount); 155 if (colorCount > 2) { 156 Rec* recs = fRecs; 157 recs[0].fPos = 0; 158 for (int i = 1; i < colorCount; i++) { 159 recs[i].fPos = buffer.readS32(); 160 recs[i].fScale = buffer.readU32(); 161 } 162 } 163 buffer.readMatrix(&fPtsToUnit); 164 
this->initCommon(); 165} 166 167SkGradientShaderBase::~SkGradientShaderBase() { 168 if (fCache16Storage) { 169 sk_free(fCache16Storage); 170 } 171 SkSafeUnref(fCache32PixelRef); 172 if (fOrigColors != fStorage) { 173 sk_free(fOrigColors); 174 } 175 SkSafeUnref(fMapper); 176} 177 178void SkGradientShaderBase::initCommon() { 179 fFlags = 0; 180 unsigned colorAlpha = 0xFF; 181 for (int i = 0; i < fColorCount; i++) { 182 colorAlpha &= SkColorGetA(fOrigColors[i]); 183 } 184 fColorsAreOpaque = colorAlpha == 0xFF; 185} 186 187void SkGradientShaderBase::flatten(SkFlattenableWriteBuffer& buffer) const { 188 this->INHERITED::flatten(buffer); 189 buffer.writeFlattenable(fMapper); 190 buffer.write32(fColorCount); 191 buffer.writeMul4(fOrigColors, fColorCount * sizeof(SkColor)); 192 buffer.write8(fTileMode); 193 if (fColorCount > 2) { 194 Rec* recs = fRecs; 195 for (int i = 1; i < fColorCount; i++) { 196 buffer.write32(recs[i].fPos); 197 buffer.write32(recs[i].fScale); 198 } 199 } 200 buffer.writeMatrix(fPtsToUnit); 201} 202 203bool SkGradientShaderBase::isOpaque() const { 204 return fColorsAreOpaque; 205} 206 207bool SkGradientShaderBase::setContext(const SkBitmap& device, 208 const SkPaint& paint, 209 const SkMatrix& matrix) { 210 if (!this->INHERITED::setContext(device, paint, matrix)) { 211 return false; 212 } 213 214 const SkMatrix& inverse = this->getTotalInverse(); 215 216 if (!fDstToIndex.setConcat(fPtsToUnit, inverse)) { 217 return false; 218 } 219 220 fDstToIndexProc = fDstToIndex.getMapXYProc(); 221 fDstToIndexClass = (uint8_t)SkShader::ComputeMatrixClass(fDstToIndex); 222 223 // now convert our colors in to PMColors 224 unsigned paintAlpha = this->getPaintAlpha(); 225 226 fFlags = this->INHERITED::getFlags(); 227 if (fColorsAreOpaque && paintAlpha == 0xFF) { 228 fFlags |= kOpaqueAlpha_Flag; 229 } 230 // we can do span16 as long as our individual colors are opaque, 231 // regardless of the paint's alpha 232 if (fColorsAreOpaque) { 233 fFlags |= kHasSpan16_Flag; 234 } 
235 236 this->setCacheAlpha(paintAlpha); 237 return true; 238} 239 240void SkGradientShaderBase::setCacheAlpha(U8CPU alpha) const { 241 // if the new alpha differs from the previous time we were called, inval our cache 242 // this will trigger the cache to be rebuilt. 243 // we don't care about the first time, since the cache ptrs will already be NULL 244 if (fCacheAlpha != alpha) { 245 fCache16 = NULL; // inval the cache 246 fCache32 = NULL; // inval the cache 247 fCacheAlpha = alpha; // record the new alpha 248 // inform our subclasses 249 if (fCache32PixelRef) { 250 fCache32PixelRef->notifyPixelsChanged(); 251 } 252 } 253} 254 255#define Fixed_To_Dot8(x) (((x) + 0x80) >> 8) 256 257/** We take the original colors, not our premultiplied PMColors, since we can 258 build a 16bit table as long as the original colors are opaque, even if the 259 paint specifies a non-opaque alpha. 260*/ 261void SkGradientShaderBase::Build16bitCache(uint16_t cache[], SkColor c0, SkColor c1, 262 int count) { 263 SkASSERT(count > 1); 264 SkASSERT(SkColorGetA(c0) == 0xFF); 265 SkASSERT(SkColorGetA(c1) == 0xFF); 266 267 SkFixed r = SkColorGetR(c0); 268 SkFixed g = SkColorGetG(c0); 269 SkFixed b = SkColorGetB(c0); 270 271 SkFixed dr = SkIntToFixed(SkColorGetR(c1) - r) / (count - 1); 272 SkFixed dg = SkIntToFixed(SkColorGetG(c1) - g) / (count - 1); 273 SkFixed db = SkIntToFixed(SkColorGetB(c1) - b) / (count - 1); 274 275 r = SkIntToFixed(r) + 0x8000; 276 g = SkIntToFixed(g) + 0x8000; 277 b = SkIntToFixed(b) + 0x8000; 278 279 do { 280 unsigned rr = r >> 16; 281 unsigned gg = g >> 16; 282 unsigned bb = b >> 16; 283 cache[0] = SkPackRGB16(SkR32ToR16(rr), SkG32ToG16(gg), SkB32ToB16(bb)); 284 cache[kCache16Count] = SkDitherPack888ToRGB16(rr, gg, bb); 285 cache += 1; 286 r += dr; 287 g += dg; 288 b += db; 289 } while (--count != 0); 290} 291 292/* 293 * 2x2 dither a fixed-point color component (8.16) down to 8, matching the 294 * semantics of how we 2x2 dither 32->16 295 */ 296static inline U8CPU 
dither_fixed_to_8(SkFixed n) { 297 n >>= 8; 298 return ((n << 1) - ((n >> 8 << 8) | (n >> 8))) >> 8; 299} 300 301/* 302 * For dithering with premultiply, we want to ceiling the alpha component, 303 * to ensure that it is always >= any color component. 304 */ 305static inline U8CPU dither_ceil_fixed_to_8(SkFixed n) { 306 n >>= 8; 307 return ((n << 1) - (n | (n >> 8))) >> 8; 308} 309 310void SkGradientShaderBase::Build32bitCache(SkPMColor cache[], SkColor c0, SkColor c1, 311 int count, U8CPU paintAlpha) { 312 SkASSERT(count > 1); 313 314 // need to apply paintAlpha to our two endpoints 315 SkFixed a = SkMulDiv255Round(SkColorGetA(c0), paintAlpha); 316 SkFixed da; 317 { 318 int tmp = SkMulDiv255Round(SkColorGetA(c1), paintAlpha); 319 da = SkIntToFixed(tmp - a) / (count - 1); 320 } 321 322 SkFixed r = SkColorGetR(c0); 323 SkFixed g = SkColorGetG(c0); 324 SkFixed b = SkColorGetB(c0); 325 SkFixed dr = SkIntToFixed(SkColorGetR(c1) - r) / (count - 1); 326 SkFixed dg = SkIntToFixed(SkColorGetG(c1) - g) / (count - 1); 327 SkFixed db = SkIntToFixed(SkColorGetB(c1) - b) / (count - 1); 328 329 a = SkIntToFixed(a) + 0x8000; 330 r = SkIntToFixed(r) + 0x8000; 331 g = SkIntToFixed(g) + 0x8000; 332 b = SkIntToFixed(b) + 0x8000; 333 334 do { 335 cache[0] = SkPremultiplyARGBInline(a >> 16, r >> 16, g >> 16, b >> 16); 336 cache[kCache32Count] = 337 SkPremultiplyARGBInline(dither_ceil_fixed_to_8(a), 338 dither_fixed_to_8(r), 339 dither_fixed_to_8(g), 340 dither_fixed_to_8(b)); 341 cache += 1; 342 a += da; 343 r += dr; 344 g += dg; 345 b += db; 346 } while (--count != 0); 347} 348 349static inline int SkFixedToFFFF(SkFixed x) { 350 SkASSERT((unsigned)x <= SK_Fixed1); 351 return x - (x >> 16); 352} 353 354static inline U16CPU bitsTo16(unsigned x, const unsigned bits) { 355 SkASSERT(x < (1U << bits)); 356 if (6 == bits) { 357 return (x << 10) | (x << 4) | (x >> 2); 358 } 359 if (8 == bits) { 360 return (x << 8) | x; 361 } 362 sk_throw(); 363 return 0; 364} 365 366/** We duplicate the last 
value in each half of the cache so that 367 interpolation doesn't have to special-case being at the last point. 368*/ 369static void complete_16bit_cache(uint16_t* cache, int stride) { 370 cache[stride - 1] = cache[stride - 2]; 371 cache[2 * stride - 1] = cache[2 * stride - 2]; 372} 373 374const uint16_t* SkGradientShaderBase::getCache16() const { 375 if (fCache16 == NULL) { 376 // double the count for dither entries 377 const int entryCount = kCache16Count * 2; 378 const size_t allocSize = sizeof(uint16_t) * entryCount; 379 380 if (fCache16Storage == NULL) { // set the storage and our working ptr 381 fCache16Storage = (uint16_t*)sk_malloc_throw(allocSize); 382 } 383 fCache16 = fCache16Storage; 384 if (fColorCount == 2) { 385 Build16bitCache(fCache16, fOrigColors[0], fOrigColors[1], 386 kGradient16Length); 387 } else { 388 Rec* rec = fRecs; 389 int prevIndex = 0; 390 for (int i = 1; i < fColorCount; i++) { 391 int nextIndex = SkFixedToFFFF(rec[i].fPos) >> kCache16Shift; 392 SkASSERT(nextIndex < kCache16Count); 393 394 if (nextIndex > prevIndex) 395 Build16bitCache(fCache16 + prevIndex, fOrigColors[i-1], fOrigColors[i], nextIndex - prevIndex + 1); 396 prevIndex = nextIndex; 397 } 398 // one extra space left over at the end for complete_16bit_cache() 399 SkASSERT(prevIndex == kGradient16Length - 1); 400 } 401 402 if (fMapper) { 403 fCache16Storage = (uint16_t*)sk_malloc_throw(allocSize); 404 uint16_t* linear = fCache16; // just computed linear data 405 uint16_t* mapped = fCache16Storage; // storage for mapped data 406 SkUnitMapper* map = fMapper; 407 for (int i = 0; i < kGradient16Length; i++) { 408 int index = map->mapUnit16(bitsTo16(i, kCache16Bits)) >> kCache16Shift; 409 mapped[i] = linear[index]; 410 mapped[i + kCache16Count] = linear[index + kCache16Count]; 411 } 412 sk_free(fCache16); 413 fCache16 = fCache16Storage; 414 } 415 complete_16bit_cache(fCache16, kCache16Count); 416 } 417 return fCache16; 418} 419 420/** We duplicate the last value in each half of the 
cache so that 421 interpolation doesn't have to special-case being at the last point. 422*/ 423static void complete_32bit_cache(SkPMColor* cache, int stride) { 424 cache[stride - 1] = cache[stride - 2]; 425 cache[2 * stride - 1] = cache[2 * stride - 2]; 426} 427 428const SkPMColor* SkGradientShaderBase::getCache32() const { 429 if (fCache32 == NULL) { 430 // double the count for dither entries 431 const int entryCount = kCache32Count * 2; 432 const size_t allocSize = sizeof(SkPMColor) * entryCount; 433 434 if (NULL == fCache32PixelRef) { 435 fCache32PixelRef = SkNEW_ARGS(SkMallocPixelRef, 436 (NULL, allocSize, NULL)); 437 } 438 fCache32 = (SkPMColor*)fCache32PixelRef->getAddr(); 439 if (fColorCount == 2) { 440 Build32bitCache(fCache32, fOrigColors[0], fOrigColors[1], 441 kGradient32Length, fCacheAlpha); 442 } else { 443 Rec* rec = fRecs; 444 int prevIndex = 0; 445 for (int i = 1; i < fColorCount; i++) { 446 int nextIndex = SkFixedToFFFF(rec[i].fPos) >> kCache32Shift; 447 SkASSERT(nextIndex < kGradient32Length); 448 449 if (nextIndex > prevIndex) 450 Build32bitCache(fCache32 + prevIndex, fOrigColors[i-1], 451 fOrigColors[i], 452 nextIndex - prevIndex + 1, fCacheAlpha); 453 prevIndex = nextIndex; 454 } 455 SkASSERT(prevIndex == kGradient32Length - 1); 456 } 457 458 if (fMapper) { 459 SkMallocPixelRef* newPR = SkNEW_ARGS(SkMallocPixelRef, 460 (NULL, allocSize, NULL)); 461 SkPMColor* linear = fCache32; // just computed linear data 462 SkPMColor* mapped = (SkPMColor*)newPR->getAddr(); // storage for mapped data 463 SkUnitMapper* map = fMapper; 464 for (int i = 0; i < kGradient32Length; i++) { 465 int index = map->mapUnit16((i << 8) | i) >> 8; 466 mapped[i] = linear[index]; 467 mapped[i + kCache32Count] = linear[index + kCache32Count]; 468 } 469 fCache32PixelRef->unref(); 470 fCache32PixelRef = newPR; 471 fCache32 = (SkPMColor*)newPR->getAddr(); 472 } 473 complete_32bit_cache(fCache32, kCache32Count); 474 } 475 return fCache32; 476} 477 478/* 479 * Because our caller 
might rebuild the same (logically the same) gradient 480 * over and over, we'd like to return exactly the same "bitmap" if possible, 481 * allowing the client to utilize a cache of our bitmap (e.g. with a GPU). 482 * To do that, we maintain a private cache of built-bitmaps, based on our 483 * colors and positions. Note: we don't try to flatten the fMapper, so if one 484 * is present, we skip the cache for now. 485 */ 486void SkGradientShaderBase::getGradientTableBitmap(SkBitmap* bitmap) const { 487 // our caller assumes no external alpha, so we ensure that our cache is 488 // built with 0xFF 489 this->setCacheAlpha(0xFF); 490 491 // don't have a way to put the mapper into our cache-key yet 492 if (fMapper) { 493 // force our cahce32pixelref to be built 494 (void)this->getCache32(); 495 bitmap->setConfig(SkBitmap::kARGB_8888_Config, kGradient32Length, 1); 496 bitmap->setPixelRef(fCache32PixelRef); 497 return; 498 } 499 500 // build our key: [numColors + colors[] + {positions[]} ] 501 int count = 1 + fColorCount; 502 if (fColorCount > 2) { 503 count += fColorCount - 1; // fRecs[].fPos 504 } 505 506 SkAutoSTMalloc<16, int32_t> storage(count); 507 int32_t* buffer = storage.get(); 508 509 *buffer++ = fColorCount; 510 memcpy(buffer, fOrigColors, fColorCount * sizeof(SkColor)); 511 buffer += fColorCount; 512 if (fColorCount > 2) { 513 for (int i = 1; i < fColorCount; i++) { 514 *buffer++ = fRecs[i].fPos; 515 } 516 } 517 SkASSERT(buffer - storage.get() == count); 518 519 /////////////////////////////////// 520 521 SK_DECLARE_STATIC_MUTEX(gMutex); 522 static SkBitmapCache* gCache; 523 // each cache cost 1K of RAM, since each bitmap will be 1x256 at 32bpp 524 static const int MAX_NUM_CACHED_GRADIENT_BITMAPS = 32; 525 SkAutoMutexAcquire ama(gMutex); 526 527 if (NULL == gCache) { 528 gCache = SkNEW_ARGS(SkBitmapCache, (MAX_NUM_CACHED_GRADIENT_BITMAPS)); 529 } 530 size_t size = count * sizeof(int32_t); 531 532 if (!gCache->find(storage.get(), size, bitmap)) { 533 // force our 
cahce32pixelref to be built 534 (void)this->getCache32(); 535 // Only expose the linear section of the cache; don't let the caller 536 // know about the padding at the end to make interpolation faster. 537 bitmap->setConfig(SkBitmap::kARGB_8888_Config, kGradient32Length, 1); 538 bitmap->setPixelRef(fCache32PixelRef); 539 540 gCache->add(storage.get(), size, *bitmap); 541 } 542} 543 544void SkGradientShaderBase::commonAsAGradient(GradientInfo* info) const { 545 if (info) { 546 if (info->fColorCount >= fColorCount) { 547 if (info->fColors) { 548 memcpy(info->fColors, fOrigColors, 549 fColorCount * sizeof(SkColor)); 550 } 551 if (info->fColorOffsets) { 552 if (fColorCount == 2) { 553 info->fColorOffsets[0] = 0; 554 info->fColorOffsets[1] = SK_Scalar1; 555 } else if (fColorCount > 2) { 556 for (int i = 0; i < fColorCount; i++) 557 info->fColorOffsets[i] = SkFixedToScalar(fRecs[i].fPos); 558 } 559 } 560 } 561 info->fColorCount = fColorCount; 562 info->fTileMode = fTileMode; 563 } 564} 565 566/////////////////////////////////////////////////////////////////////////////// 567/////////////////////////////////////////////////////////////////////////////// 568 569#include "SkEmptyShader.h" 570 571// assumes colors is SkColor* and pos is SkScalar* 572#define EXPAND_1_COLOR(count) \ 573 SkColor tmp[2]; \ 574 do { \ 575 if (1 == count) { \ 576 tmp[0] = tmp[1] = colors[0]; \ 577 colors = tmp; \ 578 pos = NULL; \ 579 count = 2; \ 580 } \ 581 } while (0) 582 583SkShader* SkGradientShader::CreateLinear(const SkPoint pts[2], 584 const SkColor colors[], 585 const SkScalar pos[], int colorCount, 586 SkShader::TileMode mode, 587 SkUnitMapper* mapper) { 588 if (NULL == pts || NULL == colors || colorCount < 1) { 589 return NULL; 590 } 591 EXPAND_1_COLOR(colorCount); 592 593 return SkNEW_ARGS(SkLinearGradient, 594 (pts, colors, pos, colorCount, mode, mapper)); 595} 596 597SkShader* SkGradientShader::CreateRadial(const SkPoint& center, SkScalar radius, 598 const SkColor colors[], 599 const 
SkScalar pos[], int colorCount, 600 SkShader::TileMode mode, 601 SkUnitMapper* mapper) { 602 if (radius <= 0 || NULL == colors || colorCount < 1) { 603 return NULL; 604 } 605 EXPAND_1_COLOR(colorCount); 606 607 return SkNEW_ARGS(SkRadialGradient, 608 (center, radius, colors, pos, colorCount, mode, mapper)); 609} 610 611SkShader* SkGradientShader::CreateTwoPointRadial(const SkPoint& start, 612 SkScalar startRadius, 613 const SkPoint& end, 614 SkScalar endRadius, 615 const SkColor colors[], 616 const SkScalar pos[], 617 int colorCount, 618 SkShader::TileMode mode, 619 SkUnitMapper* mapper) { 620 if (startRadius < 0 || endRadius < 0 || NULL == colors || colorCount < 1) { 621 return NULL; 622 } 623 EXPAND_1_COLOR(colorCount); 624 625 return SkNEW_ARGS(SkTwoPointRadialGradient, 626 (start, startRadius, end, endRadius, colors, pos, 627 colorCount, mode, mapper)); 628} 629 630SkShader* SkGradientShader::CreateTwoPointConical(const SkPoint& start, 631 SkScalar startRadius, 632 const SkPoint& end, 633 SkScalar endRadius, 634 const SkColor colors[], 635 const SkScalar pos[], 636 int colorCount, 637 SkShader::TileMode mode, 638 SkUnitMapper* mapper) { 639 if (startRadius < 0 || endRadius < 0 || NULL == colors || colorCount < 1) { 640 return NULL; 641 } 642 if (start == end && startRadius == endRadius) { 643 return SkNEW(SkEmptyShader); 644 } 645 646 return SkNEW_ARGS(SkTwoPointConicalGradient, 647 (start, startRadius, end, endRadius, colors, pos, 648 colorCount, mode, mapper)); 649} 650 651SkShader* SkGradientShader::CreateSweep(SkScalar cx, SkScalar cy, 652 const SkColor colors[], 653 const SkScalar pos[], 654 int count, SkUnitMapper* mapper) { 655 if (NULL == colors || count < 1) { 656 return NULL; 657 } 658 EXPAND_1_COLOR(count); 659 660 return SkNEW_ARGS(SkSweepGradient, (cx, cy, colors, pos, count, mapper)); 661} 662 663SK_DEFINE_FLATTENABLE_REGISTRAR_GROUP_START(SkGradientShader) 664 SK_DEFINE_FLATTENABLE_REGISTRAR_ENTRY(SkLinearGradient) 665 
SK_DEFINE_FLATTENABLE_REGISTRAR_ENTRY(SkRadialGradient) 666 SK_DEFINE_FLATTENABLE_REGISTRAR_ENTRY(SkSweepGradient) 667 SK_DEFINE_FLATTENABLE_REGISTRAR_ENTRY(SkTwoPointRadialGradient) 668 SK_DEFINE_FLATTENABLE_REGISTRAR_ENTRY(SkTwoPointConicalGradient) 669SK_DEFINE_FLATTENABLE_REGISTRAR_GROUP_END 670 671/////////////////////////////////////////////////////////////////////////////// 672 673GrGLGradientStage::GrGLGradientStage(const GrProgramStageFactory& factory) 674 : INHERITED(factory) { } 675 676GrGLGradientStage::~GrGLGradientStage() { } 677 678void GrGLGradientStage::emitColorLookup(GrGLShaderBuilder* builder, 679 const char* tName, 680 const char* outputColor, 681 const char* samplerName) { 682 // Texture is effectively 1D so the y coordinate is 0.5, if we pack multiple 683 // gradients into a texture, we could instead pick the appropriate row here 684 builder->fSampleCoords.printf("vec2(%s, 0.5)", tName); 685 builder->fComplexCoord = true; 686 builder->emitDefaultFetch(outputColor, samplerName); 687} 688 689///////////////////////////////////////////////////////////////////// 690 691GrGradientEffect::GrGradientEffect(GrTexture* texture) 692 : fTexture (texture) 693 , fUseTexture(true) { 694 SkSafeRef(fTexture); 695} 696 697GrGradientEffect::GrGradientEffect(GrContext* ctx, 698 const SkGradientShaderBase& shader, 699 GrSamplerState* sampler) 700 : fTexture (NULL) 701 , fUseTexture (false) { 702 // TODO: check for simple cases where we don't need a texture: 703 //GradientInfo info; 704 //shader.asAGradient(&info); 705 //if (info.fColorCount == 2) { ... 
706 707 SkBitmap bitmap; 708 shader.getGradientTableBitmap(&bitmap); 709 710 GrContext::TextureCacheEntry entry = GrLockCachedBitmapTexture(ctx, bitmap, 711 sampler->textureParams()); 712 fTexture = entry.texture(); 713 SkSafeRef(fTexture); 714 fUseTexture = true; 715 716 // Unlock immediately, this is not great, but we don't have a way of 717 // knowing when else to unlock it currently, so it may get purged from 718 // the cache, but it'll still be ref'd until it's no longer being used. 719 GrUnlockCachedBitmapTexture(ctx, entry); 720} 721 722GrGradientEffect::~GrGradientEffect() { 723 SkSafeUnref(fTexture); 724} 725 726unsigned int GrGradientEffect::numTextures() const { 727 return fUseTexture ? 1 : 0; 728} 729 730GrTexture* GrGradientEffect::texture(unsigned int index) 731 const { 732 GrAssert(fUseTexture && 0 == index); 733 return fTexture; 734} 735 736