// SkGradientShader.cpp revision 868a8e7fc83e9ac6ee1418e75b84a0595605626c
1 2/* 3 * Copyright 2006 The Android Open Source Project 4 * 5 * Use of this source code is governed by a BSD-style license that can be 6 * found in the LICENSE file. 7 */ 8 9#include "SkGradientShaderPriv.h" 10#include "SkLinearGradient.h" 11#include "SkRadialGradient.h" 12#include "SkTwoPointRadialGradient.h" 13#include "SkTwoPointConicalGradient.h" 14#include "SkSweepGradient.h" 15 16SkGradientShaderBase::SkGradientShaderBase(const SkColor colors[], const SkScalar pos[], 17 int colorCount, SkShader::TileMode mode, SkUnitMapper* mapper) { 18 SkASSERT(colorCount > 1); 19 20 fCacheAlpha = 256; // init to a value that paint.getAlpha() can't return 21 22 fMapper = mapper; 23 SkSafeRef(mapper); 24 25 SkASSERT((unsigned)mode < SkShader::kTileModeCount); 26 SkASSERT(SkShader::kTileModeCount == SK_ARRAY_COUNT(gTileProcs)); 27 fTileMode = mode; 28 fTileProc = gTileProcs[mode]; 29 30 fCache16 = fCache16Storage = NULL; 31 fCache32 = NULL; 32 fCache32PixelRef = NULL; 33 34 /* Note: we let the caller skip the first and/or last position. 35 i.e. pos[0] = 0.3, pos[1] = 0.7 36 In these cases, we insert dummy entries to ensure that the final data 37 will be bracketed by [0, 1]. 38 i.e. our_pos[0] = 0, our_pos[1] = 0.3, our_pos[2] = 0.7, our_pos[3] = 1 39 40 Thus colorCount (the caller's value, and fColorCount (our value) may 41 differ by up to 2. 
In the above example: 42 colorCount = 2 43 fColorCount = 4 44 */ 45 fColorCount = colorCount; 46 // check if we need to add in dummy start and/or end position/colors 47 bool dummyFirst = false; 48 bool dummyLast = false; 49 if (pos) { 50 dummyFirst = pos[0] != 0; 51 dummyLast = pos[colorCount - 1] != SK_Scalar1; 52 fColorCount += dummyFirst + dummyLast; 53 } 54 55 if (fColorCount > kColorStorageCount) { 56 size_t size = sizeof(SkColor) + sizeof(Rec); 57 fOrigColors = reinterpret_cast<SkColor*>( 58 sk_malloc_throw(size * fColorCount)); 59 } 60 else { 61 fOrigColors = fStorage; 62 } 63 64 // Now copy over the colors, adding the dummies as needed 65 { 66 SkColor* origColors = fOrigColors; 67 if (dummyFirst) { 68 *origColors++ = colors[0]; 69 } 70 memcpy(origColors, colors, colorCount * sizeof(SkColor)); 71 if (dummyLast) { 72 origColors += colorCount; 73 *origColors = colors[colorCount - 1]; 74 } 75 } 76 77 fRecs = (Rec*)(fOrigColors + fColorCount); 78 if (fColorCount > 2) { 79 Rec* recs = fRecs; 80 recs->fPos = 0; 81 // recs->fScale = 0; // unused; 82 recs += 1; 83 if (pos) { 84 /* We need to convert the user's array of relative positions into 85 fixed-point positions and scale factors. We need these results 86 to be strictly monotonic (no two values equal or out of order). 87 Hence this complex loop that just jams a zero for the scale 88 value if it sees a segment out of order, and it assures that 89 we start at 0 and end at 1.0 90 */ 91 SkFixed prev = 0; 92 int startIndex = dummyFirst ? 
0 : 1; 93 int count = colorCount + dummyLast; 94 for (int i = startIndex; i < count; i++) { 95 // force the last value to be 1.0 96 SkFixed curr; 97 if (i == colorCount) { // we're really at the dummyLast 98 curr = SK_Fixed1; 99 } else { 100 curr = SkScalarToFixed(pos[i]); 101 } 102 // pin curr withing range 103 if (curr < 0) { 104 curr = 0; 105 } else if (curr > SK_Fixed1) { 106 curr = SK_Fixed1; 107 } 108 recs->fPos = curr; 109 if (curr > prev) { 110 recs->fScale = (1 << 24) / (curr - prev); 111 } else { 112 recs->fScale = 0; // ignore this segment 113 } 114 // get ready for the next value 115 prev = curr; 116 recs += 1; 117 } 118 } else { // assume even distribution 119 SkFixed dp = SK_Fixed1 / (colorCount - 1); 120 SkFixed p = dp; 121 SkFixed scale = (colorCount - 1) << 8; // (1 << 24) / dp 122 for (int i = 1; i < colorCount; i++) { 123 recs->fPos = p; 124 recs->fScale = scale; 125 recs += 1; 126 p += dp; 127 } 128 } 129 } 130 this->initCommon(); 131} 132 133SkGradientShaderBase::SkGradientShaderBase(SkFlattenableReadBuffer& buffer) : 134 INHERITED(buffer) { 135 fCacheAlpha = 256; 136 137 fMapper = buffer.readFlattenableT<SkUnitMapper>(); 138 139 fCache16 = fCache16Storage = NULL; 140 fCache32 = NULL; 141 fCache32PixelRef = NULL; 142 143 int colorCount = fColorCount = buffer.getArrayCount(); 144 if (colorCount > kColorStorageCount) { 145 size_t size = sizeof(SkColor) + sizeof(SkPMColor) + sizeof(Rec); 146 fOrigColors = (SkColor*)sk_malloc_throw(size * colorCount); 147 } else { 148 fOrigColors = fStorage; 149 } 150 buffer.readColorArray(fOrigColors); 151 152 fTileMode = (TileMode)buffer.readUInt(); 153 fTileProc = gTileProcs[fTileMode]; 154 fRecs = (Rec*)(fOrigColors + colorCount); 155 if (colorCount > 2) { 156 Rec* recs = fRecs; 157 recs[0].fPos = 0; 158 for (int i = 1; i < colorCount; i++) { 159 recs[i].fPos = buffer.readInt(); 160 recs[i].fScale = buffer.readUInt(); 161 } 162 } 163 buffer.readMatrix(&fPtsToUnit); 164 this->initCommon(); 165} 166 
// Free any heap allocations: the 16-bit cache storage, the 32-bit cache
// pixel-ref, the color/rec block (when it didn't fit in fStorage), and our
// ref on the optional unit mapper.
SkGradientShaderBase::~SkGradientShaderBase() {
    if (fCache16Storage) {
        sk_free(fCache16Storage);
    }
    SkSafeUnref(fCache32PixelRef);
    if (fOrigColors != fStorage) {
        sk_free(fOrigColors);
    }
    SkSafeUnref(fMapper);
}

// Shared tail of both constructors: compute state derived from the colors.
void SkGradientShaderBase::initCommon() {
    fFlags = 0;
    // AND all alphas together: the result stays 0xFF only if every color is
    // fully opaque.
    unsigned colorAlpha = 0xFF;
    for (int i = 0; i < fColorCount; i++) {
        colorAlpha &= SkColorGetA(fOrigColors[i]);
    }
    fColorsAreOpaque = colorAlpha == 0xFF;
}

// Serialize the shader. The unflattening constructor reads these fields back
// in exactly this order, so the two must be kept in sync.
void SkGradientShaderBase::flatten(SkFlattenableWriteBuffer& buffer) const {
    this->INHERITED::flatten(buffer);
    buffer.writeFlattenable(fMapper);
    buffer.writeColorArray(fOrigColors, fColorCount);
    buffer.writeUInt(fTileMode);
    if (fColorCount > 2) {
        Rec* recs = fRecs;
        // recs[0].fPos is always 0, so only entries 1..fColorCount-1 are
        // written (and read back by the unflattening constructor).
        for (int i = 1; i < fColorCount; i++) {
            buffer.writeInt(recs[i].fPos);
            buffer.writeUInt(recs[i].fScale);
        }
    }
    buffer.writeMatrix(fPtsToUnit);
}

// True iff every gradient color has alpha 0xFF (computed in initCommon()).
bool SkGradientShaderBase::isOpaque() const {
    return fColorsAreOpaque;
}

// Per-draw setup: compute the device-space -> gradient-index matrix and the
// flags that depend on the paint's alpha. Returns false if the base class
// rejects the draw or the concatenated matrix cannot be formed.
bool SkGradientShaderBase::setContext(const SkBitmap& device,
                                      const SkPaint& paint,
                                      const SkMatrix& matrix) {
    if (!this->INHERITED::setContext(device, paint, matrix)) {
        return false;
    }

    const SkMatrix& inverse = this->getTotalInverse();

    if (!fDstToIndex.setConcat(fPtsToUnit, inverse)) {
        return false;
    }

    fDstToIndexProc = fDstToIndex.getMapXYProc();
    fDstToIndexClass = (uint8_t)SkShader::ComputeMatrixClass(fDstToIndex);

    // now convert our colors in to PMColors
    unsigned paintAlpha = this->getPaintAlpha();

    fFlags = this->INHERITED::getFlags();
    if (fColorsAreOpaque && paintAlpha == 0xFF) {
        fFlags |= kOpaqueAlpha_Flag;
    }
    // we can do span16 as long as our individual colors are opaque,
    // regardless of the paint's alpha
    if (fColorsAreOpaque) {
        fFlags |= kHasSpan16_Flag;
    }

    this->setCacheAlpha(paintAlpha);
    return true;
}
// Record the paint alpha the caches were built with; if it changed, drop the
// cache pointers so the next getCache16/32() call rebuilds them.
void SkGradientShaderBase::setCacheAlpha(U8CPU alpha) const {
    // if the new alpha differs from the previous time we were called, inval our cache
    // this will trigger the cache to be rebuilt.
    // we don't care about the first time, since the cache ptrs will already be NULL
    if (fCacheAlpha != alpha) {
        fCache16 = NULL;            // inval the cache
        fCache32 = NULL;            // inval the cache
        fCacheAlpha = alpha;        // record the new alpha
        // inform our subclasses
        if (fCache32PixelRef) {
            fCache32PixelRef->notifyPixelsChanged();
        }
    }
}

// Round a 16.16 fixed-point value down to 8 fractional bits.
#define Fixed_To_Dot8(x)        (((x) + 0x80) >> 8)

/** We take the original colors, not our premultiplied PMColors, since we can
    build a 16bit table as long as the original colors are opaque, even if the
    paint specifies a non-opaque alpha.

    Fills 'count' entries: cache[0..count-1] holds the plain 565 ramp and
    cache[kCache16Count..] holds the parallel dithered ramp.
*/
void SkGradientShaderBase::Build16bitCache(uint16_t cache[], SkColor c0, SkColor c1,
                                      int count) {
    SkASSERT(count > 1);
    SkASSERT(SkColorGetA(c0) == 0xFF);
    SkASSERT(SkColorGetA(c1) == 0xFF);

    SkFixed r = SkColorGetR(c0);
    SkFixed g = SkColorGetG(c0);
    SkFixed b = SkColorGetB(c0);

    // Per-step 16.16 increments from c0 to c1 over count-1 steps.
    SkFixed dr = SkIntToFixed(SkColorGetR(c1) - r) / (count - 1);
    SkFixed dg = SkIntToFixed(SkColorGetG(c1) - g) / (count - 1);
    SkFixed db = SkIntToFixed(SkColorGetB(c1) - b) / (count - 1);

    // +0x8000 biases each component by half a unit so the >>16 below rounds.
    r = SkIntToFixed(r) + 0x8000;
    g = SkIntToFixed(g) + 0x8000;
    b = SkIntToFixed(b) + 0x8000;

    do {
        unsigned rr = r >> 16;
        unsigned gg = g >> 16;
        unsigned bb = b >> 16;
        cache[0] = SkPackRGB16(SkR32ToR16(rr), SkG32ToG16(gg), SkB32ToB16(bb));
        cache[kCache16Count] = SkDitherPack888ToRGB16(rr, gg, bb);
        cache += 1;
        r += dr;
        g += dg;
        b += db;
    } while (--count != 0);
}

/*
 * 2x2 dither a fixed-point color component (8.16) down to 8, matching the
 * semantics of how we 2x2 dither 32->16
 */
static inline U8CPU dither_fixed_to_8(SkFixed n) {
    n >>= 8;
    return ((n << 1) - ((n >> 8 << 8) | (n >> 8))) >> 8;
}

/*
 *  For dithering with premultiply, we want to ceiling the alpha component,
 *  to ensure that it is always >= any color component.
 */
static inline U8CPU dither_ceil_fixed_to_8(SkFixed n) {
    n >>= 8;
    return ((n << 1) - (n | (n >> 8))) >> 8;
}

// Fill 'count' premultiplied entries interpolating c0 -> c1, applying
// 'paintAlpha' to both endpoints. cache[0..count-1] is the plain ramp and
// cache[kCache32Count..] is the parallel dithered ramp.
void SkGradientShaderBase::Build32bitCache(SkPMColor cache[], SkColor c0, SkColor c1,
                                      int count, U8CPU paintAlpha) {
    SkASSERT(count > 1);

    // need to apply paintAlpha to our two endpoints
    SkFixed a = SkMulDiv255Round(SkColorGetA(c0), paintAlpha);
    SkFixed da;
    {
        int tmp = SkMulDiv255Round(SkColorGetA(c1), paintAlpha);
        da = SkIntToFixed(tmp - a) / (count - 1);
    }

    SkFixed r = SkColorGetR(c0);
    SkFixed g = SkColorGetG(c0);
    SkFixed b = SkColorGetB(c0);
    SkFixed dr = SkIntToFixed(SkColorGetR(c1) - r) / (count - 1);
    SkFixed dg = SkIntToFixed(SkColorGetG(c1) - g) / (count - 1);
    SkFixed db = SkIntToFixed(SkColorGetB(c1) - b) / (count - 1);

    // +0x8000 rounds the >>16 truncation below.
    a = SkIntToFixed(a) + 0x8000;
    r = SkIntToFixed(r) + 0x8000;
    g = SkIntToFixed(g) + 0x8000;
    b = SkIntToFixed(b) + 0x8000;

    do {
        cache[0] = SkPremultiplyARGBInline(a >> 16, r >> 16, g >> 16, b >> 16);
        // Dithered half: alpha is ceiled so it stays >= every color component
        // (required for valid premultiplied colors).
        cache[kCache32Count] =
            SkPremultiplyARGBInline(dither_ceil_fixed_to_8(a),
                                    dither_fixed_to_8(r),
                                    dither_fixed_to_8(g),
                                    dither_fixed_to_8(b));
        cache += 1;
        a += da;
        r += dr;
        g += dg;
        b += db;
    } while (--count != 0);
}

// Map [0, SK_Fixed1] onto [0, 0xFFFF] (so SK_Fixed1 doesn't overflow 16 bits).
static inline int SkFixedToFFFF(SkFixed x) {
    SkASSERT((unsigned)x <= SK_Fixed1);
    return x - (x >> 16);
}

// Replicate a 6- or 8-bit value up to 16 bits (bit-replication keeps the
// full range 0..0xFFFF). Only those two widths are supported.
static inline U16CPU bitsTo16(unsigned x, const unsigned bits) {
    SkASSERT(x < (1U << bits));
    if (6 == bits) {
        return (x << 10) | (x << 4) | (x >> 2);
    }
    if (8 == bits) {
        return (x << 8) | x;
    }
    sk_throw();
    return 0;
}

/** We duplicate the last value in each half of the cache so that
    interpolation doesn't
    have to special-case being at the last point.
*/
// Copy the next-to-last entry of each half (plain + dither) into the last
// slot. Must run after any mapper remapping.
static void complete_16bit_cache(uint16_t* cache, int stride) {
    cache[stride - 1] = cache[stride - 2];
    cache[2 * stride - 1] = cache[2 * stride - 2];
}

// Lazily build (and memoize) the 16-bit lookup table: two halves of
// kCache16Count entries (plain and dithered). Rebuilt after setCacheAlpha()
// nulls fCache16.
const uint16_t* SkGradientShaderBase::getCache16() const {
    if (fCache16 == NULL) {
        // double the count for dither entries
        const int entryCount = kCache16Count * 2;
        const size_t allocSize = sizeof(uint16_t) * entryCount;

        if (fCache16Storage == NULL) { // set the storage and our working ptr
            fCache16Storage = (uint16_t*)sk_malloc_throw(allocSize);
        }
        fCache16 = fCache16Storage;
        if (fColorCount == 2) {
            Build16bitCache(fCache16, fOrigColors[0], fOrigColors[1],
                            kGradient16Length);
        } else {
            // Build one sub-ramp per segment; fRecs[i].fPos picks where each
            // segment ends in the table.
            Rec* rec = fRecs;
            int prevIndex = 0;
            for (int i = 1; i < fColorCount; i++) {
                int nextIndex = SkFixedToFFFF(rec[i].fPos) >> kCache16Shift;
                // NOTE(review): the 32-bit analog asserts against
                // kGradient32Length; this looser kCache16Count bound is
                // presumably safe but could be tightened to match -- verify.
                SkASSERT(nextIndex < kCache16Count);

                if (nextIndex > prevIndex)
                    Build16bitCache(fCache16 + prevIndex, fOrigColors[i-1], fOrigColors[i], nextIndex - prevIndex + 1);
                prevIndex = nextIndex;
            }
            // one extra space left over at the end for complete_16bit_cache()
            SkASSERT(prevIndex == kGradient16Length - 1);
        }

        if (fMapper) {
            // Remap the linear table through the unit mapper into a fresh
            // allocation, then free the linear one.
            fCache16Storage = (uint16_t*)sk_malloc_throw(allocSize);
            uint16_t* linear = fCache16;         // just computed linear data
            uint16_t* mapped = fCache16Storage;  // storage for mapped data
            SkUnitMapper* map = fMapper;
            for (int i = 0; i < kGradient16Length; i++) {
                int index = map->mapUnit16(bitsTo16(i, kCache16Bits)) >> kCache16Shift;
                mapped[i] = linear[index];
                mapped[i + kCache16Count] = linear[index + kCache16Count];
            }
            sk_free(fCache16);
            fCache16 = fCache16Storage;
        }
        // Pad the final slot of each half; must happen after the mapper pass.
        complete_16bit_cache(fCache16, kCache16Count);
    }
    return fCache16;
}

/** We duplicate the last value in each half of the cache so that
    interpolation doesn't have to special-case
 being at the last point.
*/
// Copy the next-to-last entry of each half (plain + dither) into the last
// slot. Must run after any mapper remapping.
static void complete_32bit_cache(SkPMColor* cache, int stride) {
    cache[stride - 1] = cache[stride - 2];
    cache[2 * stride - 1] = cache[2 * stride - 2];
}

// Lazily build (and memoize) the 32-bit premultiplied lookup table, stored in
// a pixel-ref so getGradientTableBitmap() can hand it out as a bitmap.
// Rebuilt after setCacheAlpha() nulls fCache32.
const SkPMColor* SkGradientShaderBase::getCache32() const {
    if (fCache32 == NULL) {
        // double the count for dither entries
        const int entryCount = kCache32Count * 2;
        const size_t allocSize = sizeof(SkPMColor) * entryCount;

        if (NULL == fCache32PixelRef) {
            fCache32PixelRef = SkNEW_ARGS(SkMallocPixelRef,
                                          (NULL, allocSize, NULL));
        }
        fCache32 = (SkPMColor*)fCache32PixelRef->getAddr();
        if (fColorCount == 2) {
            Build32bitCache(fCache32, fOrigColors[0], fOrigColors[1],
                            kGradient32Length, fCacheAlpha);
        } else {
            // Build one sub-ramp per segment; fRecs[i].fPos picks where each
            // segment ends in the table.
            Rec* rec = fRecs;
            int prevIndex = 0;
            for (int i = 1; i < fColorCount; i++) {
                int nextIndex = SkFixedToFFFF(rec[i].fPos) >> kCache32Shift;
                SkASSERT(nextIndex < kGradient32Length);

                if (nextIndex > prevIndex)
                    Build32bitCache(fCache32 + prevIndex, fOrigColors[i-1],
                                    fOrigColors[i],
                                    nextIndex - prevIndex + 1, fCacheAlpha);
                prevIndex = nextIndex;
            }
            SkASSERT(prevIndex == kGradient32Length - 1);
        }

        if (fMapper) {
            // Remap the linear table through the unit mapper into a fresh
            // pixel-ref, then swap it in place of the linear one.
            SkMallocPixelRef* newPR = SkNEW_ARGS(SkMallocPixelRef,
                                                 (NULL, allocSize, NULL));
            SkPMColor* linear = fCache32;           // just computed linear data
            SkPMColor* mapped = (SkPMColor*)newPR->getAddr();    // storage for mapped data
            SkUnitMapper* map = fMapper;
            for (int i = 0; i < kGradient32Length; i++) {
                // (i << 8) | i replicates i to 16 bits (same idea as bitsTo16).
                int index = map->mapUnit16((i << 8) | i) >> 8;
                mapped[i] = linear[index];
                mapped[i + kCache32Count] = linear[index + kCache32Count];
            }
            fCache32PixelRef->unref();
            fCache32PixelRef = newPR;
            fCache32 = (SkPMColor*)newPR->getAddr();
        }
        // Pad the final slot of each half; must happen after the mapper pass.
        complete_32bit_cache(fCache32, kCache32Count);
    }
    return fCache32;
}

/*
 *  Because our caller might rebuild the same (logically the same) gradient
 *  over
and over, we'd like to return exactly the same "bitmap" if possible, 480 * allowing the client to utilize a cache of our bitmap (e.g. with a GPU). 481 * To do that, we maintain a private cache of built-bitmaps, based on our 482 * colors and positions. Note: we don't try to flatten the fMapper, so if one 483 * is present, we skip the cache for now. 484 */ 485void SkGradientShaderBase::getGradientTableBitmap(SkBitmap* bitmap) const { 486 // our caller assumes no external alpha, so we ensure that our cache is 487 // built with 0xFF 488 this->setCacheAlpha(0xFF); 489 490 // don't have a way to put the mapper into our cache-key yet 491 if (fMapper) { 492 // force our cahce32pixelref to be built 493 (void)this->getCache32(); 494 bitmap->setConfig(SkBitmap::kARGB_8888_Config, kGradient32Length, 1); 495 bitmap->setPixelRef(fCache32PixelRef); 496 return; 497 } 498 499 // build our key: [numColors + colors[] + {positions[]} ] 500 int count = 1 + fColorCount; 501 if (fColorCount > 2) { 502 count += fColorCount - 1; // fRecs[].fPos 503 } 504 505 SkAutoSTMalloc<16, int32_t> storage(count); 506 int32_t* buffer = storage.get(); 507 508 *buffer++ = fColorCount; 509 memcpy(buffer, fOrigColors, fColorCount * sizeof(SkColor)); 510 buffer += fColorCount; 511 if (fColorCount > 2) { 512 for (int i = 1; i < fColorCount; i++) { 513 *buffer++ = fRecs[i].fPos; 514 } 515 } 516 SkASSERT(buffer - storage.get() == count); 517 518 /////////////////////////////////// 519 520 SK_DECLARE_STATIC_MUTEX(gMutex); 521 static SkBitmapCache* gCache; 522 // each cache cost 1K of RAM, since each bitmap will be 1x256 at 32bpp 523 static const int MAX_NUM_CACHED_GRADIENT_BITMAPS = 32; 524 SkAutoMutexAcquire ama(gMutex); 525 526 if (NULL == gCache) { 527 gCache = SkNEW_ARGS(SkBitmapCache, (MAX_NUM_CACHED_GRADIENT_BITMAPS)); 528 } 529 size_t size = count * sizeof(int32_t); 530 531 if (!gCache->find(storage.get(), size, bitmap)) { 532 // force our cahce32pixelref to be built 533 (void)this->getCache32(); 534 // 
Only expose the linear section of the cache; don't let the caller 535 // know about the padding at the end to make interpolation faster. 536 bitmap->setConfig(SkBitmap::kARGB_8888_Config, kGradient32Length, 1); 537 bitmap->setPixelRef(fCache32PixelRef); 538 539 gCache->add(storage.get(), size, *bitmap); 540 } 541} 542 543void SkGradientShaderBase::commonAsAGradient(GradientInfo* info) const { 544 if (info) { 545 if (info->fColorCount >= fColorCount) { 546 if (info->fColors) { 547 memcpy(info->fColors, fOrigColors, 548 fColorCount * sizeof(SkColor)); 549 } 550 if (info->fColorOffsets) { 551 if (fColorCount == 2) { 552 info->fColorOffsets[0] = 0; 553 info->fColorOffsets[1] = SK_Scalar1; 554 } else if (fColorCount > 2) { 555 for (int i = 0; i < fColorCount; i++) 556 info->fColorOffsets[i] = SkFixedToScalar(fRecs[i].fPos); 557 } 558 } 559 } 560 info->fColorCount = fColorCount; 561 info->fTileMode = fTileMode; 562 } 563} 564 565/////////////////////////////////////////////////////////////////////////////// 566/////////////////////////////////////////////////////////////////////////////// 567 568#include "SkEmptyShader.h" 569 570// assumes colors is SkColor* and pos is SkScalar* 571#define EXPAND_1_COLOR(count) \ 572 SkColor tmp[2]; \ 573 do { \ 574 if (1 == count) { \ 575 tmp[0] = tmp[1] = colors[0]; \ 576 colors = tmp; \ 577 pos = NULL; \ 578 count = 2; \ 579 } \ 580 } while (0) 581 582SkShader* SkGradientShader::CreateLinear(const SkPoint pts[2], 583 const SkColor colors[], 584 const SkScalar pos[], int colorCount, 585 SkShader::TileMode mode, 586 SkUnitMapper* mapper) { 587 if (NULL == pts || NULL == colors || colorCount < 1) { 588 return NULL; 589 } 590 EXPAND_1_COLOR(colorCount); 591 592 return SkNEW_ARGS(SkLinearGradient, 593 (pts, colors, pos, colorCount, mode, mapper)); 594} 595 596SkShader* SkGradientShader::CreateRadial(const SkPoint& center, SkScalar radius, 597 const SkColor colors[], 598 const SkScalar pos[], int colorCount, 599 SkShader::TileMode mode, 600 
SkUnitMapper* mapper) { 601 if (radius <= 0 || NULL == colors || colorCount < 1) { 602 return NULL; 603 } 604 EXPAND_1_COLOR(colorCount); 605 606 return SkNEW_ARGS(SkRadialGradient, 607 (center, radius, colors, pos, colorCount, mode, mapper)); 608} 609 610SkShader* SkGradientShader::CreateTwoPointRadial(const SkPoint& start, 611 SkScalar startRadius, 612 const SkPoint& end, 613 SkScalar endRadius, 614 const SkColor colors[], 615 const SkScalar pos[], 616 int colorCount, 617 SkShader::TileMode mode, 618 SkUnitMapper* mapper) { 619 if (startRadius < 0 || endRadius < 0 || NULL == colors || colorCount < 1) { 620 return NULL; 621 } 622 EXPAND_1_COLOR(colorCount); 623 624 return SkNEW_ARGS(SkTwoPointRadialGradient, 625 (start, startRadius, end, endRadius, colors, pos, 626 colorCount, mode, mapper)); 627} 628 629SkShader* SkGradientShader::CreateTwoPointConical(const SkPoint& start, 630 SkScalar startRadius, 631 const SkPoint& end, 632 SkScalar endRadius, 633 const SkColor colors[], 634 const SkScalar pos[], 635 int colorCount, 636 SkShader::TileMode mode, 637 SkUnitMapper* mapper) { 638 if (startRadius < 0 || endRadius < 0 || NULL == colors || colorCount < 1) { 639 return NULL; 640 } 641 if (start == end && startRadius == endRadius) { 642 return SkNEW(SkEmptyShader); 643 } 644 EXPAND_1_COLOR(colorCount); 645 646 return SkNEW_ARGS(SkTwoPointConicalGradient, 647 (start, startRadius, end, endRadius, colors, pos, 648 colorCount, mode, mapper)); 649} 650 651SkShader* SkGradientShader::CreateSweep(SkScalar cx, SkScalar cy, 652 const SkColor colors[], 653 const SkScalar pos[], 654 int count, SkUnitMapper* mapper) { 655 if (NULL == colors || count < 1) { 656 return NULL; 657 } 658 EXPAND_1_COLOR(count); 659 660 return SkNEW_ARGS(SkSweepGradient, (cx, cy, colors, pos, count, mapper)); 661} 662 663SK_DEFINE_FLATTENABLE_REGISTRAR_GROUP_START(SkGradientShader) 664 SK_DEFINE_FLATTENABLE_REGISTRAR_ENTRY(SkLinearGradient) 665 SK_DEFINE_FLATTENABLE_REGISTRAR_ENTRY(SkRadialGradient) 666 
SK_DEFINE_FLATTENABLE_REGISTRAR_ENTRY(SkSweepGradient) 667 SK_DEFINE_FLATTENABLE_REGISTRAR_ENTRY(SkTwoPointRadialGradient) 668 SK_DEFINE_FLATTENABLE_REGISTRAR_ENTRY(SkTwoPointConicalGradient) 669SK_DEFINE_FLATTENABLE_REGISTRAR_GROUP_END 670 671/////////////////////////////////////////////////////////////////////////////// 672 673#if SK_SUPPORT_GPU 674 675#include "effects/GrTextureStripAtlas.h" 676#include "SkGr.h" 677 678GrGLGradientStage::GrGLGradientStage(const GrProgramStageFactory& factory) 679 : INHERITED(factory) 680 , fCachedYCoord(GR_ScalarMax) 681 , fFSYUni(GrGLUniformManager::kInvalidUniformHandle) { } 682 683GrGLGradientStage::~GrGLGradientStage() { } 684 685void GrGLGradientStage::setupVariables(GrGLShaderBuilder* builder) { 686 fFSYUni = builder->addUniform(GrGLShaderBuilder::kFragment_ShaderType, 687 kFloat_GrSLType, "GradientYCoordFS"); 688} 689 690void GrGLGradientStage::setData(const GrGLUniformManager& uman, 691 const GrCustomStage& stage, 692 const GrRenderTarget*, 693 int stageNum) { 694 GrScalar yCoord = static_cast<const GrGradientEffect&>(stage).getYCoord(); 695 if (yCoord != fCachedYCoord) { 696 uman.set1f(fFSYUni, yCoord); 697 fCachedYCoord = yCoord; 698 } 699} 700 701void GrGLGradientStage::emitColorLookup(GrGLShaderBuilder* builder, 702 const char* gradientTValue, 703 const char* outputColor, 704 const char* inputColor, 705 const char* samplerName) { 706 707 SkString* code = &builder->fFSCode; 708 code->appendf("\tvec2 coord = vec2(%s, %s);\n", 709 gradientTValue, 710 builder->getUniformVariable(fFSYUni).c_str()); 711 GrGLSLMulVarBy4f(code, 1, outputColor, inputColor); 712 code->appendf("\t%s = ", outputColor); 713 builder->appendTextureLookupAndModulate(code, inputColor, samplerName, "coord"); 714 code->append(";\n"); 715} 716 717///////////////////////////////////////////////////////////////////// 718 719GrGradientEffect::GrGradientEffect(GrContext* ctx, 720 const SkGradientShaderBase& shader, 721 GrSamplerState* sampler) 722 : 
fTexture (NULL) 723 , fUseTexture (true) { 724 // TODO: check for simple cases where we don't need a texture: 725 //GradientInfo info; 726 //shader.asAGradient(&info); 727 //if (info.fColorCount == 2) { ... 728 729 SkBitmap bitmap; 730 shader.getGradientTableBitmap(&bitmap); 731 732 GrTextureStripAtlas::Desc desc; 733 desc.fWidth = bitmap.width(); 734 desc.fHeight = 32; 735 desc.fRowHeight = bitmap.height(); 736 desc.fContext = ctx; 737 desc.fConfig = SkBitmapConfig2GrPixelConfig(bitmap.config()); 738 fAtlas = GrTextureStripAtlas::GetAtlas(desc); 739 GrAssert(NULL != fAtlas); 740 741 fRow = fAtlas->lockRow(bitmap); 742 if (-1 != fRow) { 743 fYCoord = fAtlas->getYOffset(fRow) + GR_ScalarHalf * 744 fAtlas->getVerticalScaleFactor(); 745 fTexture = fAtlas->getTexture(); 746 } else { 747 fTexture = GrLockCachedBitmapTexture(ctx, bitmap, sampler->textureParams()); 748 SkSafeRef(fTexture); 749 fYCoord = GR_ScalarHalf; 750 751 // Unlock immediately, this is not great, but we don't have a way of 752 // knowing when else to unlock it currently, so it may get purged from 753 // the cache, but it'll still be ref'd until it's no longer being used. 754 GrUnlockCachedBitmapTexture(fTexture); 755 } 756} 757 758GrGradientEffect::~GrGradientEffect() { 759 if (this->useAtlas()) { 760 fAtlas->unlockRow(fRow); 761 } else { 762 SkSafeUnref(fTexture); 763 } 764} 765 766unsigned int GrGradientEffect::numTextures() const { 767 return fUseTexture ? 
1 : 0; 768} 769 770GrTexture* GrGradientEffect::texture(unsigned int index) 771 const { 772 GrAssert(fUseTexture && 0 == index); 773 return fTexture; 774} 775 776int GrGradientEffect::RandomGradientParams(SkRandom* random, 777 SkColor colors[], 778 SkScalar** stops, 779 SkShader::TileMode* tm) { 780 int outColors = random->nextRangeU(1, kMaxRandomGradientColors); 781 782 // if one color, omit stops, otherwise randomly decide whether or not to 783 if (outColors == 1 || (outColors >= 2 && random->nextBool())) { 784 *stops = NULL; 785 } 786 787 GrScalar stop = 0.f; 788 for (int i = 0; i < outColors; ++i) { 789 colors[i] = random->nextU(); 790 if (NULL != *stops) { 791 (*stops)[i] = stop; 792 stop = i < outColors - 1 ? stop + random->nextUScalar1() * (1.f - stop) : 1.f; 793 } 794 } 795 *tm = static_cast<SkShader::TileMode>(random->nextULessThan(SkShader::kTileModeCount)); 796 797 return outColors; 798} 799 800#endif 801