/*
 * Copyright 2014 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "SkTextureCompressor.h"
#include "SkTextureCompressor_Blitter.h"

#include "SkBlitter.h"
#include "SkEndian.h"

// #define COMPRESS_R11_EAC_SLOW 1
// #define COMPRESS_R11_EAC_FAST 1
#define COMPRESS_R11_EAC_FASTEST 1

// Blocks compressed into R11 EAC are represented as follows:
// 0000000000000000000000000000000000000000000000000000000000000000
// |base_cw|mul|mod| ----------------- indices -------------------
//
// To reconstruct the value of a given pixel, we use the formula:
// clamp[0, 2047](base_cw * 8 + 4 + mod_val*mul*8)
//
// mod_val is chosen from a palette of values based on the index of the
// given pixel. The palette is chosen by the value stored in mod.
// This formula returns a value between 0 and 2047, which is converted
// to a float from 0 to 1 in OpenGL.
//
// If mul is zero, then we set mul = 1/8, so that the formula becomes
// clamp[0, 2047](base_cw * 8 + 4 + mod_val)

static const int kNumR11EACPalettes = 16;
static const int kR11EACPaletteSize = 8;
static const int kR11EACModifierPalettes[kNumR11EACPalettes][kR11EACPaletteSize] = {
    {-3, -6, -9, -15, 2, 5, 8, 14},
    {-3, -7, -10, -13, 2, 6, 9, 12},
    {-2, -5, -8, -13, 1, 4, 7, 12},
    {-2, -4, -6, -13, 1, 3, 5, 12},
    {-3, -6, -8, -12, 2, 5, 7, 11},
    {-3, -7, -9, -11, 2, 6, 8, 10},
    {-4, -7, -8, -11, 3, 6, 7, 10},
    {-3, -5, -8, -11, 2, 4, 7, 10},
    {-2, -6, -8, -10, 1, 5, 7, 9},
    {-2, -5, -8, -10, 1, 4, 7, 9},
    {-2, -4, -8, -10, 1, 3, 7, 9},
    {-2, -5, -7, -10, 1, 4, 6, 9},
    {-3, -4, -7, -10, 2, 3, 6, 9},
    {-1, -2, -3, -10, 0, 1, 2, 9},
    {-4, -6, -8, -9, 3, 5, 7, 8},
    {-3, -5, -7, -9, 2, 4, 6, 8}
};

#if COMPRESS_R11_EAC_SLOW

// Pack the base codeword, palette, and multiplier into the 64 bits necessary
// to decode it.
static uint64_t pack_r11eac_block(uint16_t base_cw, uint16_t palette, uint16_t multiplier,
                                  uint64_t indices) {
    SkASSERT(palette < 16);
    SkASSERT(multiplier < 16);
    SkASSERT(indices < (static_cast<uint64_t>(1) << 48));

    const uint64_t b = static_cast<uint64_t>(base_cw) << 56;
    const uint64_t m = static_cast<uint64_t>(multiplier) << 52;
    const uint64_t p = static_cast<uint64_t>(palette) << 48;
    return SkEndian_SwapBE64(b | m | p | indices);
}

// Given a base codeword, a modifier, and a multiplier, compute the proper
// pixel value in the range [0, 2047].
static uint16_t compute_r11eac_pixel(int base_cw, int modifier, int multiplier) {
    int ret = (base_cw * 8 + 4) + (modifier * multiplier * 8);
    return (ret > 2047)? 2047 : ((ret < 0)? 0 : ret);
}

// Compress a block into R11 EAC format.
// The compression works as follows:
// 1. Find the center of the span of the block's values. Use this as the base codeword.
// 2. Choose a multiplier based roughly on the size of the span of block values.
// 3. Iterate through each palette and choose the one with the most accurate
//    modifiers.
static inline uint64_t compress_heterogeneous_r11eac_block(const uint8_t block[16]) {
    // Find the center of the data...
    uint16_t bmin = block[0];
    uint16_t bmax = block[0];
    for (int i = 1; i < 16; ++i) {
        bmin = SkTMin<uint16_t>(bmin, block[i]);
        bmax = SkTMax<uint16_t>(bmax, block[i]);
    }

    uint16_t center = (bmax + bmin) >> 1;
    SkASSERT(center <= 255);

    // Based on the min and max, we can guesstimate a proper multiplier.
    // This is kind of a magic choice to start with.
    uint16_t multiplier = (bmax - center) / 10;

    // Now convert the block to 11 bits and transpose it to match
    // the proper layout
    uint16_t cblock[16];
    for (int i = 0; i < 4; ++i) {
        for (int j = 0; j < 4; ++j) {
            int srcIdx = i*4+j;
            int dstIdx = j*4+i;
            cblock[dstIdx] = (block[srcIdx] << 3) | (block[srcIdx] >> 5);
        }
    }

    // Finally, choose the proper palette and indices
    uint32_t bestError = 0xFFFFFFFF;
    uint64_t bestIndices = 0;
    uint16_t bestPalette = 0;
    for (uint16_t paletteIdx = 0; paletteIdx < kNumR11EACPalettes; ++paletteIdx) {
        const int *palette = kR11EACModifierPalettes[paletteIdx];

        // Iterate through each pixel to find the best palette index
        // and update the indices with the choice. Also store the error
        // for this palette to be compared against the best error...
        uint32_t error = 0;
        uint64_t indices = 0;
        for (int pixelIdx = 0; pixelIdx < 16; ++pixelIdx) {
            const uint16_t pixel = cblock[pixelIdx];

            // Iterate through each palette value to find the best index
            // for this particular pixel for this particular palette.
            uint16_t bestPixelError =
                abs_diff(pixel, compute_r11eac_pixel(center, palette[0], multiplier));
            int bestIndex = 0;
            for (int i = 1; i < kR11EACPaletteSize; ++i) {
                const uint16_t p = compute_r11eac_pixel(center, palette[i], multiplier);
                const uint16_t perror = abs_diff(pixel, p);

                // Is this index better?
                if (perror < bestPixelError) {
                    bestIndex = i;
                    bestPixelError = perror;
                }
            }

            SkASSERT(bestIndex < 8);

            error += bestPixelError;
            indices <<= 3;
            indices |= bestIndex;
        }

        SkASSERT(indices < (static_cast<uint64_t>(1) << 48));

        // Is this palette better?
        if (error < bestError) {
            bestPalette = paletteIdx;
            bestIndices = indices;
            bestError = error;
        }
    }

    // Finally, pack everything together...
    return pack_r11eac_block(center, bestPalette, multiplier, bestIndices);
}
#endif // COMPRESS_R11_EAC_SLOW

#if COMPRESS_R11_EAC_FAST
// This function takes into account that most blocks that we compress have a gradation from
// fully opaque to fully transparent. The compression scheme works by selecting the
// palette and multiplier that has the tightest fit to the 0-255 range. This is encoded
// as the block header (0x8490). The indices are then selected by considering the top
// three bits of each alpha value. For alpha masks, this reduces the dynamic range from
// 17 to 8, but the quality is still acceptable.
//
// There are a few caveats that need to be taken care of...
//
// 1. The block is read in as scanlines, so the indices are stored as:
//      0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
//    However, the decompression routine reads them in column-major order, so they
//    need to be packed as:
//      0 4 8 12 1 5 9 13 2 6 10 14 3 7 11 15
//    So when reading, they must be transposed.
//
// 2. We cannot use the top three bits as an index directly, since the R11 EAC palettes
//    above store the modulation values first decreasing and then increasing:
//      e.g. {-3, -6, -9, -15, 2, 5, 8, 14}
//    Hence, we need to convert the indices with the following mapping:
//      From: 0 1 2 3 4 5 6 7
//      To:   3 2 1 0 4 5 6 7
static inline uint64_t compress_heterogeneous_r11eac_block(const uint8_t block[16]) {
    uint64_t retVal = static_cast<uint64_t>(0x8490) << 48;
    for(int i = 0; i < 4; ++i) {
        for(int j = 0; j < 4; ++j) {
            const int shift = 45-3*(j*4+i);
            SkASSERT(shift <= 45);
            const uint64_t idx = block[i*4+j] >> 5;
            SkASSERT(idx < 8);

            // !SPEED! This is slightly faster than having an if-statement.
            switch(idx) {
                case 0:
                case 1:
                case 2:
                case 3:
                    retVal |= (3-idx) << shift;
                    break;
                default:
                    retVal |= idx << shift;
                    break;
            }
        }
    }

    return SkEndian_SwapBE64(retVal);
}
#endif // COMPRESS_R11_EAC_FAST

#if (COMPRESS_R11_EAC_SLOW) || (COMPRESS_R11_EAC_FAST)
static uint64_t compress_r11eac_block(const uint8_t block[16]) {
    // Is the block a solid color?
    bool solid = true;
    for (int i = 1; i < 16; ++i) {
        if (block[i] != block[0]) {
            solid = false;
            break;
        }
    }

    if (solid) {
        switch(block[0]) {
            // Fully transparent? We know the encoding...
            case 0:
                // (0x0020 << 48) produces the following:
                // base_cw: 0
                // mod: 0, palette: {-3, -6, -9, -15, 2, 5, 8, 14}
                // multiplier: 2
                // mod_val: -3
                //
                // this gives the following formula:
                // clamp[0, 2047](0*8+4+(-3)*2*8) = 0
                //
                // Furthermore, it is impervious to endianness:
                // 0x0020000000002000ULL
                // Will produce one pixel with index 2, which gives:
                // clamp[0, 2047](0*8+4+(-9)*2*8) = 0
                return 0x0020000000002000ULL;

            // Fully opaque? We know this encoding too...
            case 255:
                // -1 produces the following:
                // base_cw: 255
                // mod: 15, palette: {-3, -5, -7, -9, 2, 4, 6, 8}
                // multiplier: 15
                // mod_val: 8
                //
                // this gives the following formula:
                // clamp[0, 2047](255*8+4+8*15*8) = clamp[0, 2047](3004) = 2047
                return 0xFFFFFFFFFFFFFFFFULL;

            default:
                // !TODO! krajcevski:
                // This will probably never happen, since we're using this format
                // primarily for compressing alpha maps. Usually, blocks that are
                // neither fully opaque nor fully transparent are not a solid
                // intermediate color. If we notice that they are, then we can
                // add another optimization...
                break;
        }
    }

    return compress_heterogeneous_r11eac_block(block);
}

// This function is used by R11 EAC to compress 4x4 blocks
// of 8-bit alpha into 64-bit values that comprise the compressed data.
// We need to make sure that the dimensions of the src pixels are divisible
// by 4, and copy 4x4 blocks one at a time for compression.
typedef uint64_t (*A84x4To64BitProc)(const uint8_t block[]);

static bool compress_4x4_a8_to_64bit(uint8_t* dst, const uint8_t* src,
                                     int width, int height, int rowBytes,
                                     A84x4To64BitProc proc) {
    // Make sure that our data is well-formed enough to be considered for compression
    if (0 == width || 0 == height || (width % 4) != 0 || (height % 4) != 0) {
        return false;
    }

    int blocksX = width >> 2;
    int blocksY = height >> 2;

    uint8_t block[16];
    uint64_t* encPtr = reinterpret_cast<uint64_t*>(dst);
    for (int y = 0; y < blocksY; ++y) {
        for (int x = 0; x < blocksX; ++x) {
            // Load block
            for (int k = 0; k < 4; ++k) {
                memcpy(block + k*4, src + k*rowBytes + 4*x, 4);
            }

            // Compress it
            *encPtr = proc(block);
            ++encPtr;
        }
        src += 4 * rowBytes;
    }

    return true;
}
#endif // (COMPRESS_R11_EAC_SLOW) || (COMPRESS_R11_EAC_FAST)

// This function converts an integer containing four bytes of alpha
// values into an integer containing four bytes of indices into R11 EAC.
// Note, there needs to be a mapping of indices:
//   0 1 2 3 4 5 6 7
//   3 2 1 0 4 5 6 7
//
// To compute this, we first negate each byte, and then add three, which
// gives the mapping
//   3 2 1 0 -1 -2 -3 -4
//
// Then we mask out the negative values, take their absolute value, and
// add three.
//
// Most of the voodoo in this function comes from Hacker's Delight, section 2-18
static inline uint32_t convert_indices(uint32_t x) {
    // Take the top three bits...
    x = (x & 0xE0E0E0E0) >> 5;

    // Negate...
    x = ~((0x80808080 - x) ^ 0x7F7F7F7F);

    // Add three
    const uint32_t s = (x & 0x7F7F7F7F) + 0x03030303;
    x = ((x ^ 0x03030303) & 0x80808080) ^ s;

    // Absolute value
    const uint32_t a = x & 0x80808080;
    const uint32_t b = a >> 7;

    // Aside: mask negatives (m is three if the byte was negative)
    const uint32_t m = (a >> 6) | b;

    // .. continue absolute value
    x = (x ^ ((a - b) | a)) + b;

    // Add three
    return x + m;
}

#if COMPRESS_R11_EAC_FASTEST
template<unsigned shift>
static inline uint64_t swap_shift(uint64_t x, uint64_t mask) {
    const uint64_t t = (x ^ (x >> shift)) & mask;
    return x ^ t ^ (t << shift);
}

static inline uint64_t interleave6(uint64_t topRows, uint64_t bottomRows) {
    // If our 3-bit block indices are laid out as:
    // a b c d
    // e f g h
    // i j k l
    // m n o p
    //
    // then this function expects topRows to contain the indices of the first two
    // rows interleaved with each other, and bottomRows to contain the indices of
    // the last two rows interleaved with each other, in their least significant
    // bits. In other words...
    //
    // If the architecture is big endian, then topRows (a) and bottomRows (b) will
    // contain the following:
    // Bits 31-0:
    // a: 00 a e 00 b f 00 c g 00 d h
    // b: 00 i m 00 j n 00 k o 00 l p
    //
    // If the architecture is little endian, then topRows and bottomRows will contain
    // the following:
    // Bits 31-0:
    // a: 00 d h 00 c g 00 b f 00 a e
    // b: 00 l p 00 k o 00 j n 00 i m
    //
    // This function returns a 48-bit packing of the form:
    // a e i m b f j n c g k o d h l p
    //
    // !SPEED! this function might be even faster if certain SIMD intrinsics are
    // used...

    // For both architectures, we can figure out a packing of the bits by
    // using a shuffle and a few shift-rotates...
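    //
    // (Notation: in the bit diagrams above and below, each letter denotes one
    // 3-bit index and each "00" denotes two zero padding bits; e.g. the group
    // "00 a e" is one byte holding the two indices a and e.)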
    uint64_t x = (static_cast<uint64_t>(topRows) << 32) | static_cast<uint64_t>(bottomRows);

    // x: 00 a e 00 b f 00 c g 00 d h 00 i m 00 j n 00 k o 00 l p

    x = swap_shift<10>(x, 0x3FC0003FC00000ULL);

    // x: b f 00 00 00 a e c g i m 00 00 00 d h j n 00 k o 00 l p

    x = (x | ((x << 52) & (0x3FULL << 52)) | ((x << 20) & (0x3FULL << 28))) >> 16;

    // x: 00 00 00 00 00 00 00 00 b f l p a e c g i m k o d h j n

    x = swap_shift<6>(x, 0xFC0000ULL);

#if defined (SK_CPU_BENDIAN)
    // x: 00 00 00 00 00 00 00 00 b f l p a e i m c g k o d h j n

    x = swap_shift<36>(x, 0x3FULL);

    // x: 00 00 00 00 00 00 00 00 b f j n a e i m c g k o d h l p

    x = swap_shift<12>(x, 0xFFF000000ULL);
#else
    // If our CPU is little endian, then the above logic will
    // produce the following indices:
    // x: 00 00 00 00 00 00 00 00 c g i m d h l p b f j n a e k o

    x = swap_shift<36>(x, 0xFC0ULL);

    // x: 00 00 00 00 00 00 00 00 a e i m d h l p b f j n c g k o

    x = (x & (0xFFFULL << 36)) | ((x & 0xFFFFFFULL) << 12) | ((x >> 24) & 0xFFFULL);
#endif

    // x: 00 00 00 00 00 00 00 00 a e i m b f j n c g k o d h l p
    return x;
}

// This function follows the same basic procedure as compress_heterogeneous_r11eac_block
// above when COMPRESS_R11_EAC_FAST is defined, but it avoids a few loads/stores and
// tries to optimize where it can using SIMD.
static uint64_t compress_r11eac_block_fast(const uint8_t* src, int rowBytes) {
    // Store each row of alpha values in an integer
    const uint32_t alphaRow1 = *(reinterpret_cast<const uint32_t*>(src));
    const uint32_t alphaRow2 = *(reinterpret_cast<const uint32_t*>(src + rowBytes));
    const uint32_t alphaRow3 = *(reinterpret_cast<const uint32_t*>(src + 2*rowBytes));
    const uint32_t alphaRow4 = *(reinterpret_cast<const uint32_t*>(src + 3*rowBytes));

    // Check for solid blocks. The explanations for these values
    // can be found in the comments of compress_r11eac_block above.
    if (alphaRow1 == alphaRow2 && alphaRow1 == alphaRow3 && alphaRow1 == alphaRow4) {
        if (0 == alphaRow1) {
            // Fully transparent block
            return 0x0020000000002000ULL;
        } else if (0xFFFFFFFF == alphaRow1) {
            // Fully opaque block
            return 0xFFFFFFFFFFFFFFFFULL;
        }
    }

    // Convert each integer of alpha values into an integer of indices
    const uint32_t indexRow1 = convert_indices(alphaRow1);
    const uint32_t indexRow2 = convert_indices(alphaRow2);
    const uint32_t indexRow3 = convert_indices(alphaRow3);
    const uint32_t indexRow4 = convert_indices(alphaRow4);

    // Interleave the indices from the top two rows and bottom two rows
    // prior to passing them to interleave6. Since each index is at most
    // three bits, each byte can hold two indices... The way that the
    // compression scheme expects the packing allows us to efficiently pack
    // the top two rows and bottom two rows. Interleaving each 6-bit sequence
    // and tightly packing it into a uint64_t is a little trickier, which is
    // taken care of in interleave6.
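    //
    // For example (hypothetical values): if a byte of indexRow1 holds the index
    // a = 0b101 and the corresponding byte of indexRow2 holds e = 0b011, then
    // (indexRow1 << 3) | indexRow2 leaves that byte as 0b00101011, i.e. the top
    // row's index in bits 5-3 and the bottom row's index in bits 2-0.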
    const uint32_t r1r2 = (indexRow1 << 3) | indexRow2;
    const uint32_t r3r4 = (indexRow3 << 3) | indexRow4;
    const uint64_t indices = interleave6(r1r2, r3r4);

    // Return the packed indices in the least significant bits with the magic header
    return SkEndian_SwapBE64(0x8490000000000000ULL | indices);
}

static bool compress_a8_to_r11eac_fast(uint8_t* dst, const uint8_t* src,
                                       int width, int height, int rowBytes) {
    // Make sure that our data is well-formed enough to be considered for compression
    if (0 == width || 0 == height || (width % 4) != 0 || (height % 4) != 0) {
        return false;
    }

    const int blocksX = width >> 2;
    const int blocksY = height >> 2;

    uint64_t* encPtr = reinterpret_cast<uint64_t*>(dst);
    for (int y = 0; y < blocksY; ++y) {
        for (int x = 0; x < blocksX; ++x) {
            // Compress it
            *encPtr = compress_r11eac_block_fast(src + 4*x, rowBytes);
            ++encPtr;
        }
        src += 4 * rowBytes;
    }
    return true;
}
#endif // COMPRESS_R11_EAC_FASTEST

////////////////////////////////////////////////////////////////////////////////
//
// Utility functions used by the blitter
//
////////////////////////////////////////////////////////////////////////////////

// The R11 EAC format expects that indices are given in column-major order. Since
// we receive alpha values in raster order, this usually means that we have to use
// interleave6 above to properly pack our indices. However, if our indices come from
// the blitter, then each integer will be a column of indices, and hence can be
// efficiently packed. This function takes the bottom three bits of each byte and
// places them in the least significant 12 bits of the resulting integer.
static inline uint32_t pack_indices_vertical(uint32_t x) {
#if defined (SK_CPU_BENDIAN)
    return
        (x & 7) |
        ((x >> 5) & (7 << 3)) |
        ((x >> 10) & (7 << 6)) |
        ((x >> 15) & (7 << 9));
#else
    return
        ((x >> 24) & 7) |
        ((x >> 13) & (7 << 3)) |
        ((x >> 2) & (7 << 6)) |
        ((x << 9) & (7 << 9));
#endif
}
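
// A small worked example (hypothetical column): if the four indices of a column,
// read from top to bottom, are 5, 1, 7 and 2, then pack_indices_vertical returns
// (5 << 9) | (1 << 6) | (7 << 3) | 2 = 0xA7A, with the topmost pixel's index in
// the most significant three of the twelve bits. Both the big-endian and
// little-endian paths above produce this same ordering.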

// This function computes the compressed form of a block given as four columns of
// alpha values and writes it to dstPtr. Each column is assumed to be loaded from
// top to bottom, and hence must first be converted to indices and then packed into
// the resulting 64-bit integer.
inline void compress_block_vertical(uint8_t* dstPtr, const uint8_t *block) {

    const uint32_t* src = reinterpret_cast<const uint32_t*>(block);
    uint64_t* dst = reinterpret_cast<uint64_t*>(dstPtr);

    const uint32_t alphaColumn0 = src[0];
    const uint32_t alphaColumn1 = src[1];
    const uint32_t alphaColumn2 = src[2];
    const uint32_t alphaColumn3 = src[3];

    if (alphaColumn0 == alphaColumn1 &&
        alphaColumn2 == alphaColumn3 &&
        alphaColumn0 == alphaColumn2) {

        if (0 == alphaColumn0) {
            // Transparent
            *dst = 0x0020000000002000ULL;
            return;
        }
        else if (0xFFFFFFFF == alphaColumn0) {
            // Opaque
            *dst = 0xFFFFFFFFFFFFFFFFULL;
            return;
        }
    }

    const uint32_t indexColumn0 = convert_indices(alphaColumn0);
    const uint32_t indexColumn1 = convert_indices(alphaColumn1);
    const uint32_t indexColumn2 = convert_indices(alphaColumn2);
    const uint32_t indexColumn3 = convert_indices(alphaColumn3);

    const uint32_t packedIndexColumn0 = pack_indices_vertical(indexColumn0);
    const uint32_t packedIndexColumn1 = pack_indices_vertical(indexColumn1);
    const uint32_t packedIndexColumn2 = pack_indices_vertical(indexColumn2);
    const uint32_t packedIndexColumn3 = pack_indices_vertical(indexColumn3);

    *dst = SkEndian_SwapBE64(0x8490000000000000ULL |
                             (static_cast<uint64_t>(packedIndexColumn0) << 36) |
                             (static_cast<uint64_t>(packedIndexColumn1) << 24) |
                             static_cast<uint64_t>(packedIndexColumn2 << 12) |
                             static_cast<uint64_t>(packedIndexColumn3));
}

static inline int get_r11_eac_index(uint64_t block, int x, int y) {
    SkASSERT(x >= 0 && x < 4);
    SkASSERT(y >= 0 && y < 4);
    const int idx = x*4 + y;
    return (block >> ((15-idx)*3)) & 0x7;
}

static void decompress_r11_eac_block(uint8_t* dst, int dstRowBytes, const uint8_t* src) {
    const uint64_t block = SkEndian_SwapBE64(*(reinterpret_cast<const uint64_t *>(src)));

    const int base_cw = (block >> 56) & 0xFF;
    const int mod = (block >> 52) & 0xF;
    const int palette_idx = (block >> 48) & 0xF;

    const int* palette = kR11EACModifierPalettes[palette_idx];

    for (int j = 0; j < 4; ++j) {
        for (int i = 0; i < 4; ++i) {
            const int idx = get_r11_eac_index(block, i, j);
            const int val = base_cw*8 + 4 + palette[idx]*mod*8;
            if (val < 0) {
                dst[i] = 0;
            } else if (val > 2047) {
                dst[i] = 0xFF;
            } else {
                dst[i] = (val >> 3) & 0xFF;
            }
        }
        dst += dstRowBytes;
    }
}

// This is the type passed as the CompressorType argument of the compressed
// blitter for the R11 EAC format. The static functions required to be in this
// struct are documented in SkTextureCompressor_Blitter.h
struct CompressorR11EAC {
    static inline void CompressA8Vertical(uint8_t* dst, const uint8_t* src) {
        compress_block_vertical(dst, src);
    }

    static inline void CompressA8Horizontal(uint8_t* dst, const uint8_t* src,
                                            int srcRowBytes) {
        *(reinterpret_cast<uint64_t*>(dst)) = compress_r11eac_block_fast(src, srcRowBytes);
    }

#if PEDANTIC_BLIT_RECT
    static inline void UpdateBlock(uint8_t* dst, const uint8_t* src, int srcRowBytes,
                                   const uint8_t* mask) {
        // TODO: krajcevski
        // The implementation of this function should be similar to that of LATC, since
        // the R11 EAC indices directly correspond to pixel values.
612 SkFAIL("Implement me!"); 613 } 614#endif 615}; 616 617//////////////////////////////////////////////////////////////////////////////// 618 619namespace SkTextureCompressor { 620 621bool CompressA8ToR11EAC(uint8_t* dst, const uint8_t* src, int width, int height, int rowBytes) { 622 623#if (COMPRESS_R11_EAC_SLOW) || (COMPRESS_R11_EAC_FAST) 624 625 return compress_4x4_a8_to_64bit(dst, src, width, height, rowBytes, compress_r11eac_block); 626 627#elif COMPRESS_R11_EAC_FASTEST 628 629 return compress_a8_to_r11eac_fast(dst, src, width, height, rowBytes); 630 631#else 632#error "Must choose R11 EAC algorithm" 633#endif 634} 635 636SkBlitter* CreateR11EACBlitter(int width, int height, void* outputBuffer, 637 SkTBlitterAllocator* allocator) { 638 639 if ((width % 4) != 0 || (height % 4) != 0) { 640 return NULL; 641 } 642 643 // Memset the output buffer to an encoding that decodes to zero. We must do this 644 // in order to avoid having uninitialized values in the buffer if the blitter 645 // decides not to write certain scanlines (and skip entire rows of blocks). 646 // In the case of R11, we use the encoding from recognizing all zero pixels from above. 647 const int nBlocks = (width * height / 16); // 4x4 pixel blocks. 648 uint64_t *dst = reinterpret_cast<uint64_t *>(outputBuffer); 649 for (int i = 0; i < nBlocks; ++i) { 650 *dst = 0x0020000000002000ULL; 651 ++dst; 652 } 653 654 return allocator->createT< 655 SkTCompressedAlphaBlitter<4, 8, CompressorR11EAC>, int, int, void*> 656 (width, height, outputBuffer); 657} 658 659void DecompressR11EAC(uint8_t* dst, int dstRowBytes, const uint8_t* src, int width, int height) { 660 for (int j = 0; j < height; j += 4) { 661 for (int i = 0; i < width; i += 4) { 662 decompress_r11_eac_block(dst + i, dstRowBytes, src); 663 src += 8; 664 } 665 dst += 4 * dstRowBytes; 666 } 667} 668 669} // namespace SkTextureCompressor 670