/*
 * Copyright 2011 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */
#include "SkBenchmark.h"
#include "SkMatrix.h"
#include "SkMatrixUtils.h"
#include "SkRandom.h"
#include "SkString.h"

class MatrixBench : public SkBenchmark {
    SkString    fName;
public:
    MatrixBench(const char name[]) {
        fName.printf("matrix_%s", name);
    }

    virtual bool isSuitableFor(Backend backend) SK_OVERRIDE {
        return backend == kNonRendering_Backend;
    }

    virtual void performTest() = 0;

protected:
    virtual int mulLoopCount() const { return 1; }

    virtual const char* onGetName() {
        return fName.c_str();
    }

    virtual void onDraw(const int loops, SkCanvas*) {
        for (int i = 0; i < loops; i++) {
            this->performTest();
        }
    }

private:
    typedef SkBenchmark INHERITED;
};

// We want to stop the compiler from eliminating code that it thinks is a no-op,
// so we have a non-static global we increment, hoping that will convince the
// compiler to execute everything.
int gMatrixBench_NonStaticGlobal;

#define always_do(pred)                     \
    do {                                    \
        if (pred) {                         \
            ++gMatrixBench_NonStaticGlobal; \
        }                                   \
    } while (0)

class EqualsMatrixBench : public MatrixBench {
public:
    EqualsMatrixBench() : INHERITED("equals") {}
protected:
    virtual void performTest() {
        SkMatrix m0, m1, m2;

        m0.reset();
        m1.reset();
        m2.reset();
        always_do(m0 == m1);
        always_do(m1 == m2);
        always_do(m2 == m0);
    }
private:
    typedef MatrixBench INHERITED;
};

class ScaleMatrixBench : public MatrixBench {
public:
    ScaleMatrixBench() : INHERITED("scale") {
        fSX = fSY = 1.5f;
        fM0.reset();
        fM1.setScale(fSX, fSY);
        fM2.setTranslate(fSX, fSY);
    }
protected:
    virtual void performTest() {
        SkMatrix m;
        m = fM0; m.preScale(fSX, fSY);
        m = fM1; m.preScale(fSX, fSY);
        m = fM2; m.preScale(fSX, fSY);
    }
private:
    SkMatrix fM0, fM1, fM2;
    SkScalar fSX, fSY;
    typedef MatrixBench INHERITED;
};

// Having unknown values in our arrays can throw off the timing a lot; perhaps
// handling NaN values is a lot slower. Either way, this helper is just meant
// to put reasonable values in our arrays.
template <typename T> void init9(T array[9]) {
    SkRandom rand;
    for (int i = 0; i < 9; i++) {
        array[i] = rand.nextSScalar1();
    }
}

// Test the performance of setConcat() in the non-perspective case,
// using float precision only.
class FloatConcatMatrixBench : public MatrixBench {
public:
    FloatConcatMatrixBench() : INHERITED("concat_floatfloat") {
        init9(mya);
        init9(myb);
        init9(myr);
    }
protected:
    virtual int mulLoopCount() const { return 4; }

    static inline void muladdmul(float a, float b, float c, float d,
                                 float* result) {
        *result = a * b + c * d;
    }

    virtual void performTest() {
        const float* a = mya;
        const float* b = myb;
        float* r = myr;
        muladdmul(a[0], b[0], a[1], b[3], &r[0]);
        muladdmul(a[0], b[1], a[1], b[4], &r[1]);
        muladdmul(a[0], b[2], a[1], b[5], &r[2]);
        r[2] += a[2];
        muladdmul(a[3], b[0], a[4], b[3], &r[3]);
        muladdmul(a[3], b[1], a[4], b[4], &r[4]);
        muladdmul(a[3], b[2], a[4], b[5], &r[5]);
        r[5] += a[5];
        r[6] = r[7] = 0.0f;
        r[8] = 1.0f;
    }
private:
    float mya[9];
    float myb[9];
    float myr[9];
    typedef MatrixBench INHERITED;
};

static inline float SkDoubleToFloat(double x) {
    return static_cast<float>(x);
}

// Test the performance of setConcat() in the non-perspective case,
// using float precision but casting up to double for intermediate
// results during the computation.
class FloatDoubleConcatMatrixBench : public MatrixBench {
public:
    FloatDoubleConcatMatrixBench() : INHERITED("concat_floatdouble") {
        init9(mya);
        init9(myb);
        init9(myr);
    }
protected:
    virtual int mulLoopCount() const { return 4; }

    static inline void muladdmul(float a, float b, float c, float d,
                                 float* result) {
        *result = SkDoubleToFloat((double)a * b + (double)c * d);
    }

    virtual void performTest() {
        const float* a = mya;
        const float* b = myb;
        float* r = myr;
        muladdmul(a[0], b[0], a[1], b[3], &r[0]);
        muladdmul(a[0], b[1], a[1], b[4], &r[1]);
        muladdmul(a[0], b[2], a[1], b[5], &r[2]);
        r[2] += a[2];
        muladdmul(a[3], b[0], a[4], b[3], &r[3]);
        muladdmul(a[3], b[1], a[4], b[4], &r[4]);
        muladdmul(a[3], b[2], a[4], b[5], &r[5]);
        r[5] += a[5];
        r[6] = r[7] = 0.0f;
        r[8] = 1.0f;
    }
private:
    float mya[9];
    float myb[9];
    float myr[9];
    typedef MatrixBench INHERITED;
};

// Test the performance of setConcat() in the non-perspective case,
// using double precision only.
class DoubleConcatMatrixBench : public MatrixBench {
public:
    DoubleConcatMatrixBench() : INHERITED("concat_double") {
        init9(mya);
        init9(myb);
        init9(myr);
    }
protected:
    virtual int mulLoopCount() const { return 4; }

    static inline void muladdmul(double a, double b, double c, double d,
                                 double* result) {
        *result = a * b + c * d;
    }

    virtual void performTest() {
        const double* a = mya;
        const double* b = myb;
        double* r = myr;
        muladdmul(a[0], b[0], a[1], b[3], &r[0]);
        muladdmul(a[0], b[1], a[1], b[4], &r[1]);
        muladdmul(a[0], b[2], a[1], b[5], &r[2]);
        r[2] += a[2];
        muladdmul(a[3], b[0], a[4], b[3], &r[3]);
        muladdmul(a[3], b[1], a[4], b[4], &r[4]);
        muladdmul(a[3], b[2], a[4], b[5], &r[5]);
        r[5] += a[5];
        r[6] = r[7] = 0.0;
        r[8] = 1.0;
    }
private:
    double mya[9];
    double myb[9];
    double myr[9];
    typedef MatrixBench INHERITED;
};

class GetTypeMatrixBench : public MatrixBench {
public:
    GetTypeMatrixBench() : INHERITED("gettype") {
        fArray[0] = (float) fRnd.nextS();
        fArray[1] = (float) fRnd.nextS();
        fArray[2] = (float) fRnd.nextS();
        fArray[3] = (float) fRnd.nextS();
        fArray[4] = (float) fRnd.nextS();
        fArray[5] = (float) fRnd.nextS();
        fArray[6] = (float) fRnd.nextS();
        fArray[7] = (float) fRnd.nextS();
        fArray[8] = (float) fRnd.nextS();
    }
protected:
    // Putting random generation of the matrix inside performTest()
    // would help us avoid anomalous runs, but takes up 25% or
    // more of the function time.
    virtual void performTest() {
        fMatrix.setAll(fArray[0], fArray[1], fArray[2],
                       fArray[3], fArray[4], fArray[5],
                       fArray[6], fArray[7], fArray[8]);
        always_do(fMatrix.getType());
        fMatrix.dirtyMatrixTypeCache();
        always_do(fMatrix.getType());
        fMatrix.dirtyMatrixTypeCache();
        always_do(fMatrix.getType());
        fMatrix.dirtyMatrixTypeCache();
        always_do(fMatrix.getType());
        fMatrix.dirtyMatrixTypeCache();
        always_do(fMatrix.getType());
        fMatrix.dirtyMatrixTypeCache();
        always_do(fMatrix.getType());
        fMatrix.dirtyMatrixTypeCache();
        always_do(fMatrix.getType());
        fMatrix.dirtyMatrixTypeCache();
        always_do(fMatrix.getType());
    }
private:
    SkMatrix fMatrix;
    float    fArray[9];
    SkRandom fRnd;
    typedef MatrixBench INHERITED;
};

class ScaleTransMixedMatrixBench : public MatrixBench {
public:
    ScaleTransMixedMatrixBench() : INHERITED("scaletrans_mixed") {
        fMatrix.setAll(fRandom.nextSScalar1(), fRandom.nextSScalar1(), fRandom.nextSScalar1(),
                       fRandom.nextSScalar1(), fRandom.nextSScalar1(), fRandom.nextSScalar1(),
                       fRandom.nextSScalar1(), fRandom.nextSScalar1(), fRandom.nextSScalar1());
        for (int i = 0; i < kCount; i++) {
            fSrc[i].fX = fRandom.nextSScalar1();
            fSrc[i].fY = fRandom.nextSScalar1();
            fDst[i].fX = fRandom.nextSScalar1();
            fDst[i].fY = fRandom.nextSScalar1();
        }
    }
protected:
    virtual void performTest() {
        SkPoint* dst = fDst;
        const SkPoint* src = fSrc;
        int count = kCount;
        float mx = fMatrix[SkMatrix::kMScaleX];
        float my = fMatrix[SkMatrix::kMScaleY];
        float tx = fMatrix[SkMatrix::kMTransX];
        float ty = fMatrix[SkMatrix::kMTransY];
        do {
            dst->fY = SkScalarMulAdd(src->fY, my, ty);
            dst->fX = SkScalarMulAdd(src->fX, mx, tx);
            src += 1;
            dst += 1;
        } while (--count);
    }
private:
    enum {
        kCount = 16
    };
    SkMatrix fMatrix;
    SkPoint  fSrc[kCount];
    SkPoint  fDst[kCount];
    SkRandom fRandom;
    typedef MatrixBench INHERITED;
};

class ScaleTransDoubleMatrixBench : public MatrixBench {
public:
    ScaleTransDoubleMatrixBench() : INHERITED("scaletrans_double") {
        init9(fMatrix);
        for (int i = 0; i < kCount; i++) {
            fSrc[i].fX = fRandom.nextSScalar1();
            fSrc[i].fY = fRandom.nextSScalar1();
            fDst[i].fX = fRandom.nextSScalar1();
            fDst[i].fY = fRandom.nextSScalar1();
        }
    }
protected:
    virtual void performTest() {
        SkPoint* dst = fDst;
        const SkPoint* src = fSrc;
        int count = kCount;
        // As doubles, on Z600 Linux systems this is 2.5x as expensive as mixed mode.
        float mx = (float) fMatrix[SkMatrix::kMScaleX];
        float my = (float) fMatrix[SkMatrix::kMScaleY];
        float tx = (float) fMatrix[SkMatrix::kMTransX];
        float ty = (float) fMatrix[SkMatrix::kMTransY];
        do {
            dst->fY = src->fY * my + ty;
            dst->fX = src->fX * mx + tx;
            src += 1;
            dst += 1;
        } while (--count);
    }
private:
    enum {
        kCount = 16
    };
    double   fMatrix[9];
    SkPoint  fSrc[kCount];
    SkPoint  fDst[kCount];
    SkRandom fRandom;
    typedef MatrixBench INHERITED;
};

class DecomposeMatrixBench : public MatrixBench {
public:
    DecomposeMatrixBench() : INHERITED("decompose") {}

protected:
    virtual void onPreDraw() {
        for (int i = 0; i < 10; ++i) {
            SkScalar rot0 = (fRandom.nextBool()) ? fRandom.nextRangeF(-180, 180) : 0.0f;
            SkScalar sx = fRandom.nextRangeF(-3000.f, 3000.f);
            SkScalar sy = (fRandom.nextBool()) ? fRandom.nextRangeF(-3000.f, 3000.f) : sx;
            SkScalar rot1 = fRandom.nextRangeF(-180, 180);
            fMatrix[i].setRotate(rot0);
            fMatrix[i].postScale(sx, sy);
            fMatrix[i].postRotate(rot1);
        }
    }

    virtual void performTest() {
        SkPoint rotation1, scale, rotation2;
        for (int i = 0; i < 10; ++i) {
            (void) SkDecomposeUpper2x2(fMatrix[i], &rotation1, &scale, &rotation2);
        }
    }
private:
    SkMatrix fMatrix[10];
    SkRandom fRandom;
    typedef MatrixBench INHERITED;
};

class InvertMapRectMatrixBench : public MatrixBench {
public:
    InvertMapRectMatrixBench(const char* name, int flags)
        : INHERITED(name)
        , fFlags(flags) {
        fMatrix.reset();
        fIteration = 0;
        if (flags & kScale_Flag) {
            fMatrix.postScale(1.5f, 2.5f);
        }
        if (flags & kTranslate_Flag) {
            fMatrix.postTranslate(1.5f, 2.5f);
        }
        if (flags & kRotate_Flag) {
            fMatrix.postRotate(45.0f);
        }
        if (flags & kPerspective_Flag) {
            fMatrix.setPerspX(1.5f);
            fMatrix.setPerspY(2.5f);
        }
        if (0 == (flags & kUncachedTypeMask_Flag)) {
            // Compute and cache the type mask up front, so performTest()
            // measures the cached path.
            fMatrix.getType();
        }
    }

    enum Flag {
        kScale_Flag            = 0x01,
        kTranslate_Flag        = 0x02,
        kRotate_Flag           = 0x04,
        kPerspective_Flag      = 0x08,
        kUncachedTypeMask_Flag = 0x10,
    };
protected:
    virtual void performTest() {
        if (fFlags & kUncachedTypeMask_Flag) {
            // This will invalidate the type mask without
            // changing the matrix.
            fMatrix.setPerspX(fMatrix.getPerspX());
        }
        SkMatrix inv;
        bool invertible = fMatrix.invert(&inv);
        SkASSERT(invertible);
        SkRect transformedRect;
        // an arbitrary, small, non-zero rect to transform
        SkRect srcRect = SkRect::MakeWH(SkIntToScalar(10), SkIntToScalar(10));
        if (invertible) {
            inv.mapRect(&transformedRect, srcRect);
        }
    }
private:
    SkMatrix fMatrix;
    int      fFlags;
    unsigned fIteration;
    typedef MatrixBench INHERITED;
};

///////////////////////////////////////////////////////////////////////////////

DEF_BENCH( return new EqualsMatrixBench(); )
DEF_BENCH( return new ScaleMatrixBench(); )
DEF_BENCH( return new FloatConcatMatrixBench(); )
DEF_BENCH( return new FloatDoubleConcatMatrixBench(); )
DEF_BENCH( return new DoubleConcatMatrixBench(); )
DEF_BENCH( return new GetTypeMatrixBench(); )
DEF_BENCH( return new DecomposeMatrixBench(); )

DEF_BENCH( return new InvertMapRectMatrixBench("invert_maprect_identity", 0); )

DEF_BENCH( return new InvertMapRectMatrixBench(
                          "invert_maprect_rectstaysrect",
                          InvertMapRectMatrixBench::kScale_Flag |
                          InvertMapRectMatrixBench::kTranslate_Flag); )

DEF_BENCH( return new InvertMapRectMatrixBench(
                          "invert_maprect_translate",
                          InvertMapRectMatrixBench::kTranslate_Flag); )

DEF_BENCH( return new InvertMapRectMatrixBench(
                          "invert_maprect_nonpersp",
                          InvertMapRectMatrixBench::kScale_Flag |
                          InvertMapRectMatrixBench::kRotate_Flag |
                          InvertMapRectMatrixBench::kTranslate_Flag); )

DEF_BENCH( return new InvertMapRectMatrixBench(
                          "invert_maprect_persp",
                          InvertMapRectMatrixBench::kPerspective_Flag); )

DEF_BENCH( return new InvertMapRectMatrixBench(
                          "invert_maprect_typemask_rectstaysrect",
                          InvertMapRectMatrixBench::kUncachedTypeMask_Flag |
                          InvertMapRectMatrixBench::kScale_Flag |
                          InvertMapRectMatrixBench::kTranslate_Flag); )

DEF_BENCH( return new InvertMapRectMatrixBench(
                          "invert_maprect_typemask_nonpersp",
                          InvertMapRectMatrixBench::kUncachedTypeMask_Flag |
                          InvertMapRectMatrixBench::kScale_Flag |
                          InvertMapRectMatrixBench::kRotate_Flag |
                          InvertMapRectMatrixBench::kTranslate_Flag); )

DEF_BENCH( return new ScaleTransMixedMatrixBench(); )
DEF_BENCH( return new ScaleTransDoubleMatrixBench(); )
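
// For reference, a minimal sketch of timing SkMatrix::setConcat() itself,
// which the hand-rolled concat_floatfloat / concat_floatdouble / concat_double
// benches above approximate. This is an illustrative assumption, not part of
// the original suite: the class name, bench name, and the particular matrices
// are hypothetical, and it is intentionally not registered with DEF_BENCH.
class SetConcatMatrixBench : public MatrixBench {
public:
    SetConcatMatrixBench() : INHERITED("concat_setconcat") {
        fA.setScale(1.5f, 2.5f);
        fB.setRotate(45.0f);
    }
protected:
    virtual void performTest() {
        SkMatrix result;
        result.setConcat(fA, fB);
        // Keep the result observable so the concat is not optimized away.
        always_do(result.getType());
    }
private:
    SkMatrix fA, fB;
    typedef MatrixBench INHERITED;
};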