// MatrixBench.cpp revision 80bacfeb4bda06541e8695bd502229727bccfea
2/*
3 * Copyright 2011 Google Inc.
4 *
5 * Use of this source code is governed by a BSD-style license that can be
6 * found in the LICENSE file.
7 */
8#include "SkBenchmark.h"
9#include "SkMatrix.h"
10#include "SkRandom.h"
11#include "SkString.h"
12
// Base class for all matrix benchmarks. Subclasses implement performTest(),
// which onDraw() invokes N * mulLoopCount() times per benchmark run.
class MatrixBench : public SkBenchmark {
    SkString    fName;   // full benchmark name: "matrix_<suffix>"
    enum { N = 100000 }; // base iteration count per onDraw()
public:
    MatrixBench(void* param, const char name[]) : INHERITED(param) {
        fName.printf("matrix_%s", name);
        // Pure CPU benchmark: nothing is actually rendered to the canvas.
        fIsRendering = false;
    }

    // The operation being timed; called in a tight loop by onDraw().
    virtual void performTest() = 0;

protected:
    // Multiplier applied to N. Subclasses whose performTest() is very cheap
    // override this so each run does enough work to time reliably.
    virtual int mulLoopCount() const { return 1; }

    virtual const char* onGetName() {
        return fName.c_str();
    }

    virtual void onDraw(SkCanvas* canvas) {
        int n = SkBENCHLOOP(N * this->mulLoopCount());
        for (int i = 0; i < n; i++) {
            this->performTest();
        }
    }

private:
    typedef SkBenchmark INHERITED;
};
41
// we want to stop the compiler from eliminating code that it thinks is a no-op
// so we have a non-static global we increment, hoping that will convince the
// compiler to execute everything
int gMatrixBench_NonStaticGlobal;

// Evaluate `pred` and fold the result into an observable side effect
// (incrementing the global above) so the optimizer cannot discard the
// expression being benchmarked.
#define always_do(pred)                     \
    do {                                    \
        if (pred) {                         \
            ++gMatrixBench_NonStaticGlobal; \
        }                                   \
    } while (0)
53
54class EqualsMatrixBench : public MatrixBench {
55public:
56    EqualsMatrixBench(void* param) : INHERITED(param, "equals") {}
57protected:
58    virtual void performTest() {
59        SkMatrix m0, m1, m2;
60
61        m0.reset();
62        m1.reset();
63        m2.reset();
64        always_do(m0 == m1);
65        always_do(m1 == m2);
66        always_do(m2 == m0);
67    }
68private:
69    typedef MatrixBench INHERITED;
70};
71
72class ScaleMatrixBench : public MatrixBench {
73public:
74    ScaleMatrixBench(void* param) : INHERITED(param, "scale") {
75        fSX = fSY = SkFloatToScalar(1.5f);
76        fM0.reset();
77        fM1.setScale(fSX, fSY);
78        fM2.setTranslate(fSX, fSY);
79    }
80protected:
81    virtual void performTest() {
82        SkMatrix m;
83        m = fM0; m.preScale(fSX, fSY);
84        m = fM1; m.preScale(fSX, fSY);
85        m = fM2; m.preScale(fSX, fSY);
86    }
87private:
88    SkMatrix fM0, fM1, fM2;
89    SkScalar fSX, fSY;
90    typedef MatrixBench INHERITED;
91};
92
93// having unknown values in our arrays can throw off the timing a lot, perhaps
94// handling NaN values is a lot slower. Anyway, this guy is just meant to put
95// reasonable values in our arrays.
96template <typename T> void init9(T array[9]) {
97    SkRandom rand;
98    for (int i = 0; i < 9; i++) {
99        array[i] = rand.nextSScalar1();
100    }
101}
102
103// Test the performance of setConcat() non-perspective case:
104// using floating point precision only.
105class FloatConcatMatrixBench : public MatrixBench {
106public:
107    FloatConcatMatrixBench(void* p) : INHERITED(p, "concat_floatfloat") {
108        init9(mya);
109        init9(myb);
110        init9(myr);
111    }
112protected:
113    virtual int mulLoopCount() const { return 4; }
114
115    static inline void muladdmul(float a, float b, float c, float d,
116                                   float* result) {
117      *result = a * b + c * d;
118    }
119    virtual void performTest() {
120        const float* a = mya;
121        const float* b = myb;
122        float* r = myr;
123        muladdmul(a[0], b[0], a[1], b[3], &r[0]);
124        muladdmul(a[0], b[1], a[1], b[4], &r[1]);
125        muladdmul(a[0], b[2], a[1], b[5], &r[2]);
126        r[2] += a[2];
127        muladdmul(a[3], b[0], a[4], b[3], &r[3]);
128        muladdmul(a[3], b[1], a[4], b[4], &r[4]);
129        muladdmul(a[3], b[2], a[4], b[5], &r[5]);
130        r[5] += a[5];
131        r[6] = r[7] = 0.0f;
132        r[8] = 1.0f;
133    }
134private:
135    float mya [9];
136    float myb [9];
137    float myr [9];
138    typedef MatrixBench INHERITED;
139};
140
// Narrow a double to float; kept as a named helper so the precision-losing
// conversion site is explicit and greppable.
static inline float SkDoubleToFloat(double x) {
    const float narrowed = static_cast<float>(x);
    return narrowed;
}
144
// Test the performance of setConcat() non-perspective case:
// using floating point precision but casting up to double for
// intermediate results during computations.
148class FloatDoubleConcatMatrixBench : public MatrixBench {
149public:
150    FloatDoubleConcatMatrixBench(void* p) : INHERITED(p, "concat_floatdouble") {
151        init9(mya);
152        init9(myb);
153        init9(myr);
154    }
155protected:
156    virtual int mulLoopCount() const { return 4; }
157
158    static inline void muladdmul(float a, float b, float c, float d,
159                                   float* result) {
160      *result = SkDoubleToFloat((double)a * b + (double)c * d);
161    }
162    virtual void performTest() {
163        const float* a = mya;
164        const float* b = myb;
165        float* r = myr;
166        muladdmul(a[0], b[0], a[1], b[3], &r[0]);
167        muladdmul(a[0], b[1], a[1], b[4], &r[1]);
168        muladdmul(a[0], b[2], a[1], b[5], &r[2]);
169        r[2] += a[2];
170        muladdmul(a[3], b[0], a[4], b[3], &r[3]);
171        muladdmul(a[3], b[1], a[4], b[4], &r[4]);
172        muladdmul(a[3], b[2], a[4], b[5], &r[5]);
173        r[5] += a[5];
174        r[6] = r[7] = 0.0f;
175        r[8] = 1.0f;
176    }
177private:
178    float mya [9];
179    float myb [9];
180    float myr [9];
181    typedef MatrixBench INHERITED;
182};
183
184// Test the performance of setConcat() non-perspective case:
185// using double precision only.
186class DoubleConcatMatrixBench : public MatrixBench {
187public:
188    DoubleConcatMatrixBench(void* p) : INHERITED(p, "concat_double") {
189        init9(mya);
190        init9(myb);
191        init9(myr);
192    }
193protected:
194    virtual int mulLoopCount() const { return 4; }
195
196    static inline void muladdmul(double a, double b, double c, double d,
197                                   double* result) {
198      *result = a * b + c * d;
199    }
200    virtual void performTest() {
201        const double* a = mya;
202        const double* b = myb;
203        double* r = myr;
204        muladdmul(a[0], b[0], a[1], b[3], &r[0]);
205        muladdmul(a[0], b[1], a[1], b[4], &r[1]);
206        muladdmul(a[0], b[2], a[1], b[5], &r[2]);
207        r[2] += a[2];
208        muladdmul(a[3], b[0], a[4], b[3], &r[3]);
209        muladdmul(a[3], b[1], a[4], b[4], &r[4]);
210        muladdmul(a[3], b[2], a[4], b[5], &r[5]);
211        r[5] += a[5];
212        r[6] = r[7] = 0.0;
213        r[8] = 1.0;
214    }
215private:
216    double mya [9];
217    double myb [9];
218    double myr [9];
219    typedef MatrixBench INHERITED;
220};
221
222class GetTypeMatrixBench : public MatrixBench {
223public:
224    GetTypeMatrixBench(void* param)
225        : INHERITED(param, "gettype") {
226        fArray[0] = (float) fRnd.nextS();
227        fArray[1] = (float) fRnd.nextS();
228        fArray[2] = (float) fRnd.nextS();
229        fArray[3] = (float) fRnd.nextS();
230        fArray[4] = (float) fRnd.nextS();
231        fArray[5] = (float) fRnd.nextS();
232        fArray[6] = (float) fRnd.nextS();
233        fArray[7] = (float) fRnd.nextS();
234        fArray[8] = (float) fRnd.nextS();
235    }
236protected:
237    // Putting random generation of the matrix inside performTest()
238    // would help us avoid anomalous runs, but takes up 25% or
239    // more of the function time.
240    virtual void performTest() {
241        fMatrix.setAll(fArray[0], fArray[1], fArray[2],
242                       fArray[3], fArray[4], fArray[5],
243                       fArray[6], fArray[7], fArray[8]);
244        always_do(fMatrix.getType());
245        fMatrix.dirtyMatrixTypeCache();
246        always_do(fMatrix.getType());
247        fMatrix.dirtyMatrixTypeCache();
248        always_do(fMatrix.getType());
249        fMatrix.dirtyMatrixTypeCache();
250        always_do(fMatrix.getType());
251        fMatrix.dirtyMatrixTypeCache();
252        always_do(fMatrix.getType());
253        fMatrix.dirtyMatrixTypeCache();
254        always_do(fMatrix.getType());
255        fMatrix.dirtyMatrixTypeCache();
256        always_do(fMatrix.getType());
257        fMatrix.dirtyMatrixTypeCache();
258        always_do(fMatrix.getType());
259    }
260private:
261    SkMatrix fMatrix;
262    float fArray[9];
263    SkRandom fRnd;
264    typedef MatrixBench INHERITED;
265};
266
267#ifdef SK_SCALAR_IS_FLOAT
268class ScaleTransMixedMatrixBench : public MatrixBench {
269 public:
270    ScaleTransMixedMatrixBench(void* p) : INHERITED(p, "scaletrans_mixed"), fCount (16) {
271        fMatrix.setAll(fRandom.nextSScalar1(), fRandom.nextSScalar1(), fRandom.nextSScalar1(),
272                       fRandom.nextSScalar1(), fRandom.nextSScalar1(), fRandom.nextSScalar1(),
273                       fRandom.nextSScalar1(), fRandom.nextSScalar1(), fRandom.nextSScalar1());
274        int i;
275        for (i = 0; i < SkBENCHLOOP(fCount); i++) {
276            fSrc[i].fX = fRandom.nextSScalar1();
277            fSrc[i].fY = fRandom.nextSScalar1();
278            fDst[i].fX = fRandom.nextSScalar1();
279            fDst[i].fY = fRandom.nextSScalar1();
280        }
281    }
282 protected:
283    virtual void performTest() {
284        SkPoint* dst = fDst;
285        const SkPoint* src = fSrc;
286        int count = SkBENCHLOOP(fCount);
287        float mx = fMatrix[SkMatrix::kMScaleX];
288        float my = fMatrix[SkMatrix::kMScaleY];
289        float tx = fMatrix[SkMatrix::kMTransX];
290        float ty = fMatrix[SkMatrix::kMTransY];
291        do {
292            dst->fY = SkScalarMulAdd(src->fY, my, ty);
293            dst->fX = SkScalarMulAdd(src->fX, mx, tx);
294            src += 1;
295            dst += 1;
296        } while (--count);
297    }
298 private:
299    SkMatrix fMatrix;
300    SkPoint fSrc [16];
301    SkPoint fDst [16];
302    int fCount;
303    SkRandom fRandom;
304    typedef MatrixBench INHERITED;
305};
306
307class ScaleTransDoubleMatrixBench : public MatrixBench {
308 public:
309    ScaleTransDoubleMatrixBench(void* p) : INHERITED(p, "scaletrans_double"), fCount (16) {
310        init9(fMatrix);
311        int i;
312        for (i = 0; i < SkBENCHLOOP(fCount); i++) {
313            fSrc[i].fX = fRandom.nextSScalar1();
314            fSrc[i].fY = fRandom.nextSScalar1();
315            fDst[i].fX = fRandom.nextSScalar1();
316            fDst[i].fY = fRandom.nextSScalar1();
317        }
318    }
319 protected:
320    virtual void performTest() {
321        SkPoint* dst = fDst;
322        const SkPoint* src = fSrc;
323        int count = SkBENCHLOOP(fCount);
324        // As doubles, on Z600 Linux systems this is 2.5x as expensive as mixed mode
325        float mx = (float) fMatrix[SkMatrix::kMScaleX];
326        float my = (float) fMatrix[SkMatrix::kMScaleY];
327        float tx = (float) fMatrix[SkMatrix::kMTransX];
328        float ty = (float) fMatrix[SkMatrix::kMTransY];
329        do {
330            dst->fY = src->fY * my + ty;
331            dst->fX = src->fX * mx + tx;
332            src += 1;
333            dst += 1;
334        } while (--count);
335    }
336 private:
337    double fMatrix [9];
338    SkPoint fSrc [16];
339    SkPoint fDst [16];
340    int fCount;
341    SkRandom fRandom;
342    typedef MatrixBench INHERITED;
343};
344#endif
345
346class InvertMapRectMatrixBench : public MatrixBench {
347public:
348    InvertMapRectMatrixBench(void* param, const char* name, int flags)
349        : INHERITED(param, name)
350        , fFlags(flags) {
351        fMatrix.reset();
352        fIteration = 0;
353        if (flags & kScale_Flag) {
354            fMatrix.postScale(SkFloatToScalar(1.5f), SkFloatToScalar(2.5f));
355        }
356        if (flags & kTranslate_Flag) {
357            fMatrix.postTranslate(SkFloatToScalar(1.5f), SkFloatToScalar(2.5f));
358        }
359        if (flags & kRotate_Flag) {
360            fMatrix.postRotate(SkFloatToScalar(45.0f));
361        }
362        if (flags & kPerspective_Flag) {
363            fMatrix.setPerspX(SkFloatToScalar(1.5f));
364            fMatrix.setPerspY(SkFloatToScalar(2.5f));
365        }
366        if (0 == (flags & kUncachedTypeMask_Flag)) {
367            fMatrix.getType();
368        }
369    }
370    enum Flag {
371        kScale_Flag             = 0x01,
372        kTranslate_Flag         = 0x02,
373        kRotate_Flag            = 0x04,
374        kPerspective_Flag       = 0x08,
375        kUncachedTypeMask_Flag  = 0x10,
376    };
377protected:
378    virtual void performTest() {
379        if (fFlags & kUncachedTypeMask_Flag) {
380            // This will invalidate the typemask without
381            // changing the matrix.
382            fMatrix.setPerspX(fMatrix.getPerspX());
383        }
384        SkMatrix inv;
385        bool invertible = fMatrix.invert(&inv);
386        SkASSERT(invertible);
387        SkRect transformedRect;
388        // an arbitrary, small, non-zero rect to transform
389        SkRect srcRect = SkRect::MakeWH(SkIntToScalar(10), SkIntToScalar(10));
390        if (invertible) {
391            inv.mapRect(&transformedRect, srcRect);
392        }
393    }
394private:
395    SkMatrix fMatrix;
396    int fFlags;
397    unsigned fIteration;
398    typedef MatrixBench INHERITED;
399};
400
401
402
403
// Factory functions handed to BenchRegistry below; each constructs one
// benchmark instance. The InvertMapRect variants differ only in which
// Flag bits configure the matrix.
static SkBenchmark* M0(void* p) { return new EqualsMatrixBench(p); }
static SkBenchmark* M1(void* p) { return new ScaleMatrixBench(p); }
static SkBenchmark* M2(void* p) { return new FloatConcatMatrixBench(p); }
static SkBenchmark* M3(void* p) { return new FloatDoubleConcatMatrixBench(p); }
static SkBenchmark* M4(void* p) { return new DoubleConcatMatrixBench(p); }
static SkBenchmark* M5(void* p) { return new GetTypeMatrixBench(p); }
static SkBenchmark* M6(void* p) {
    return new InvertMapRectMatrixBench(p,
        "invert_maprect_identity", 0);
}
static SkBenchmark* M7(void* p) {
    return new InvertMapRectMatrixBench(p,
        "invert_maprect_rectstaysrect",
        InvertMapRectMatrixBench::kScale_Flag |
        InvertMapRectMatrixBench::kTranslate_Flag);
}
static SkBenchmark* M8(void* p) {
    return new InvertMapRectMatrixBench(p,
        "invert_maprect_nonpersp",
        InvertMapRectMatrixBench::kScale_Flag |
        InvertMapRectMatrixBench::kRotate_Flag |
        InvertMapRectMatrixBench::kTranslate_Flag);
}
static SkBenchmark* M9(void* p) {
    return new InvertMapRectMatrixBench(p,
        "invert_maprect_persp",
        InvertMapRectMatrixBench::kPerspective_Flag);
}
static SkBenchmark* M10(void* p) {
    return new InvertMapRectMatrixBench(p,
        "invert_maprect_typemask_rectstaysrect",
        InvertMapRectMatrixBench::kUncachedTypeMask_Flag |
        InvertMapRectMatrixBench::kScale_Flag |
        InvertMapRectMatrixBench::kTranslate_Flag);
}
static SkBenchmark* M11(void* p) {
    return new InvertMapRectMatrixBench(p,
        "invert_maprect_typemask_nonpersp",
        InvertMapRectMatrixBench::kUncachedTypeMask_Flag |
        InvertMapRectMatrixBench::kScale_Flag |
        InvertMapRectMatrixBench::kRotate_Flag |
        InvertMapRectMatrixBench::kTranslate_Flag);
}
447
// Register each factory with the global benchmark registry.
static BenchRegistry gReg0(M0);
static BenchRegistry gReg1(M1);
static BenchRegistry gReg2(M2);
static BenchRegistry gReg3(M3);
static BenchRegistry gReg4(M4);
static BenchRegistry gReg5(M5);
static BenchRegistry gReg6(M6);
static BenchRegistry gReg7(M7);
static BenchRegistry gReg8(M8);
static BenchRegistry gReg9(M9);
static BenchRegistry gReg10(M10);
static BenchRegistry gReg11(M11);

// These benches compare float vs. double matrix math, so they only apply
// when SkScalar is a float.
#ifdef SK_SCALAR_IS_FLOAT
static SkBenchmark* FlM0(void* p) { return new ScaleTransMixedMatrixBench(p); }
static SkBenchmark* FlM1(void* p) { return new ScaleTransDoubleMatrixBench(p); }
static BenchRegistry gFlReg5(FlM0);
static BenchRegistry gFlReg6(FlM1);
#endif
467