SkChecksum.h revision 6ac0037b70410ff7d5ce5788bc89314223e1a587
1/*
2 * Copyright 2012 Google Inc.
3 *
4 * Use of this source code is governed by a BSD-style license that can be
5 * found in the LICENSE file.
6 */
7
8#ifndef SkChecksum_DEFINED
9#define SkChecksum_DEFINED
10
#include "SkTypes.h"

#include <string.h>
12
13/**
14 *  Computes a 32bit checksum from a blob of 32bit aligned data. This is meant
15 *  to be very very fast, as it is used internally by the font cache, in
 *  conjunction with the entire raw key. This algorithm does not generate
17 *  unique values as well as others (e.g. MD5) but it performs much faster.
18 *  Skia's use cases can survive non-unique values (since the entire key is
 *  always available). This class should only be used in circumstances where
 *  speed over uniqueness is at a premium.
21 */
22class SkChecksum : SkNoncopyable {
23private:
24    /*
25     *  Our Rotate and Mash helpers are meant to automatically do the right
26     *  thing depending if sizeof(uintptr_t) is 4 or 8.
27     */
28    enum {
29        ROTR = 17,
30        ROTL = sizeof(uintptr_t) * 8 - ROTR,
31        HALFBITS = sizeof(uintptr_t) * 4
32    };
33
34    static inline uintptr_t Mash(uintptr_t total, uintptr_t value) {
35        return ((total >> ROTR) | (total << ROTL)) ^ value;
36    }
37
38public:
39    /**
40     * uint32_t -> uint32_t hash, useful for when you're about to trucate this hash but you
41     * suspect its low bits aren't well mixed.
42     *
43     * This is the Murmur3 finalizer.
44     */
45    static uint32_t Mix(uint32_t hash) {
46        hash ^= hash >> 16;
47        hash *= 0x85ebca6b;
48        hash ^= hash >> 13;
49        hash *= 0xc2b2ae35;
50        hash ^= hash >> 16;
51        return hash;
52    }
53
54    /**
55     * Calculate 32-bit Murmur hash (murmur3).
56     * This should take 2-3x longer than SkChecksum::Compute, but is a considerably better hash.
57     * See en.wikipedia.org/wiki/MurmurHash.
58     *
59     *  @param data Memory address of the data block to be processed. Must be 32-bit aligned.
60     *  @param size Size of the data block in bytes. Must be a multiple of 4.
61     *  @param seed Initial hash seed. (optional)
62     *  @return hash result
63     */
64    static uint32_t Murmur3(const uint32_t* data, size_t bytes, uint32_t seed=0) {
65        SkASSERTF(SkIsAlign4(bytes), "Expected 4-byte multiple, got %zu", bytes);
66        const size_t words = bytes/4;
67
68        uint32_t hash = seed;
69        for (size_t i = 0; i < words; i++) {
70            uint32_t k = data[i];
71            k *= 0xcc9e2d51;
72            k = (k << 15) | (k >> 17);
73            k *= 0x1b873593;
74
75            hash ^= k;
76            hash = (hash << 13) | (hash >> 19);
77            hash *= 5;
78            hash += 0xe6546b64;
79        }
80        hash ^= bytes;
81        return Mix(hash);
82    }
83
84    /**
85     *  Compute a 32-bit checksum for a given data block
86     *
87     *  WARNING: this algorithm is tuned for efficiency, not backward/forward
88     *  compatibility.  It may change at any time, so a checksum generated with
89     *  one version of the Skia code may not match a checksum generated with
90     *  a different version of the Skia code.
91     *
92     *  @param data Memory address of the data block to be processed. Must be
93     *              32-bit aligned.
94     *  @param size Size of the data block in bytes. Must be a multiple of 4.
95     *  @return checksum result
96     */
97    static uint32_t Compute(const uint32_t* data, size_t size) {
98        SkASSERT(SkIsAlign4(size));
99
100        /*
101         *  We want to let the compiler use 32bit or 64bit addressing and math
102         *  so we use uintptr_t as our magic type. This makes the code a little
103         *  more obscure (we can't hard-code 32 or 64 anywhere, but have to use
104         *  sizeof()).
105         */
106        uintptr_t result = 0;
107        const uintptr_t* ptr = reinterpret_cast<const uintptr_t*>(data);
108
109        /*
110         *  count the number of quad element chunks. This takes into account
111         *  if we're on a 32bit or 64bit arch, since we use sizeof(uintptr_t)
112         *  to compute how much to shift-down the size.
113         */
114        size_t n4 = size / (sizeof(uintptr_t) << 2);
115        for (size_t i = 0; i < n4; ++i) {
116            result = Mash(result, *ptr++);
117            result = Mash(result, *ptr++);
118            result = Mash(result, *ptr++);
119            result = Mash(result, *ptr++);
120        }
121        size &= ((sizeof(uintptr_t) << 2) - 1);
122
123        data = reinterpret_cast<const uint32_t*>(ptr);
124        const uint32_t* stop = data + (size >> 2);
125        while (data < stop) {
126            result = Mash(result, *data++);
127        }
128
129        /*
130         *  smash us down to 32bits if we were 64. Note that when uintptr_t is
131         *  32bits, this code-path should go away, but I still got a warning
132         *  when I wrote
133         *      result ^= result >> 32;
134         *  since >>32 is undefined for 32bit ints, hence the wacky HALFBITS
135         *  define.
136         */
137        if (8 == sizeof(result)) {
138            result ^= result >> HALFBITS;
139        }
140        return static_cast<uint32_t>(result);
141    }
142};
143
144#endif
145