1/*
2 * Copyright (C) 2011 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 *      http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#define LOG_TAG "BasicHashtable"
18
19#include <math.h>
20
21#include <utils/Log.h>
22#include <utils/BasicHashtable.h>
23#include <utils/misc.h>
24
25namespace android {
26
// Constructs an empty hashtable.  No bucket storage is allocated here --
// mBuckets starts NULL and storage is created lazily by add().  Each bucket
// consists of a Bucket header followed by the caller's entry payload, hence
// mBucketSize = entrySize + sizeof(Bucket).  determineCapacity() picks a
// prime bucket count large enough to hold minimumInitialCapacity entries at
// the requested load factor.
BasicHashtableImpl::BasicHashtableImpl(size_t entrySize, bool hasTrivialDestructor,
        size_t minimumInitialCapacity, float loadFactor) :
        mBucketSize(entrySize + sizeof(Bucket)), mHasTrivialDestructor(hasTrivialDestructor),
        mLoadFactor(loadFactor), mSize(0),
        mFilledBuckets(0), mBuckets(NULL) {
    determineCapacity(minimumInitialCapacity, mLoadFactor, &mBucketCount, &mCapacity);
}
34
// Copy constructor: shares the other table's bucket storage by taking a
// reference on the underlying SharedBuffer instead of duplicating the
// buckets (copy-on-write).  A private copy is made later by clone() --
// presumably triggered via edit() before any mutation; confirm against the
// header.
BasicHashtableImpl::BasicHashtableImpl(const BasicHashtableImpl& other) :
        mBucketSize(other.mBucketSize), mHasTrivialDestructor(other.mHasTrivialDestructor),
        mCapacity(other.mCapacity), mLoadFactor(other.mLoadFactor),
        mSize(other.mSize), mFilledBuckets(other.mFilledBuckets),
        mBucketCount(other.mBucketCount), mBuckets(other.mBuckets) {
    if (mBuckets) {
        SharedBuffer::bufferFromData(mBuckets)->acquire();
    }
}
44
// Intentionally empty: the bucket storage is released by dispose(), not the
// destructor -- presumably so the caller that still knows the concrete entry
// type can drive teardown; TODO(review): confirm against BasicHashtable.h.
BasicHashtableImpl::~BasicHashtableImpl()
{
}
48
49void BasicHashtableImpl::dispose() {
50    if (mBuckets) {
51        releaseBuckets(mBuckets, mBucketCount);
52    }
53}
54
55void BasicHashtableImpl::clone() {
56    if (mBuckets) {
57        void* newBuckets = allocateBuckets(mBucketCount);
58        copyBuckets(mBuckets, newBuckets, mBucketCount);
59        releaseBuckets(mBuckets, mBucketCount);
60        mBuckets = newBuckets;
61    }
62}
63
64void BasicHashtableImpl::setTo(const BasicHashtableImpl& other) {
65    if (mBuckets) {
66        releaseBuckets(mBuckets, mBucketCount);
67    }
68
69    mCapacity = other.mCapacity;
70    mLoadFactor = other.mLoadFactor;
71    mSize = other.mSize;
72    mFilledBuckets = other.mFilledBuckets;
73    mBucketCount = other.mBucketCount;
74    mBuckets = other.mBuckets;
75
76    if (mBuckets) {
77        SharedBuffer::bufferFromData(mBuckets)->acquire();
78    }
79}
80
// Removes all entries.  The behavior depends on whether the bucket storage
// is shared with another table (copy-on-write).
void BasicHashtableImpl::clear() {
    if (mBuckets) {
        if (mFilledBuckets) {
            SharedBuffer* sb = SharedBuffer::bufferFromData(mBuckets);
            if (sb->onlyOwner()) {
                // Sole owner: destroy the entries in place and zero every
                // cookie (clearing both PRESENT and COLLISION) so the
                // existing allocation can be reused as an empty table.
                destroyBuckets(mBuckets, mBucketCount);
                for (size_t i = 0; i < mBucketCount; i++) {
                    Bucket& bucket = bucketAt(mBuckets, i);
                    bucket.cookie = 0;
                }
            } else {
                // Shared storage: just drop our reference; fresh storage will
                // be allocated lazily on the next add().
                releaseBuckets(mBuckets, mBucketCount);
                mBuckets = NULL;
            }
            mFilledBuckets = 0;
        }
        mSize = 0;
    }
}
100
101ssize_t BasicHashtableImpl::next(ssize_t index) const {
102    if (mSize) {
103        while (size_t(++index) < mBucketCount) {
104            const Bucket& bucket = bucketAt(mBuckets, index);
105            if (bucket.cookie & Bucket::PRESENT) {
106                return index;
107            }
108        }
109    }
110    return -1;
111}
112
// Finds an entry with the given hash whose key matches, resuming the probe
// chain after 'index' (pass -1 to search the chain from its home bucket).
// Returns the bucket index of the match, or -1 when the chain is exhausted.
ssize_t BasicHashtableImpl::find(ssize_t index, hash_t hash,
        const void* __restrict__ key) const {
    if (!mSize) {
        return -1;
    }

    hash = trimHash(hash);
    if (index < 0) {
        // Fresh search: probe the chain's home bucket first.
        index = chainStart(hash, mBucketCount);

        const Bucket& bucket = bucketAt(mBuckets, size_t(index));
        if (bucket.cookie & Bucket::PRESENT) {
            if (compareBucketKey(bucket, key)) {
                return index;
            }
        } else {
            // An empty bucket without the COLLISION flag was never probed
            // past by an insertion, so the chain ends here.
            if (!(bucket.cookie & Bucket::COLLISION)) {
                return -1;
            }
        }
    }

    // Walk the rest of the probe chain.  The step size is derived from the
    // hash (chainIncrement), so probe sequences vary per hash.
    size_t inc = chainIncrement(hash, mBucketCount);
    for (;;) {
        index = chainSeek(index, inc, mBucketCount);

        const Bucket& bucket = bucketAt(mBuckets, size_t(index));
        if (bucket.cookie & Bucket::PRESENT) {
            // Compare the cached hash before the potentially expensive key
            // comparison.
            if ((bucket.cookie & Bucket::HASH_MASK) == hash
                    && compareBucketKey(bucket, key)) {
                return index;
            }
        }
        // No COLLISION flag: nothing further along this chain.
        if (!(bucket.cookie & Bucket::COLLISION)) {
            return -1;
        }
    }
}
151
// Inserts an entry with the given hash, growing the table when the occupancy
// limit is reached.  Returns the index of the bucket that received the
// entry.  Duplicate keys are not checked here.
size_t BasicHashtableImpl::add(hash_t hash, const void* entry) {
    if (!mBuckets) {
        // Storage is allocated lazily on the first insertion.
        mBuckets = allocateBuckets(mBucketCount);
    } else {
        // Copy-on-write: ensure we exclusively own the storage first.
        edit();
    }

    hash = trimHash(hash);
    for (;;) {
        // Probe from the chain's home bucket until a free bucket is found,
        // flagging each occupied bucket passed over as COLLISION so find()
        // knows the chain continues past it.
        size_t index = chainStart(hash, mBucketCount);
        Bucket* bucket = &bucketAt(mBuckets, size_t(index));
        if (bucket->cookie & Bucket::PRESENT) {
            size_t inc = chainIncrement(hash, mBucketCount);
            do {
                bucket->cookie |= Bucket::COLLISION;
                index = chainSeek(index, inc, mBucketCount);
                bucket = &bucketAt(mBuckets, size_t(index));
            } while (bucket->cookie & Bucket::PRESENT);
        }

        // A bucket still carrying the COLLISION flag already counts toward
        // mFilledBuckets (see removeAt), so reusing it is free.
        uint32_t collision = bucket->cookie & Bucket::COLLISION;
        if (!collision) {
            if (mFilledBuckets >= mCapacity) {
                // Table too full: grow and retry the whole probe, since all
                // indices change after a rehash.
                rehash(mCapacity * 2, mLoadFactor);
                continue;
            }
            mFilledBuckets += 1;
        }

        // Preserve COLLISION, mark the bucket occupied, and cache the trimmed
        // hash in the cookie for cheap comparisons in find().
        bucket->cookie = collision | Bucket::PRESENT | hash;
        mSize += 1;
        initializeBucketEntry(*bucket, entry);
        return index;
    }
}
187
188void BasicHashtableImpl::removeAt(size_t index) {
189    edit();
190
191    Bucket& bucket = bucketAt(mBuckets, index);
192    bucket.cookie &= ~Bucket::PRESENT;
193    if (!(bucket.cookie & Bucket::COLLISION)) {
194        mFilledBuckets -= 1;
195    }
196    mSize -= 1;
197    if (!mHasTrivialDestructor) {
198        destroyBucketEntry(bucket);
199    }
200}
201
// Resizes the table to hold at least minimumCapacity entries at the given
// load factor, re-inserting every present entry into freshly allocated
// storage.  COLLISION flags are rebuilt from scratch, so stale chains left
// behind by removed entries are discarded.
void BasicHashtableImpl::rehash(size_t minimumCapacity, float loadFactor) {
    if (minimumCapacity < mSize) {
        // Never shrink below the number of entries currently stored.
        minimumCapacity = mSize;
    }
    size_t newBucketCount, newCapacity;
    determineCapacity(minimumCapacity, loadFactor, &newBucketCount, &newCapacity);

    if (newBucketCount != mBucketCount || newCapacity != mCapacity) {
        if (mBuckets) {
            void* newBuckets;
            if (mSize) {
                // Re-insert each present entry, recomputing its probe chain
                // (and any COLLISION flags) for the new bucket count.  The
                // stored hash is recovered from the bucket cookie.
                newBuckets = allocateBuckets(newBucketCount);
                for (size_t i = 0; i < mBucketCount; i++) {
                    const Bucket& fromBucket = bucketAt(mBuckets, i);
                    if (fromBucket.cookie & Bucket::PRESENT) {
                        hash_t hash = fromBucket.cookie & Bucket::HASH_MASK;
                        size_t index = chainStart(hash, newBucketCount);
                        Bucket* toBucket = &bucketAt(newBuckets, size_t(index));
                        if (toBucket->cookie & Bucket::PRESENT) {
                            size_t inc = chainIncrement(hash, newBucketCount);
                            do {
                                toBucket->cookie |= Bucket::COLLISION;
                                index = chainSeek(index, inc, newBucketCount);
                                toBucket = &bucketAt(newBuckets, size_t(index));
                            } while (toBucket->cookie & Bucket::PRESENT);
                        }
                        toBucket->cookie = Bucket::PRESENT | hash;
                        initializeBucketEntry(*toBucket, fromBucket.entry);
                    }
                }
            } else {
                // No entries: drop storage and reallocate lazily on add().
                newBuckets = NULL;
            }
            releaseBuckets(mBuckets, mBucketCount);
            mBuckets = newBuckets;
            // After a clean rebuild every filled bucket holds a live entry.
            mFilledBuckets = mSize;
        }
        mBucketCount = newBucketCount;
        mCapacity = newCapacity;
    }
    mLoadFactor = loadFactor;
}
244
245void* BasicHashtableImpl::allocateBuckets(size_t count) const {
246    size_t bytes = count * mBucketSize;
247    SharedBuffer* sb = SharedBuffer::alloc(bytes);
248    LOG_ALWAYS_FATAL_IF(!sb, "Could not allocate %u bytes for hashtable with %u buckets.",
249            uint32_t(bytes), uint32_t(count));
250    void* buckets = sb->data();
251    for (size_t i = 0; i < count; i++) {
252        Bucket& bucket = bucketAt(buckets, i);
253        bucket.cookie = 0;
254    }
255    return buckets;
256}
257
// Drops one reference on the given bucket storage.  eKeepStorage defers the
// buffer deallocation so that, when ours was the last reference (release()
// returns 1), the live entries can be destroyed before the memory is freed.
void BasicHashtableImpl::releaseBuckets(void* __restrict__ buckets, size_t count) const {
    SharedBuffer* sb = SharedBuffer::bufferFromData(buckets);
    if (sb->release(SharedBuffer::eKeepStorage) == 1) {
        // Last owner: tear down entries first, then free the buffer.
        destroyBuckets(buckets, count);
        SharedBuffer::dealloc(sb);
    }
}
265
266void BasicHashtableImpl::destroyBuckets(void* __restrict__ buckets, size_t count) const {
267    if (!mHasTrivialDestructor) {
268        for (size_t i = 0; i < count; i++) {
269            Bucket& bucket = bucketAt(buckets, i);
270            if (bucket.cookie & Bucket::PRESENT) {
271                destroyBucketEntry(bucket);
272            }
273        }
274    }
275}
276
277void BasicHashtableImpl::copyBuckets(const void* __restrict__ fromBuckets,
278        void* __restrict__ toBuckets, size_t count) const {
279    for (size_t i = 0; i < count; i++) {
280        const Bucket& fromBucket = bucketAt(fromBuckets, i);
281        Bucket& toBucket = bucketAt(toBuckets, i);
282        toBucket.cookie = fromBucket.cookie;
283        if (fromBucket.cookie & Bucket::PRESENT) {
284            initializeBucketEntry(toBucket, fromBucket.entry);
285        }
286    }
287}
288
// Table of 31-bit primes where each prime is no less than twice as large
// as the previous one.  Generated by "primes.py".
// Used by determineCapacity() to pick a prime bucket count; the trailing 0
// is a sentinel marking the end of the table.
static size_t PRIMES[] = {
    5,
    11,
    23,
    47,
    97,
    197,
    397,
    797,
    1597,
    3203,
    6421,
    12853,
    25717,
    51437,
    102877,
    205759,
    411527,
    823117,
    1646237,
    3292489,
    6584983,
    13169977,
    26339969,
    52679969,
    105359939,
    210719881,
    421439783,
    842879579,
    1685759167,
    0,
};
323
324void BasicHashtableImpl::determineCapacity(size_t minimumCapacity, float loadFactor,
325        size_t* __restrict__ outBucketCount, size_t* __restrict__ outCapacity) {
326    LOG_ALWAYS_FATAL_IF(loadFactor <= 0.0f || loadFactor > 1.0f,
327            "Invalid load factor %0.3f.  Must be in the range (0, 1].", loadFactor);
328
329    size_t count = ceilf(minimumCapacity / loadFactor) + 1;
330    size_t i = 0;
331    while (count > PRIMES[i] && i < NELEM(PRIMES)) {
332        i++;
333    }
334    count = PRIMES[i];
335    LOG_ALWAYS_FATAL_IF(!count, "Could not determine required number of buckets for "
336            "hashtable with minimum capacity %u and load factor %0.3f.",
337            uint32_t(minimumCapacity), loadFactor);
338    *outBucketCount = count;
339    *outCapacity = ceilf((count - 1) * loadFactor);
340}
341
342}; // namespace android
343