/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define LOG_TAG "BasicHashtable"

#include <math.h>

#include <utils/Log.h>
#include <utils/SharedBuffer.h>
#include <utils/BasicHashtable.h>
#include <utils/misc.h>

namespace android {

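// Sets up an empty table.  The bucket storage itself is allocated lazily on the
// first add(); here we only record the entry layout and compute the initial
// bucket count and capacity from the requested minimum capacity and load factor.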
BasicHashtableImpl::BasicHashtableImpl(size_t entrySize, bool hasTrivialDestructor,
        size_t minimumInitialCapacity, float loadFactor) :
        mBucketSize(entrySize + sizeof(Bucket)), mHasTrivialDestructor(hasTrivialDestructor),
        mLoadFactor(loadFactor), mSize(0),
        mFilledBuckets(0), mBuckets(NULL) {
    determineCapacity(minimumInitialCapacity, mLoadFactor, &mBucketCount, &mCapacity);
}

BasicHashtableImpl::BasicHashtableImpl(const BasicHashtableImpl& other) :
        mBucketSize(other.mBucketSize), mHasTrivialDestructor(other.mHasTrivialDestructor),
        mCapacity(other.mCapacity), mLoadFactor(other.mLoadFactor),
        mSize(other.mSize), mFilledBuckets(other.mFilledBuckets),
        mBucketCount(other.mBucketCount), mBuckets(other.mBuckets) {
    if (mBuckets) {
        SharedBuffer::bufferFromData(mBuckets)->acquire();
    }
}

void BasicHashtableImpl::dispose() {
    if (mBuckets) {
        releaseBuckets(mBuckets, mBucketCount);
    }
}

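// Detaches this table from bucket storage shared with another instance by
// copying the buckets into a freshly allocated buffer, so that subsequent
// edits do not affect the other owners of the original buffer.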
void BasicHashtableImpl::clone() {
    if (mBuckets) {
        void* newBuckets = allocateBuckets(mBucketCount);
        copyBuckets(mBuckets, newBuckets, mBucketCount);
        releaseBuckets(mBuckets, mBucketCount);
        mBuckets = newBuckets;
    }
}

void BasicHashtableImpl::setTo(const BasicHashtableImpl& other) {
    if (mBuckets) {
        releaseBuckets(mBuckets, mBucketCount);
    }

    mCapacity = other.mCapacity;
    mLoadFactor = other.mLoadFactor;
    mSize = other.mSize;
    mFilledBuckets = other.mFilledBuckets;
    mBucketCount = other.mBucketCount;
    mBuckets = other.mBuckets;

    if (mBuckets) {
        SharedBuffer::bufferFromData(mBuckets)->acquire();
    }
}

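// Removes all entries.  If this instance is the sole owner of the bucket
// storage, the entries are destroyed in place and every cookie is reset;
// otherwise the shared storage is released and the table reverts to the
// unallocated state.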
void BasicHashtableImpl::clear() {
    if (mBuckets) {
        if (mFilledBuckets) {
            SharedBuffer* sb = SharedBuffer::bufferFromData(mBuckets);
            if (sb->onlyOwner()) {
                destroyBuckets(mBuckets, mBucketCount);
                // Reset every bucket, not just the first mSize, so that no stale
                // PRESENT or COLLISION flags survive the clear.
                for (size_t i = 0; i < mBucketCount; i++) {
                    Bucket& bucket = bucketAt(mBuckets, i);
                    bucket.cookie = 0;
                }
            } else {
                releaseBuckets(mBuckets, mBucketCount);
                mBuckets = NULL;
            }
            mFilledBuckets = 0;
        }
        mSize = 0;
    }
}

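// Returns the index of the first occupied bucket after 'index', or -1 if there
// are no further entries.  Pass -1 to start iterating from the beginning.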
ssize_t BasicHashtableImpl::next(ssize_t index) const {
    if (mSize) {
        while (size_t(++index) < mBucketCount) {
            const Bucket& bucket = bucketAt(mBuckets, index);
            if (bucket.cookie & Bucket::PRESENT) {
                return index;
            }
        }
    }
    return -1;
}

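// Searches for an entry with the given key, continuing after 'index', or from
// the start of the probe chain if 'index' is negative.  The table uses double
// hashing: probing follows the chain for as long as the COLLISION flag
// indicates that some entry's probe sequence continues past the current bucket.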
ssize_t BasicHashtableImpl::find(ssize_t index, hash_t hash,
        const void* __restrict__ key) const {
    if (!mSize) {
        return -1;
    }

    hash = trimHash(hash);
    if (index < 0) {
        index = chainStart(hash, mBucketCount);

        const Bucket& bucket = bucketAt(mBuckets, size_t(index));
        if (bucket.cookie & Bucket::PRESENT) {
            // Compare the stored hash first so the potentially expensive key
            // comparison only runs for genuine candidates, as in the loop below.
            if ((bucket.cookie & Bucket::HASH_MASK) == hash
                    && compareBucketKey(bucket, key)) {
                return index;
            }
        } else {
            if (!(bucket.cookie & Bucket::COLLISION)) {
                return -1;
            }
        }
    }

    size_t inc = chainIncrement(hash, mBucketCount);
    for (;;) {
        index = chainSeek(index, inc, mBucketCount);

        const Bucket& bucket = bucketAt(mBuckets, size_t(index));
        if (bucket.cookie & Bucket::PRESENT) {
            if ((bucket.cookie & Bucket::HASH_MASK) == hash
                    && compareBucketKey(bucket, key)) {
                return index;
            }
        }
        if (!(bucket.cookie & Bucket::COLLISION)) {
            return -1;
        }
    }
}

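// Adds an entry with the given hash, allocating the bucket storage on first use
// and detaching from shared storage otherwise.  Probing follows the same
// double-hashing chain as find(); every occupied bucket visited along the way is
// marked COLLISION.  If a previously untouched bucket would be consumed while
// the table is already at capacity, the table is rehashed at twice the capacity
// and the insertion retries.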
size_t BasicHashtableImpl::add(hash_t hash, const void* entry) {
    if (!mBuckets) {
        mBuckets = allocateBuckets(mBucketCount);
    } else {
        edit();
    }

    hash = trimHash(hash);
    for (;;) {
        size_t index = chainStart(hash, mBucketCount);
        Bucket* bucket = &bucketAt(mBuckets, size_t(index));
        if (bucket->cookie & Bucket::PRESENT) {
            size_t inc = chainIncrement(hash, mBucketCount);
            do {
                bucket->cookie |= Bucket::COLLISION;
                index = chainSeek(index, inc, mBucketCount);
                bucket = &bucketAt(mBuckets, size_t(index));
            } while (bucket->cookie & Bucket::PRESENT);
        }

        uint32_t collision = bucket->cookie & Bucket::COLLISION;
        if (!collision) {
            if (mFilledBuckets >= mCapacity) {
                rehash(mCapacity * 2, mLoadFactor);
                continue;
            }
            mFilledBuckets += 1;
        }

        bucket->cookie = collision | Bucket::PRESENT | hash;
        mSize += 1;
        initializeBucketEntry(*bucket, entry);
        return index;
    }
}

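// Removes the entry at 'index'.  Only the PRESENT flag is cleared; the COLLISION
// flag is left intact so that probe chains passing through this bucket keep
// working, which is also why the bucket only stops counting as filled when it
// never recorded a collision.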
void BasicHashtableImpl::removeAt(size_t index) {
    edit();

    Bucket& bucket = bucketAt(mBuckets, index);
    bucket.cookie &= ~Bucket::PRESENT;
    if (!(bucket.cookie & Bucket::COLLISION)) {
        mFilledBuckets -= 1;
    }
    mSize -= 1;
    if (!mHasTrivialDestructor) {
        destroyBucketEntry(bucket);
    }
}

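// Resizes the table to hold at least 'minimumCapacity' entries at the given load
// factor, re-inserting every present entry into freshly allocated buckets.
// Rehashing also discards stale COLLISION markers left behind by removals.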
void BasicHashtableImpl::rehash(size_t minimumCapacity, float loadFactor) {
    if (minimumCapacity < mSize) {
        minimumCapacity = mSize;
    }
    size_t newBucketCount, newCapacity;
    determineCapacity(minimumCapacity, loadFactor, &newBucketCount, &newCapacity);

    if (newBucketCount != mBucketCount || newCapacity != mCapacity) {
        if (mBuckets) {
            void* newBuckets;
            if (mSize) {
                newBuckets = allocateBuckets(newBucketCount);
                for (size_t i = 0; i < mBucketCount; i++) {
                    const Bucket& fromBucket = bucketAt(mBuckets, i);
                    if (fromBucket.cookie & Bucket::PRESENT) {
                        hash_t hash = fromBucket.cookie & Bucket::HASH_MASK;
                        size_t index = chainStart(hash, newBucketCount);
                        Bucket* toBucket = &bucketAt(newBuckets, size_t(index));
                        if (toBucket->cookie & Bucket::PRESENT) {
                            size_t inc = chainIncrement(hash, newBucketCount);
                            do {
                                toBucket->cookie |= Bucket::COLLISION;
                                index = chainSeek(index, inc, newBucketCount);
                                toBucket = &bucketAt(newBuckets, size_t(index));
                            } while (toBucket->cookie & Bucket::PRESENT);
                        }
                        toBucket->cookie = Bucket::PRESENT | hash;
                        initializeBucketEntry(*toBucket, fromBucket.entry);
                    }
                }
            } else {
                newBuckets = NULL;
            }
            releaseBuckets(mBuckets, mBucketCount);
            mBuckets = newBuckets;
            mFilledBuckets = mSize;
        }
        mBucketCount = newBucketCount;
        mCapacity = newCapacity;
    }
    mLoadFactor = loadFactor;
}

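// Allocates a SharedBuffer large enough for 'count' buckets and clears every
// bucket cookie.  Aborts if the allocation fails.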
void* BasicHashtableImpl::allocateBuckets(size_t count) const {
    size_t bytes = count * mBucketSize;
    SharedBuffer* sb = SharedBuffer::alloc(bytes);
    LOG_ALWAYS_FATAL_IF(!sb, "Could not allocate %u bytes for hashtable with %u buckets.",
            uint32_t(bytes), uint32_t(count));
    void* buckets = sb->data();
    for (size_t i = 0; i < count; i++) {
        Bucket& bucket = bucketAt(buckets, i);
        bucket.cookie = 0;
    }
    return buckets;
}

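// Drops one reference to the bucket storage.  Entries are only destroyed and the
// buffer freed once the last reference goes away; eKeepStorage keeps the raw
// storage alive during release so the entries can still be destroyed here before
// the buffer is deallocated.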
void BasicHashtableImpl::releaseBuckets(void* __restrict__ buckets, size_t count) const {
    SharedBuffer* sb = SharedBuffer::bufferFromData(buckets);
    if (sb->release(SharedBuffer::eKeepStorage) == 1) {
        destroyBuckets(buckets, count);
        SharedBuffer::dealloc(sb);
    }
}

void BasicHashtableImpl::destroyBuckets(void* __restrict__ buckets, size_t count) const {
    if (!mHasTrivialDestructor) {
        for (size_t i = 0; i < count; i++) {
            Bucket& bucket = bucketAt(buckets, i);
            if (bucket.cookie & Bucket::PRESENT) {
                destroyBucketEntry(bucket);
            }
        }
    }
}

void BasicHashtableImpl::copyBuckets(const void* __restrict__ fromBuckets,
        void* __restrict__ toBuckets, size_t count) const {
    for (size_t i = 0; i < count; i++) {
        const Bucket& fromBucket = bucketAt(fromBuckets, i);
        Bucket& toBucket = bucketAt(toBuckets, i);
        toBucket.cookie = fromBucket.cookie;
        if (fromBucket.cookie & Bucket::PRESENT) {
            initializeBucketEntry(toBucket, fromBucket.entry);
        }
    }
}

// Table of 31-bit primes where each prime is no less than twice as large
// as the previous one.  Generated by "primes.py".
static size_t PRIMES[] = {
    5,
    11,
    23,
    47,
    97,
    197,
    397,
    797,
    1597,
    3203,
    6421,
    12853,
    25717,
    51437,
    102877,
    205759,
    411527,
    823117,
    1646237,
    3292489,
    6584983,
    13169977,
    26339969,
    52679969,
    105359939,
    210719881,
    421439783,
    842879579,
    1685759167,
    0,
};

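// Computes the bucket count (a prime from the table above) and the corresponding
// capacity for the requested minimum capacity and load factor.  Aborts if the
// load factor is out of range or the requested capacity exceeds the prime table.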
void BasicHashtableImpl::determineCapacity(size_t minimumCapacity, float loadFactor,
        size_t* __restrict__ outBucketCount, size_t* __restrict__ outCapacity) {
    LOG_ALWAYS_FATAL_IF(loadFactor <= 0.0f || loadFactor > 1.0f,
            "Invalid load factor %0.3f.  Must be in the range (0, 1].", loadFactor);

    size_t count = ceilf(minimumCapacity / loadFactor) + 1;
    size_t i = 0;
    // Stop at the 0 sentinel so we never read past the end of PRIMES; a count of
    // zero then trips the fatal check below.
    while (PRIMES[i] && count > PRIMES[i]) {
        i++;
    }
    count = PRIMES[i];
    LOG_ALWAYS_FATAL_IF(!count, "Could not determine required number of buckets for "
            "hashtable with minimum capacity %u and load factor %0.3f.",
            uint32_t(minimumCapacity), loadFactor);
    *outBucketCount = count;
    *outCapacity = ceilf((count - 1) * loadFactor);
}

}; // namespace android