1#ifndef _LINUX_HASH_H
2#define _LINUX_HASH_H
3
4#include <inttypes.h>
5#include "arch/arch.h"
6
7/* Fast hashing routine for a long.
8   (C) 2002 William Lee Irwin III, IBM */
9
10/*
11 * Knuth recommends primes in approximately golden ratio to the maximum
12 * integer representable by a machine word for multiplicative hashing.
13 * Chuck Lever verified the effectiveness of this technique:
14 * http://www.citi.umich.edu/techreports/reports/citi-tr-00-1.pdf
15 *
16 * These primes are chosen to be bit-sparse, that is operations on
17 * them can use shifts and additions instead of multiplications for
18 * machines where multiplications are slow.
19 */
20
#if BITS_PER_LONG == 32
/* 2^31 + 2^29 - 2^25 + 2^22 - 2^19 - 2^16 + 1 */
#define GOLDEN_RATIO_PRIME 0x9e370001UL
#elif BITS_PER_LONG == 64
/*  2^63 + 2^61 - 2^57 + 2^54 - 2^51 - 2^18 + 1 */
#define GOLDEN_RATIO_PRIME 0x9e37fffffffc0001UL
#else
#error Define GOLDEN_RATIO_PRIME for your wordsize.
#endif

/*
 * 64-bit golden-ratio prime, available regardless of BITS_PER_LONG so
 * callers can hash a uint64_t even on 32-bit builds (see __hash_u64()).
 * Same value as the 64-bit GOLDEN_RATIO_PRIME above.
 */
#define GR_PRIME_64	0x9e37fffffffc0001ULL
32
/*
 * Multiplicative hash: returns val * GOLDEN_RATIO_PRIME modulo
 * 2^BITS_PER_LONG.  The high bits of the result are the best mixed;
 * hash_long() below selects them.
 */
static inline unsigned long __hash_long(unsigned long val)
{
	unsigned long hash = val;

#if BITS_PER_LONG == 64
	/*  Sigh, gcc can't optimise this alone like it does for 32 bits. */
	/*
	 * Hand-expanded multiply by 0x9e37fffffffc0001 =
	 * 2^63 + 2^61 - 2^57 + 2^54 - 2^51 - 2^18 + 1, using only shifts
	 * and add/sub.  'hash' starts at val, which supplies the 2^0 term;
	 * 'n' is stepped through val << 18, 51, 54, 57, 61, 63 in turn.
	 */
	unsigned long n = hash;
	n <<= 18;
	hash -= n;	/* - val * 2^18 */
	n <<= 33;
	hash -= n;	/* - val * 2^51 */
	n <<= 3;
	hash += n;	/* + val * 2^54 */
	n <<= 3;
	hash -= n;	/* - val * 2^57 */
	n <<= 4;
	hash += n;	/* + val * 2^61 */
	n <<= 2;
	hash += n;	/* + val * 2^63 */
#else
	/* On some cpus multiply is faster, on others gcc will do shifts */
	hash *= GOLDEN_RATIO_PRIME;
#endif

	return hash;
}
59
60static inline unsigned long hash_long(unsigned long val, unsigned int bits)
61{
62	/* High bits are more random, so use them. */
63	return __hash_long(val) >> (BITS_PER_LONG - bits);
64}
65
66static inline uint64_t __hash_u64(uint64_t val)
67{
68	return val * GR_PRIME_64;
69}
70
/*
 * Hash a pointer value into a 'bits'-wide table index.  The pointer is
 * used purely as an integer key and is never dereferenced.
 */
static inline unsigned long hash_ptr(void *ptr, unsigned int bits)
{
	const uintptr_t key = (uintptr_t) ptr;

	return hash_long(key, bits);
}
75
76/*
77 * Bob Jenkins jhash
78 */
79
80#define JHASH_INITVAL	GOLDEN_RATIO_PRIME
81
82static inline uint32_t rol32(uint32_t word, uint32_t shift)
83{
84	return (word << shift) | (word >> (32 - shift));
85}
86
/*
 * __jhash_mix -- mix 3 32-bit values reversibly.
 *
 * One mixing round of Bob Jenkins' lookup3 hash: six add/xor/rotate
 * steps that spread the bits of a, b and c into each other.  Used by
 * jhash() between 12-byte input blocks.
 */
#define __jhash_mix(a, b, c)			\
{						\
	a -= c;  a ^= rol32(c, 4);  c += b;	\
	b -= a;  b ^= rol32(a, 6);  a += c;	\
	c -= b;  c ^= rol32(b, 8);  b += a;	\
	a -= c;  a ^= rol32(c, 16); c += b;	\
	b -= a;  b ^= rol32(a, 19); a += c;	\
	c -= b;  c ^= rol32(b, 4);  b += a;	\
}
97
/*
 * __jhash_final - final mixing of 3 32-bit values (a,b,c) into c.
 *
 * Avalanche rounds from Bob Jenkins' lookup3 hash that fold the
 * remaining state of a and b into c, which jhash() returns.
 */
#define __jhash_final(a, b, c)			\
{						\
	c ^= b; c -= rol32(b, 14);		\
	a ^= c; a -= rol32(c, 11);		\
	b ^= a; b -= rol32(a, 25);		\
	c ^= b; c -= rol32(b, 16);		\
	a ^= c; a -= rol32(c, 4);		\
	b ^= a; b -= rol32(a, 14);		\
	c ^= b; c -= rol32(b, 24);		\
}
109
110static inline uint32_t jhash(const void *key, uint32_t length, uint32_t initval)
111{
112	const uint8_t *k = key;
113	uint32_t a, b, c;
114
115	/* Set up the internal state */
116	a = b = c = JHASH_INITVAL + length + initval;
117
118	/* All but the last block: affect some 32 bits of (a,b,c) */
119	while (length > 12) {
120		a += *k;
121		b += *(k + 4);
122		c += *(k + 8);
123		__jhash_mix(a, b, c);
124		length -= 12;
125		k += 12;
126	}
127
128	/* Last block: affect all 32 bits of (c) */
129	/* All the case statements fall through */
130	switch (length) {
131	case 12: c += (uint32_t) k[11] << 24;
132	case 11: c += (uint32_t) k[10] << 16;
133	case 10: c += (uint32_t) k[9] << 8;
134	case 9:  c += k[8];
135	case 8:  b += (uint32_t) k[7] << 24;
136	case 7:  b += (uint32_t) k[6] << 16;
137	case 6:  b += (uint32_t) k[5] << 8;
138	case 5:  b += k[4];
139	case 4:  a += (uint32_t) k[3] << 24;
140	case 3:  a += (uint32_t) k[2] << 16;
141	case 2:  a += (uint32_t) k[1] << 8;
142	case 1:  a += k[0];
143		 __jhash_final(a, b, c);
144	case 0: /* Nothing left to add */
145		break;
146	}
147
148	return c;
149}
150
151#endif /* _LINUX_HASH_H */
152