| /* SPDX-License-Identifier: GPL-2.0 */ |
| /* |
| * Original code taken from 'linux/include/linux/hash{,table}.h' |
| */ |
| #ifndef __EROFS_HASHTABLE_H |
| #define __EROFS_HASHTABLE_H |
| |
| #ifdef __cplusplus |
| extern "C" |
| { |
| #endif |
| |
| /* |
| * Fast hashing routine for ints, longs and pointers. |
| * (C) 2002 Nadia Yvette Chambers, IBM |
| */ |
| |
| /* |
| * Statically sized hash table implementation |
| * (C) 2012 Sasha Levin <levinsasha928@gmail.com> |
| */ |
| |
| #include "defs.h" |
| |
| /* |
 * The "GOLDEN_RATIO_PRIME" is used in fs/btrfs/btrfs_inode.h and
| * fs/inode.c. It's not actually prime any more (the previous primes |
| * were actively bad for hashing), but the name remains. |
| */ |
| #if BITS_PER_LONG == 32 |
| #define GOLDEN_RATIO_PRIME GOLDEN_RATIO_32 |
| #define hash_long(val, bits) hash_32(val, bits) |
| #elif BITS_PER_LONG == 64 |
| #define hash_long(val, bits) hash_64(val, bits) |
| #define GOLDEN_RATIO_PRIME GOLDEN_RATIO_64 |
| #else |
| #error Wordsize not 32 or 64 |
| #endif |
| |
| /* |
| * This hash multiplies the input by a large odd number and takes the |
| * high bits. Since multiplication propagates changes to the most |
| * significant end only, it is essential that the high bits of the |
| * product be used for the hash value. |
| * |
| * Chuck Lever verified the effectiveness of this technique: |
| * http://www.citi.umich.edu/techreports/reports/citi-tr-00-1.pdf |
| * |
| * Although a random odd number will do, it turns out that the golden |
| * ratio phi = (sqrt(5)-1)/2, or its negative, has particularly nice |
| * properties. (See Knuth vol 3, section 6.4, exercise 9.) |
| * |
| * These are the negative, (1 - phi) = phi**2 = (3 - sqrt(5))/2, |
| * which is very slightly easier to multiply by and makes no |
| * difference to the hash distribution. |
| */ |
| #define GOLDEN_RATIO_32 0x61C88647 |
| #define GOLDEN_RATIO_64 0x61C8864680B583EBull |
| |
/*
 * Head of a hash-list: stores only a pointer to the first node, so a
 * bucket costs one pointer instead of the two a full list_head needs.
 */
struct hlist_head {
	struct hlist_node *first;
};

/*
 * Hash-list node.  @pprev points at the previous element's 'next' field
 * (or at the head's 'first' field for the first node), which lets a node
 * unlink itself without knowing its list head.
 */
struct hlist_node {
	struct hlist_node *next, **pprev;
};
| |
| /* |
| * Architectures might want to move the poison pointer offset |
| * into some well-recognized area such as 0xdead000000000000, |
| * that is also not mappable by user-space exploits: |
| */ |
| #ifdef CONFIG_ILLEGAL_POINTER_VALUE |
| # define POISON_POINTER_DELTA _AC(CONFIG_ILLEGAL_POINTER_VALUE, UL) |
| #else |
| # define POISON_POINTER_DELTA 0 |
| #endif |
| |
| /* |
| * These are non-NULL pointers that will result in page faults |
| * under normal circumstances, used to verify that nobody uses |
| * non-initialized list entries. |
| */ |
| #define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA) |
| #define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA) |
| |
| /* |
| * Double linked lists with a single pointer list head. |
| * Mostly useful for hash tables where the two pointer list head is |
| * too wasteful. |
| * You lose the ability to access the tail in O(1). |
| */ |
| |
| #define HLIST_HEAD_INIT { .first = NULL } |
| #define HLIST_HEAD(name) struct hlist_head name = { .first = NULL } |
| #define INIT_HLIST_HEAD(ptr) ((ptr)->first = NULL) |
| static inline void INIT_HLIST_NODE(struct hlist_node *h) |
| { |
| h->next = NULL; |
| h->pprev = NULL; |
| } |
| |
| static inline int hlist_unhashed(const struct hlist_node *h) |
| { |
| return !h->pprev; |
| } |
| |
| static inline int hlist_empty(const struct hlist_head *h) |
| { |
| return !h->first; |
| } |
| |
| static inline void __hlist_del(struct hlist_node *n) |
| { |
| struct hlist_node *next = n->next; |
| struct hlist_node **pprev = n->pprev; |
| |
| *pprev = next; |
| if (next) |
| next->pprev = pprev; |
| } |
| |
| static inline void hlist_del(struct hlist_node *n) |
| { |
| __hlist_del(n); |
| n->next = LIST_POISON1; |
| n->pprev = LIST_POISON2; |
| } |
| |
/*
 * Remove @n from its list (if it is on one) and reinitialize it so it
 * can be reused; a no-op for an already-unhashed node.
 */
static inline void hlist_del_init(struct hlist_node *n)
{
	if (hlist_unhashed(n))
		return;

	__hlist_del(n);
	INIT_HLIST_NODE(n);
}
| |
| static inline void hlist_add_head(struct hlist_node *n, struct hlist_head *h) |
| { |
| struct hlist_node *first = h->first; |
| |
| n->next = first; |
| if (first) |
| first->pprev = &n->next; |
| h->first = n; |
| n->pprev = &h->first; |
| } |
| |
| /* next must be != NULL */ |
| static inline void hlist_add_before(struct hlist_node *n, |
| struct hlist_node *next) |
| { |
| n->pprev = next->pprev; |
| n->next = next; |
| next->pprev = &n->next; |
| *(n->pprev) = n; |
| } |
| |
| static inline void hlist_add_behind(struct hlist_node *n, |
| struct hlist_node *prev) |
| { |
| n->next = prev->next; |
| prev->next = n; |
| n->pprev = &prev->next; |
| |
| if (n->next) |
| n->next->pprev = &n->next; |
| } |
| |
| /* after that we'll appear to be on some hlist and hlist_del will work */ |
/*
 * Make @n look hashed without putting it on any real list: its pprev is
 * pointed at its own next field, so hlist_unhashed() returns false and a
 * later hlist_del() degenerates into harmless self-updates.
 */
static inline void hlist_add_fake(struct hlist_node *n)
{
	n->pprev = &n->next;
}
| |
| /* |
| * Move a list from one list head to another. Fixup the pprev |
| * reference of the first entry if it exists. |
| */ |
| static inline void hlist_move_list(struct hlist_head *old, |
| struct hlist_head *new) |
| { |
| new->first = old->first; |
| if (new->first) |
| new->first->pprev = &new->first; |
| old->first = NULL; |
| } |
| |
| #define hlist_entry(ptr, type, member) container_of(ptr, type, member) |
| |
| #define hlist_for_each(pos, head) \ |
| for (pos = (head)->first; pos; pos = pos->next) |
| |
| #define hlist_for_each_safe(pos, n, head) \ |
| for (pos = (head)->first; pos && ({ n = pos->next; 1; }); \ |
| pos = n) |
| |
| #define hlist_entry_safe(ptr, type, member) \ |
| ({ typeof(ptr) ____ptr = (ptr); \ |
| ____ptr ? hlist_entry(____ptr, type, member) : NULL; \ |
| }) |
| |
| /** |
| * hlist_for_each_entry - iterate over list of given type |
| * @pos:the type * to use as a loop cursor. |
| * @head:the head for your list. |
| * @member:the name of the hlist_node within the struct. |
| */ |
| #define hlist_for_each_entry(pos, head, member) \ |
| for (pos = hlist_entry_safe((head)->first, typeof(*(pos)), member);\ |
| pos; \ |
| pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member)) |
| |
| /** |
| * hlist_for_each_entry_continue |
| * iterate over a hlist continuing after current point |
| * @pos:the type * to use as a loop cursor. |
| * @member:the name of the hlist_node within the struct. |
| */ |
| #define hlist_for_each_entry_continue(pos, member) \ |
| for (pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member);\ |
| pos; \ |
| pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member)) |
| |
| /** |
| * hlist_for_each_entry_from |
| * iterate over a hlist continuing from current point |
| * @pos: the type * to use as a loop cursor. |
| * @member: the name of the hlist_node within the struct. |
| */ |
| #define hlist_for_each_entry_from(pos, member) \ |
| for (; pos; \ |
| pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member)) |
| |
| /** |
| * hlist_for_each_entry_safe |
| * iterate over list of given type safe against removal of list entry |
| * @pos:the type * to use as a loop cursor. |
| * @n:another &struct hlist_node to use as temporary storage |
| * @head:the head for your list. |
| * @member:the name of the hlist_node within the struct. |
| */ |
| #define hlist_for_each_entry_safe(pos, n, head, member) \ |
| for (pos = hlist_entry_safe((head)->first, typeof(*pos), member);\ |
| pos && ({ n = pos->member.next; 1; }); \ |
| pos = hlist_entry_safe(n, typeof(*pos), member)) |
| |
| static inline u32 __hash_32(u32 val) |
| { |
| return val * GOLDEN_RATIO_32; |
| } |
| |
| static inline u32 hash_32(u32 val, unsigned int bits) |
| { |
| /* High bits are more random, so use them. */ |
| return __hash_32(val) >> (32 - bits); |
| } |
| |
| static inline u32 hash_64(u64 val, unsigned int bits) |
| { |
| #if BITS_PER_LONG == 64 |
| /* 64x64-bit multiply is efficient on all 64-bit processors */ |
| return val * GOLDEN_RATIO_64 >> (64 - bits); |
| #else |
| /* Hash 64 bits using only 32x32-bit multiply. */ |
| return hash_32((u32)val ^ __hash_32(val >> 32), bits); |
| #endif |
| } |
| |
| #define DEFINE_HASHTABLE(name, bits) \ |
| struct hlist_head name[1 << (bits)] = \ |
| { [0 ... ((1 << (bits)) - 1)] = HLIST_HEAD_INIT } |
| |
| #define DECLARE_HASHTABLE(name, bits) \ |
| struct hlist_head name[1 << (bits)] |
| |
| #define HASH_SIZE(name) (ARRAY_SIZE(name)) |
| #define HASH_BITS(name) ilog2(HASH_SIZE(name)) |
| |
| /* Use hash_32 when possible to allow for fast 32bit hashing in 64bit kernels*/ |
| #define hash_min(val, bits) \ |
| (sizeof(val) <= 4 ? hash_32(val, bits) : hash_long(val, bits)) |
| |
| static inline void __hash_init(struct hlist_head *ht, unsigned int sz) |
| { |
| unsigned int i; |
| |
| for (i = 0; i < sz; i++) |
| INIT_HLIST_HEAD(&ht[i]); |
| } |
| |
| /** |
| * hash_init - initialize a hash table |
| * @hashtable: hashtable to be initialized |
| * |
| * Calculates the size of the hashtable from the given parameter, otherwise |
| * same as hash_init_size. |
| * |
| * This has to be a macro since HASH_BITS() will not work on pointers since |
| * it calculates the size during preprocessing. |
| */ |
| #define hash_init(hashtable) __hash_init(hashtable, HASH_SIZE(hashtable)) |
| |
| /** |
| * hash_add - add an object to a hashtable |
| * @hashtable: hashtable to add to |
| * @node: the &struct hlist_node of the object to be added |
| * @key: the key of the object to be added |
| */ |
| #define hash_add(hashtable, node, key) \ |
| hlist_add_head(node, &hashtable[hash_min(key, HASH_BITS(hashtable))]) |
| |
| /** |
| * hash_hashed - check whether an object is in any hashtable |
| * @node: the &struct hlist_node of the object to be checked |
| */ |
/*
 * Report whether @node is currently linked into some hashtable bucket,
 * i.e. its pprev back-pointer is set.
 */
static inline bool hash_hashed(struct hlist_node *node)
{
	return !hlist_unhashed(node);
}
| |
| static inline bool __hash_empty(struct hlist_head *ht, unsigned int sz) |
| { |
| unsigned int i; |
| |
| for (i = 0; i < sz; i++) |
| if (!hlist_empty(&ht[i])) |
| return false; |
| |
| return true; |
| } |
| |
| /** |
| * hash_empty - check whether a hashtable is empty |
| * @hashtable: hashtable to check |
| * |
| * This has to be a macro since HASH_BITS() will not work on pointers since |
| * it calculates the size during preprocessing. |
| */ |
| #define hash_empty(hashtable) __hash_empty(hashtable, HASH_SIZE(hashtable)) |
| |
| /** |
| * hash_del - remove an object from a hashtable |
| * @node: &struct hlist_node of the object to remove |
| */ |
/*
 * Unlink @node from its bucket and reinitialize it (via hlist_del_init),
 * so the node is safe to re-add and hash_hashed() reports it off-table.
 */
static inline void hash_del(struct hlist_node *node)
{
	hlist_del_init(node);
}
| |
| /** |
| * hash_for_each - iterate over a hashtable |
| * @name: hashtable to iterate |
| * @bkt: integer to use as bucket loop cursor |
| * @obj: the type * to use as a loop cursor for each entry |
| * @member: the name of the hlist_node within the struct |
| */ |
| #define hash_for_each(name, bkt, obj, member) \ |
| for ((bkt) = 0, obj = NULL; obj == NULL && (bkt) < HASH_SIZE(name);\ |
| (bkt)++)\ |
| hlist_for_each_entry(obj, &name[bkt], member) |
| |
| /** |
| * hash_for_each_safe - iterate over a hashtable safe against removal of |
| * hash entry |
| * @name: hashtable to iterate |
| * @bkt: integer to use as bucket loop cursor |
| * @tmp: a &struct used for temporary storage |
| * @obj: the type * to use as a loop cursor for each entry |
| * @member: the name of the hlist_node within the struct |
| */ |
| #define hash_for_each_safe(name, bkt, tmp, obj, member) \ |
| for ((bkt) = 0, obj = NULL; obj == NULL && (bkt) < HASH_SIZE(name);\ |
| (bkt)++)\ |
| hlist_for_each_entry_safe(obj, tmp, &name[bkt], member) |
| |
| /** |
| * hash_for_each_possible - iterate over all possible objects hashing to the |
| * same bucket |
| * @name: hashtable to iterate |
| * @obj: the type * to use as a loop cursor for each entry |
| * @member: the name of the hlist_node within the struct |
| * @key: the key of the objects to iterate over |
| */ |
| #define hash_for_each_possible(name, obj, member, key) \ |
| hlist_for_each_entry(obj, &name[hash_min(key, HASH_BITS(name))], member) |
| |
| #ifdef __cplusplus |
| } |
| #endif |
| |
| #endif |