Merge branch 'rhashtables-cleanups'

NeilBrown says:

====================
Assorted rhashtables cleanups.

The following 7 patches are selections from a recent RFC series I posted
that have all received suitable Acks.

The most visible changes are that rhashtable-types.h is now preferred
for inclusion in include/linux/*.h rather than rhashtable.h, and
that the full hash is used - no bits are reserved for a NULLS pointer.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit 66caeeb99d
Author: David S. Miller <davem@davemloft.net>
Date:   2018-06-22 13:43:28 +09:00

28 changed files with 191 additions and 212 deletions
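
As background for the include changes below, here is a minimal sketch (file and symbol names are hypothetical, not from this merge) of the intended split: a header that only embeds the rhashtable structures includes the lightweight rhashtable-types.h, while the .c file that actually calls the hash-table API includes the full rhashtable.h.

/* my_subsys.h -- hypothetical header: only the type definitions are needed */
#include <linux/rhashtable-types.h>

struct my_entry {
	u32			key;
	struct rhash_head	node;	/* chains the entry into a bucket */
};

struct my_subsys {
	struct rhashtable	ht;	/* fully defined by rhashtable-types.h */
};

/* my_subsys.c -- hypothetical implementation file: the inline API lives
 * in the full header, so only .c files need it.
 */
#include <linux/rhashtable.h>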


@@ -12162,7 +12162,9 @@ M:	Herbert Xu <herbert@gondor.apana.org.au>
 L:	netdev@vger.kernel.org
 S:	Maintained
 F:	lib/rhashtable.c
+F:	lib/test_rhashtable.c
 F:	include/linux/rhashtable.h
+F:	include/linux/rhashtable-types.h

 RICOH R5C592 MEMORYSTICK DRIVER
 M:	Maxim Levitsky <maximlevitsky@gmail.com>


@@ -46,6 +46,7 @@
 #include <linux/spinlock.h>
 #include <linux/timer.h>
 #include <linux/vmalloc.h>
+#include <linux/rhashtable.h>
 #include <linux/etherdevice.h>
 #include <linux/net_tstamp.h>
 #include <linux/ptp_clock_kernel.h>


@@ -4,7 +4,7 @@
 #include <linux/spinlock.h>
 #include <linux/uidgid.h>
-#include <linux/rhashtable.h>
+#include <linux/rhashtable-types.h>
 #include <uapi/linux/ipc.h>
 #include <linux/refcount.h>


@@ -9,7 +9,7 @@
 #include <linux/nsproxy.h>
 #include <linux/ns_common.h>
 #include <linux/refcount.h>
-#include <linux/rhashtable.h>
+#include <linux/rhashtable-types.h>

 struct user_namespace;


@@ -2,7 +2,7 @@
 #define __LINUX_MROUTE_BASE_H

 #include <linux/netdevice.h>
-#include <linux/rhashtable.h>
+#include <linux/rhashtable-types.h>
 #include <linux/spinlock.h>
 #include <net/net_namespace.h>
 #include <net/sock.h>


@@ -0,0 +1,137 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Resizable, Scalable, Concurrent Hash Table
+ *
+ * Simple structures that might be needed in include
+ * files.
+ */
+
+#ifndef _LINUX_RHASHTABLE_TYPES_H
+#define _LINUX_RHASHTABLE_TYPES_H
+
+#include <linux/atomic.h>
+#include <linux/compiler.h>
+#include <linux/mutex.h>
+#include <linux/workqueue.h>
+
+struct rhash_head {
+	struct rhash_head __rcu		*next;
+};
+
+struct rhlist_head {
+	struct rhash_head		rhead;
+	struct rhlist_head __rcu	*next;
+};
+
+struct bucket_table;
+
+/**
+ * struct rhashtable_compare_arg - Key for the function rhashtable_compare
+ * @ht: Hash table
+ * @key: Key to compare against
+ */
+struct rhashtable_compare_arg {
+	struct rhashtable *ht;
+	const void *key;
+};
+
+typedef u32 (*rht_hashfn_t)(const void *data, u32 len, u32 seed);
+typedef u32 (*rht_obj_hashfn_t)(const void *data, u32 len, u32 seed);
+typedef int (*rht_obj_cmpfn_t)(struct rhashtable_compare_arg *arg,
+			       const void *obj);
+
+/**
+ * struct rhashtable_params - Hash table construction parameters
+ * @nelem_hint: Hint on number of elements, should be 75% of desired size
+ * @key_len: Length of key
+ * @key_offset: Offset of key in struct to be hashed
+ * @head_offset: Offset of rhash_head in struct to be hashed
+ * @max_size: Maximum size while expanding
+ * @min_size: Minimum size while shrinking
+ * @locks_mul: Number of bucket locks to allocate per cpu (default: 32)
+ * @automatic_shrinking: Enable automatic shrinking of tables
+ * @hashfn: Hash function (default: jhash2 if !(key_len % 4), or jhash)
+ * @obj_hashfn: Function to hash object
+ * @obj_cmpfn: Function to compare key with object
+ */
+struct rhashtable_params {
+	u16			nelem_hint;
+	u16			key_len;
+	u16			key_offset;
+	u16			head_offset;
+	unsigned int		max_size;
+	u16			min_size;
+	bool			automatic_shrinking;
+	u8			locks_mul;
+	rht_hashfn_t		hashfn;
+	rht_obj_hashfn_t	obj_hashfn;
+	rht_obj_cmpfn_t		obj_cmpfn;
+};
+
+/**
+ * struct rhashtable - Hash table handle
+ * @tbl: Bucket table
+ * @key_len: Key length for hashfn
+ * @max_elems: Maximum number of elements in table
+ * @p: Configuration parameters
+ * @rhlist: True if this is an rhltable
+ * @run_work: Deferred worker to expand/shrink asynchronously
+ * @mutex: Mutex to protect current/future table swapping
+ * @lock: Spin lock to protect walker list
+ * @nelems: Number of elements in table
+ */
+struct rhashtable {
+	struct bucket_table __rcu	*tbl;
+	unsigned int			key_len;
+	unsigned int			max_elems;
+	struct rhashtable_params	p;
+	bool				rhlist;
+	struct work_struct		run_work;
+	struct mutex			mutex;
+	spinlock_t			lock;
+	atomic_t			nelems;
+};
+
+/**
+ * struct rhltable - Hash table with duplicate objects in a list
+ * @ht: Underlying rhtable
+ */
+struct rhltable {
+	struct rhashtable ht;
+};
+
+/**
+ * struct rhashtable_walker - Hash table walker
+ * @list: List entry on list of walkers
+ * @tbl: The table that we were walking over
+ */
+struct rhashtable_walker {
+	struct list_head list;
+	struct bucket_table *tbl;
+};
+
+/**
+ * struct rhashtable_iter - Hash table iterator
+ * @ht: Table to iterate through
+ * @p: Current pointer
+ * @list: Current hash list pointer
+ * @walker: Associated rhashtable walker
+ * @slot: Current slot
+ * @skip: Number of entries to skip in slot
+ */
+struct rhashtable_iter {
+	struct rhashtable *ht;
+	struct rhash_head *p;
+	struct rhlist_head *list;
+	struct rhashtable_walker walker;
+	unsigned int slot;
+	unsigned int skip;
+	bool end_of_table;
+};
+
+int rhashtable_init(struct rhashtable *ht,
+		    const struct rhashtable_params *params);
+int rhltable_init(struct rhltable *hlt,
+		  const struct rhashtable_params *params);
+
+#endif /* _LINUX_RHASHTABLE_TYPES_H */
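
As a usage illustration of the parameters documented above (an editor's sketch, not part of the patch; struct and field names are invented), a table keyed by a u32 could be set up like this:

struct my_obj {
	u32			id;
	struct rhash_head	node;
};

static const struct rhashtable_params my_obj_params = {
	.key_len	= sizeof(u32),
	.key_offset	= offsetof(struct my_obj, id),
	.head_offset	= offsetof(struct my_obj, node),
	.automatic_shrinking = true,
	/* note: no .nulls_base -- that field no longer exists */
};

static struct rhashtable my_objs;

static int my_objs_setup(void)
{
	return rhashtable_init(&my_objs, &my_obj_params);
}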


@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /*
  * Resizable, Scalable, Concurrent Hash Table
  *
@@ -17,37 +18,18 @@
 #ifndef _LINUX_RHASHTABLE_H
 #define _LINUX_RHASHTABLE_H

-#include <linux/atomic.h>
-#include <linux/compiler.h>
 #include <linux/err.h>
 #include <linux/errno.h>
 #include <linux/jhash.h>
 #include <linux/list_nulls.h>
 #include <linux/workqueue.h>
-#include <linux/mutex.h>
 #include <linux/rculist.h>
+
+#include <linux/rhashtable-types.h>

 /*
  * The end of the chain is marked with a special nulls marks which has
- * the following format:
- *
- * +-------+-----------------------------------------------------+-+
- * | Base  |                      Hash                           |1|
- * +-------+-----------------------------------------------------+-+
- *
- * Base (4 bits) : Reserved to distinguish between multiple tables.
- *                 Specified via &struct rhashtable_params.nulls_base.
- * Hash (27 bits): Full hash (unmasked) of first element added to bucket
- * 1 (1 bit)     : Nulls marker (always set)
- *
- * The remaining bits of the next pointer remain unused for now.
+ * the least significant bit set.
  */
-#define RHT_BASE_BITS		4
-#define RHT_HASH_BITS		27
-#define RHT_BASE_SHIFT		RHT_HASH_BITS
-
-/* Base bits plus 1 bit for nulls marker */
-#define RHT_HASH_RESERVED_SPACE	(RHT_BASE_BITS + 1)

 /* Maximum chain length before rehash
  *
@@ -64,15 +46,6 @@
  */
 #define RHT_ELASTICITY	16u

-struct rhash_head {
-	struct rhash_head __rcu		*next;
-};
-
-struct rhlist_head {
-	struct rhash_head		rhead;
-	struct rhlist_head __rcu	*next;
-};
-
 /**
  * struct bucket_table - Table of hash buckets
  * @size: Number of hash buckets
@@ -102,132 +75,14 @@ struct bucket_table {
 	struct rhash_head __rcu *buckets[] ____cacheline_aligned_in_smp;
 };

-/**
- * struct rhashtable_compare_arg - Key for the function rhashtable_compare
- * @ht: Hash table
- * @key: Key to compare against
- */
-struct rhashtable_compare_arg {
-	struct rhashtable *ht;
-	const void *key;
-};
-
-typedef u32 (*rht_hashfn_t)(const void *data, u32 len, u32 seed);
-typedef u32 (*rht_obj_hashfn_t)(const void *data, u32 len, u32 seed);
-typedef int (*rht_obj_cmpfn_t)(struct rhashtable_compare_arg *arg,
-			       const void *obj);
-
-struct rhashtable;
-
-/**
- * struct rhashtable_params - Hash table construction parameters
- * @nelem_hint: Hint on number of elements, should be 75% of desired size
- * @key_len: Length of key
- * @key_offset: Offset of key in struct to be hashed
- * @head_offset: Offset of rhash_head in struct to be hashed
- * @max_size: Maximum size while expanding
- * @min_size: Minimum size while shrinking
- * @locks_mul: Number of bucket locks to allocate per cpu (default: 32)
- * @automatic_shrinking: Enable automatic shrinking of tables
- * @nulls_base: Base value to generate nulls marker
- * @hashfn: Hash function (default: jhash2 if !(key_len % 4), or jhash)
- * @obj_hashfn: Function to hash object
- * @obj_cmpfn: Function to compare key with object
- */
-struct rhashtable_params {
-	u16			nelem_hint;
-	u16			key_len;
-	u16			key_offset;
-	u16			head_offset;
-	unsigned int		max_size;
-	u16			min_size;
-	bool			automatic_shrinking;
-	u8			locks_mul;
-	u32			nulls_base;
-	rht_hashfn_t		hashfn;
-	rht_obj_hashfn_t	obj_hashfn;
-	rht_obj_cmpfn_t		obj_cmpfn;
-};
-
-/**
- * struct rhashtable - Hash table handle
- * @tbl: Bucket table
- * @key_len: Key length for hashfn
- * @max_elems: Maximum number of elements in table
- * @p: Configuration parameters
- * @rhlist: True if this is an rhltable
- * @run_work: Deferred worker to expand/shrink asynchronously
- * @mutex: Mutex to protect current/future table swapping
- * @lock: Spin lock to protect walker list
- * @nelems: Number of elements in table
- */
-struct rhashtable {
-	struct bucket_table __rcu	*tbl;
-	unsigned int			key_len;
-	unsigned int			max_elems;
-	struct rhashtable_params	p;
-	bool				rhlist;
-	struct work_struct		run_work;
-	struct mutex			mutex;
-	spinlock_t			lock;
-	atomic_t			nelems;
-};
-
-/**
- * struct rhltable - Hash table with duplicate objects in a list
- * @ht: Underlying rhtable
- */
-struct rhltable {
-	struct rhashtable ht;
-};
-
-/**
- * struct rhashtable_walker - Hash table walker
- * @list: List entry on list of walkers
- * @tbl: The table that we were walking over
- */
-struct rhashtable_walker {
-	struct list_head list;
-	struct bucket_table *tbl;
-};
-
-/**
- * struct rhashtable_iter - Hash table iterator
- * @ht: Table to iterate through
- * @p: Current pointer
- * @list: Current hash list pointer
- * @walker: Associated rhashtable walker
- * @slot: Current slot
- * @skip: Number of entries to skip in slot
- */
-struct rhashtable_iter {
-	struct rhashtable *ht;
-	struct rhash_head *p;
-	struct rhlist_head *list;
-	struct rhashtable_walker walker;
-	unsigned int slot;
-	unsigned int skip;
-	bool end_of_table;
-};
-
-static inline unsigned long rht_marker(const struct rhashtable *ht, u32 hash)
-{
-	return NULLS_MARKER(ht->p.nulls_base + hash);
-}
-
-#define INIT_RHT_NULLS_HEAD(ptr, ht, hash) \
-	((ptr) = (typeof(ptr)) rht_marker(ht, hash))
+#define INIT_RHT_NULLS_HEAD(ptr) \
+	((ptr) = (typeof(ptr)) NULLS_MARKER(0))

 static inline bool rht_is_a_nulls(const struct rhash_head *ptr)
 {
 	return ((unsigned long) ptr & 1);
 }

-static inline unsigned long rht_get_nulls_value(const struct rhash_head *ptr)
-{
-	return ((unsigned long) ptr) >> 1;
-}
-
 static inline void *rht_obj(const struct rhashtable *ht,
 			    const struct rhash_head *he)
 {
@@ -237,7 +92,7 @@ static inline void *rht_obj(const struct rhashtable *ht,
 static inline unsigned int rht_bucket_index(const struct bucket_table *tbl,
 					    unsigned int hash)
 {
-	return (hash >> RHT_HASH_RESERVED_SPACE) & (tbl->size - 1);
+	return hash & (tbl->size - 1);
 }

 static inline unsigned int rht_key_get_hash(struct rhashtable *ht,
@@ -376,11 +231,6 @@ static inline int lockdep_rht_bucket_is_held(const struct bucket_table *tbl,
 }
 #endif /* CONFIG_PROVE_LOCKING */

-int rhashtable_init(struct rhashtable *ht,
-		    const struct rhashtable_params *params);
-int rhltable_init(struct rhltable *hlt,
-		  const struct rhashtable_params *params);
-
 void *rhashtable_insert_slow(struct rhashtable *ht, const void *key,
 			     struct rhash_head *obj);
@@ -745,7 +595,7 @@ static inline void *__rhashtable_insert_fast(
 	lock = rht_bucket_lock(tbl, hash);
 	spin_lock_bh(lock);

-	if (unlikely(rht_dereference_bucket(tbl->future_tbl, tbl, hash))) {
+	if (unlikely(rcu_access_pointer(tbl->future_tbl))) {
 slow_path:
 		spin_unlock_bh(lock);
 		rcu_read_unlock();
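
Context for the INIT_RHT_NULLS_HEAD() and rht_bucket_index() changes above (an editor's note, not part of the diff): NULLS_MARKER() from <linux/list_nulls.h> is defined as (1UL | (((long)value) << 1)), so every empty bucket now ends in the identical marker NULLS_MARKER(0) == 0x1, and no hash bits need to be set aside to distinguish markers between tables. A standalone userspace sketch of the before/after arithmetic:

#include <stdio.h>

#define RHT_HASH_RESERVED_SPACE	(4 + 1)	/* the old 4 base bits + 1 nulls bit */
#define NULLS_MARKER(value)	(1UL | (((long)(value)) << 1))

int main(void)
{
	unsigned int hash = 0xdeadbeef, size = 1024;

	/* old scheme: 5 hash bits were sacrificed to the nulls encoding */
	printf("old index: %u\n", (hash >> RHT_HASH_RESERVED_SPACE) & (size - 1));
	/* new scheme: the full 32-bit hash reaches the bucket mask */
	printf("new index: %u\n", hash & (size - 1));
	/* all empty buckets share one odd-valued marker */
	printf("marker: %#lx\n", NULLS_MARKER(0));	/* prints 0x1 */
	return 0;
}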


@@ -2,7 +2,7 @@
 #ifndef __NET_FRAG_H__
 #define __NET_FRAG_H__

-#include <linux/rhashtable.h>
+#include <linux/rhashtable-types.h>

 struct netns_frags {
 	/* sysctls */


@@ -4,7 +4,7 @@
 #include <linux/in.h>
 #include <linux/in6.h>
 #include <linux/netdevice.h>
-#include <linux/rhashtable.h>
+#include <linux/rhashtable-types.h>
 #include <linux/rcupdate.h>
 #include <linux/netfilter/nf_conntrack_tuple_common.h>
 #include <net/dst.h>


@@ -48,7 +48,7 @@
 #define __sctp_structs_h__

 #include <linux/ktime.h>
-#include <linux/rhashtable.h>
+#include <linux/rhashtable-types.h>
 #include <linux/socket.h>	/* linux/in.h needs this!! */
 #include <linux/in.h>		/* We get struct sockaddr_in. */
 #include <linux/in6.h>		/* We get struct in6_addr     */


@@ -18,7 +18,7 @@
 #include <linux/ipv6.h>
 #include <net/lwtunnel.h>
 #include <linux/seg6.h>
-#include <linux/rhashtable.h>
+#include <linux/rhashtable-types.h>

 static inline void update_csum_diff4(struct sk_buff *skb, __be32 from,
 				     __be32 to)


@@ -22,7 +22,7 @@
 #include <linux/route.h>
 #include <net/seg6.h>
 #include <linux/seg6_hmac.h>
-#include <linux/rhashtable.h>
+#include <linux/rhashtable-types.h>

 #define SEG6_HMAC_MAX_DIGESTSIZE	160
 #define SEG6_HMAC_RING_SIZE	256


@@ -38,6 +38,7 @@
 #include <linux/rwsem.h>
 #include <linux/nsproxy.h>
 #include <linux/ipc_namespace.h>
+#include <linux/rhashtable.h>
 #include <asm/current.h>

 #include <linux/uaccess.h>


@@ -86,6 +86,7 @@
 #include <linux/ipc_namespace.h>
 #include <linux/sched/wake_q.h>
 #include <linux/nospec.h>
+#include <linux/rhashtable.h>

 #include <linux/uaccess.h>
 #include "util.h"


@@ -43,6 +43,7 @@
 #include <linux/nsproxy.h>
 #include <linux/mount.h>
 #include <linux/ipc_namespace.h>
+#include <linux/rhashtable.h>

 #include <linux/uaccess.h>


@@ -63,6 +63,7 @@
 #include <linux/rwsem.h>
 #include <linux/memory.h>
 #include <linux/ipc_namespace.h>
+#include <linux/rhashtable.h>

 #include <asm/unistd.h>


@@ -28,6 +28,7 @@
 #include <linux/rhashtable.h>
 #include <linux/err.h>
 #include <linux/export.h>
+#include <linux/rhashtable.h>

 #define HASH_DEFAULT_SIZE	64UL
 #define HASH_MIN_SIZE		4U
@@ -115,8 +116,7 @@ static void bucket_table_free_rcu(struct rcu_head *head)
 static union nested_table *nested_table_alloc(struct rhashtable *ht,
 					      union nested_table __rcu **prev,
-					      unsigned int shifted,
-					      unsigned int nhash)
+					      bool leaf)
 {
 	union nested_table *ntbl;
 	int i;
@@ -127,10 +127,9 @@ static union nested_table *nested_table_alloc(struct rhashtable *ht,
 	ntbl = kzalloc(PAGE_SIZE, GFP_ATOMIC);

-	if (ntbl && shifted) {
-		for (i = 0; i < PAGE_SIZE / sizeof(ntbl[0].bucket); i++)
-			INIT_RHT_NULLS_HEAD(ntbl[i].bucket, ht,
-					    (i << shifted) | nhash);
+	if (ntbl && leaf) {
+		for (i = 0; i < PAGE_SIZE / sizeof(ntbl[0]); i++)
+			INIT_RHT_NULLS_HEAD(ntbl[i].bucket);
 	}

 	rcu_assign_pointer(*prev, ntbl);
@@ -156,7 +155,7 @@ static struct bucket_table *nested_bucket_table_alloc(struct rhashtable *ht,
 		return NULL;

 	if (!nested_table_alloc(ht, (union nested_table __rcu **)tbl->buckets,
-				0, 0)) {
+				false)) {
 		kfree(tbl);
 		return NULL;
 	}
@@ -206,7 +205,7 @@ static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
 	tbl->hash_rnd = get_random_u32();

 	for (i = 0; i < nbuckets; i++)
-		INIT_RHT_NULLS_HEAD(tbl->buckets[i], ht, i);
+		INIT_RHT_NULLS_HEAD(tbl->buckets[i]);

 	return tbl;
 }
@@ -227,8 +226,7 @@ static struct bucket_table *rhashtable_last_table(struct rhashtable *ht,
 static int rhashtable_rehash_one(struct rhashtable *ht, unsigned int old_hash)
 {
 	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
-	struct bucket_table *new_tbl = rhashtable_last_table(ht,
-		rht_dereference_rcu(old_tbl->future_tbl, ht));
+	struct bucket_table *new_tbl = rhashtable_last_table(ht, old_tbl);
 	struct rhash_head __rcu **pprev = rht_bucket_var(old_tbl, old_hash);
 	int err = -EAGAIN;
 	struct rhash_head *head, *next, *entry;
@@ -298,21 +296,14 @@ static int rhashtable_rehash_attach(struct rhashtable *ht,
 				    struct bucket_table *old_tbl,
 				    struct bucket_table *new_tbl)
 {
-	/* Protect future_tbl using the first bucket lock. */
-	spin_lock_bh(old_tbl->locks);
-
-	/* Did somebody beat us to it? */
-	if (rcu_access_pointer(old_tbl->future_tbl)) {
-		spin_unlock_bh(old_tbl->locks);
-		return -EEXIST;
-	}
-
 	/* Make insertions go into the new, empty table right away. Deletions
 	 * and lookups will be attempted in both tables until we synchronize.
+	 * As cmpxchg() provides strong barriers, we do not need
+	 * rcu_assign_pointer().
 	 */
-	rcu_assign_pointer(old_tbl->future_tbl, new_tbl);
-
-	spin_unlock_bh(old_tbl->locks);
+
+	if (cmpxchg(&old_tbl->future_tbl, NULL, new_tbl) != NULL)
+		return -EEXIST;

 	return 0;
 }
@@ -475,7 +466,7 @@ static int rhashtable_insert_rehash(struct rhashtable *ht,
 fail:
 	/* Do not fail the insert if someone else did a rehash. */
-	if (likely(rcu_dereference_raw(tbl->future_tbl)))
+	if (likely(rcu_access_pointer(tbl->future_tbl)))
 		return 0;

 	/* Schedule async rehash to retry allocation in process context. */
@@ -548,7 +539,7 @@ static struct bucket_table *rhashtable_insert_one(struct rhashtable *ht,
 	if (PTR_ERR(data) != -EAGAIN && PTR_ERR(data) != -ENOENT)
 		return ERR_CAST(data);

-	new_tbl = rcu_dereference(tbl->future_tbl);
+	new_tbl = rht_dereference_rcu(tbl->future_tbl, ht);
 	if (new_tbl)
 		return new_tbl;
@@ -607,7 +598,7 @@ static void *rhashtable_try_insert(struct rhashtable *ht, const void *key,
 			break;

 		spin_unlock_bh(lock);
-		tbl = rcu_dereference(tbl->future_tbl);
+		tbl = rht_dereference_rcu(tbl->future_tbl, ht);
 	}

 	data = rhashtable_lookup_one(ht, tbl, hash, key, obj);
@@ -994,7 +985,6 @@ static u32 rhashtable_jhash2(const void *key, u32 length, u32 seed)
  *	.key_offset = offsetof(struct test_obj, key),
  *	.key_len = sizeof(int),
  *	.hashfn = jhash,
- *	.nulls_base = (1U << RHT_BASE_SHIFT),
  * };
  *
  * Configuration Example 2: Variable length keys
@@ -1028,9 +1018,6 @@ int rhashtable_init(struct rhashtable *ht,
 	    (params->obj_hashfn && !params->obj_cmpfn))
 		return -EINVAL;

-	if (params->nulls_base && params->nulls_base < (1U << RHT_BASE_SHIFT))
-		return -EINVAL;
-
 	memset(ht, 0, sizeof(*ht));
 	mutex_init(&ht->mutex);
 	spin_lock_init(&ht->lock);
@@ -1095,10 +1082,6 @@ int rhltable_init(struct rhltable *hlt, const struct rhashtable_params *params)
 {
 	int err;

-	/* No rhlist NULLs marking for now. */
-	if (params->nulls_base)
-		return -EINVAL;
-
 	err = rhashtable_init(&hlt->ht, params);
 	hlt->ht.rhlist = true;

 	return err;
@@ -1216,25 +1199,18 @@ struct rhash_head __rcu **rht_bucket_nested_insert(struct rhashtable *ht,
 	unsigned int index = hash & ((1 << tbl->nest) - 1);
 	unsigned int size = tbl->size >> tbl->nest;
 	union nested_table *ntbl;
-	unsigned int shifted;
-	unsigned int nhash;

 	ntbl = (union nested_table *)rcu_dereference_raw(tbl->buckets[0]);
 	hash >>= tbl->nest;
-	nhash = index;
-	shifted = tbl->nest;
 	ntbl = nested_table_alloc(ht, &ntbl[index].table,
-				  size <= (1 << shift) ? shifted : 0, nhash);
+				  size <= (1 << shift));

 	while (ntbl && size > (1 << shift)) {
 		index = hash & ((1 << shift) - 1);
 		size >>= shift;
 		hash >>= shift;
-		nhash |= index << shifted;
-		shifted += shift;
 		ntbl = nested_table_alloc(ht, &ntbl[index].table,
-					  size <= (1 << shift) ? shifted : 0,
-					  nhash);
+					  size <= (1 << shift));
 	}

 	if (!ntbl)

@@ -83,7 +83,7 @@ static u32 my_hashfn(const void *data, u32 len, u32 seed)
 {
 	const struct test_obj_rhl *obj = data;

-	return (obj->value.id % 10) << RHT_HASH_RESERVED_SPACE;
+	return (obj->value.id % 10);
 }

 static int my_cmpfn(struct rhashtable_compare_arg *arg, const void *obj)
@@ -99,7 +99,6 @@ static struct rhashtable_params test_rht_params = {
 	.key_offset = offsetof(struct test_obj, value),
 	.key_len = sizeof(struct test_obj_val),
 	.hashfn = jhash,
-	.nulls_base = (3U << RHT_BASE_SHIFT),
 };

 static struct rhashtable_params test_rht_params_dup = {
@@ -296,8 +295,6 @@ static int __init test_rhltable(unsigned int entries)
 	if (!obj_in_table)
 		goto out_free;

-	/* nulls_base not supported in rhlist interface */
-	test_rht_params.nulls_base = 0;
-
 	err = rhltable_init(&rhlt, &test_rht_params);
 	if (WARN_ON(err))
 		goto out_free;
@@ -501,6 +498,8 @@ static unsigned int __init print_ht(struct rhltable *rhlt)
 	unsigned int i, cnt = 0;

 	ht = &rhlt->ht;
+	/* Take the mutex to avoid RCU warning */
+	mutex_lock(&ht->mutex);
 	tbl = rht_dereference(ht->tbl, ht);
 	for (i = 0; i < tbl->size; i++) {
 		struct rhash_head *pos, *next;
@@ -534,6 +533,7 @@ static unsigned int __init print_ht(struct rhltable *rhlt)
 		}
 	}
 	printk(KERN_ERR "\n---- ht: ----%s\n-------------\n", buff);
+	mutex_unlock(&ht->mutex);

 	return cnt;
 }


@@ -45,8 +45,8 @@ static u32 xdp_mem_id_hashfn(const void *data, u32 len, u32 seed)
 	BUILD_BUG_ON(FIELD_SIZEOF(struct xdp_mem_allocator, mem.id)
 		     != sizeof(u32));

-	/* Use cyclic increasing ID as direct hash key, see rht_bucket_index */
-	return key << RHT_HASH_RESERVED_SPACE;
+	/* Use cyclic increasing ID as direct hash key */
+	return key;
 }

 static int xdp_mem_id_cmp(struct rhashtable_compare_arg *arg,
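
This hunk and the my_hashfn() one in test_rhashtable.c are the caller-visible side of dropping RHT_HASH_RESERVED_SPACE: a custom hashfn may now return its value directly instead of pre-shifting it. A quick standalone check (editor's illustration) that cyclic IDs used as direct hash keys land in consecutive buckets, which is what the comment relies on:

#include <stdio.h>

int main(void)
{
	unsigned int size = 8;	/* power-of-two bucket count */
	unsigned int id;

	for (id = 100; id < 105; id++)
		printf("id %u -> bucket %u\n", id, id & (size - 1));
	return 0;
}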


@@ -20,6 +20,7 @@
 #include <linux/skbuff.h>
 #include <linux/rtnetlink.h>
 #include <linux/slab.h>
+#include <linux/rhashtable.h>

 #include <net/sock.h>
 #include <net/inet_frag.h>


@@ -60,6 +60,7 @@
 #include <linux/netfilter_ipv4.h>
 #include <linux/compat.h>
 #include <linux/export.h>
+#include <linux/rhashtable.h>
 #include <net/ip_tunnels.h>
 #include <net/checksum.h>
 #include <net/netlink.h>


@@ -2,6 +2,7 @@
  * Common logic shared by IPv4 [ipmr] and IPv6 [ip6mr] implementation
  */

+#include <linux/rhashtable.h>
 #include <linux/mroute_base.h>

 /* Sets everything common except 'dev', since that is done under locking */


@@ -32,6 +32,7 @@
 #include <linux/seq_file.h>
 #include <linux/init.h>
 #include <linux/compat.h>
+#include <linux/rhashtable.h>
 #include <net/protocol.h>
 #include <linux/skbuff.h>
 #include <net/raw.h>


@@ -17,6 +17,7 @@
 #include <linux/net.h>
 #include <linux/in6.h>
 #include <linux/slab.h>
+#include <linux/rhashtable.h>

 #include <net/ipv6.h>
 #include <net/protocol.h>


@@ -22,6 +22,7 @@
 #include <linux/icmpv6.h>
 #include <linux/mroute6.h>
 #include <linux/slab.h>
+#include <linux/rhashtable.h>

 #include <linux/netfilter.h>
 #include <linux/netfilter_ipv6.h>


@@ -14,6 +14,7 @@
 #include <linux/skbuff.h>
 #include <linux/netlink.h>
 #include <linux/vmalloc.h>
+#include <linux/rhashtable.h>
 #include <linux/netfilter.h>
 #include <linux/netfilter/nfnetlink.h>
 #include <linux/netfilter/nf_tables.h>


@@ -56,6 +56,7 @@
 #include <net/sctp/sm.h>
 #include <net/sctp/checksum.h>
 #include <net/net_namespace.h>
+#include <linux/rhashtable.h>

 /* Forward declarations for internal helpers. */
 static int sctp_rcv_ootb(struct sk_buff *);


@@ -66,6 +66,7 @@
 #include <linux/slab.h>
 #include <linux/file.h>
 #include <linux/compat.h>
+#include <linux/rhashtable.h>

 #include <net/ip.h>
 #include <net/icmp.h>