ipvs: convert lblc scheduler to rcu

The schedule method now needs the _rcu list-traversal
primitive for svc->destinations. The read_lock for sched_lock is
removed. Use a dead flag to prevent new entries from being created
while the scheduler is being reclaimed. Use hlist for the hash table.

Signed-off-by: Julian Anastasov <ja@ssi.bg>
Signed-off-by: Simon Horman <horms@verge.net.au>
commit c2a4ffb70e
parent 8f3d0023b9
Author:    Julian Anastasov <ja@ssi.bg>
Date:      2013-03-22 11:46:40 +02:00
Committer: Pablo Neira Ayuso

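The change follows the usual RCU hash-table pattern: lookups walk an RCU-protected hlist with no lock, inserts still run under the writer lock and bail out once the table is marked dead, and teardown sets the dead flag before freeing entries with kfree_rcu(). Below is a condensed, self-contained sketch of that pattern; the demo_* names and the plain spinlock are illustrative only (the real code uses svc->sched_lock and the ip_vs_lblc_* helpers shown in the diff).

/* Sketch only: illustrative demo_* names, not the ip_vs_lblc.c symbols. */
#include <linux/types.h>
#include <linux/rculist.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#define DEMO_TAB_SIZE 16

struct demo_entry {
	struct hlist_node	list;
	u32			key;
	struct rcu_head		rcu_head;
};

struct demo_table {
	struct hlist_head	bucket[DEMO_TAB_SIZE];
	spinlock_t		lock;	/* stands in for svc->sched_lock */
	bool			dead;	/* no new entries once set */
};

/* Reader side: caller holds rcu_read_lock(), no table lock is taken. */
static struct demo_entry *demo_lookup(struct demo_table *tbl, u32 key)
{
	struct demo_entry *en;

	hlist_for_each_entry_rcu(en, &tbl->bucket[key % DEMO_TAB_SIZE], list)
		if (en->key == key)
			return en;
	return NULL;
}

/* Writer side: serialize on the lock, refuse to hash into a dying table. */
static bool demo_insert(struct demo_table *tbl, struct demo_entry *en)
{
	bool ok;

	spin_lock_bh(&tbl->lock);
	ok = !tbl->dead;
	if (ok)
		hlist_add_head_rcu(&en->list,
				   &tbl->bucket[en->key % DEMO_TAB_SIZE]);
	spin_unlock_bh(&tbl->lock);
	return ok;
}

/* Teardown: mark the table dead, unhash everything, free after a grace period. */
static void demo_flush(struct demo_table *tbl)
{
	struct demo_entry *en;
	struct hlist_node *next;
	int i;

	spin_lock_bh(&tbl->lock);
	tbl->dead = true;
	for (i = 0; i < DEMO_TAB_SIZE; i++) {
		hlist_for_each_entry_safe(en, next, &tbl->bucket[i], list) {
			hlist_del_rcu(&en->list);
			kfree_rcu(en, rcu_head);	/* readers may still see en */
		}
	}
	spin_unlock_bh(&tbl->lock);
}

In the patched scheduler the reader side is ip_vs_lblc_get(), the guarded insert is the !tbl->dead check around ip_vs_lblc_new(), and the teardown is ip_vs_lblc_flush() plus kfree_rcu() of the table itself; the packet path that calls the scheduler already runs inside an RCU read-side critical section, which is why no explicit rcu_read_lock() appears in the schedule path below.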

@@ -90,11 +90,12 @@
  * IP address and its destination server
  */
 struct ip_vs_lblc_entry {
-	struct list_head	list;
+	struct hlist_node	list;
 	int			af;		/* address family */
 	union nf_inet_addr	addr;		/* destination IP address */
-	struct ip_vs_dest	*dest;		/* real server (cache) */
+	struct ip_vs_dest __rcu	*dest;		/* real server (cache) */
 	unsigned long		lastuse;	/* last used time */
+	struct rcu_head		rcu_head;
 };

@@ -102,12 +103,14 @@ struct ip_vs_lblc_entry {
  * IPVS lblc hash table
  */
 struct ip_vs_lblc_table {
-	struct list_head	bucket[IP_VS_LBLC_TAB_SIZE];  /* hash bucket */
+	struct rcu_head		rcu_head;
+	struct hlist_head __rcu	bucket[IP_VS_LBLC_TAB_SIZE];  /* hash bucket */
+	struct timer_list	periodic_timer; /* collect stale entries */
 	atomic_t		entries;	/* number of entries */
 	int			max_size;	/* maximum size of entries */
-	struct timer_list	periodic_timer; /* collect stale entries */
 	int			rover;		/* rover for expire check */
 	int			counter;	/* counter for no expire */
+	bool			dead;
 };

@@ -129,13 +132,16 @@ static ctl_table vs_vars_table[] = {
 static inline void ip_vs_lblc_free(struct ip_vs_lblc_entry *en)
 {
-	list_del(&en->list);
+	struct ip_vs_dest *dest;
+
+	hlist_del_rcu(&en->list);
 	/*
 	 * We don't kfree dest because it is referred either by its service
 	 * or the trash dest list.
 	 */
-	atomic_dec(&en->dest->refcnt);
-	kfree(en);
+	dest = rcu_dereference_protected(en->dest, 1);
+	ip_vs_dest_put(dest);
+	kfree_rcu(en, rcu_head);
 }

@@ -165,15 +171,12 @@ ip_vs_lblc_hash(struct ip_vs_lblc_table *tbl, struct ip_vs_lblc_entry *en)
 {
 	unsigned int hash = ip_vs_lblc_hashkey(en->af, &en->addr);

-	list_add(&en->list, &tbl->bucket[hash]);
+	hlist_add_head_rcu(&en->list, &tbl->bucket[hash]);
 	atomic_inc(&tbl->entries);
 }

-/*
- * Get ip_vs_lblc_entry associated with supplied parameters. Called under read
- * lock
- */
+/* Get ip_vs_lblc_entry associated with supplied parameters. */
 static inline struct ip_vs_lblc_entry *
 ip_vs_lblc_get(int af, struct ip_vs_lblc_table *tbl,
 	       const union nf_inet_addr *addr)

@@ -181,7 +184,7 @@ ip_vs_lblc_get(int af, struct ip_vs_lblc_table *tbl,
 	unsigned int hash = ip_vs_lblc_hashkey(af, addr);
 	struct ip_vs_lblc_entry *en;

-	list_for_each_entry(en, &tbl->bucket[hash], list)
+	hlist_for_each_entry_rcu(en, &tbl->bucket[hash], list)
 		if (ip_vs_addr_equal(af, &en->addr, addr))
 			return en;

@@ -209,14 +212,20 @@ ip_vs_lblc_new(struct ip_vs_lblc_table *tbl, const union nf_inet_addr *daddr,
 		ip_vs_addr_copy(dest->af, &en->addr, daddr);
 		en->lastuse = jiffies;

-		atomic_inc(&dest->refcnt);
-		en->dest = dest;
+		ip_vs_dest_hold(dest);
+		RCU_INIT_POINTER(en->dest, dest);

 		ip_vs_lblc_hash(tbl, en);
-	} else if (en->dest != dest) {
-		atomic_dec(&en->dest->refcnt);
-		atomic_inc(&dest->refcnt);
-		en->dest = dest;
+	} else {
+		struct ip_vs_dest *old_dest;
+
+		old_dest = rcu_dereference_protected(en->dest, 1);
+		if (old_dest != dest) {
+			ip_vs_dest_put(old_dest);
+			ip_vs_dest_hold(dest);
+			/* No ordering constraints for refcnt */
+			RCU_INIT_POINTER(en->dest, dest);
+		}
 	}

 	return en;

@@ -226,17 +235,22 @@
 /*
  * Flush all the entries of the specified table.
  */
-static void ip_vs_lblc_flush(struct ip_vs_lblc_table *tbl)
+static void ip_vs_lblc_flush(struct ip_vs_service *svc)
 {
-	struct ip_vs_lblc_entry *en, *nxt;
+	struct ip_vs_lblc_table *tbl = svc->sched_data;
+	struct ip_vs_lblc_entry *en;
+	struct hlist_node *next;
 	int i;

+	write_lock_bh(&svc->sched_lock);
+	tbl->dead = 1;
 	for (i=0; i<IP_VS_LBLC_TAB_SIZE; i++) {
-		list_for_each_entry_safe(en, nxt, &tbl->bucket[i], list) {
+		hlist_for_each_entry_safe(en, next, &tbl->bucket[i], list) {
 			ip_vs_lblc_free(en);
 			atomic_dec(&tbl->entries);
 		}
 	}
+	write_unlock_bh(&svc->sched_lock);
 }

 static int sysctl_lblc_expiration(struct ip_vs_service *svc)

@@ -252,7 +266,8 @@ static int sysctl_lblc_expiration(struct ip_vs_service *svc)
 static inline void ip_vs_lblc_full_check(struct ip_vs_service *svc)
 {
 	struct ip_vs_lblc_table *tbl = svc->sched_data;
-	struct ip_vs_lblc_entry *en, *nxt;
+	struct ip_vs_lblc_entry *en;
+	struct hlist_node *next;
 	unsigned long now = jiffies;
 	int i, j;

@@ -260,7 +275,7 @@ static inline void ip_vs_lblc_full_check(struct ip_vs_service *svc)
 		j = (j + 1) & IP_VS_LBLC_TAB_MASK;

 		write_lock(&svc->sched_lock);
-		list_for_each_entry_safe(en, nxt, &tbl->bucket[j], list) {
+		hlist_for_each_entry_safe(en, next, &tbl->bucket[j], list) {
 			if (time_before(now,
 					en->lastuse +
 					sysctl_lblc_expiration(svc)))

@@ -293,7 +308,8 @@ static void ip_vs_lblc_check_expire(unsigned long data)
 	unsigned long now = jiffies;
 	int goal;
 	int i, j;
-	struct ip_vs_lblc_entry *en, *nxt;
+	struct ip_vs_lblc_entry *en;
+	struct hlist_node *next;

 	if ((tbl->counter % COUNT_FOR_FULL_EXPIRATION) == 0) {
 		/* do full expiration check */

@@ -315,7 +331,7 @@ static void ip_vs_lblc_check_expire(unsigned long data)
 		j = (j + 1) & IP_VS_LBLC_TAB_MASK;

 		write_lock(&svc->sched_lock);
-		list_for_each_entry_safe(en, nxt, &tbl->bucket[j], list) {
+		hlist_for_each_entry_safe(en, next, &tbl->bucket[j], list) {
 			if (time_before(now, en->lastuse + ENTRY_TIMEOUT))
 				continue;

@@ -354,11 +370,12 @@ static int ip_vs_lblc_init_svc(struct ip_vs_service *svc)
 	 * Initialize the hash buckets
 	 */
 	for (i=0; i<IP_VS_LBLC_TAB_SIZE; i++) {
-		INIT_LIST_HEAD(&tbl->bucket[i]);
+		INIT_HLIST_HEAD(&tbl->bucket[i]);
 	}
 	tbl->max_size = IP_VS_LBLC_TAB_SIZE*16;
 	tbl->rover = 0;
 	tbl->counter = 1;
+	tbl->dead = 0;

 	/*
 	 * Hook periodic timer for garbage collection

@@ -379,10 +396,10 @@ static int ip_vs_lblc_done_svc(struct ip_vs_service *svc)
 	del_timer_sync(&tbl->periodic_timer);

 	/* got to clean up table entries here */
-	ip_vs_lblc_flush(tbl);
+	ip_vs_lblc_flush(svc);

 	/* release the table itself */
-	kfree(tbl);
+	kfree_rcu(tbl, rcu_head);
 	IP_VS_DBG(6, "LBLC hash table (memory=%Zdbytes) released\n",
 		  sizeof(*tbl));

@@ -408,7 +425,7 @@ __ip_vs_lblc_schedule(struct ip_vs_service *svc)
 	 * The server with weight=0 is quiesced and will not receive any
 	 * new connection.
 	 */
-	list_for_each_entry(dest, &svc->destinations, n_list) {
+	list_for_each_entry_rcu(dest, &svc->destinations, n_list) {
 		if (dest->flags & IP_VS_DEST_F_OVERLOAD)
 			continue;
 		if (atomic_read(&dest->weight) > 0) {

@@ -423,7 +440,7 @@ __ip_vs_lblc_schedule(struct ip_vs_service *svc)
 	 * Find the destination with the least load.
 	 */
   nextstage:
-	list_for_each_entry_continue(dest, &svc->destinations, n_list) {
+	list_for_each_entry_continue_rcu(dest, &svc->destinations, n_list) {
 		if (dest->flags & IP_VS_DEST_F_OVERLOAD)
 			continue;

@@ -457,7 +474,7 @@ is_overloaded(struct ip_vs_dest *dest, struct ip_vs_service *svc)
 	if (atomic_read(&dest->activeconns) > atomic_read(&dest->weight)) {
 		struct ip_vs_dest *d;

-		list_for_each_entry(d, &svc->destinations, n_list) {
+		list_for_each_entry_rcu(d, &svc->destinations, n_list) {
 			if (atomic_read(&d->activeconns)*2
 			    < atomic_read(&d->weight)) {
 				return 1;

@@ -484,7 +501,6 @@ ip_vs_lblc_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
 	IP_VS_DBG(6, "%s(): Scheduling...\n", __func__);

 	/* First look in our cache */
-	read_lock(&svc->sched_lock);
 	en = ip_vs_lblc_get(svc->af, tbl, &iph.daddr);
 	if (en) {
 		/* We only hold a read lock, but this is atomic */

@@ -499,14 +515,11 @@ ip_vs_lblc_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
 		 * free up entries from the trash at any time.
 		 */

-		if (en->dest->flags & IP_VS_DEST_F_AVAILABLE)
-			dest = en->dest;
+		dest = rcu_dereference(en->dest);
+		if ((dest->flags & IP_VS_DEST_F_AVAILABLE) &&
+		    atomic_read(&dest->weight) > 0 && !is_overloaded(dest, svc))
+			goto out;
 	}
-	read_unlock(&svc->sched_lock);
-
-	/* If the destination has a weight and is not overloaded, use it */
-	if (dest && atomic_read(&dest->weight) > 0 && !is_overloaded(dest, svc))
-		goto out;

 	/* No cache entry or it is invalid, time to schedule */
 	dest = __ip_vs_lblc_schedule(svc);

@@ -517,7 +530,8 @@ ip_vs_lblc_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
 	/* If we fail to create a cache entry, we'll just use the valid dest */
 	write_lock(&svc->sched_lock);
-	ip_vs_lblc_new(tbl, &iph.daddr, dest);
+	if (!tbl->dead)
+		ip_vs_lblc_new(tbl, &iph.daddr, dest);
 	write_unlock(&svc->sched_lock);

 out:
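The cached en->dest pointer gets the same treatment: it becomes an __rcu pointer that readers fetch with rcu_dereference() in the schedule path, while updaters swap it under sched_lock and only move the reference count. A minimal sketch of that update step, with demo_* stand-ins for struct ip_vs_dest and the ip_vs_dest_hold()/ip_vs_dest_put() helpers:

#include <linux/rcupdate.h>

struct demo_dest;				/* stand-in for struct ip_vs_dest */
void demo_dest_hold(struct demo_dest *d);	/* stand-in for ip_vs_dest_hold() */
void demo_dest_put(struct demo_dest *d);	/* stand-in for ip_vs_dest_put() */

struct demo_cache_entry {
	struct demo_dest __rcu	*dest;	/* mirrors the new ip_vs_lblc_entry field */
};

/*
 * Called with the updater lock held, hence the "1" passed to
 * rcu_dereference_protected().  RCU_INIT_POINTER() is enough here because
 * the destination is already published and initialized elsewhere; only the
 * reference count moves, which is what the "No ordering constraints for
 * refcnt" comment in the hunk above refers to.
 */
static void demo_update_dest(struct demo_cache_entry *en, struct demo_dest *dest)
{
	struct demo_dest *old_dest;

	old_dest = rcu_dereference_protected(en->dest, 1);
	if (old_dest != dest) {
		demo_dest_put(old_dest);
		demo_dest_hold(dest);
		RCU_INIT_POINTER(en->dest, dest);
	}
}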