Merge git://git.kernel.org/pub/scm/linux/kernel/git/pablo/nf
Pablo Neira Ayuso says:

====================
Netfilter/IPVS fixes for net

The following patchset contains Netfilter fixes for net:

1) Fix memleak reported by syzkaller when registering IPVS hooks,
   patch from Julian Anastasov.

2) Fix memory leak in start_sync_thread, also from Julian.

3) Fix conntrack deletion via ctnetlink, from Felix Kaechele.

4) Fix reject for ICMP due to incorrect checksum handling, from He Zhe.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit 7c3d310d8f
@@ -808,11 +808,12 @@ struct ipvs_master_sync_state {
 	struct ip_vs_sync_buff	*sync_buff;
 	unsigned long		sync_queue_len;
 	unsigned int		sync_queue_delay;
-	struct task_struct	*master_thread;
 	struct delayed_work	master_wakeup_work;
 	struct netns_ipvs	*ipvs;
 };
 
+struct ip_vs_sync_thread_data;
+
 /* How much time to keep dests in trash */
 #define IP_VS_DEST_TRASH_PERIOD	(120 * HZ)
 
@@ -943,7 +944,8 @@ struct netns_ipvs {
 	spinlock_t		sync_lock;
 	struct ipvs_master_sync_state *ms;
 	spinlock_t		sync_buff_lock;
-	struct task_struct	**backup_threads;
+	struct ip_vs_sync_thread_data *master_tinfo;
+	struct ip_vs_sync_thread_data *backup_tinfo;
 	int			threads_mask;
 	volatile int		sync_state;
 	struct mutex		sync_mutex;
@@ -2245,7 +2245,6 @@ static const struct nf_hook_ops ip_vs_ops[] = {
 static int __net_init __ip_vs_init(struct net *net)
 {
 	struct netns_ipvs *ipvs;
-	int ret;
 
 	ipvs = net_generic(net, ip_vs_net_id);
 	if (ipvs == NULL)
@@ -2277,17 +2276,11 @@ static int __net_init __ip_vs_init(struct net *net)
 	if (ip_vs_sync_net_init(ipvs) < 0)
 		goto sync_fail;
 
-	ret = nf_register_net_hooks(net, ip_vs_ops, ARRAY_SIZE(ip_vs_ops));
-	if (ret < 0)
-		goto hook_fail;
-
 	return 0;
 	/*
 	 * Error handling
 	 */
 
-hook_fail:
-	ip_vs_sync_net_cleanup(ipvs);
 sync_fail:
 	ip_vs_conn_net_cleanup(ipvs);
 conn_fail:
@@ -2317,6 +2310,19 @@ static void __net_exit __ip_vs_cleanup(struct net *net)
 	net->ipvs = NULL;
 }
 
+static int __net_init __ip_vs_dev_init(struct net *net)
+{
+	int ret;
+
+	ret = nf_register_net_hooks(net, ip_vs_ops, ARRAY_SIZE(ip_vs_ops));
+	if (ret < 0)
+		goto hook_fail;
+	return 0;
+
+hook_fail:
+	return ret;
+}
+
 static void __net_exit __ip_vs_dev_cleanup(struct net *net)
 {
 	struct netns_ipvs *ipvs = net_ipvs(net);
@@ -2336,6 +2342,7 @@ static struct pernet_operations ipvs_core_ops = {
 };
 
 static struct pernet_operations ipvs_core_dev_ops = {
+	.init = __ip_vs_dev_init,
 	.exit = __ip_vs_dev_cleanup,
 };
 
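The hunks above move nf_register_net_hooks() out of the core per-netns init, which has more failure points after registration, and tie it to the device pernet_operations (.init/.exit), so the hooks are always torn down by __ip_vs_dev_cleanup() and cannot leak. A minimal sketch of that registration pattern, with made-up demo_* names rather than the IPVS ones:

	#include <linux/module.h>
	#include <linux/skbuff.h>
	#include <linux/netfilter.h>
	#include <linux/netfilter_ipv4.h>
	#include <net/net_namespace.h>

	static unsigned int demo_hook(void *priv, struct sk_buff *skb,
				      const struct nf_hook_state *state)
	{
		return NF_ACCEPT;	/* just pass traffic through */
	}

	static const struct nf_hook_ops demo_ops[] = {
		{
			.hook		= demo_hook,
			.pf		= NFPROTO_IPV4,
			.hooknum	= NF_INET_LOCAL_IN,
			.priority	= NF_IP_PRI_FILTER,
		},
	};

	/* Register/unregister the hooks per network namespace; .exit always
	 * undoes what .init did, so there is no leak on partial setup.
	 */
	static int __net_init demo_net_init(struct net *net)
	{
		return nf_register_net_hooks(net, demo_ops, ARRAY_SIZE(demo_ops));
	}

	static void __net_exit demo_net_exit(struct net *net)
	{
		nf_unregister_net_hooks(net, demo_ops, ARRAY_SIZE(demo_ops));
	}

	static struct pernet_operations demo_net_ops = {
		.init = demo_net_init,
		.exit = demo_net_exit,
	};

	static int __init demo_init(void)
	{
		return register_pernet_device(&demo_net_ops);
	}

	static void __exit demo_exit(void)
	{
		unregister_pernet_device(&demo_net_ops);
	}

	module_init(demo_init);
	module_exit(demo_exit);
	MODULE_LICENSE("GPL");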
@@ -2396,9 +2396,7 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
 			cfg.syncid = dm->syncid;
 			ret = start_sync_thread(ipvs, &cfg, dm->state);
 		} else {
-			mutex_lock(&ipvs->sync_mutex);
 			ret = stop_sync_thread(ipvs, dm->state);
-			mutex_unlock(&ipvs->sync_mutex);
 		}
 		goto out_dec;
 	}
@@ -3515,10 +3513,8 @@ static int ip_vs_genl_del_daemon(struct netns_ipvs *ipvs, struct nlattr **attrs)
 	if (!attrs[IPVS_DAEMON_ATTR_STATE])
 		return -EINVAL;
 
-	mutex_lock(&ipvs->sync_mutex);
 	ret = stop_sync_thread(ipvs,
 			       nla_get_u32(attrs[IPVS_DAEMON_ATTR_STATE]));
-	mutex_unlock(&ipvs->sync_mutex);
 	return ret;
 }
 
@@ -195,6 +195,7 @@ union ip_vs_sync_conn {
 #define IPVS_OPT_F_PARAM	(1 << (IPVS_OPT_PARAM-1))
 
 struct ip_vs_sync_thread_data {
+	struct task_struct *task;
 	struct netns_ipvs *ipvs;
 	struct socket *sock;
 	char *buf;
@@ -374,8 +375,11 @@ static inline void sb_queue_tail(struct netns_ipvs *ipvs,
 				max(IPVS_SYNC_SEND_DELAY, 1));
 		ms->sync_queue_len++;
 		list_add_tail(&sb->list, &ms->sync_queue);
-		if ((++ms->sync_queue_delay) == IPVS_SYNC_WAKEUP_RATE)
-			wake_up_process(ms->master_thread);
+		if ((++ms->sync_queue_delay) == IPVS_SYNC_WAKEUP_RATE) {
+			int id = (int)(ms - ipvs->ms);
+
+			wake_up_process(ipvs->master_tinfo[id].task);
+		}
 	} else
 		ip_vs_sync_buff_release(sb);
 	spin_unlock(&ipvs->sync_lock);
@@ -1636,8 +1640,10 @@ static void master_wakeup_work_handler(struct work_struct *work)
 	spin_lock_bh(&ipvs->sync_lock);
 	if (ms->sync_queue_len &&
 	    ms->sync_queue_delay < IPVS_SYNC_WAKEUP_RATE) {
+		int id = (int)(ms - ipvs->ms);
+
 		ms->sync_queue_delay = IPVS_SYNC_WAKEUP_RATE;
-		wake_up_process(ms->master_thread);
+		wake_up_process(ipvs->master_tinfo[id].task);
 	}
 	spin_unlock_bh(&ipvs->sync_lock);
 }
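Both hunks above recover the per-thread index from the ipvs_master_sync_state pointer with (ms - ipvs->ms). That is plain C pointer arithmetic: subtracting the array base from an element pointer yields the element index, not a byte offset. A stand-alone illustration (hypothetical struct name, not kernel code):

	#include <stdio.h>

	struct state { int queue_len; };

	int main(void)
	{
		struct state states[4];
		struct state *ms = &states[2];
		int id = (int)(ms - states);	/* element difference, not bytes */

		printf("id = %d\n", id);	/* prints: id = 2 */
		return 0;
	}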
@@ -1703,10 +1709,6 @@ done:
 	if (sb)
 		ip_vs_sync_buff_release(sb);
 
-	/* release the sending multicast socket */
-	sock_release(tinfo->sock);
-	kfree(tinfo);
-
 	return 0;
 }
 
@@ -1740,11 +1742,6 @@ static int sync_thread_backup(void *data)
 		}
 	}
 
-	/* release the sending multicast socket */
-	sock_release(tinfo->sock);
-	kfree(tinfo->buf);
-	kfree(tinfo);
-
 	return 0;
 }
 
@@ -1752,8 +1749,8 @@ static int sync_thread_backup(void *data)
 int start_sync_thread(struct netns_ipvs *ipvs, struct ipvs_sync_daemon_cfg *c,
 		      int state)
 {
-	struct ip_vs_sync_thread_data *tinfo = NULL;
-	struct task_struct **array = NULL, *task;
+	struct ip_vs_sync_thread_data *ti = NULL, *tinfo;
+	struct task_struct *task;
 	struct net_device *dev;
 	char *name;
 	int (*threadfn)(void *data);
@@ -1822,7 +1819,7 @@ int start_sync_thread(struct netns_ipvs *ipvs, struct ipvs_sync_daemon_cfg *c,
 		threadfn = sync_thread_master;
 	} else if (state == IP_VS_STATE_BACKUP) {
 		result = -EEXIST;
-		if (ipvs->backup_threads)
+		if (ipvs->backup_tinfo)
 			goto out_early;
 
 		ipvs->bcfg = *c;
@@ -1849,28 +1846,22 @@ int start_sync_thread(struct netns_ipvs *ipvs, struct ipvs_sync_daemon_cfg *c,
 					  master_wakeup_work_handler);
 			ms->ipvs = ipvs;
 		}
-	} else {
-		array = kcalloc(count, sizeof(struct task_struct *),
-				GFP_KERNEL);
-		result = -ENOMEM;
-		if (!array)
-			goto out;
 	}
+	result = -ENOMEM;
+	ti = kcalloc(count, sizeof(struct ip_vs_sync_thread_data),
+		     GFP_KERNEL);
+	if (!ti)
+		goto out;
 
 	for (id = 0; id < count; id++) {
-		result = -ENOMEM;
-		tinfo = kmalloc(sizeof(*tinfo), GFP_KERNEL);
-		if (!tinfo)
-			goto out;
+		tinfo = &ti[id];
 		tinfo->ipvs = ipvs;
-		tinfo->sock = NULL;
 		if (state == IP_VS_STATE_BACKUP) {
+			result = -ENOMEM;
 			tinfo->buf = kmalloc(ipvs->bcfg.sync_maxlen,
 					     GFP_KERNEL);
 			if (!tinfo->buf)
 				goto out;
-		} else {
-			tinfo->buf = NULL;
 		}
 		tinfo->id = id;
 		if (state == IP_VS_STATE_MASTER)
@@ -1885,17 +1876,15 @@ int start_sync_thread(struct netns_ipvs *ipvs, struct ipvs_sync_daemon_cfg *c,
 			result = PTR_ERR(task);
 			goto out;
 		}
-		tinfo = NULL;
-		if (state == IP_VS_STATE_MASTER)
-			ipvs->ms[id].master_thread = task;
-		else
-			array[id] = task;
+		tinfo->task = task;
 	}
 
 	/* mark as active */
 
-	if (state == IP_VS_STATE_BACKUP)
-		ipvs->backup_threads = array;
+	if (state == IP_VS_STATE_MASTER)
+		ipvs->master_tinfo = ti;
+	else
+		ipvs->backup_tinfo = ti;
 	spin_lock_bh(&ipvs->sync_buff_lock);
 	ipvs->sync_state |= state;
 	spin_unlock_bh(&ipvs->sync_buff_lock);
@@ -1910,29 +1899,31 @@ int start_sync_thread(struct netns_ipvs *ipvs, struct ipvs_sync_daemon_cfg *c,
 
 out:
 	/* We do not need RTNL lock anymore, release it here so that
-	 * sock_release below and in the kthreads can use rtnl_lock
-	 * to leave the mcast group.
+	 * sock_release below can use rtnl_lock to leave the mcast group.
 	 */
 	rtnl_unlock();
-	count = id;
-	while (count-- > 0) {
-		if (state == IP_VS_STATE_MASTER)
-			kthread_stop(ipvs->ms[count].master_thread);
-		else
-			kthread_stop(array[count]);
+	id = min(id, count - 1);
+	if (ti) {
+		for (tinfo = ti + id; tinfo >= ti; tinfo--) {
+			if (tinfo->task)
+				kthread_stop(tinfo->task);
+		}
 	}
 	if (!(ipvs->sync_state & IP_VS_STATE_MASTER)) {
 		kfree(ipvs->ms);
 		ipvs->ms = NULL;
 	}
 	mutex_unlock(&ipvs->sync_mutex);
-	if (tinfo) {
-		if (tinfo->sock)
-			sock_release(tinfo->sock);
-		kfree(tinfo->buf);
-		kfree(tinfo);
+
+	/* No more mutexes, release socks */
+	if (ti) {
+		for (tinfo = ti + id; tinfo >= ti; tinfo--) {
+			if (tinfo->sock)
+				sock_release(tinfo->sock);
+			kfree(tinfo->buf);
+		}
+		kfree(ti);
 	}
-	kfree(array);
 	return result;
 
 out_early:
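The reworked start_sync_thread() above allocates one kcalloc'ed array of ip_vs_sync_thread_data, records each kthread in tinfo->task, and releases sockets and buffers only in the starter's error path (and, below, in stop_sync_thread()), instead of letting each kthread free its own tinfo. A minimal sketch of that ownership pattern, with hypothetical demo_* names and no claim to match the IPVS code beyond its structure:

	#include <linux/kthread.h>
	#include <linux/slab.h>
	#include <linux/delay.h>
	#include <linux/err.h>

	struct demo_tinfo {
		struct task_struct *task;
		void *buf;
		int id;
	};

	static int demo_worker(void *data)
	{
		struct demo_tinfo *tinfo = data;

		while (!kthread_should_stop()) {
			/* work with tinfo->buf; never kfree() it here */
			msleep_interruptible(100);
		}
		return 0;	/* resources are released by the stopper */
	}

	static struct demo_tinfo *demo_start(int count)
	{
		struct demo_tinfo *ti;
		int id;

		ti = kcalloc(count, sizeof(*ti), GFP_KERNEL);	/* one owner, one array */
		if (!ti)
			return NULL;
		for (id = 0; id < count; id++) {
			ti[id].id = id;
			ti[id].task = kthread_run(demo_worker, &ti[id], "demo-%d", id);
			if (IS_ERR(ti[id].task)) {
				ti[id].task = NULL;
				goto err;
			}
		}
		return ti;
	err:
		while (--id >= 0)
			kthread_stop(ti[id].task);	/* stop what was started */
		kfree(ti);				/* nothing leaks on failure */
		return NULL;
	}

	static void demo_stop(struct demo_tinfo *ti, int count)
	{
		int id;

		for (id = 0; id < count; id++)
			kthread_stop(ti[id].task);	/* waits for each thread to exit */
		kfree(ti);				/* single owner frees everything */
	}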
@@ -1944,15 +1935,18 @@
 
 int stop_sync_thread(struct netns_ipvs *ipvs, int state)
 {
-	struct task_struct **array;
+	struct ip_vs_sync_thread_data *ti, *tinfo;
 	int id;
 	int retc = -EINVAL;
 
 	IP_VS_DBG(7, "%s(): pid %d\n", __func__, task_pid_nr(current));
 
+	mutex_lock(&ipvs->sync_mutex);
 	if (state == IP_VS_STATE_MASTER) {
+		retc = -ESRCH;
 		if (!ipvs->ms)
-			return -ESRCH;
+			goto err;
+		ti = ipvs->master_tinfo;
 
 		/*
 		 * The lock synchronizes with sb_queue_tail(), so that we don't
@@ -1971,38 +1965,56 @@ int stop_sync_thread(struct netns_ipvs *ipvs, int state)
 			struct ipvs_master_sync_state *ms = &ipvs->ms[id];
 			int ret;
 
+			tinfo = &ti[id];
 			pr_info("stopping master sync thread %d ...\n",
-				task_pid_nr(ms->master_thread));
+				task_pid_nr(tinfo->task));
 			cancel_delayed_work_sync(&ms->master_wakeup_work);
-			ret = kthread_stop(ms->master_thread);
+			ret = kthread_stop(tinfo->task);
 			if (retc >= 0)
 				retc = ret;
 		}
 		kfree(ipvs->ms);
 		ipvs->ms = NULL;
+		ipvs->master_tinfo = NULL;
 	} else if (state == IP_VS_STATE_BACKUP) {
-		if (!ipvs->backup_threads)
-			return -ESRCH;
+		retc = -ESRCH;
+		if (!ipvs->backup_tinfo)
+			goto err;
+		ti = ipvs->backup_tinfo;
 
 		ipvs->sync_state &= ~IP_VS_STATE_BACKUP;
-		array = ipvs->backup_threads;
 		retc = 0;
 		for (id = ipvs->threads_mask; id >= 0; id--) {
 			int ret;
 
+			tinfo = &ti[id];
 			pr_info("stopping backup sync thread %d ...\n",
-				task_pid_nr(array[id]));
-			ret = kthread_stop(array[id]);
+				task_pid_nr(tinfo->task));
+			ret = kthread_stop(tinfo->task);
 			if (retc >= 0)
 				retc = ret;
 		}
-		kfree(array);
-		ipvs->backup_threads = NULL;
+		ipvs->backup_tinfo = NULL;
+	} else {
+		goto err;
 	}
+	id = ipvs->threads_mask;
+	mutex_unlock(&ipvs->sync_mutex);
 
+	/* No more mutexes, release socks */
+	for (tinfo = ti + id; tinfo >= ti; tinfo--) {
+		if (tinfo->sock)
+			sock_release(tinfo->sock);
+		kfree(tinfo->buf);
+	}
+	kfree(ti);
+
 	/* decrease the module use count */
 	ip_vs_use_count_dec();
-
 	return retc;
+
+err:
+	mutex_unlock(&ipvs->sync_mutex);
+	return retc;
 }
 
@@ -2021,7 +2033,6 @@ void ip_vs_sync_net_cleanup(struct netns_ipvs *ipvs)
 {
 	int retc;
 
-	mutex_lock(&ipvs->sync_mutex);
 	retc = stop_sync_thread(ipvs, IP_VS_STATE_MASTER);
 	if (retc && retc != -ESRCH)
 		pr_err("Failed to stop Master Daemon\n");
@@ -2029,5 +2040,4 @@ void ip_vs_sync_net_cleanup(struct netns_ipvs *ipvs)
 	retc = stop_sync_thread(ipvs, IP_VS_STATE_BACKUP);
 	if (retc && retc != -ESRCH)
 		pr_err("Failed to stop Backup Daemon\n");
-	mutex_unlock(&ipvs->sync_mutex);
 }
@@ -1256,7 +1256,6 @@ static int ctnetlink_del_conntrack(struct net *net, struct sock *ctnl,
 	struct nf_conntrack_tuple tuple;
 	struct nf_conn *ct;
 	struct nfgenmsg *nfmsg = nlmsg_data(nlh);
-	u_int8_t u3 = nfmsg->version ? nfmsg->nfgen_family : AF_UNSPEC;
 	struct nf_conntrack_zone zone;
 	int err;
 
@@ -1266,11 +1265,13 @@ static int ctnetlink_del_conntrack(struct net *net, struct sock *ctnl,
 
 	if (cda[CTA_TUPLE_ORIG])
 		err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_ORIG,
-					    u3, &zone);
+					    nfmsg->nfgen_family, &zone);
 	else if (cda[CTA_TUPLE_REPLY])
 		err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_REPLY,
-					    u3, &zone);
+					    nfmsg->nfgen_family, &zone);
 	else {
+		u_int8_t u3 = nfmsg->version ? nfmsg->nfgen_family : AF_UNSPEC;
+
 		return ctnetlink_flush_conntrack(net, cda,
 						 NETLINK_CB(skb).portid,
 						 nlmsg_report(nlh), u3);
@@ -218,7 +218,7 @@ int nf_conntrack_icmpv4_error(struct nf_conn *tmpl,
 	/* See ip_conntrack_proto_tcp.c */
 	if (state->net->ct.sysctl_checksum &&
 	    state->hook == NF_INET_PRE_ROUTING &&
-	    nf_ip_checksum(skb, state->hook, dataoff, 0)) {
+	    nf_ip_checksum(skb, state->hook, dataoff, IPPROTO_ICMP)) {
 		icmp_error_log(skb, state, "bad hw icmp checksum");
 		return -NF_ACCEPT;
 	}
@@ -564,7 +564,7 @@ int nf_nat_icmp_reply_translation(struct sk_buff *skb,
 
 	if (!skb_make_writable(skb, hdrlen + sizeof(*inside)))
 		return 0;
-	if (nf_ip_checksum(skb, hooknum, hdrlen, 0))
+	if (nf_ip_checksum(skb, hooknum, hdrlen, IPPROTO_ICMP))
 		return 0;
 
 	inside = (void *)skb->data + hdrlen;
@@ -17,7 +17,8 @@ __sum16 nf_ip_checksum(struct sk_buff *skb, unsigned int hook,
 	case CHECKSUM_COMPLETE:
 		if (hook != NF_INET_PRE_ROUTING && hook != NF_INET_LOCAL_IN)
 			break;
-		if ((protocol == 0 && !csum_fold(skb->csum)) ||
+		if ((protocol != IPPROTO_TCP && protocol != IPPROTO_UDP &&
+		     !csum_fold(skb->csum)) ||
 		    !csum_tcpudp_magic(iph->saddr, iph->daddr,
 				       skb->len - dataoff, protocol,
 				       skb->csum)) {
@@ -26,7 +27,7 @@ __sum16 nf_ip_checksum(struct sk_buff *skb, unsigned int hook,
 		}
 		/* fall through */
 	case CHECKSUM_NONE:
-		if (protocol == 0)
+		if (protocol != IPPROTO_TCP && protocol != IPPROTO_UDP)
 			skb->csum = 0;
 		else
 			skb->csum = csum_tcpudp_nofold(iph->saddr, iph->daddr,
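The ICMP call sites above now pass IPPROTO_ICMP so that nf_ip_checksum() no longer treats the packet like TCP/UDP: an ICMP checksum covers the ICMP message alone, with no pseudo-header, so the csum_tcpudp_magic()/csum_tcpudp_nofold() path is the wrong verification for it. For reference, a stand-alone RFC 1071 style checksum of the kind ICMP uses (hypothetical helper for illustration, not the kernel's implementation):

	#include <stddef.h>
	#include <stdint.h>

	static uint16_t icmp_checksum(const void *data, size_t len)
	{
		const uint8_t *p = data;
		uint32_t sum = 0;

		while (len > 1) {		/* sum 16-bit words of the message only */
			sum += (uint32_t)p[0] << 8 | p[1];
			p += 2;
			len -= 2;
		}
		if (len)			/* odd trailing byte is padded with zero */
			sum += (uint32_t)p[0] << 8;
		while (sum >> 16)		/* fold carries into the low 16 bits */
			sum = (sum & 0xffff) + (sum >> 16);
		return (uint16_t)~sum;		/* one's complement of the sum */
	}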