tipc: fix race between poll() and setsockopt()
Letting tipc_poll() dereference a socket's pointer to struct tipc_group entails a race risk, as the group item may be deleted by a concurrent tipc_sk_join() or tipc_sk_leave() thread. We now move the 'open' flag from struct tipc_group to struct tipc_sock, and let the former retain only a pointer to the moved field. This eliminates the race risk.

Reported-by: syzbot+799dafde0286795858ac@syzkaller.appspotmail.com
Signed-off-by: Jon Maloy <jon.maloy@ericsson.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
commit 60c2530696
parent 30c3e9d470
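For context, the racing pattern in the commit subject is simply one thread poll()ing a TIPC socket while another joins and leaves a communication group via setsockopt(). The sketch below shows roughly that shape from user space; it is not the syzbot reproducer, the service type and instance values are arbitrary, it assumes a TIPC-enabled kernel, and error handling is omitted.

/* Sketch only: two threads exercising the poll()/setsockopt() race
 * window on a TIPC socket.  Build with -lpthread.
 */
#include <poll.h>
#include <pthread.h>
#include <string.h>
#include <sys/socket.h>
#include <linux/tipc.h>

static int sd;

static void *poller(void *arg)
{
	struct pollfd pfd = { .fd = sd, .events = POLLOUT };

	(void)arg;
	for (;;)
		poll(&pfd, 1, 10);	/* ends up in tipc_poll() */
	return NULL;
}

int main(void)
{
	struct tipc_group_req req;
	pthread_t t;

	sd = socket(AF_TIPC, SOCK_RDM, 0);
	pthread_create(&t, NULL, poller, NULL);

	memset(&req, 0, sizeof(req));
	req.type = 4711;		/* arbitrary service type */
	req.instance = 1;
	req.scope = TIPC_CLUSTER_SCOPE;

	for (;;) {
		/* join creates a tipc_group, leave deletes it again,
		 * possibly while the poller is dereferencing it
		 */
		setsockopt(sd, SOL_TIPC, TIPC_GROUP_JOIN, &req, sizeof(req));
		setsockopt(sd, SOL_TIPC, TIPC_GROUP_LEAVE, NULL, 0);
	}
	return 0;
}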
--- a/net/tipc/group.c
+++ b/net/tipc/group.c
@@ -93,26 +93,21 @@ struct tipc_group {
 	u16			max_active;
 	u16			bc_snd_nxt;
 	u16			bc_ackers;
+	bool			*open;
 	bool			loopback;
 	bool			events;
-	bool			open;
 };

 static void tipc_group_proto_xmit(struct tipc_group *grp, struct tipc_member *m,
 				  int mtyp, struct sk_buff_head *xmitq);

-bool tipc_group_is_open(struct tipc_group *grp)
-{
-	return grp->open;
-}
-
 static void tipc_group_open(struct tipc_member *m, bool *wakeup)
 {
 	*wakeup = false;
 	if (list_empty(&m->small_win))
 		return;
 	list_del_init(&m->small_win);
-	m->group->open = true;
+	*m->group->open = true;
 	*wakeup = true;
 }

@@ -170,7 +165,8 @@ int tipc_group_size(struct tipc_group *grp)
 }

 struct tipc_group *tipc_group_create(struct net *net, u32 portid,
-				     struct tipc_group_req *mreq)
+				     struct tipc_group_req *mreq,
+				     bool *group_is_open)
 {
 	u32 filter = TIPC_SUB_PORTS | TIPC_SUB_NO_STATUS;
 	bool global = mreq->scope != TIPC_NODE_SCOPE;
@@ -192,6 +188,7 @@ struct tipc_group *tipc_group_create(struct net *net, u32 portid,
 	grp->scope = mreq->scope;
 	grp->loopback = mreq->flags & TIPC_GROUP_LOOPBACK;
 	grp->events = mreq->flags & TIPC_GROUP_MEMBER_EVTS;
+	grp->open = group_is_open;
 	filter |= global ? TIPC_SUB_CLUSTER_SCOPE : TIPC_SUB_NODE_SCOPE;
 	if (tipc_topsrv_kern_subscr(net, portid, type, 0, ~0,
 				    filter, &grp->subid))
@@ -430,7 +427,7 @@ bool tipc_group_cong(struct tipc_group *grp, u32 dnode, u32 dport,
 	if (m->window >= len)
 		return false;

-	grp->open = false;
+	*grp->open = false;

 	/* If not fully advertised, do it now to prevent mutual blocking */
 	adv = m->advertised;
@@ -453,7 +450,7 @@ bool tipc_group_bc_cong(struct tipc_group *grp, int len)

 	/* If prev bcast was replicast, reject until all receivers have acked */
 	if (grp->bc_ackers) {
-		grp->open = false;
+		*grp->open = false;
 		return true;
 	}
 	if (list_empty(&grp->small_win))
@@ -800,7 +797,7 @@ void tipc_group_proto_rcv(struct tipc_group *grp, bool *usr_wakeup,
 	if (--grp->bc_ackers)
 		return;
 	list_del_init(&m->small_win);
-	m->group->open = true;
+	*m->group->open = true;
 	*usr_wakeup = true;
 	tipc_group_update_member(m, 0);
 	return;
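The group.c side of the change amounts to a pointer-aliasing pattern: the short-lived tipc_group no longer owns the 'open' flag, it merely points at a flag owned by the longer-lived socket, so writers inside group.c flip the socket's flag through the pointer. A minimal standalone sketch of that pattern, using hypothetical stand-in types rather than the kernel structures, could look like this:

/* Minimal sketch of the aliasing pattern (stand-in types, not kernel
 * code): the child object only points at a flag owned by its longer-
 * lived owner, so readers of the flag never dereference the child.
 */
#include <stdbool.h>
#include <stdlib.h>

struct child {			/* stands in for struct tipc_group */
	bool *open;		/* alias of owner->child_is_open */
};

struct owner {			/* stands in for struct tipc_sock */
	struct child *child;
	bool child_is_open;	/* the flag itself lives here */
};

static struct child *child_create(bool *is_open)
{
	struct child *c = malloc(sizeof(*c));

	if (c)
		c->open = is_open;	/* mirrors grp->open = group_is_open */
	return c;
}

static void child_block(struct child *c)
{
	*c->open = false;		/* mirrors *grp->open = false */
}

int main(void)
{
	struct owner o = { .child = NULL, .child_is_open = true };

	o.child = child_create(&o.child_is_open);
	if (!o.child)
		return 1;
	child_block(o.child);
	free(o.child);
	o.child = NULL;
	/* The flag stays readable without touching o.child at all. */
	return o.child_is_open ? 1 : 0;	/* returns 0: observed as closed */
}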
--- a/net/tipc/group.h
+++ b/net/tipc/group.h
@@ -43,7 +43,8 @@ struct tipc_member;
 struct tipc_msg;

 struct tipc_group *tipc_group_create(struct net *net, u32 portid,
-				     struct tipc_group_req *mreq);
+				     struct tipc_group_req *mreq,
+				     bool *group_is_open);
 void tipc_group_join(struct net *net, struct tipc_group *grp, int *sk_rcv_buf);
 void tipc_group_delete(struct net *net, struct tipc_group *grp);
 void tipc_group_add_member(struct tipc_group *grp, u32 node,
@@ -67,7 +68,6 @@ void tipc_group_update_bc_members(struct tipc_group *grp, int len, bool ack);
 bool tipc_group_cong(struct tipc_group *grp, u32 dnode, u32 dport,
 		     int len, struct tipc_member **m);
 bool tipc_group_bc_cong(struct tipc_group *grp, int len);
-bool tipc_group_is_open(struct tipc_group *grp);
 void tipc_group_update_rcv_win(struct tipc_group *grp, int blks, u32 node,
 			       u32 port, struct sk_buff_head *xmitq);
 u16 tipc_group_bc_snd_nxt(struct tipc_group *grp);
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -116,6 +116,7 @@ struct tipc_sock {
 	struct tipc_mc_method mc_method;
 	struct rcu_head rcu;
 	struct tipc_group *group;
+	bool group_is_open;
 };

 static int tipc_sk_backlog_rcv(struct sock *sk, struct sk_buff *skb);
@@ -715,7 +716,6 @@ static unsigned int tipc_poll(struct file *file, struct socket *sock,
 {
 	struct sock *sk = sock->sk;
 	struct tipc_sock *tsk = tipc_sk(sk);
-	struct tipc_group *grp;
 	u32 revents = 0;

 	sock_poll_wait(file, sk_sleep(sk), wait);
@@ -736,8 +736,7 @@ static unsigned int tipc_poll(struct file *file, struct socket *sock,
 		revents |= POLLIN | POLLRDNORM;
 		break;
 	case TIPC_OPEN:
-		grp = tsk->group;
-		if ((!grp || tipc_group_is_open(grp)) && !tsk->cong_link_cnt)
+		if (tsk->group_is_open && !tsk->cong_link_cnt)
 			revents |= POLLOUT;
 		if (!tipc_sk_type_connectionless(sk))
 			break;
@@ -2758,7 +2757,7 @@ static int tipc_sk_join(struct tipc_sock *tsk, struct tipc_group_req *mreq)
 		return -EINVAL;
 	if (grp)
 		return -EACCES;
-	grp = tipc_group_create(net, tsk->portid, mreq);
+	grp = tipc_group_create(net, tsk->portid, mreq, &tsk->group_is_open);
 	if (!grp)
 		return -ENOMEM;
 	tsk->group = grp;
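The poll-side effect of the change can be restated compactly. The sketch below is not the kernel code path; it uses stand-in structures and mirrors only the fields touched in the hunks above. The comments carry the lifetime argument: poll() pins the socket through the open file, so fields of tipc_sock are always safe to read, whereas tsk->group may be freed by a concurrent TIPC_GROUP_LEAVE.

/* Condensed restatement with stand-in types (not the kernel structs). */
#include <stdbool.h>
#include <stddef.h>

struct fake_group {		/* may be freed by a concurrent leave */
	bool open;
};

struct fake_sock {		/* pinned for as long as poll() runs */
	struct fake_group *group;
	bool group_is_open;
	int cong_link_cnt;
};

/* Old check: chases tsk->group, i.e. possibly-freed memory. */
static bool writable_old(const struct fake_sock *tsk)
{
	const struct fake_group *grp = tsk->group;

	return (!grp || grp->open) && !tsk->cong_link_cnt;
}

/* New check: reads only fields owned by the socket itself. */
static bool writable_new(const struct fake_sock *tsk)
{
	return tsk->group_is_open && !tsk->cong_link_cnt;
}

int main(void)
{
	struct fake_sock tsk = { .group = NULL, .group_is_open = true,
				 .cong_link_cnt = 0 };

	return writable_old(&tsk) == writable_new(&tsk) ? 0 : 1;
}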