// SPDX-License-Identifier: GPL-2.0
/* Multipath TCP
 *
 * Copyright (c) 2020, Red Hat, Inc.
 */

#define pr_fmt(fmt) "MPTCP: " fmt

#include <linux/inet.h>
#include <linux/kernel.h>
#include <net/inet_common.h>
#include <net/netns/generic.h>
#include <net/mptcp.h>

#include "protocol.h"
#include "mib.h"
#include "mptcp_pm_gen.h"

static int pm_nl_pernet_id;

struct mptcp_pm_add_entry {
        struct list_head        list;
        struct mptcp_addr_info  addr;
        u8                      retrans_times;
        struct timer_list       add_timer;
        struct mptcp_sock       *sock;
};

struct pm_nl_pernet {
        /* protects pernet updates */
        spinlock_t              lock;
        struct list_head        local_addr_list;
        unsigned int            addrs;
        unsigned int            stale_loss_cnt;
        unsigned int            add_addr_signal_max;
        unsigned int            add_addr_accept_max;
        unsigned int            local_addr_max;
        unsigned int            subflows_max;
        unsigned int            next_id;
        DECLARE_BITMAP(id_bitmap, MPTCP_PM_MAX_ADDR_ID + 1);
};

#define MPTCP_PM_ADDR_MAX       8
#define ADD_ADDR_RETRANS_MAX    3

static struct pm_nl_pernet *pm_nl_get_pernet(const struct net *net)
{
        return net_generic(net, pm_nl_pernet_id);
}

static struct pm_nl_pernet *
pm_nl_get_pernet_from_msk(const struct mptcp_sock *msk)
{
        return pm_nl_get_pernet(sock_net((struct sock *)msk));
}

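/* Compare two PM addresses: families must match, except that an IPv4
 * address and a v4-mapped IPv6 address are considered equivalent; the
 * ports are only compared when use_port is true.
 */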
bool mptcp_addresses_equal(const struct mptcp_addr_info *a,
                           const struct mptcp_addr_info *b, bool use_port)
{
        bool addr_equals = false;

        if (a->family == b->family) {
                if (a->family == AF_INET)
                        addr_equals = a->addr.s_addr == b->addr.s_addr;
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
                else
                        addr_equals = !ipv6_addr_cmp(&a->addr6, &b->addr6);
        } else if (a->family == AF_INET) {
                if (ipv6_addr_v4mapped(&b->addr6))
                        addr_equals = a->addr.s_addr == b->addr6.s6_addr32[3];
        } else if (b->family == AF_INET) {
                if (ipv6_addr_v4mapped(&a->addr6))
                        addr_equals = a->addr6.s6_addr32[3] == b->addr.s_addr;
#endif
        }

        if (!addr_equals)
                return false;
        if (!use_port)
                return true;

        return a->port == b->port;
}

void mptcp_local_address(const struct sock_common *skc, struct mptcp_addr_info *addr)
{
        addr->family = skc->skc_family;
        addr->port = htons(skc->skc_num);
        if (addr->family == AF_INET)
                addr->addr.s_addr = skc->skc_rcv_saddr;
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
        else if (addr->family == AF_INET6)
                addr->addr6 = skc->skc_v6_rcv_saddr;
#endif
}

static void remote_address(const struct sock_common *skc,
                           struct mptcp_addr_info *addr)
{
        addr->family = skc->skc_family;
        addr->port = skc->skc_dport;
        if (addr->family == AF_INET)
                addr->addr.s_addr = skc->skc_daddr;
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
        else if (addr->family == AF_INET6)
                addr->addr6 = skc->skc_v6_daddr;
#endif
}

static bool lookup_subflow_by_saddr(const struct list_head *list,
                                    const struct mptcp_addr_info *saddr)
{
        struct mptcp_subflow_context *subflow;
        struct mptcp_addr_info cur;
        struct sock_common *skc;

        list_for_each_entry(subflow, list, node) {
                skc = (struct sock_common *)mptcp_subflow_tcp_sock(subflow);

                mptcp_local_address(skc, &cur);
                if (mptcp_addresses_equal(&cur, saddr, saddr->port))
                        return true;
        }

        return false;
}

static bool lookup_subflow_by_daddr(const struct list_head *list,
                                    const struct mptcp_addr_info *daddr)
{
        struct mptcp_subflow_context *subflow;
        struct mptcp_addr_info cur;
        struct sock_common *skc;

        list_for_each_entry(subflow, list, node) {
                skc = (struct sock_common *)mptcp_subflow_tcp_sock(subflow);

                remote_address(skc, &cur);
                if (mptcp_addresses_equal(&cur, daddr, daddr->port))
                        return true;
        }

        return false;
}

static struct mptcp_pm_addr_entry *
select_local_address(const struct pm_nl_pernet *pernet,
                     const struct mptcp_sock *msk)
{
        struct mptcp_pm_addr_entry *entry, *ret = NULL;

        msk_owned_by_me(msk);

        rcu_read_lock();
        list_for_each_entry_rcu(entry, &pernet->local_addr_list, list) {
                if (!(entry->flags & MPTCP_PM_ADDR_FLAG_SUBFLOW))
                        continue;

                if (!test_bit(entry->addr.id, msk->pm.id_avail_bitmap))
                        continue;

                ret = entry;
                break;
        }
        rcu_read_unlock();
        return ret;
}

static struct mptcp_pm_addr_entry *
select_signal_address(struct pm_nl_pernet *pernet, const struct mptcp_sock *msk)
{
        struct mptcp_pm_addr_entry *entry, *ret = NULL;

        rcu_read_lock();
        /* do not keep any additional per socket state, just signal
         * the address list in order.
         * Note: removal from the local address list during the msk life-cycle
         * can lead to additional addresses not being announced.
         */
        list_for_each_entry_rcu(entry, &pernet->local_addr_list, list) {
                if (!test_bit(entry->addr.id, msk->pm.id_avail_bitmap))
                        continue;

                if (!(entry->flags & MPTCP_PM_ADDR_FLAG_SIGNAL))
                        continue;

                ret = entry;
                break;
        }
        rcu_read_unlock();
        return ret;
}

unsigned int mptcp_pm_get_add_addr_signal_max(const struct mptcp_sock *msk)
{
        const struct pm_nl_pernet *pernet = pm_nl_get_pernet_from_msk(msk);

        return READ_ONCE(pernet->add_addr_signal_max);
}
EXPORT_SYMBOL_GPL(mptcp_pm_get_add_addr_signal_max);

unsigned int mptcp_pm_get_add_addr_accept_max(const struct mptcp_sock *msk)
{
        struct pm_nl_pernet *pernet = pm_nl_get_pernet_from_msk(msk);

        return READ_ONCE(pernet->add_addr_accept_max);
}
EXPORT_SYMBOL_GPL(mptcp_pm_get_add_addr_accept_max);

unsigned int mptcp_pm_get_subflows_max(const struct mptcp_sock *msk)
{
        struct pm_nl_pernet *pernet = pm_nl_get_pernet_from_msk(msk);

        return READ_ONCE(pernet->subflows_max);
}
EXPORT_SYMBOL_GPL(mptcp_pm_get_subflows_max);

unsigned int mptcp_pm_get_local_addr_max(const struct mptcp_sock *msk)
{
        struct pm_nl_pernet *pernet = pm_nl_get_pernet_from_msk(msk);

        return READ_ONCE(pernet->local_addr_max);
}
EXPORT_SYMBOL_GPL(mptcp_pm_get_local_addr_max);

bool mptcp_pm_nl_check_work_pending(struct mptcp_sock *msk)
{
        struct pm_nl_pernet *pernet = pm_nl_get_pernet_from_msk(msk);

        if (msk->pm.subflows == mptcp_pm_get_subflows_max(msk) ||
            (find_next_and_bit(pernet->id_bitmap, msk->pm.id_avail_bitmap,
                               MPTCP_PM_MAX_ADDR_ID + 1, 0) == MPTCP_PM_MAX_ADDR_ID + 1)) {
                WRITE_ONCE(msk->pm.work_pending, false);
                return false;
        }
        return true;
}

struct mptcp_pm_add_entry *
mptcp_lookup_anno_list_by_saddr(const struct mptcp_sock *msk,
                                const struct mptcp_addr_info *addr)
{
        struct mptcp_pm_add_entry *entry;

        lockdep_assert_held(&msk->pm.lock);

        list_for_each_entry(entry, &msk->pm.anno_list, list) {
                if (mptcp_addresses_equal(&entry->addr, addr, true))
                        return entry;
        }

        return NULL;
}

bool mptcp_pm_sport_in_anno_list(struct mptcp_sock *msk, const struct sock *sk)
{
        struct mptcp_pm_add_entry *entry;
        struct mptcp_addr_info saddr;
        bool ret = false;

        mptcp_local_address((struct sock_common *)sk, &saddr);

        spin_lock_bh(&msk->pm.lock);
        list_for_each_entry(entry, &msk->pm.anno_list, list) {
                if (mptcp_addresses_equal(&entry->addr, &saddr, true)) {
                        ret = true;
                        goto out;
                }
        }

out:
        spin_unlock_bh(&msk->pm.lock);
        return ret;
}

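/* ADD_ADDR retransmission timer: re-announce a not yet echoed address,
 * up to ADD_ADDR_RETRANS_MAX times.
 */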
static void mptcp_pm_add_timer(struct timer_list *timer)
{
        struct mptcp_pm_add_entry *entry = from_timer(entry, timer, add_timer);
        struct mptcp_sock *msk = entry->sock;
        struct sock *sk = (struct sock *)msk;

        pr_debug("msk=%p", msk);

        if (!msk)
                return;

        if (inet_sk_state_load(sk) == TCP_CLOSE)
                return;

        if (!entry->addr.id)
                return;

        if (mptcp_pm_should_add_signal_addr(msk)) {
                sk_reset_timer(sk, timer, jiffies + TCP_RTO_MAX / 8);
                goto out;
        }

        spin_lock_bh(&msk->pm.lock);

        if (!mptcp_pm_should_add_signal_addr(msk)) {
                pr_debug("retransmit ADD_ADDR id=%d", entry->addr.id);
                mptcp_pm_announce_addr(msk, &entry->addr, false);
                mptcp_pm_add_addr_send_ack(msk);
                entry->retrans_times++;
        }

        if (entry->retrans_times < ADD_ADDR_RETRANS_MAX)
                sk_reset_timer(sk, timer,
                               jiffies + mptcp_get_add_addr_timeout(sock_net(sk)));

        spin_unlock_bh(&msk->pm.lock);

        if (entry->retrans_times == ADD_ADDR_RETRANS_MAX)
                mptcp_pm_subflow_established(msk);

out:
        __sock_put(sk);
}

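/* Stop the ADD_ADDR retransmission timer for the given announced address.
 * When check_id is set, both the address and the address id must match,
 * as required by RFC 8684 3.4.1 for received ADD_ADDR echoes.
 */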
struct mptcp_pm_add_entry *
mptcp_pm_del_add_timer(struct mptcp_sock *msk,
                       const struct mptcp_addr_info *addr, bool check_id)
{
        struct mptcp_pm_add_entry *entry;
        struct sock *sk = (struct sock *)msk;

        spin_lock_bh(&msk->pm.lock);
        entry = mptcp_lookup_anno_list_by_saddr(msk, addr);
        if (entry && (!check_id || entry->addr.id == addr->id))
                entry->retrans_times = ADD_ADDR_RETRANS_MAX;
        spin_unlock_bh(&msk->pm.lock);

        if (entry && (!check_id || entry->addr.id == addr->id))
                sk_stop_timer_sync(sk, &entry->add_timer);

        return entry;
}

bool mptcp_pm_alloc_anno_list(struct mptcp_sock *msk,
                              const struct mptcp_addr_info *addr)
{
        struct mptcp_pm_add_entry *add_entry = NULL;
        struct sock *sk = (struct sock *)msk;
        struct net *net = sock_net(sk);

        lockdep_assert_held(&msk->pm.lock);

        add_entry = mptcp_lookup_anno_list_by_saddr(msk, addr);

        if (add_entry) {
                if (WARN_ON_ONCE(mptcp_pm_is_kernel(msk)))
                        return false;

                sk_reset_timer(sk, &add_entry->add_timer,
                               jiffies + mptcp_get_add_addr_timeout(net));
                return true;
        }

        add_entry = kmalloc(sizeof(*add_entry), GFP_ATOMIC);
        if (!add_entry)
                return false;

        list_add(&add_entry->list, &msk->pm.anno_list);

        add_entry->addr = *addr;
        add_entry->sock = msk;
        add_entry->retrans_times = 0;

        timer_setup(&add_entry->add_timer, mptcp_pm_add_timer, 0);
        sk_reset_timer(sk, &add_entry->add_timer,
                       jiffies + mptcp_get_add_addr_timeout(net));

        return true;
}

void mptcp_pm_free_anno_list(struct mptcp_sock *msk)
{
        struct mptcp_pm_add_entry *entry, *tmp;
        struct sock *sk = (struct sock *)msk;
        LIST_HEAD(free_list);

        pr_debug("msk=%p", msk);

        spin_lock_bh(&msk->pm.lock);
        list_splice_init(&msk->pm.anno_list, &free_list);
        spin_unlock_bh(&msk->pm.lock);

        list_for_each_entry_safe(entry, tmp, &free_list, list) {
                sk_stop_timer_sync(sk, &entry->add_timer);
                kfree(entry);
        }
}

/* Fill all the remote addresses into the array addrs[],
 * and return the array size.
 */
static unsigned int fill_remote_addresses_vec(struct mptcp_sock *msk,
                                              struct mptcp_addr_info *local,
                                              bool fullmesh,
                                              struct mptcp_addr_info *addrs)
{
        bool deny_id0 = READ_ONCE(msk->pm.remote_deny_join_id0);
        struct sock *sk = (struct sock *)msk, *ssk;
        struct mptcp_subflow_context *subflow;
        struct mptcp_addr_info remote = { 0 };
        unsigned int subflows_max;
        int i = 0;

        subflows_max = mptcp_pm_get_subflows_max(msk);
        remote_address((struct sock_common *)sk, &remote);

        /* Non-fullmesh endpoint, fill in the single entry
         * corresponding to the primary MPC subflow remote address
         */
        if (!fullmesh) {
                if (deny_id0)
                        return 0;

                if (!mptcp_pm_addr_families_match(sk, local, &remote))
                        return 0;

                msk->pm.subflows++;
                addrs[i++] = remote;
        } else {
                DECLARE_BITMAP(unavail_id, MPTCP_PM_MAX_ADDR_ID + 1);

                /* Forbid creation of new subflows matching existing
                 * ones, possibly already created by incoming ADD_ADDR
                 */
                bitmap_zero(unavail_id, MPTCP_PM_MAX_ADDR_ID + 1);
                mptcp_for_each_subflow(msk, subflow)
                        if (READ_ONCE(subflow->local_id) == local->id)
                                __set_bit(subflow->remote_id, unavail_id);

                mptcp_for_each_subflow(msk, subflow) {
                        ssk = mptcp_subflow_tcp_sock(subflow);
                        remote_address((struct sock_common *)ssk, &addrs[i]);
                        addrs[i].id = READ_ONCE(subflow->remote_id);
                        if (deny_id0 && !addrs[i].id)
                                continue;

                        if (test_bit(addrs[i].id, unavail_id))
                                continue;

                        if (!mptcp_pm_addr_families_match(sk, local, &addrs[i]))
                                continue;

                        if (msk->pm.subflows < subflows_max) {
                                /* forbid creating multiple subflows towards
                                 * this id
                                 */
                                __set_bit(addrs[i].id, unavail_id);
                                msk->pm.subflows++;
                                i++;
                        }
                }
        }

        return i;
}

static void __mptcp_pm_send_ack(struct mptcp_sock *msk, struct mptcp_subflow_context *subflow,
                                bool prio, bool backup)
{
        struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
        bool slow;

        pr_debug("send ack for %s",
                 prio ? "mp_prio" : (mptcp_pm_should_add_signal(msk) ? "add_addr" : "rm_addr"));

        slow = lock_sock_fast(ssk);
        if (prio) {
                subflow->send_mp_prio = 1;
                subflow->request_bkup = backup;
        }

        __mptcp_subflow_send_ack(ssk);
        unlock_sock_fast(ssk, slow);
}

static void mptcp_pm_send_ack(struct mptcp_sock *msk, struct mptcp_subflow_context *subflow,
                              bool prio, bool backup)
{
        spin_unlock_bh(&msk->pm.lock);
        __mptcp_pm_send_ack(msk, subflow, prio, backup);
        spin_lock_bh(&msk->pm.lock);
}

static struct mptcp_pm_addr_entry *
__lookup_addr_by_id(struct pm_nl_pernet *pernet, unsigned int id)
{
        struct mptcp_pm_addr_entry *entry;

        list_for_each_entry(entry, &pernet->local_addr_list, list) {
                if (entry->addr.id == id)
                        return entry;
        }
        return NULL;
}

static struct mptcp_pm_addr_entry *
__lookup_addr(struct pm_nl_pernet *pernet, const struct mptcp_addr_info *info)
{
        struct mptcp_pm_addr_entry *entry;

        list_for_each_entry(entry, &pernet->local_addr_list, list) {
                if (mptcp_addresses_equal(&entry->addr, info, entry->addr.port))
                        return entry;
        }
        return NULL;
}

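/* In-kernel path-manager worker: account the MPC subflow endpoint, announce
 * at most one pending signal address, then create new subflows from the
 * local endpoint list until the configured limits are reached.
 */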
static void mptcp_pm_create_subflow_or_signal_addr(struct mptcp_sock *msk)
{
        struct sock *sk = (struct sock *)msk;
        struct mptcp_pm_addr_entry *local;
        unsigned int add_addr_signal_max;
        unsigned int local_addr_max;
        struct pm_nl_pernet *pernet;
        unsigned int subflows_max;

        pernet = pm_nl_get_pernet(sock_net(sk));

        add_addr_signal_max = mptcp_pm_get_add_addr_signal_max(msk);
        local_addr_max = mptcp_pm_get_local_addr_max(msk);
        subflows_max = mptcp_pm_get_subflows_max(msk);

        /* do lazy endpoint usage accounting for the MPC subflows */
        if (unlikely(!(msk->pm.status & BIT(MPTCP_PM_MPC_ENDPOINT_ACCOUNTED))) && msk->first) {
                struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(msk->first);
                struct mptcp_pm_addr_entry *entry;
                struct mptcp_addr_info mpc_addr;
                bool backup = false;

                mptcp_local_address((struct sock_common *)msk->first, &mpc_addr);
                rcu_read_lock();
                entry = __lookup_addr(pernet, &mpc_addr);
                if (entry) {
                        __clear_bit(entry->addr.id, msk->pm.id_avail_bitmap);
                        msk->mpc_endpoint_id = entry->addr.id;
                        backup = !!(entry->flags & MPTCP_PM_ADDR_FLAG_BACKUP);
                }
                rcu_read_unlock();

                if (backup)
                        mptcp_pm_send_ack(msk, subflow, true, backup);

                msk->pm.status |= BIT(MPTCP_PM_MPC_ENDPOINT_ACCOUNTED);
        }

        pr_debug("local %d:%d signal %d:%d subflows %d:%d\n",
                 msk->pm.local_addr_used, local_addr_max,
                 msk->pm.add_addr_signaled, add_addr_signal_max,
                 msk->pm.subflows, subflows_max);

        /* check first for announce */
        if (msk->pm.add_addr_signaled < add_addr_signal_max) {
                /* due to racing events on both ends we can reach here while
                 * previous add address is still running: if we invoke now
                 * mptcp_pm_announce_addr(), that will fail and the
                 * corresponding id will be marked as used.
                 * Instead let the PM machinery reschedule us when the
                 * current address announce will be completed.
                 */
                if (msk->pm.addr_signal & BIT(MPTCP_ADD_ADDR_SIGNAL))
                        return;

                local = select_signal_address(pernet, msk);
                if (!local)
                        goto subflow;

                /* If the alloc fails, we are on memory pressure, not worth
                 * continuing, and trying to create subflows.
                 */
                if (!mptcp_pm_alloc_anno_list(msk, &local->addr))
                        return;

                __clear_bit(local->addr.id, msk->pm.id_avail_bitmap);
                msk->pm.add_addr_signaled++;
                mptcp_pm_announce_addr(msk, &local->addr, false);
                mptcp_pm_nl_addr_send_ack(msk);
        }

subflow:
        /* check if should create a new subflow */
        while (msk->pm.local_addr_used < local_addr_max &&
               msk->pm.subflows < subflows_max) {
                struct mptcp_addr_info addrs[MPTCP_PM_ADDR_MAX];
                bool fullmesh;
                int i, nr;

                local = select_local_address(pernet, msk);
                if (!local)
                        break;

                fullmesh = !!(local->flags & MPTCP_PM_ADDR_FLAG_FULLMESH);

                msk->pm.local_addr_used++;
                __clear_bit(local->addr.id, msk->pm.id_avail_bitmap);
                nr = fill_remote_addresses_vec(msk, &local->addr, fullmesh, addrs);
                if (nr == 0)
                        continue;

                spin_unlock_bh(&msk->pm.lock);
                for (i = 0; i < nr; i++)
                        __mptcp_subflow_connect(sk, &local->addr, &addrs[i]);
                spin_lock_bh(&msk->pm.lock);
        }
        mptcp_pm_nl_check_work_pending(msk);
}

static void mptcp_pm_nl_fully_established(struct mptcp_sock *msk)
{
        mptcp_pm_create_subflow_or_signal_addr(msk);
}

static void mptcp_pm_nl_subflow_established(struct mptcp_sock *msk)
{
        mptcp_pm_create_subflow_or_signal_addr(msk);
}

/* Fill all the local addresses into the array addrs[],
 * and return the array size.
 */
static unsigned int fill_local_addresses_vec(struct mptcp_sock *msk,
                                             struct mptcp_addr_info *remote,
                                             struct mptcp_addr_info *addrs)
{
        struct sock *sk = (struct sock *)msk;
        struct mptcp_pm_addr_entry *entry;
        struct pm_nl_pernet *pernet;
        unsigned int subflows_max;
        int i = 0;

        pernet = pm_nl_get_pernet_from_msk(msk);
        subflows_max = mptcp_pm_get_subflows_max(msk);

        rcu_read_lock();
        list_for_each_entry_rcu(entry, &pernet->local_addr_list, list) {
                if (!(entry->flags & MPTCP_PM_ADDR_FLAG_FULLMESH))
                        continue;

                if (!mptcp_pm_addr_families_match(sk, &entry->addr, remote))
                        continue;

                if (msk->pm.subflows < subflows_max) {
                        msk->pm.subflows++;
                        addrs[i++] = entry->addr;
                }
        }
        rcu_read_unlock();

        /* If the array is empty, fill in the single
         * 'IPADDRANY' local address
         */
        if (!i) {
                struct mptcp_addr_info local;

                memset(&local, 0, sizeof(local));
                local.family =
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
                               remote->family == AF_INET6 &&
                               ipv6_addr_v4mapped(&remote->addr6) ? AF_INET :
#endif
                               remote->family;

                if (!mptcp_pm_addr_families_match(sk, &local, remote))
                        return 0;

                msk->pm.subflows++;
                addrs[i++] = local;
        }

        return i;
}

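/* Handle a received ADD_ADDR: echo it back, then try to create subflows
 * towards the announced address. The accept counter is only updated when
 * at least one subflow has actually been created.
 */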
static void mptcp_pm_nl_add_addr_received(struct mptcp_sock *msk)
{
        struct mptcp_addr_info addrs[MPTCP_PM_ADDR_MAX];
        struct sock *sk = (struct sock *)msk;
        unsigned int add_addr_accept_max;
        struct mptcp_addr_info remote;
        unsigned int subflows_max;
        bool sf_created = false;
        int i, nr;

        add_addr_accept_max = mptcp_pm_get_add_addr_accept_max(msk);
        subflows_max = mptcp_pm_get_subflows_max(msk);

        pr_debug("accepted %d:%d remote family %d",
                 msk->pm.add_addr_accepted, add_addr_accept_max,
                 msk->pm.remote.family);

        remote = msk->pm.remote;
        mptcp_pm_announce_addr(msk, &remote, true);
        mptcp_pm_nl_addr_send_ack(msk);

        if (lookup_subflow_by_daddr(&msk->conn_list, &remote))
                return;

        /* pick id 0 port, if none is provided the remote address */
        if (!remote.port)
                remote.port = sk->sk_dport;

        /* connect to the specified remote address, using whatever
         * local address the routing configuration will pick.
         */
        nr = fill_local_addresses_vec(msk, &remote, addrs);
        if (nr == 0)
                return;

        spin_unlock_bh(&msk->pm.lock);
        for (i = 0; i < nr; i++)
                if (__mptcp_subflow_connect(sk, &addrs[i], &remote) == 0)
                        sf_created = true;
        spin_lock_bh(&msk->pm.lock);

        if (sf_created) {
                msk->pm.add_addr_accepted++;
                if (msk->pm.add_addr_accepted >= add_addr_accept_max ||
                    msk->pm.subflows >= subflows_max)
                        WRITE_ONCE(msk->pm.accept_addr, false);
        }
}

void mptcp_pm_nl_addr_send_ack(struct mptcp_sock *msk)
{
        struct mptcp_subflow_context *subflow;

        msk_owned_by_me(msk);
        lockdep_assert_held(&msk->pm.lock);

        if (!mptcp_pm_should_add_signal(msk) &&
            !mptcp_pm_should_rm_signal(msk))
                return;

        subflow = list_first_entry_or_null(&msk->conn_list, typeof(*subflow), node);
        if (subflow)
                mptcp_pm_send_ack(msk, subflow, false, false);
}

int mptcp_pm_nl_mp_prio_send_ack(struct mptcp_sock *msk,
                                 struct mptcp_addr_info *addr,
                                 struct mptcp_addr_info *rem,
                                 u8 bkup)
{
        struct mptcp_subflow_context *subflow;

        pr_debug("bkup=%d", bkup);

        mptcp_for_each_subflow(msk, subflow) {
                struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
                struct mptcp_addr_info local, remote;

                mptcp_local_address((struct sock_common *)ssk, &local);
                if (!mptcp_addresses_equal(&local, addr, addr->port))
                        continue;

                if (rem && rem->family != AF_UNSPEC) {
                        remote_address((struct sock_common *)ssk, &remote);
                        if (!mptcp_addresses_equal(&remote, rem, rem->port))
                                continue;
                }

                __mptcp_pm_send_ack(msk, subflow, true, bkup);
                return 0;
        }

        return -EINVAL;
}

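/* A subflow matches the given endpoint id either directly or, for the
 * initial subflow (local_id 0), through the MPC endpoint id.
 */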
static bool mptcp_local_id_match(const struct mptcp_sock *msk, u8 local_id, u8 id)
{
        return local_id == id || (!local_id && msk->mpc_endpoint_id == id);
}

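/* Close the subflows matching the ids in rm_list and update the PM
 * counters; rm_type tells whether the ids refer to remote addresses
 * (RM_ADDR) or to local endpoints (subflow removal).
 */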
static void mptcp_pm_nl_rm_addr_or_subflow(struct mptcp_sock *msk,
                                           const struct mptcp_rm_list *rm_list,
                                           enum linux_mptcp_mib_field rm_type)
{
        struct mptcp_subflow_context *subflow, *tmp;
        struct sock *sk = (struct sock *)msk;
        u8 i;

        pr_debug("%s rm_list_nr %d",
                 rm_type == MPTCP_MIB_RMADDR ? "address" : "subflow", rm_list->nr);

        msk_owned_by_me(msk);

        if (sk->sk_state == TCP_LISTEN)
                return;

        if (!rm_list->nr)
                return;

        if (list_empty(&msk->conn_list))
                return;

        for (i = 0; i < rm_list->nr; i++) {
                u8 rm_id = rm_list->ids[i];
                bool removed = false;

                mptcp_for_each_subflow_safe(msk, subflow, tmp) {
                        struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
                        u8 remote_id = READ_ONCE(subflow->remote_id);
                        int how = RCV_SHUTDOWN | SEND_SHUTDOWN;
                        u8 id = subflow_get_local_id(subflow);

                        if (rm_type == MPTCP_MIB_RMADDR && remote_id != rm_id)
                                continue;
                        if (rm_type == MPTCP_MIB_RMSUBFLOW && !mptcp_local_id_match(msk, id, rm_id))
                                continue;

                        pr_debug(" -> %s rm_list_ids[%d]=%u local_id=%u remote_id=%u mpc_id=%u",
                                 rm_type == MPTCP_MIB_RMADDR ? "address" : "subflow",
                                 i, rm_id, id, remote_id, msk->mpc_endpoint_id);
                        spin_unlock_bh(&msk->pm.lock);
                        mptcp_subflow_shutdown(sk, ssk, how);

                        /* the following takes care of updating the subflows counter */
                        mptcp_close_ssk(sk, ssk, subflow);
                        spin_lock_bh(&msk->pm.lock);

                        removed = true;
                        if (rm_type == MPTCP_MIB_RMSUBFLOW)
                                __MPTCP_INC_STATS(sock_net(sk), rm_type);
                }
                if (rm_type == MPTCP_MIB_RMSUBFLOW)
                        __set_bit(rm_id ? rm_id : msk->mpc_endpoint_id, msk->pm.id_avail_bitmap);
                else if (rm_type == MPTCP_MIB_RMADDR)
                        __MPTCP_INC_STATS(sock_net(sk), rm_type);
                if (!removed)
                        continue;

                if (!mptcp_pm_is_kernel(msk))
                        continue;

                if (rm_type == MPTCP_MIB_RMADDR) {
                        msk->pm.add_addr_accepted--;
                        WRITE_ONCE(msk->pm.accept_addr, true);
                } else if (rm_type == MPTCP_MIB_RMSUBFLOW) {
                        msk->pm.local_addr_used--;
                }
        }
}

static void mptcp_pm_nl_rm_addr_received(struct mptcp_sock *msk)
{
        mptcp_pm_nl_rm_addr_or_subflow(msk, &msk->pm.rm_list_rx, MPTCP_MIB_RMADDR);
}

void mptcp_pm_nl_rm_subflow_received(struct mptcp_sock *msk,
                                     const struct mptcp_rm_list *rm_list)
{
        mptcp_pm_nl_rm_addr_or_subflow(msk, rm_list, MPTCP_MIB_RMSUBFLOW);
}

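/* Dispatch the pending path-manager events under the PM spinlock. */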
void mptcp_pm_nl_work(struct mptcp_sock *msk)
{
        struct mptcp_pm_data *pm = &msk->pm;

        msk_owned_by_me(msk);

        if (!(pm->status & MPTCP_PM_WORK_MASK))
                return;

        spin_lock_bh(&msk->pm.lock);

        pr_debug("msk=%p status=%x", msk, pm->status);
        if (pm->status & BIT(MPTCP_PM_ADD_ADDR_RECEIVED)) {
                pm->status &= ~BIT(MPTCP_PM_ADD_ADDR_RECEIVED);
                mptcp_pm_nl_add_addr_received(msk);
        }
        if (pm->status & BIT(MPTCP_PM_ADD_ADDR_SEND_ACK)) {
                pm->status &= ~BIT(MPTCP_PM_ADD_ADDR_SEND_ACK);
                mptcp_pm_nl_addr_send_ack(msk);
        }
        if (pm->status & BIT(MPTCP_PM_RM_ADDR_RECEIVED)) {
                pm->status &= ~BIT(MPTCP_PM_RM_ADDR_RECEIVED);
                mptcp_pm_nl_rm_addr_received(msk);
        }
        if (pm->status & BIT(MPTCP_PM_ESTABLISHED)) {
                pm->status &= ~BIT(MPTCP_PM_ESTABLISHED);
                mptcp_pm_nl_fully_established(msk);
        }
        if (pm->status & BIT(MPTCP_PM_SUBFLOW_ESTABLISHED)) {
                pm->status &= ~BIT(MPTCP_PM_SUBFLOW_ESTABLISHED);
                mptcp_pm_nl_subflow_established(msk);
        }

        spin_unlock_bh(&msk->pm.lock);
}

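/* Only signal-only endpoints keep a dedicated port: for all the other
 * endpoints the port is cleared before insertion.
 */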
static bool address_use_port(struct mptcp_pm_addr_entry *entry)
{
        return (entry->flags &
                (MPTCP_PM_ADDR_FLAG_SIGNAL | MPTCP_PM_ADDR_FLAG_SUBFLOW)) ==
                MPTCP_PM_ADDR_FLAG_SIGNAL;
}

/* caller must ensure the RCU grace period is already elapsed */
static void __mptcp_pm_release_addr_entry(struct mptcp_pm_addr_entry *entry)
{
        if (entry->lsk)
                sock_release(entry->lsk);
        kfree(entry);
}

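/* Insert a new endpoint into the pernet list, allocating a free id when
 * needed; an existing implicit entry for the same address can be replaced.
 * Returns the endpoint id on success or a negative error code.
 */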
static int mptcp_pm_nl_append_new_local_addr(struct pm_nl_pernet *pernet,
                                             struct mptcp_pm_addr_entry *entry,
                                             bool needs_id)
{
        struct mptcp_pm_addr_entry *cur, *del_entry = NULL;
        unsigned int addr_max;
        int ret = -EINVAL;

        spin_lock_bh(&pernet->lock);
        /* to keep the code simple, don't do IDR-like allocation for address ID,
         * just bail when we exceed limits
         */
        if (pernet->next_id == MPTCP_PM_MAX_ADDR_ID)
                pernet->next_id = 1;
        if (pernet->addrs >= MPTCP_PM_ADDR_MAX) {
                ret = -ERANGE;
                goto out;
        }
        if (test_bit(entry->addr.id, pernet->id_bitmap)) {
                ret = -EBUSY;
                goto out;
        }

        /* do not insert duplicate addresses, differentiate on port only
         * for signal-only endpoints carrying a port
         */
        if (!address_use_port(entry))
                entry->addr.port = 0;
        list_for_each_entry(cur, &pernet->local_addr_list, list) {
                if (mptcp_addresses_equal(&cur->addr, &entry->addr,
                                          cur->addr.port || entry->addr.port)) {
                        /* allow replacing the existing endpoint only if such
                         * endpoint is an implicit one and the user-space
                         * did not provide an endpoint id
                         */
                        if (!(cur->flags & MPTCP_PM_ADDR_FLAG_IMPLICIT)) {
                                ret = -EEXIST;
                                goto out;
                        }
                        if (entry->addr.id)
                                goto out;

                        pernet->addrs--;
                        entry->addr.id = cur->addr.id;
                        list_del_rcu(&cur->list);
                        del_entry = cur;
                        break;
                }
        }

        if (!entry->addr.id && needs_id) {
find_next:
                entry->addr.id = find_next_zero_bit(pernet->id_bitmap,
                                                    MPTCP_PM_MAX_ADDR_ID + 1,
                                                    pernet->next_id);
                if (!entry->addr.id && pernet->next_id != 1) {
                        pernet->next_id = 1;
                        goto find_next;
                }
        }

        if (!entry->addr.id && needs_id)
                goto out;

        __set_bit(entry->addr.id, pernet->id_bitmap);
        if (entry->addr.id > pernet->next_id)
                pernet->next_id = entry->addr.id;

        if (entry->flags & MPTCP_PM_ADDR_FLAG_SIGNAL) {
                addr_max = pernet->add_addr_signal_max;
                WRITE_ONCE(pernet->add_addr_signal_max, addr_max + 1);
        }
        if (entry->flags & MPTCP_PM_ADDR_FLAG_SUBFLOW) {
                addr_max = pernet->local_addr_max;
                WRITE_ONCE(pernet->local_addr_max, addr_max + 1);
        }

        pernet->addrs++;
        if (!entry->addr.port)
                list_add_tail_rcu(&entry->list, &pernet->local_addr_list);
        else
                list_add_rcu(&entry->list, &pernet->local_addr_list);
        ret = entry->addr.id;

out:
        spin_unlock_bh(&pernet->lock);

        /* just replaced an existing entry, free it */
        if (del_entry) {
                synchronize_rcu();
                __mptcp_pm_release_addr_entry(del_entry);
        }
        return ret;
}

static struct lock_class_key mptcp_slock_keys[2];
static struct lock_class_key mptcp_keys[2];

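/* Create an in-kernel MPTCP listening socket bound to the endpoint
 * address, so that incoming connections on that address and port can be
 * accepted.
 */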
static int mptcp_pm_nl_create_listen_socket(struct sock *sk,
                                            struct mptcp_pm_addr_entry *entry)
{
        bool is_ipv6 = sk->sk_family == AF_INET6;
        int addrlen = sizeof(struct sockaddr_in);
        struct sockaddr_storage addr;
        struct sock *newsk, *ssk;
        int backlog = 1024;
        int err;

        err = sock_create_kern(sock_net(sk), entry->addr.family,
                               SOCK_STREAM, IPPROTO_MPTCP, &entry->lsk);
        if (err)
                return err;

        newsk = entry->lsk->sk;
        if (!newsk)
                return -EINVAL;

        /* The subflow socket lock is acquired nested to the msk one
         * in several places, even by the TCP stack, and this msk is a kernel
         * socket: lockdep complains. Instead of propagating the _nested
         * modifiers in several places, re-init the lock class for the msk
         * socket to an mptcp specific one.
         */
        sock_lock_init_class_and_name(newsk,
                                      is_ipv6 ? "mlock-AF_INET6" : "mlock-AF_INET",
                                      &mptcp_slock_keys[is_ipv6],
                                      is_ipv6 ? "msk_lock-AF_INET6" : "msk_lock-AF_INET",
                                      &mptcp_keys[is_ipv6]);

        lock_sock(newsk);
        ssk = __mptcp_nmpc_sk(mptcp_sk(newsk));
        release_sock(newsk);
        if (IS_ERR(ssk))
                return PTR_ERR(ssk);

        mptcp_info2sockaddr(&entry->addr, &addr, entry->addr.family);
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
        if (entry->addr.family == AF_INET6)
                addrlen = sizeof(struct sockaddr_in6);
#endif
        if (ssk->sk_family == AF_INET)
                err = inet_bind_sk(ssk, (struct sockaddr *)&addr, addrlen);
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
        else if (ssk->sk_family == AF_INET6)
                err = inet6_bind_sk(ssk, (struct sockaddr *)&addr, addrlen);
#endif
        if (err)
                return err;

        /* We don't use mptcp_set_state() here because it needs to be called
         * under the msk socket lock. For the moment, that will not bring
         * anything more than only calling inet_sk_state_store(), because the
         * old status is known (TCP_CLOSE).
         */
        inet_sk_state_store(newsk, TCP_LISTEN);
        lock_sock(ssk);
        err = __inet_listen_sk(ssk, backlog);
        if (!err)
                mptcp_event_pm_listener(ssk, MPTCP_EVENT_LISTENER_CREATED);
        release_sock(ssk);
        return err;
}

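/* Return the endpoint id matching the given local address, creating a new
 * implicit endpoint entry when no match is found.
 */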
2023-06-08 13:20:50 +00:00
|
|
|
int mptcp_pm_nl_get_local_id(struct mptcp_sock *msk, struct mptcp_addr_info *skc)
|
2020-03-27 21:48:51 +00:00
|
|
|
{
|
|
|
|
struct mptcp_pm_addr_entry *entry;
|
|
|
|
struct pm_nl_pernet *pernet;
|
|
|
|
int ret = -1;
|
|
|
|
|
2022-04-08 19:45:57 +00:00
|
|
|
pernet = pm_nl_get_pernet_from_msk(msk);
|
2020-03-27 21:48:51 +00:00
|
|
|
|
|
|
|
rcu_read_lock();
|
|
|
|
list_for_each_entry_rcu(entry, &pernet->local_addr_list, list) {
|
2023-06-08 13:20:50 +00:00
|
|
|
if (mptcp_addresses_equal(&entry->addr, skc, entry->addr.port)) {
|
2020-03-27 21:48:51 +00:00
|
|
|
ret = entry->addr.id;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
rcu_read_unlock();
|
|
|
|
if (ret >= 0)
|
|
|
|
return ret;
|
|
|
|
|
|
|
|
/* address not found, add to local list */
|
2020-09-09 03:01:24 +00:00
|
|
|
entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
|
2020-03-27 21:48:51 +00:00
|
|
|
if (!entry)
|
|
|
|
return -ENOMEM;
|
|
|
|
|
2023-06-08 13:20:50 +00:00
|
|
|
entry->addr = *skc;
|
2021-01-09 00:47:55 +00:00
|
|
|
entry->addr.id = 0;
|
2021-02-01 23:09:12 +00:00
|
|
|
entry->addr.port = 0;
|
2021-04-07 00:15:57 +00:00
|
|
|
entry->ifindex = 0;
|
2022-03-07 20:44:36 +00:00
|
|
|
entry->flags = MPTCP_PM_ADDR_FLAG_IMPLICIT;
|
2021-02-01 23:09:12 +00:00
|
|
|
entry->lsk = NULL;
|
2024-02-15 18:25:29 +00:00
|
|
|
ret = mptcp_pm_nl_append_new_local_addr(pernet, entry, true);
|
2020-03-27 21:48:51 +00:00
|
|
|
if (ret < 0)
|
|
|
|
kfree(entry);
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
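/* Summary of the path above: a subflow source address matching a
 * configured endpoint reuses that endpoint's id; any other address gets
 * a new MPTCP_PM_ADDR_FLAG_IMPLICIT entry (port and ifindex cleared)
 * whose id is assigned by mptcp_pm_nl_append_new_local_addr(), so the
 * address still maps to a stable per-netns id. Implicit entries cannot
 * be created from userspace, see mptcp_pm_nl_add_addr_doit() below.
 */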
|
|
|
|
|
2024-07-27 10:01:28 +00:00
|
|
|
bool mptcp_pm_nl_is_backup(struct mptcp_sock *msk, struct mptcp_addr_info *skc)
|
|
|
|
{
|
|
|
|
struct pm_nl_pernet *pernet = pm_nl_get_pernet_from_msk(msk);
|
|
|
|
struct mptcp_pm_addr_entry *entry;
|
|
|
|
bool backup = false;
|
|
|
|
|
|
|
|
rcu_read_lock();
|
|
|
|
list_for_each_entry_rcu(entry, &pernet->local_addr_list, list) {
|
|
|
|
if (mptcp_addresses_equal(&entry->addr, skc, entry->addr.port)) {
|
|
|
|
backup = !!(entry->flags & MPTCP_PM_ADDR_FLAG_BACKUP);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
rcu_read_unlock();
|
|
|
|
|
|
|
|
return backup;
|
|
|
|
}
|
|
|
|
|
2021-02-13 00:00:01 +00:00
|
|
|
#define MPTCP_PM_CMD_GRP_OFFSET 0
|
|
|
|
#define MPTCP_PM_EV_GRP_OFFSET 1
|
2020-03-27 21:48:51 +00:00
|
|
|
|
|
|
|
static const struct genl_multicast_group mptcp_pm_mcgrps[] = {
|
|
|
|
[MPTCP_PM_CMD_GRP_OFFSET] = { .name = MPTCP_PM_CMD_GRP_NAME, },
|
2021-02-13 00:00:01 +00:00
|
|
|
[MPTCP_PM_EV_GRP_OFFSET] = { .name = MPTCP_PM_EV_GRP_NAME,
|
2023-12-20 15:43:58 +00:00
|
|
|
.flags = GENL_MCAST_CAP_NET_ADMIN,
|
2021-02-13 00:00:01 +00:00
|
|
|
},
|
2020-03-27 21:48:51 +00:00
|
|
|
};
|
|
|
|
|
2021-08-13 22:15:45 +00:00
|
|
|
void mptcp_pm_nl_subflow_chk_stale(const struct mptcp_sock *msk, struct sock *ssk)
|
|
|
|
{
|
|
|
|
struct mptcp_subflow_context *iter, *subflow = mptcp_subflow_ctx(ssk);
|
|
|
|
struct sock *sk = (struct sock *)msk;
|
|
|
|
unsigned int active_max_loss_cnt;
|
|
|
|
struct net *net = sock_net(sk);
|
|
|
|
unsigned int stale_loss_cnt;
|
|
|
|
bool slow;
|
|
|
|
|
|
|
|
stale_loss_cnt = mptcp_stale_loss_cnt(net);
|
|
|
|
if (subflow->stale || !stale_loss_cnt || subflow->stale_count <= stale_loss_cnt)
|
|
|
|
return;
|
|
|
|
|
|
|
|
/* look for another available subflow not in loss state */
|
|
|
|
active_max_loss_cnt = max_t(int, stale_loss_cnt - 1, 1);
|
|
|
|
mptcp_for_each_subflow(msk, iter) {
|
|
|
|
if (iter != subflow && mptcp_subflow_active(iter) &&
|
|
|
|
iter->stale_count < active_max_loss_cnt) {
|
|
|
|
/* we have some alternatives, try to mark this subflow as idle ... */
|
|
|
|
slow = lock_sock_fast(ssk);
|
|
|
|
if (!tcp_rtx_and_write_queues_empty(ssk)) {
|
|
|
|
subflow->stale = 1;
|
|
|
|
__mptcp_retransmit_pending_data(sk);
|
2023-01-06 18:57:18 +00:00
|
|
|
MPTCP_INC_STATS(net, MPTCP_MIB_SUBFLOWSTALE);
|
2021-08-13 22:15:45 +00:00
|
|
|
}
|
|
|
|
unlock_sock_fast(ssk, slow);
|
|
|
|
|
2022-06-27 12:16:25 +00:00
|
|
|
/* always try to push the pending data regardless of re-injections:
|
2021-08-13 22:15:45 +00:00
|
|
|
* we can possibly use backup subflows now, and subflow selection
|
|
|
|
* is cheap under the msk socket lock
|
|
|
|
*/
|
|
|
|
__mptcp_push_pending(sk, 0);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
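/* The threshold used above is the pernet stale_loss_cnt (default 4,
 * set in pm_nl_init_net() at the end of this file). It is tunable at
 * run time, e.g. assuming the standard MPTCP sysctl is exposed:
 *
 *   sysctl -w net.mptcp.stale_loss_cnt=2
 *
 * A value of 0 disables stale-subflow detection entirely, as the early
 * return above shows.
 */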
|
|
|
|
|
2020-03-27 21:48:51 +00:00
|
|
|
static int mptcp_pm_family_to_addr(int family)
|
|
|
|
{
|
|
|
|
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
|
|
|
|
if (family == AF_INET6)
|
|
|
|
return MPTCP_PM_ADDR_ATTR_ADDR6;
|
|
|
|
#endif
|
|
|
|
return MPTCP_PM_ADDR_ATTR_ADDR4;
|
|
|
|
}
|
|
|
|
|
2022-05-04 02:38:51 +00:00
|
|
|
static int mptcp_pm_parse_pm_addr_attr(struct nlattr *tb[],
|
|
|
|
const struct nlattr *attr,
|
|
|
|
struct genl_info *info,
|
|
|
|
struct mptcp_addr_info *addr,
|
|
|
|
bool require_family)
|
2020-03-27 21:48:51 +00:00
|
|
|
{
|
|
|
|
int err, addr_addr;
|
|
|
|
|
|
|
|
if (!attr) {
|
|
|
|
GENL_SET_ERR_MSG(info, "missing address info");
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* no validation needed - was already done via nested policy */
|
|
|
|
err = nla_parse_nested_deprecated(tb, MPTCP_PM_ADDR_ATTR_MAX, attr,
|
2023-10-23 18:17:07 +00:00
|
|
|
mptcp_pm_address_nl_policy, info->extack);
|
2020-03-27 21:48:51 +00:00
|
|
|
if (err)
|
|
|
|
return err;
|
|
|
|
|
2022-05-04 02:38:51 +00:00
|
|
|
if (tb[MPTCP_PM_ADDR_ATTR_ID])
|
|
|
|
addr->id = nla_get_u8(tb[MPTCP_PM_ADDR_ATTR_ID]);
|
|
|
|
|
2020-03-27 21:48:51 +00:00
|
|
|
if (!tb[MPTCP_PM_ADDR_ATTR_FAMILY]) {
|
|
|
|
if (!require_family)
|
2022-12-09 00:44:31 +00:00
|
|
|
return 0;
|
2020-03-27 21:48:51 +00:00
|
|
|
|
|
|
|
NL_SET_ERR_MSG_ATTR(info->extack, attr,
|
|
|
|
"missing family");
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
2022-05-04 02:38:51 +00:00
|
|
|
addr->family = nla_get_u16(tb[MPTCP_PM_ADDR_ATTR_FAMILY]);
|
|
|
|
if (addr->family != AF_INET
|
2020-03-27 21:48:51 +00:00
|
|
|
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
|
2022-05-04 02:38:51 +00:00
|
|
|
&& addr->family != AF_INET6
|
2020-03-27 21:48:51 +00:00
|
|
|
#endif
|
|
|
|
) {
|
|
|
|
NL_SET_ERR_MSG_ATTR(info->extack, attr,
|
|
|
|
"unknown address family");
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
2022-05-04 02:38:51 +00:00
|
|
|
addr_addr = mptcp_pm_family_to_addr(addr->family);
|
2020-03-27 21:48:51 +00:00
|
|
|
if (!tb[addr_addr]) {
|
|
|
|
NL_SET_ERR_MSG_ATTR(info->extack, attr,
|
|
|
|
"missing address data");
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
|
|
|
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
|
2022-05-04 02:38:51 +00:00
|
|
|
if (addr->family == AF_INET6)
|
|
|
|
addr->addr6 = nla_get_in6_addr(tb[addr_addr]);
|
2020-03-27 21:48:51 +00:00
|
|
|
else
|
|
|
|
#endif
|
2022-05-04 02:38:51 +00:00
|
|
|
addr->addr.s_addr = nla_get_in_addr(tb[addr_addr]);
|
|
|
|
|
|
|
|
if (tb[MPTCP_PM_ADDR_ATTR_PORT])
|
|
|
|
addr->port = htons(nla_get_u16(tb[MPTCP_PM_ADDR_ATTR_PORT]));
|
|
|
|
|
2022-12-09 00:44:31 +00:00
|
|
|
return 0;
|
2022-05-04 02:38:51 +00:00
|
|
|
}
|
|
|
|
|
2022-05-04 02:38:56 +00:00
|
|
|
int mptcp_pm_parse_addr(struct nlattr *attr, struct genl_info *info,
|
|
|
|
struct mptcp_addr_info *addr)
|
|
|
|
{
|
|
|
|
struct nlattr *tb[MPTCP_PM_ADDR_ATTR_MAX + 1];
|
|
|
|
|
|
|
|
memset(addr, 0, sizeof(*addr));
|
|
|
|
|
|
|
|
return mptcp_pm_parse_pm_addr_attr(tb, attr, info, addr, true);
|
|
|
|
}
|
|
|
|
|
2022-05-04 02:38:52 +00:00
|
|
|
int mptcp_pm_parse_entry(struct nlattr *attr, struct genl_info *info,
|
|
|
|
bool require_family,
|
|
|
|
struct mptcp_pm_addr_entry *entry)
|
2022-05-04 02:38:51 +00:00
|
|
|
{
|
|
|
|
struct nlattr *tb[MPTCP_PM_ADDR_ATTR_MAX + 1];
|
|
|
|
int err;
|
|
|
|
|
|
|
|
memset(entry, 0, sizeof(*entry));
|
|
|
|
|
|
|
|
err = mptcp_pm_parse_pm_addr_attr(tb, attr, info, &entry->addr, require_family);
|
|
|
|
if (err)
|
|
|
|
return err;
|
2020-03-27 21:48:51 +00:00
|
|
|
|
2020-09-14 08:01:15 +00:00
|
|
|
if (tb[MPTCP_PM_ADDR_ATTR_IF_IDX]) {
|
|
|
|
u32 val = nla_get_s32(tb[MPTCP_PM_ADDR_ATTR_IF_IDX]);
|
|
|
|
|
2021-04-07 00:15:57 +00:00
|
|
|
entry->ifindex = val;
|
2020-09-14 08:01:15 +00:00
|
|
|
}
|
2020-03-27 21:48:51 +00:00
|
|
|
|
|
|
|
if (tb[MPTCP_PM_ADDR_ATTR_FLAGS])
|
2021-04-07 00:15:57 +00:00
|
|
|
entry->flags = nla_get_u32(tb[MPTCP_PM_ADDR_ATTR_FLAGS]);
|
2020-03-27 21:48:51 +00:00
|
|
|
|
2022-02-05 00:03:29 +00:00
|
|
|
if (tb[MPTCP_PM_ADDR_ATTR_PORT])
|
2021-02-01 23:09:17 +00:00
|
|
|
entry->addr.port = htons(nla_get_u16(tb[MPTCP_PM_ADDR_ATTR_PORT]));
|
|
|
|
|
2020-03-27 21:48:51 +00:00
|
|
|
return 0;
|
|
|
|
}
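/* For reference, the nested address attribute parsed by the two
 * helpers above carries (all optional unless require_family is set):
 *
 *   MPTCP_PM_ADDR_ATTR_FAMILY  u16       AF_INET / AF_INET6
 *   MPTCP_PM_ADDR_ATTR_ID      u8        endpoint id
 *   MPTCP_PM_ADDR_ATTR_ADDR4   __be32    IPv4 address
 *   MPTCP_PM_ADDR_ATTR_ADDR6   in6_addr  IPv6 address
 *   MPTCP_PM_ADDR_ATTR_PORT    u16       converted with htons() on store
 *   MPTCP_PM_ADDR_ATTR_FLAGS   u32       MPTCP_PM_ADDR_FLAG_*
 *   MPTCP_PM_ADDR_ATTR_IF_IDX  s32       interface index
 */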
|
|
|
|
|
|
|
|
static struct pm_nl_pernet *genl_info_pm_nl(struct genl_info *info)
|
|
|
|
{
|
2022-04-08 19:45:57 +00:00
|
|
|
return pm_nl_get_pernet(genl_info_net(info));
|
2020-03-27 21:48:51 +00:00
|
|
|
}
|
|
|
|
|
2021-02-01 23:09:08 +00:00
|
|
|
static int mptcp_nl_add_subflow_or_signal_addr(struct net *net)
|
|
|
|
{
|
|
|
|
struct mptcp_sock *msk;
|
|
|
|
long s_slot = 0, s_num = 0;
|
|
|
|
|
|
|
|
while ((msk = mptcp_token_iter_next(net, &s_slot, &s_num)) != NULL) {
|
|
|
|
struct sock *sk = (struct sock *)msk;
|
|
|
|
|
2022-04-27 22:50:00 +00:00
|
|
|
if (!READ_ONCE(msk->fully_established) ||
|
|
|
|
mptcp_pm_is_userspace(msk))
|
2021-02-01 23:09:08 +00:00
|
|
|
goto next;
|
|
|
|
|
|
|
|
lock_sock(sk);
|
|
|
|
spin_lock_bh(&msk->pm.lock);
|
|
|
|
mptcp_pm_create_subflow_or_signal_addr(msk);
|
|
|
|
spin_unlock_bh(&msk->pm.lock);
|
|
|
|
release_sock(sk);
|
|
|
|
|
|
|
|
next:
|
|
|
|
sock_put(sk);
|
|
|
|
cond_resched();
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2024-02-15 18:25:29 +00:00
|
|
|
static bool mptcp_pm_has_addr_attr_id(const struct nlattr *attr,
|
|
|
|
struct genl_info *info)
|
|
|
|
{
|
|
|
|
struct nlattr *tb[MPTCP_PM_ADDR_ATTR_MAX + 1];
|
|
|
|
|
|
|
|
if (!nla_parse_nested_deprecated(tb, MPTCP_PM_ADDR_ATTR_MAX, attr,
|
|
|
|
mptcp_pm_address_nl_policy, info->extack) &&
|
|
|
|
tb[MPTCP_PM_ADDR_ATTR_ID])
|
|
|
|
return true;
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2023-10-23 18:17:11 +00:00
|
|
|
int mptcp_pm_nl_add_addr_doit(struct sk_buff *skb, struct genl_info *info)
|
2020-03-27 21:48:51 +00:00
|
|
|
{
|
2023-10-23 18:17:07 +00:00
|
|
|
struct nlattr *attr = info->attrs[MPTCP_PM_ENDPOINT_ADDR];
|
2020-03-27 21:48:51 +00:00
|
|
|
struct pm_nl_pernet *pernet = genl_info_pm_nl(info);
|
|
|
|
struct mptcp_pm_addr_entry addr, *entry;
|
|
|
|
int ret;
|
|
|
|
|
2022-05-04 02:38:51 +00:00
|
|
|
ret = mptcp_pm_parse_entry(attr, info, true, &addr);
|
2020-03-27 21:48:51 +00:00
|
|
|
if (ret < 0)
|
|
|
|
return ret;
|
|
|
|
|
2024-07-31 11:05:54 +00:00
|
|
|
if (addr.addr.port && !address_use_port(&addr)) {
|
|
|
|
GENL_SET_ERR_MSG(info, "flags must have signal and not subflow when using port");
|
2022-02-05 00:03:29 +00:00
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
2022-03-07 20:44:39 +00:00
|
|
|
if (addr.flags & MPTCP_PM_ADDR_FLAG_SIGNAL &&
|
|
|
|
addr.flags & MPTCP_PM_ADDR_FLAG_FULLMESH) {
|
|
|
|
GENL_SET_ERR_MSG(info, "flags mustn't have both signal and fullmesh");
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
2022-03-07 20:44:36 +00:00
|
|
|
if (addr.flags & MPTCP_PM_ADDR_FLAG_IMPLICIT) {
|
|
|
|
GENL_SET_ERR_MSG(info, "can't create IMPLICIT endpoint");
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
2022-11-18 18:46:07 +00:00
|
|
|
entry = kzalloc(sizeof(*entry), GFP_KERNEL_ACCOUNT);
|
2020-03-27 21:48:51 +00:00
|
|
|
if (!entry) {
|
|
|
|
GENL_SET_ERR_MSG(info, "can't allocate addr");
|
|
|
|
return -ENOMEM;
|
|
|
|
}
|
|
|
|
|
|
|
|
*entry = addr;
|
2021-02-01 23:09:12 +00:00
|
|
|
if (entry->addr.port) {
|
|
|
|
ret = mptcp_pm_nl_create_listen_socket(skb->sk, entry);
|
|
|
|
if (ret) {
|
2022-11-18 18:46:08 +00:00
|
|
|
GENL_SET_ERR_MSG_FMT(info, "create listen socket error: %d", ret);
|
2022-11-18 18:46:07 +00:00
|
|
|
goto out_free;
|
2021-02-01 23:09:12 +00:00
|
|
|
}
|
|
|
|
}
|
2024-02-15 18:25:29 +00:00
|
|
|
ret = mptcp_pm_nl_append_new_local_addr(pernet, entry,
|
|
|
|
!mptcp_pm_has_addr_attr_id(attr, info));
|
2020-03-27 21:48:51 +00:00
|
|
|
if (ret < 0) {
|
2022-11-18 18:46:08 +00:00
|
|
|
GENL_SET_ERR_MSG_FMT(info, "too many addresses or duplicate one: %d", ret);
|
2022-11-18 18:46:07 +00:00
|
|
|
goto out_free;
|
2020-03-27 21:48:51 +00:00
|
|
|
}
|
|
|
|
|
2021-02-01 23:09:08 +00:00
|
|
|
mptcp_nl_add_subflow_or_signal_addr(sock_net(skb->sk));
|
2020-03-27 21:48:51 +00:00
|
|
|
return 0;
|
2022-11-18 18:46:07 +00:00
|
|
|
|
|
|
|
out_free:
|
|
|
|
__mptcp_pm_release_addr_entry(entry);
|
|
|
|
return ret;
|
2020-03-27 21:48:51 +00:00
|
|
|
}
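/* The checks above map directly onto what userspace may request, e.g.
 * with an MPTCP-aware iproute2 (illustrative syntax):
 *
 *   ip mptcp endpoint add 10.0.0.2 dev eth1 subflow
 *   ip mptcp endpoint add 10.0.0.3 port 10100 signal
 *
 * whereas combining signal with fullmesh, giving a port without the
 * signal flag (or together with subflow), or setting the implicit flag
 * are all rejected with -EINVAL.
 */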
|
|
|
|
|
2023-06-08 13:20:51 +00:00
|
|
|
int mptcp_pm_nl_get_flags_and_ifindex_by_id(struct mptcp_sock *msk, unsigned int id,
|
|
|
|
u8 *flags, int *ifindex)
|
2021-08-17 22:07:22 +00:00
|
|
|
{
|
|
|
|
struct mptcp_pm_addr_entry *entry;
|
2022-05-04 02:38:50 +00:00
|
|
|
struct sock *sk = (struct sock *)msk;
|
|
|
|
struct net *net = sock_net(sk);
|
2021-08-17 22:07:22 +00:00
|
|
|
|
2023-06-08 13:20:51 +00:00
|
|
|
rcu_read_lock();
|
|
|
|
entry = __lookup_addr_by_id(pm_nl_get_pernet(net), id);
|
|
|
|
if (entry) {
|
|
|
|
*flags = entry->flags;
|
|
|
|
*ifindex = entry->ifindex;
|
2021-08-17 22:07:22 +00:00
|
|
|
}
|
2023-06-08 13:20:51 +00:00
|
|
|
rcu_read_unlock();
|
2021-08-17 22:07:22 +00:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2020-09-24 00:29:54 +00:00
|
|
|
static bool remove_anno_list_by_saddr(struct mptcp_sock *msk,
|
2022-02-16 02:11:28 +00:00
|
|
|
const struct mptcp_addr_info *addr)
|
2020-09-24 00:29:54 +00:00
|
|
|
{
|
2020-09-24 00:30:02 +00:00
|
|
|
struct mptcp_pm_add_entry *entry;
|
2020-09-24 00:29:54 +00:00
|
|
|
|
2021-05-25 21:23:13 +00:00
|
|
|
entry = mptcp_pm_del_add_timer(msk, addr, false);
|
2020-09-24 00:30:02 +00:00
|
|
|
if (entry) {
|
|
|
|
list_del(&entry->list);
|
|
|
|
kfree(entry);
|
|
|
|
return true;
|
2020-09-24 00:29:54 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool mptcp_pm_remove_anno_addr(struct mptcp_sock *msk,
|
2022-02-16 02:11:28 +00:00
|
|
|
const struct mptcp_addr_info *addr,
|
2020-09-24 00:29:54 +00:00
|
|
|
bool force)
|
|
|
|
{
|
2021-03-13 01:16:12 +00:00
|
|
|
struct mptcp_rm_list list = { .nr = 0 };
|
2020-09-24 00:29:54 +00:00
|
|
|
bool ret;
|
|
|
|
|
2021-03-13 01:16:12 +00:00
|
|
|
list.ids[list.nr++] = addr->id;
|
|
|
|
|
2020-09-24 00:29:54 +00:00
|
|
|
ret = remove_anno_list_by_saddr(msk, addr);
|
2020-09-24 00:30:02 +00:00
|
|
|
if (ret || force) {
|
|
|
|
spin_lock_bh(&msk->pm.lock);
|
2024-07-27 09:04:00 +00:00
|
|
|
msk->pm.add_addr_signaled -= ret;
|
2021-03-13 01:16:12 +00:00
|
|
|
mptcp_pm_remove_addr(msk, &list);
|
2020-09-24 00:30:02 +00:00
|
|
|
spin_unlock_bh(&msk->pm.lock);
|
|
|
|
}
|
2020-09-24 00:29:54 +00:00
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int mptcp_nl_remove_subflow_and_signal_addr(struct net *net,
|
2022-03-07 20:44:36 +00:00
|
|
|
const struct mptcp_pm_addr_entry *entry)
|
2020-09-24 00:29:54 +00:00
|
|
|
{
|
2022-03-07 20:44:36 +00:00
|
|
|
const struct mptcp_addr_info *addr = &entry->addr;
|
2021-03-13 01:16:16 +00:00
|
|
|
struct mptcp_rm_list list = { .nr = 0 };
|
2022-03-07 20:44:36 +00:00
|
|
|
long s_slot = 0, s_num = 0;
|
|
|
|
struct mptcp_sock *msk;
|
2020-09-24 00:29:54 +00:00
|
|
|
|
|
|
|
pr_debug("remove_id=%d", addr->id);
|
|
|
|
|
2021-03-13 01:16:16 +00:00
|
|
|
list.ids[list.nr++] = addr->id;
|
|
|
|
|
2020-09-24 00:29:54 +00:00
|
|
|
while ((msk = mptcp_token_iter_next(net, &s_slot, &s_num)) != NULL) {
|
|
|
|
struct sock *sk = (struct sock *)msk;
|
|
|
|
bool remove_subflow;
|
|
|
|
|
2022-04-27 22:50:00 +00:00
|
|
|
if (mptcp_pm_is_userspace(msk))
|
|
|
|
goto next;
|
|
|
|
|
2020-09-24 00:29:54 +00:00
|
|
|
if (list_empty(&msk->conn_list)) {
|
|
|
|
mptcp_pm_remove_anno_addr(msk, addr, false);
|
|
|
|
goto next;
|
|
|
|
}
|
|
|
|
|
|
|
|
lock_sock(sk);
|
|
|
|
remove_subflow = lookup_subflow_by_saddr(&msk->conn_list, addr);
|
2022-03-07 20:44:36 +00:00
|
|
|
mptcp_pm_remove_anno_addr(msk, addr, remove_subflow &&
|
|
|
|
!(entry->flags & MPTCP_PM_ADDR_FLAG_IMPLICIT));
|
2020-09-24 00:29:54 +00:00
|
|
|
if (remove_subflow)
|
2021-03-13 01:16:16 +00:00
|
|
|
mptcp_pm_remove_subflow(msk, &list);
|
2020-09-24 00:29:54 +00:00
|
|
|
release_sock(sk);
|
|
|
|
|
|
|
|
next:
|
|
|
|
sock_put(sk);
|
|
|
|
cond_resched();
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2021-03-31 00:08:53 +00:00
|
|
|
static int mptcp_nl_remove_id_zero_address(struct net *net,
|
|
|
|
struct mptcp_addr_info *addr)
|
|
|
|
{
|
|
|
|
struct mptcp_rm_list list = { .nr = 0 };
|
|
|
|
long s_slot = 0, s_num = 0;
|
|
|
|
struct mptcp_sock *msk;
|
|
|
|
|
|
|
|
list.ids[list.nr++] = 0;
|
|
|
|
|
|
|
|
while ((msk = mptcp_token_iter_next(net, &s_slot, &s_num)) != NULL) {
|
|
|
|
struct sock *sk = (struct sock *)msk;
|
|
|
|
struct mptcp_addr_info msk_local;
|
|
|
|
|
2022-04-27 22:50:00 +00:00
|
|
|
if (list_empty(&msk->conn_list) || mptcp_pm_is_userspace(msk))
|
2021-03-31 00:08:53 +00:00
|
|
|
goto next;
|
|
|
|
|
2023-06-08 13:20:49 +00:00
|
|
|
mptcp_local_address((struct sock_common *)msk, &msk_local);
|
2022-05-04 02:38:49 +00:00
|
|
|
if (!mptcp_addresses_equal(&msk_local, addr, addr->port))
|
2021-03-31 00:08:53 +00:00
|
|
|
goto next;
|
|
|
|
|
|
|
|
lock_sock(sk);
|
|
|
|
spin_lock_bh(&msk->pm.lock);
|
|
|
|
mptcp_pm_remove_addr(msk, &list);
|
|
|
|
mptcp_pm_nl_rm_subflow_received(msk, &list);
|
|
|
|
spin_unlock_bh(&msk->pm.lock);
|
|
|
|
release_sock(sk);
|
|
|
|
|
|
|
|
next:
|
|
|
|
sock_put(sk);
|
|
|
|
cond_resched();
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2023-10-23 18:17:11 +00:00
|
|
|
int mptcp_pm_nl_del_addr_doit(struct sk_buff *skb, struct genl_info *info)
|
2020-03-27 21:48:51 +00:00
|
|
|
{
|
2023-10-23 18:17:07 +00:00
|
|
|
struct nlattr *attr = info->attrs[MPTCP_PM_ENDPOINT_ADDR];
|
2020-03-27 21:48:51 +00:00
|
|
|
struct pm_nl_pernet *pernet = genl_info_pm_nl(info);
|
|
|
|
struct mptcp_pm_addr_entry addr, *entry;
|
2021-02-01 23:09:06 +00:00
|
|
|
unsigned int addr_max;
|
2020-03-27 21:48:51 +00:00
|
|
|
int ret;
|
|
|
|
|
2022-05-04 02:38:51 +00:00
|
|
|
ret = mptcp_pm_parse_entry(attr, info, false, &addr);
|
2020-03-27 21:48:51 +00:00
|
|
|
if (ret < 0)
|
|
|
|
return ret;
|
|
|
|
|
2021-03-31 00:08:53 +00:00
|
|
|
/* the zero id address is special: the first address used by the msk
|
|
|
|
* always gets such an id, so different subflows can have different zero
|
|
|
|
* id addresses. Additionally zero id is not accounted for in id_bitmap.
|
|
|
|
* Let's use an 'mptcp_rm_list' instead of the common remove code.
|
|
|
|
*/
|
|
|
|
if (addr.addr.id == 0)
|
|
|
|
return mptcp_nl_remove_id_zero_address(sock_net(skb->sk), &addr.addr);
|
|
|
|
|
2020-03-27 21:48:51 +00:00
|
|
|
spin_lock_bh(&pernet->lock);
|
|
|
|
entry = __lookup_addr_by_id(pernet, addr.addr.id);
|
|
|
|
if (!entry) {
|
|
|
|
GENL_SET_ERR_MSG(info, "address not found");
|
2020-09-24 00:29:54 +00:00
|
|
|
spin_unlock_bh(&pernet->lock);
|
|
|
|
return -EINVAL;
|
2020-03-27 21:48:51 +00:00
|
|
|
}
|
2021-04-07 00:15:57 +00:00
|
|
|
if (entry->flags & MPTCP_PM_ADDR_FLAG_SIGNAL) {
|
2021-02-01 23:09:06 +00:00
|
|
|
addr_max = pernet->add_addr_signal_max;
|
|
|
|
WRITE_ONCE(pernet->add_addr_signal_max, addr_max - 1);
|
|
|
|
}
|
2021-04-07 00:15:57 +00:00
|
|
|
if (entry->flags & MPTCP_PM_ADDR_FLAG_SUBFLOW) {
|
2021-02-01 23:09:06 +00:00
|
|
|
addr_max = pernet->local_addr_max;
|
|
|
|
WRITE_ONCE(pernet->local_addr_max, addr_max - 1);
|
|
|
|
}
|
2020-03-27 21:48:51 +00:00
|
|
|
|
|
|
|
pernet->addrs--;
|
|
|
|
list_del_rcu(&entry->list);
|
2021-01-09 00:47:55 +00:00
|
|
|
__clear_bit(entry->addr.id, pernet->id_bitmap);
|
2020-03-27 21:48:51 +00:00
|
|
|
spin_unlock_bh(&pernet->lock);
|
2020-09-24 00:29:54 +00:00
|
|
|
|
2022-03-07 20:44:36 +00:00
|
|
|
mptcp_nl_remove_subflow_and_signal_addr(sock_net(skb->sk), entry);
|
2021-08-18 23:42:36 +00:00
|
|
|
synchronize_rcu();
|
|
|
|
__mptcp_pm_release_addr_entry(entry);
|
2020-09-24 00:29:54 +00:00
|
|
|
|
2020-03-27 21:48:51 +00:00
|
|
|
return ret;
|
|
|
|
}
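/* Removal is keyed on the endpoint id, e.g. "ip mptcp endpoint delete
 * id 2" (illustrative iproute2 syntax). Id 0 is the per-connection id
 * of the initial subflow's address and never lives in the pernet list,
 * hence the dedicated mptcp_nl_remove_id_zero_address() path above.
 */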
|
|
|
|
|
2023-06-05 03:25:17 +00:00
|
|
|
void mptcp_pm_remove_addrs(struct mptcp_sock *msk, struct list_head *rm_list)
|
|
|
|
{
|
|
|
|
struct mptcp_rm_list alist = { .nr = 0 };
|
|
|
|
struct mptcp_pm_addr_entry *entry;
|
2024-07-27 09:03:59 +00:00
|
|
|
int anno_nr = 0;
|
2023-06-05 03:25:17 +00:00
|
|
|
|
|
|
|
list_for_each_entry(entry, rm_list, list) {
|
2024-07-27 09:03:59 +00:00
|
|
|
if (alist.nr >= MPTCP_RM_IDS_MAX)
|
|
|
|
break;
|
|
|
|
|
|
|
|
/* only delete if either announced or matching a subflow */
|
|
|
|
if (remove_anno_list_by_saddr(msk, &entry->addr))
|
|
|
|
anno_nr++;
|
|
|
|
else if (!lookup_subflow_by_saddr(&msk->conn_list,
|
|
|
|
&entry->addr))
|
|
|
|
continue;
|
|
|
|
|
|
|
|
alist.ids[alist.nr++] = entry->addr.id;
|
2023-06-05 03:25:17 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
if (alist.nr) {
|
|
|
|
spin_lock_bh(&msk->pm.lock);
|
2024-07-27 09:03:59 +00:00
|
|
|
msk->pm.add_addr_signaled -= anno_nr;
|
2023-06-05 03:25:17 +00:00
|
|
|
mptcp_pm_remove_addr(msk, &alist);
|
|
|
|
spin_unlock_bh(&msk->pm.lock);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2024-03-01 18:18:25 +00:00
|
|
|
static void mptcp_pm_remove_addrs_and_subflows(struct mptcp_sock *msk,
|
|
|
|
struct list_head *rm_list)
|
2021-03-13 01:16:17 +00:00
|
|
|
{
|
|
|
|
struct mptcp_rm_list alist = { .nr = 0 }, slist = { .nr = 0 };
|
|
|
|
struct mptcp_pm_addr_entry *entry;
|
|
|
|
|
|
|
|
list_for_each_entry(entry, rm_list, list) {
|
2024-07-27 09:04:00 +00:00
|
|
|
if (slist.nr < MPTCP_RM_IDS_MAX &&
|
|
|
|
lookup_subflow_by_saddr(&msk->conn_list, &entry->addr))
|
2021-03-13 01:16:17 +00:00
|
|
|
slist.ids[slist.nr++] = entry->addr.id;
|
2022-03-07 20:44:35 +00:00
|
|
|
|
2024-07-27 09:04:00 +00:00
|
|
|
if (alist.nr < MPTCP_RM_IDS_MAX &&
|
|
|
|
remove_anno_list_by_saddr(msk, &entry->addr))
|
2021-03-13 01:16:17 +00:00
|
|
|
alist.ids[alist.nr++] = entry->addr.id;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (alist.nr) {
|
|
|
|
spin_lock_bh(&msk->pm.lock);
|
2024-07-27 09:04:00 +00:00
|
|
|
msk->pm.add_addr_signaled -= alist.nr;
|
2021-03-13 01:16:17 +00:00
|
|
|
mptcp_pm_remove_addr(msk, &alist);
|
|
|
|
spin_unlock_bh(&msk->pm.lock);
|
|
|
|
}
|
|
|
|
if (slist.nr)
|
|
|
|
mptcp_pm_remove_subflow(msk, &slist);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void mptcp_nl_remove_addrs_list(struct net *net,
|
|
|
|
struct list_head *rm_list)
|
|
|
|
{
|
|
|
|
long s_slot = 0, s_num = 0;
|
|
|
|
struct mptcp_sock *msk;
|
|
|
|
|
|
|
|
if (list_empty(rm_list))
|
|
|
|
return;
|
|
|
|
|
|
|
|
while ((msk = mptcp_token_iter_next(net, &s_slot, &s_num)) != NULL) {
|
|
|
|
struct sock *sk = (struct sock *)msk;
|
|
|
|
|
2022-04-27 22:50:00 +00:00
|
|
|
if (!mptcp_pm_is_userspace(msk)) {
|
|
|
|
lock_sock(sk);
|
|
|
|
mptcp_pm_remove_addrs_and_subflows(msk, rm_list);
|
|
|
|
release_sock(sk);
|
|
|
|
}
|
2021-03-13 01:16:17 +00:00
|
|
|
|
|
|
|
sock_put(sk);
|
|
|
|
cond_resched();
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-08-18 23:42:36 +00:00
|
|
|
/* caller must ensure the RCU grace period is already elapsed */
|
2021-03-13 01:16:18 +00:00
|
|
|
static void __flush_addrs(struct list_head *list)
|
2020-03-27 21:48:51 +00:00
|
|
|
{
|
2020-12-10 22:24:59 +00:00
|
|
|
while (!list_empty(list)) {
|
2020-03-27 21:48:51 +00:00
|
|
|
struct mptcp_pm_addr_entry *cur;
|
|
|
|
|
2020-12-10 22:24:59 +00:00
|
|
|
cur = list_entry(list->next,
|
2020-03-27 21:48:51 +00:00
|
|
|
struct mptcp_pm_addr_entry, list);
|
|
|
|
list_del_rcu(&cur->list);
|
2021-08-18 23:42:36 +00:00
|
|
|
__mptcp_pm_release_addr_entry(cur);
|
2020-03-27 21:48:51 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static void __reset_counters(struct pm_nl_pernet *pernet)
|
|
|
|
{
|
2021-02-01 23:09:06 +00:00
|
|
|
WRITE_ONCE(pernet->add_addr_signal_max, 0);
|
|
|
|
WRITE_ONCE(pernet->add_addr_accept_max, 0);
|
|
|
|
WRITE_ONCE(pernet->local_addr_max, 0);
|
2020-03-27 21:48:51 +00:00
|
|
|
pernet->addrs = 0;
|
|
|
|
}
|
|
|
|
|
2023-10-23 18:17:11 +00:00
|
|
|
int mptcp_pm_nl_flush_addrs_doit(struct sk_buff *skb, struct genl_info *info)
|
2020-03-27 21:48:51 +00:00
|
|
|
{
|
|
|
|
struct pm_nl_pernet *pernet = genl_info_pm_nl(info);
|
2020-12-10 22:24:59 +00:00
|
|
|
LIST_HEAD(free_list);
|
2020-03-27 21:48:51 +00:00
|
|
|
|
|
|
|
spin_lock_bh(&pernet->lock);
|
2020-12-10 22:24:59 +00:00
|
|
|
list_splice_init(&pernet->local_addr_list, &free_list);
|
2020-03-27 21:48:51 +00:00
|
|
|
__reset_counters(pernet);
|
2021-01-09 00:47:55 +00:00
|
|
|
pernet->next_id = 1;
|
2022-01-07 00:20:22 +00:00
|
|
|
bitmap_zero(pernet->id_bitmap, MPTCP_PM_MAX_ADDR_ID + 1);
|
2020-03-27 21:48:51 +00:00
|
|
|
spin_unlock_bh(&pernet->lock);
|
2021-03-13 01:16:18 +00:00
|
|
|
mptcp_nl_remove_addrs_list(sock_net(skb->sk), &free_list);
|
2021-08-18 23:42:36 +00:00
|
|
|
synchronize_rcu();
|
2021-03-13 01:16:18 +00:00
|
|
|
__flush_addrs(&free_list);
|
2020-03-27 21:48:51 +00:00
|
|
|
return 0;
|
|
|
|
}
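/* "ip mptcp endpoint flush" (illustrative iproute2 syntax) ends up
 * here: the whole pernet list is detached under the lock, counters and
 * the id bitmap are reset, every non-userspace msk is then walked to
 * emit the matching RM_ADDRs and tear down subflows, and the entries
 * are finally freed once the RCU grace period has elapsed.
 */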
|
|
|
|
|
2024-03-01 18:18:26 +00:00
|
|
|
int mptcp_nl_fill_addr(struct sk_buff *skb,
|
|
|
|
struct mptcp_pm_addr_entry *entry)
|
2020-03-27 21:48:51 +00:00
|
|
|
{
|
|
|
|
struct mptcp_addr_info *addr = &entry->addr;
|
|
|
|
struct nlattr *attr;
|
|
|
|
|
|
|
|
attr = nla_nest_start(skb, MPTCP_PM_ATTR_ADDR);
|
|
|
|
if (!attr)
|
|
|
|
return -EMSGSIZE;
|
|
|
|
|
|
|
|
if (nla_put_u16(skb, MPTCP_PM_ADDR_ATTR_FAMILY, addr->family))
|
|
|
|
goto nla_put_failure;
|
2021-02-01 23:09:17 +00:00
|
|
|
if (nla_put_u16(skb, MPTCP_PM_ADDR_ATTR_PORT, ntohs(addr->port)))
|
|
|
|
goto nla_put_failure;
|
2020-03-27 21:48:51 +00:00
|
|
|
if (nla_put_u8(skb, MPTCP_PM_ADDR_ATTR_ID, addr->id))
|
|
|
|
goto nla_put_failure;
|
2021-04-07 00:15:57 +00:00
|
|
|
if (nla_put_u32(skb, MPTCP_PM_ADDR_ATTR_FLAGS, entry->flags))
|
2020-03-27 21:48:51 +00:00
|
|
|
goto nla_put_failure;
|
2021-04-07 00:15:57 +00:00
|
|
|
if (entry->ifindex &&
|
|
|
|
nla_put_s32(skb, MPTCP_PM_ADDR_ATTR_IF_IDX, entry->ifindex))
|
2020-03-27 21:48:51 +00:00
|
|
|
goto nla_put_failure;
|
|
|
|
|
2020-04-23 02:10:03 +00:00
|
|
|
if (addr->family == AF_INET &&
|
|
|
|
nla_put_in_addr(skb, MPTCP_PM_ADDR_ATTR_ADDR4,
|
|
|
|
addr->addr.s_addr))
|
|
|
|
goto nla_put_failure;
|
2020-03-27 21:48:51 +00:00
|
|
|
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
|
2020-04-23 02:10:03 +00:00
|
|
|
else if (addr->family == AF_INET6 &&
|
|
|
|
nla_put_in6_addr(skb, MPTCP_PM_ADDR_ATTR_ADDR6, &addr->addr6))
|
|
|
|
goto nla_put_failure;
|
2020-03-27 21:48:51 +00:00
|
|
|
#endif
|
|
|
|
nla_nest_end(skb, attr);
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
nla_put_failure:
|
|
|
|
nla_nest_cancel(skb, attr);
|
|
|
|
return -EMSGSIZE;
|
|
|
|
}
|
|
|
|
|
2024-03-01 18:18:37 +00:00
|
|
|
int mptcp_pm_nl_get_addr(struct sk_buff *skb, struct genl_info *info)
|
2020-03-27 21:48:51 +00:00
|
|
|
{
|
2023-10-23 18:17:07 +00:00
|
|
|
struct nlattr *attr = info->attrs[MPTCP_PM_ENDPOINT_ADDR];
|
2020-03-27 21:48:51 +00:00
|
|
|
struct pm_nl_pernet *pernet = genl_info_pm_nl(info);
|
|
|
|
struct mptcp_pm_addr_entry addr, *entry;
|
|
|
|
struct sk_buff *msg;
|
|
|
|
void *reply;
|
|
|
|
int ret;
|
|
|
|
|
2022-05-04 02:38:51 +00:00
|
|
|
ret = mptcp_pm_parse_entry(attr, info, false, &addr);
|
2020-03-27 21:48:51 +00:00
|
|
|
if (ret < 0)
|
|
|
|
return ret;
|
|
|
|
|
|
|
|
msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
|
|
|
|
if (!msg)
|
|
|
|
return -ENOMEM;
|
|
|
|
|
|
|
|
reply = genlmsg_put_reply(msg, info, &mptcp_genl_family, 0,
|
|
|
|
info->genlhdr->cmd);
|
|
|
|
if (!reply) {
|
|
|
|
GENL_SET_ERR_MSG(info, "not enough space in Netlink message");
|
|
|
|
ret = -EMSGSIZE;
|
|
|
|
goto fail;
|
|
|
|
}
|
|
|
|
|
|
|
|
spin_lock_bh(&pernet->lock);
|
|
|
|
entry = __lookup_addr_by_id(pernet, addr.addr.id);
|
|
|
|
if (!entry) {
|
|
|
|
GENL_SET_ERR_MSG(info, "address not found");
|
|
|
|
ret = -EINVAL;
|
|
|
|
goto unlock_fail;
|
|
|
|
}
|
|
|
|
|
|
|
|
ret = mptcp_nl_fill_addr(msg, entry);
|
|
|
|
if (ret)
|
|
|
|
goto unlock_fail;
|
|
|
|
|
|
|
|
genlmsg_end(msg, reply);
|
|
|
|
ret = genlmsg_reply(msg, info);
|
|
|
|
spin_unlock_bh(&pernet->lock);
|
|
|
|
return ret;
|
|
|
|
|
|
|
|
unlock_fail:
|
|
|
|
spin_unlock_bh(&pernet->lock);
|
|
|
|
|
|
|
|
fail:
|
|
|
|
nlmsg_free(msg);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2024-03-01 18:18:37 +00:00
|
|
|
int mptcp_pm_nl_get_addr_doit(struct sk_buff *skb, struct genl_info *info)
|
|
|
|
{
|
|
|
|
return mptcp_pm_get_addr(skb, info);
|
|
|
|
}
|
|
|
|
|
2024-03-01 18:18:29 +00:00
|
|
|
int mptcp_pm_nl_dump_addr(struct sk_buff *msg,
|
|
|
|
struct netlink_callback *cb)
|
2020-03-27 21:48:51 +00:00
|
|
|
{
|
|
|
|
struct net *net = sock_net(msg->sk);
|
|
|
|
struct mptcp_pm_addr_entry *entry;
|
|
|
|
struct pm_nl_pernet *pernet;
|
|
|
|
int id = cb->args[0];
|
|
|
|
void *hdr;
|
2021-01-09 00:47:55 +00:00
|
|
|
int i;
|
2020-03-27 21:48:51 +00:00
|
|
|
|
2022-04-08 19:45:57 +00:00
|
|
|
pernet = pm_nl_get_pernet(net);
|
2020-03-27 21:48:51 +00:00
|
|
|
|
|
|
|
spin_lock_bh(&pernet->lock);
|
2022-01-07 00:20:22 +00:00
|
|
|
for (i = id; i < MPTCP_PM_MAX_ADDR_ID + 1; i++) {
|
2021-01-09 00:47:55 +00:00
|
|
|
if (test_bit(i, pernet->id_bitmap)) {
|
|
|
|
entry = __lookup_addr_by_id(pernet, i);
|
|
|
|
if (!entry)
|
|
|
|
break;
|
|
|
|
|
|
|
|
if (entry->addr.id <= id)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
hdr = genlmsg_put(msg, NETLINK_CB(cb->skb).portid,
|
|
|
|
cb->nlh->nlmsg_seq, &mptcp_genl_family,
|
|
|
|
NLM_F_MULTI, MPTCP_PM_CMD_GET_ADDR);
|
|
|
|
if (!hdr)
|
|
|
|
break;
|
|
|
|
|
|
|
|
if (mptcp_nl_fill_addr(msg, entry) < 0) {
|
|
|
|
genlmsg_cancel(msg, hdr);
|
|
|
|
break;
|
|
|
|
}
|
2020-03-27 21:48:51 +00:00
|
|
|
|
2021-01-09 00:47:55 +00:00
|
|
|
id = entry->addr.id;
|
|
|
|
genlmsg_end(msg, hdr);
|
2020-03-27 21:48:51 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
spin_unlock_bh(&pernet->lock);
|
|
|
|
|
|
|
|
cb->args[0] = id;
|
|
|
|
return msg->len;
|
|
|
|
}
|
|
|
|
|
2024-03-01 18:18:29 +00:00
|
|
|
int mptcp_pm_nl_get_addr_dumpit(struct sk_buff *msg,
|
|
|
|
struct netlink_callback *cb)
|
|
|
|
{
|
|
|
|
return mptcp_pm_dump_addr(msg, cb);
|
|
|
|
}
|
|
|
|
|
2020-03-27 21:48:51 +00:00
|
|
|
static int parse_limit(struct genl_info *info, int id, unsigned int *limit)
|
|
|
|
{
|
|
|
|
struct nlattr *attr = info->attrs[id];
|
|
|
|
|
|
|
|
if (!attr)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
*limit = nla_get_u32(attr);
|
|
|
|
if (*limit > MPTCP_PM_ADDR_MAX) {
|
|
|
|
GENL_SET_ERR_MSG(info, "limit greater than maximum");
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2023-10-23 18:17:11 +00:00
|
|
|
int mptcp_pm_nl_set_limits_doit(struct sk_buff *skb, struct genl_info *info)
|
2020-03-27 21:48:51 +00:00
|
|
|
{
|
|
|
|
struct pm_nl_pernet *pernet = genl_info_pm_nl(info);
|
|
|
|
unsigned int rcv_addrs, subflows;
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
spin_lock_bh(&pernet->lock);
|
|
|
|
rcv_addrs = pernet->add_addr_accept_max;
|
|
|
|
ret = parse_limit(info, MPTCP_PM_ATTR_RCV_ADD_ADDRS, &rcv_addrs);
|
|
|
|
if (ret)
|
|
|
|
goto unlock;
|
|
|
|
|
|
|
|
subflows = pernet->subflows_max;
|
|
|
|
ret = parse_limit(info, MPTCP_PM_ATTR_SUBFLOWS, &subflows);
|
|
|
|
if (ret)
|
|
|
|
goto unlock;
|
|
|
|
|
|
|
|
WRITE_ONCE(pernet->add_addr_accept_max, rcv_addrs);
|
|
|
|
WRITE_ONCE(pernet->subflows_max, subflows);
|
|
|
|
|
|
|
|
unlock:
|
|
|
|
spin_unlock_bh(&pernet->lock);
|
|
|
|
return ret;
|
|
|
|
}
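/* Each limit is rejected with -EINVAL by parse_limit() when it exceeds
 * MPTCP_PM_ADDR_MAX. Illustrative iproute2 usage:
 *
 *   ip mptcp limits set add_addr_accepted 4 subflows 8
 */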
|
|
|
|
|
2023-10-23 18:17:11 +00:00
|
|
|
int mptcp_pm_nl_get_limits_doit(struct sk_buff *skb, struct genl_info *info)
|
2020-03-27 21:48:51 +00:00
|
|
|
{
|
|
|
|
struct pm_nl_pernet *pernet = genl_info_pm_nl(info);
|
|
|
|
struct sk_buff *msg;
|
|
|
|
void *reply;
|
|
|
|
|
|
|
|
msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
|
|
|
|
if (!msg)
|
|
|
|
return -ENOMEM;
|
|
|
|
|
|
|
|
reply = genlmsg_put_reply(msg, info, &mptcp_genl_family, 0,
|
|
|
|
MPTCP_PM_CMD_GET_LIMITS);
|
|
|
|
if (!reply)
|
|
|
|
goto fail;
|
|
|
|
|
|
|
|
if (nla_put_u32(msg, MPTCP_PM_ATTR_RCV_ADD_ADDRS,
|
|
|
|
READ_ONCE(pernet->add_addr_accept_max)))
|
|
|
|
goto fail;
|
|
|
|
|
|
|
|
if (nla_put_u32(msg, MPTCP_PM_ATTR_SUBFLOWS,
|
|
|
|
READ_ONCE(pernet->subflows_max)))
|
|
|
|
goto fail;
|
|
|
|
|
|
|
|
genlmsg_end(msg, reply);
|
|
|
|
return genlmsg_reply(msg, info);
|
|
|
|
|
|
|
|
fail:
|
|
|
|
GENL_SET_ERR_MSG(info, "not enough space in Netlink message");
|
|
|
|
nlmsg_free(msg);
|
|
|
|
return -EMSGSIZE;
|
|
|
|
}
|
|
|
|
|
2022-02-03 01:03:41 +00:00
|
|
|
static void mptcp_pm_nl_fullmesh(struct mptcp_sock *msk,
|
|
|
|
struct mptcp_addr_info *addr)
|
|
|
|
{
|
|
|
|
struct mptcp_rm_list list = { .nr = 0 };
|
|
|
|
|
|
|
|
list.ids[list.nr++] = addr->id;
|
|
|
|
|
2022-07-05 21:32:12 +00:00
|
|
|
spin_lock_bh(&msk->pm.lock);
|
2022-02-03 01:03:41 +00:00
|
|
|
mptcp_pm_nl_rm_subflow_received(msk, &list);
|
|
|
|
mptcp_pm_create_subflow_or_signal_addr(msk);
|
2022-07-05 21:32:12 +00:00
|
|
|
spin_unlock_bh(&msk->pm.lock);
|
2022-02-03 01:03:41 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static int mptcp_nl_set_flags(struct net *net,
|
|
|
|
struct mptcp_addr_info *addr,
|
|
|
|
u8 bkup, u8 changed)
|
2021-01-09 00:47:59 +00:00
|
|
|
{
|
|
|
|
long s_slot = 0, s_num = 0;
|
|
|
|
struct mptcp_sock *msk;
|
|
|
|
int ret = -EINVAL;
|
|
|
|
|
|
|
|
while ((msk = mptcp_token_iter_next(net, &s_slot, &s_num)) != NULL) {
|
|
|
|
struct sock *sk = (struct sock *)msk;
|
|
|
|
|
2022-04-27 22:50:00 +00:00
|
|
|
if (list_empty(&msk->conn_list) || mptcp_pm_is_userspace(msk))
|
2021-01-09 00:47:59 +00:00
|
|
|
goto next;
|
|
|
|
|
|
|
|
lock_sock(sk);
|
2022-02-03 01:03:41 +00:00
|
|
|
if (changed & MPTCP_PM_ADDR_FLAG_BACKUP)
|
2022-07-05 21:32:14 +00:00
|
|
|
ret = mptcp_pm_nl_mp_prio_send_ack(msk, addr, NULL, bkup);
|
2022-02-03 01:03:41 +00:00
|
|
|
if (changed & MPTCP_PM_ADDR_FLAG_FULLMESH)
|
|
|
|
mptcp_pm_nl_fullmesh(msk, addr);
|
2021-01-09 00:47:59 +00:00
|
|
|
release_sock(sk);
|
|
|
|
|
|
|
|
next:
|
|
|
|
sock_put(sk);
|
|
|
|
cond_resched();
|
|
|
|
}
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2024-03-05 11:04:31 +00:00
|
|
|
int mptcp_pm_nl_set_flags(struct sk_buff *skb, struct genl_info *info)
|
2023-06-08 13:20:52 +00:00
|
|
|
{
|
2024-03-05 11:04:31 +00:00
|
|
|
struct mptcp_pm_addr_entry addr = { .addr = { .family = AF_UNSPEC }, };
|
|
|
|
struct nlattr *attr = info->attrs[MPTCP_PM_ATTR_ADDR];
|
2023-06-08 13:20:52 +00:00
|
|
|
u8 changed, mask = MPTCP_PM_ADDR_FLAG_BACKUP |
|
|
|
|
MPTCP_PM_ADDR_FLAG_FULLMESH;
|
2024-03-05 11:04:31 +00:00
|
|
|
struct net *net = sock_net(skb->sk);
|
2023-06-08 13:20:52 +00:00
|
|
|
struct mptcp_pm_addr_entry *entry;
|
2024-03-05 11:04:31 +00:00
|
|
|
struct pm_nl_pernet *pernet;
|
2023-06-08 13:20:52 +00:00
|
|
|
u8 lookup_by_id = 0;
|
2024-03-05 11:04:31 +00:00
|
|
|
u8 bkup = 0;
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
pernet = pm_nl_get_pernet(net);
|
2023-06-08 13:20:52 +00:00
|
|
|
|
2024-03-05 11:04:31 +00:00
|
|
|
ret = mptcp_pm_parse_entry(attr, info, false, &addr);
|
|
|
|
if (ret < 0)
|
|
|
|
return ret;
|
|
|
|
|
|
|
|
if (addr.addr.family == AF_UNSPEC) {
|
2023-06-08 13:20:52 +00:00
|
|
|
lookup_by_id = 1;
|
2024-03-05 11:04:32 +00:00
|
|
|
if (!addr.addr.id) {
|
|
|
|
GENL_SET_ERR_MSG(info, "missing required inputs");
|
2023-06-08 13:20:52 +00:00
|
|
|
return -EOPNOTSUPP;
|
2024-03-05 11:04:32 +00:00
|
|
|
}
|
2023-06-08 13:20:52 +00:00
|
|
|
}
|
|
|
|
|
2024-03-05 11:04:31 +00:00
|
|
|
if (addr.flags & MPTCP_PM_ADDR_FLAG_BACKUP)
|
|
|
|
bkup = 1;
|
|
|
|
|
2023-06-08 13:20:52 +00:00
|
|
|
spin_lock_bh(&pernet->lock);
|
2024-03-05 11:04:33 +00:00
|
|
|
entry = lookup_by_id ? __lookup_addr_by_id(pernet, addr.addr.id) :
|
|
|
|
__lookup_addr(pernet, &addr.addr);
|
2023-06-08 13:20:52 +00:00
|
|
|
if (!entry) {
|
|
|
|
spin_unlock_bh(&pernet->lock);
|
2024-03-05 11:04:32 +00:00
|
|
|
GENL_SET_ERR_MSG(info, "address not found");
|
2023-06-08 13:20:52 +00:00
|
|
|
return -EINVAL;
|
|
|
|
}
|
2024-03-05 11:04:31 +00:00
|
|
|
if ((addr.flags & MPTCP_PM_ADDR_FLAG_FULLMESH) &&
|
2023-06-08 13:20:52 +00:00
|
|
|
(entry->flags & MPTCP_PM_ADDR_FLAG_SIGNAL)) {
|
|
|
|
spin_unlock_bh(&pernet->lock);
|
2024-03-05 11:04:32 +00:00
|
|
|
GENL_SET_ERR_MSG(info, "invalid addr flags");
|
2023-06-08 13:20:52 +00:00
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
2024-03-05 11:04:31 +00:00
|
|
|
changed = (addr.flags ^ entry->flags) & mask;
|
|
|
|
entry->flags = (entry->flags & ~mask) | (addr.flags & mask);
|
|
|
|
addr = *entry;
|
2023-06-08 13:20:52 +00:00
|
|
|
spin_unlock_bh(&pernet->lock);
|
|
|
|
|
2024-03-05 11:04:31 +00:00
|
|
|
mptcp_nl_set_flags(net, &addr.addr, bkup, changed);
|
2023-06-08 13:20:52 +00:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2023-10-23 18:17:11 +00:00
|
|
|
int mptcp_pm_nl_set_flags_doit(struct sk_buff *skb, struct genl_info *info)
|
2021-01-09 00:47:59 +00:00
|
|
|
{
|
2024-03-05 11:04:31 +00:00
|
|
|
return mptcp_pm_set_flags(skb, info);
|
2021-01-09 00:47:59 +00:00
|
|
|
}
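/* Only the BACKUP and FULLMESH bits (the mask above) can be changed on
 * an existing endpoint; the new value is stored in the pernet entry and
 * propagated to every established, non-userspace msk in the namespace
 * via mptcp_nl_set_flags(). Illustrative usage with a recent,
 * MPTCP-aware iproute2:
 *
 *   ip mptcp endpoint change id 1 backup
 */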
|
|
|
|
|
2021-02-13 00:00:01 +00:00
|
|
|
static void mptcp_nl_mcast_send(struct net *net, struct sk_buff *nlskb, gfp_t gfp)
|
|
|
|
{
|
|
|
|
genlmsg_multicast_netns(&mptcp_genl_family, net,
|
|
|
|
nlskb, 0, MPTCP_PM_EV_GRP_OFFSET, gfp);
|
|
|
|
}
|
|
|
|
|
2022-05-02 20:52:31 +00:00
|
|
|
bool mptcp_userspace_pm_active(const struct mptcp_sock *msk)
|
|
|
|
{
|
|
|
|
return genl_has_listeners(&mptcp_genl_family,
|
|
|
|
sock_net((const struct sock *)msk),
|
|
|
|
MPTCP_PM_EV_GRP_OFFSET);
|
|
|
|
}
|
|
|
|
|
2021-02-13 00:00:01 +00:00
|
|
|
static int mptcp_event_add_subflow(struct sk_buff *skb, const struct sock *ssk)
|
|
|
|
{
|
|
|
|
const struct inet_sock *issk = inet_sk(ssk);
|
|
|
|
const struct mptcp_subflow_context *sf;
|
|
|
|
|
|
|
|
if (nla_put_u16(skb, MPTCP_ATTR_FAMILY, ssk->sk_family))
|
|
|
|
return -EMSGSIZE;
|
|
|
|
|
|
|
|
switch (ssk->sk_family) {
|
|
|
|
case AF_INET:
|
|
|
|
if (nla_put_in_addr(skb, MPTCP_ATTR_SADDR4, issk->inet_saddr))
|
|
|
|
return -EMSGSIZE;
|
|
|
|
if (nla_put_in_addr(skb, MPTCP_ATTR_DADDR4, issk->inet_daddr))
|
|
|
|
return -EMSGSIZE;
|
|
|
|
break;
|
|
|
|
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
|
|
|
|
case AF_INET6: {
|
|
|
|
const struct ipv6_pinfo *np = inet6_sk(ssk);
|
|
|
|
|
|
|
|
if (nla_put_in6_addr(skb, MPTCP_ATTR_SADDR6, &np->saddr))
|
|
|
|
return -EMSGSIZE;
|
|
|
|
if (nla_put_in6_addr(skb, MPTCP_ATTR_DADDR6, &ssk->sk_v6_daddr))
|
|
|
|
return -EMSGSIZE;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
default:
|
|
|
|
WARN_ON_ONCE(1);
|
|
|
|
return -EMSGSIZE;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (nla_put_be16(skb, MPTCP_ATTR_SPORT, issk->inet_sport))
|
|
|
|
return -EMSGSIZE;
|
|
|
|
if (nla_put_be16(skb, MPTCP_ATTR_DPORT, issk->inet_dport))
|
|
|
|
return -EMSGSIZE;
|
|
|
|
|
|
|
|
sf = mptcp_subflow_ctx(ssk);
|
|
|
|
if (WARN_ON_ONCE(!sf))
|
|
|
|
return -EINVAL;
|
|
|
|
|
2024-02-15 18:25:31 +00:00
|
|
|
if (nla_put_u8(skb, MPTCP_ATTR_LOC_ID, subflow_get_local_id(sf)))
|
2021-02-13 00:00:01 +00:00
|
|
|
return -EMSGSIZE;
|
|
|
|
|
|
|
|
if (nla_put_u8(skb, MPTCP_ATTR_REM_ID, sf->remote_id))
|
|
|
|
return -EMSGSIZE;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int mptcp_event_put_token_and_ssk(struct sk_buff *skb,
|
|
|
|
const struct mptcp_sock *msk,
|
|
|
|
const struct sock *ssk)
|
|
|
|
{
|
|
|
|
const struct sock *sk = (const struct sock *)msk;
|
|
|
|
const struct mptcp_subflow_context *sf;
|
|
|
|
u8 sk_err;
|
|
|
|
|
2024-02-02 11:40:10 +00:00
|
|
|
if (nla_put_u32(skb, MPTCP_ATTR_TOKEN, READ_ONCE(msk->token)))
|
2021-02-13 00:00:01 +00:00
|
|
|
return -EMSGSIZE;
|
|
|
|
|
|
|
|
if (mptcp_event_add_subflow(skb, ssk))
|
|
|
|
return -EMSGSIZE;
|
|
|
|
|
|
|
|
sf = mptcp_subflow_ctx(ssk);
|
|
|
|
if (WARN_ON_ONCE(!sf))
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
if (nla_put_u8(skb, MPTCP_ATTR_BACKUP, sf->backup))
|
|
|
|
return -EMSGSIZE;
|
|
|
|
|
|
|
|
if (ssk->sk_bound_dev_if &&
|
|
|
|
nla_put_s32(skb, MPTCP_ATTR_IF_IDX, ssk->sk_bound_dev_if))
|
|
|
|
return -EMSGSIZE;
|
|
|
|
|
2023-03-15 20:57:45 +00:00
|
|
|
sk_err = READ_ONCE(ssk->sk_err);
|
2021-02-13 00:00:01 +00:00
|
|
|
if (sk_err && sk->sk_state == TCP_ESTABLISHED &&
|
|
|
|
nla_put_u8(skb, MPTCP_ATTR_ERROR, sk_err))
|
|
|
|
return -EMSGSIZE;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int mptcp_event_sub_established(struct sk_buff *skb,
|
|
|
|
const struct mptcp_sock *msk,
|
|
|
|
const struct sock *ssk)
|
|
|
|
{
|
|
|
|
return mptcp_event_put_token_and_ssk(skb, msk, ssk);
|
|
|
|
}
|
|
|
|
|
|
|
|
static int mptcp_event_sub_closed(struct sk_buff *skb,
|
|
|
|
const struct mptcp_sock *msk,
|
|
|
|
const struct sock *ssk)
|
|
|
|
{
|
2021-04-01 23:19:44 +00:00
|
|
|
const struct mptcp_subflow_context *sf;
|
|
|
|
|
2021-02-13 00:00:01 +00:00
|
|
|
if (mptcp_event_put_token_and_ssk(skb, msk, ssk))
|
|
|
|
return -EMSGSIZE;
|
|
|
|
|
2021-04-01 23:19:44 +00:00
|
|
|
sf = mptcp_subflow_ctx(ssk);
|
|
|
|
if (!sf->reset_seen)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
if (nla_put_u32(skb, MPTCP_ATTR_RESET_REASON, sf->reset_reason))
|
|
|
|
return -EMSGSIZE;
|
|
|
|
|
|
|
|
if (nla_put_u32(skb, MPTCP_ATTR_RESET_FLAGS, sf->reset_transient))
|
|
|
|
return -EMSGSIZE;
|
|
|
|
|
2021-02-13 00:00:01 +00:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int mptcp_event_created(struct sk_buff *skb,
|
|
|
|
const struct mptcp_sock *msk,
|
|
|
|
const struct sock *ssk)
|
|
|
|
{
|
2024-02-02 11:40:10 +00:00
|
|
|
int err = nla_put_u32(skb, MPTCP_ATTR_TOKEN, READ_ONCE(msk->token));
|
2021-02-13 00:00:01 +00:00
|
|
|
|
|
|
|
if (err)
|
|
|
|
return err;
|
|
|
|
|
2022-05-02 20:52:36 +00:00
|
|
|
if (nla_put_u8(skb, MPTCP_ATTR_SERVER_SIDE, READ_ONCE(msk->pm.server_side)))
|
|
|
|
return -EMSGSIZE;
|
|
|
|
|
2021-02-13 00:00:01 +00:00
|
|
|
return mptcp_event_add_subflow(skb, ssk);
|
|
|
|
}
|
|
|
|
|
|
|
|
void mptcp_event_addr_removed(const struct mptcp_sock *msk, uint8_t id)
|
|
|
|
{
|
|
|
|
struct net *net = sock_net((const struct sock *)msk);
|
|
|
|
struct nlmsghdr *nlh;
|
|
|
|
struct sk_buff *skb;
|
|
|
|
|
|
|
|
if (!genl_has_listeners(&mptcp_genl_family, net, MPTCP_PM_EV_GRP_OFFSET))
|
|
|
|
return;
|
|
|
|
|
|
|
|
skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
|
|
|
|
if (!skb)
|
|
|
|
return;
|
|
|
|
|
|
|
|
nlh = genlmsg_put(skb, 0, 0, &mptcp_genl_family, 0, MPTCP_EVENT_REMOVED);
|
|
|
|
if (!nlh)
|
|
|
|
goto nla_put_failure;
|
|
|
|
|
2024-02-02 11:40:10 +00:00
|
|
|
if (nla_put_u32(skb, MPTCP_ATTR_TOKEN, READ_ONCE(msk->token)))
|
2021-02-13 00:00:01 +00:00
|
|
|
goto nla_put_failure;
|
|
|
|
|
|
|
|
if (nla_put_u8(skb, MPTCP_ATTR_REM_ID, id))
|
|
|
|
goto nla_put_failure;
|
|
|
|
|
|
|
|
genlmsg_end(skb, nlh);
|
|
|
|
mptcp_nl_mcast_send(net, skb, GFP_ATOMIC);
|
|
|
|
return;
|
|
|
|
|
|
|
|
nla_put_failure:
|
2022-12-09 00:44:30 +00:00
|
|
|
nlmsg_free(skb);
|
2021-02-13 00:00:01 +00:00
|
|
|
}
|
|
|
|
|
2022-05-02 20:52:34 +00:00
|
|
|
void mptcp_event_addr_announced(const struct sock *ssk,
|
2021-02-13 00:00:01 +00:00
|
|
|
const struct mptcp_addr_info *info)
|
|
|
|
{
|
2022-05-02 20:52:34 +00:00
|
|
|
struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
|
|
|
|
struct mptcp_sock *msk = mptcp_sk(subflow->conn);
|
|
|
|
struct net *net = sock_net(ssk);
|
2021-02-13 00:00:01 +00:00
|
|
|
struct nlmsghdr *nlh;
|
|
|
|
struct sk_buff *skb;
|
|
|
|
|
|
|
|
if (!genl_has_listeners(&mptcp_genl_family, net, MPTCP_PM_EV_GRP_OFFSET))
|
|
|
|
return;
|
|
|
|
|
|
|
|
skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
|
|
|
|
if (!skb)
|
|
|
|
return;
|
|
|
|
|
|
|
|
nlh = genlmsg_put(skb, 0, 0, &mptcp_genl_family, 0,
|
|
|
|
MPTCP_EVENT_ANNOUNCED);
|
|
|
|
if (!nlh)
|
|
|
|
goto nla_put_failure;
|
|
|
|
|
2024-02-02 11:40:10 +00:00
|
|
|
if (nla_put_u32(skb, MPTCP_ATTR_TOKEN, READ_ONCE(msk->token)))
|
2021-02-13 00:00:01 +00:00
|
|
|
goto nla_put_failure;
|
|
|
|
|
|
|
|
if (nla_put_u8(skb, MPTCP_ATTR_REM_ID, info->id))
|
|
|
|
goto nla_put_failure;
|
|
|
|
|
2022-05-02 20:52:34 +00:00
|
|
|
if (nla_put_be16(skb, MPTCP_ATTR_DPORT,
|
|
|
|
info->port == 0 ?
|
|
|
|
inet_sk(ssk)->inet_dport :
|
|
|
|
info->port))
|
2021-02-13 00:00:01 +00:00
|
|
|
goto nla_put_failure;
|
|
|
|
|
|
|
|
switch (info->family) {
|
|
|
|
case AF_INET:
|
|
|
|
if (nla_put_in_addr(skb, MPTCP_ATTR_DADDR4, info->addr.s_addr))
|
|
|
|
goto nla_put_failure;
|
|
|
|
break;
|
|
|
|
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
|
|
|
|
case AF_INET6:
|
|
|
|
if (nla_put_in6_addr(skb, MPTCP_ATTR_DADDR6, &info->addr6))
|
|
|
|
goto nla_put_failure;
|
|
|
|
break;
|
|
|
|
#endif
|
|
|
|
default:
|
|
|
|
WARN_ON_ONCE(1);
|
|
|
|
goto nla_put_failure;
|
|
|
|
}
|
|
|
|
|
|
|
|
genlmsg_end(skb, nlh);
|
|
|
|
mptcp_nl_mcast_send(net, skb, GFP_ATOMIC);
|
|
|
|
return;
|
|
|
|
|
|
|
|
nla_put_failure:
|
2022-12-09 00:44:30 +00:00
|
|
|
nlmsg_free(skb);
|
2021-02-13 00:00:01 +00:00
|
|
|
}
|
|
|
|
|
2022-11-30 14:06:28 +00:00
|
|
|
void mptcp_event_pm_listener(const struct sock *ssk,
|
|
|
|
enum mptcp_event_type event)
|
|
|
|
{
|
|
|
|
const struct inet_sock *issk = inet_sk(ssk);
|
|
|
|
struct net *net = sock_net(ssk);
|
|
|
|
struct nlmsghdr *nlh;
|
|
|
|
struct sk_buff *skb;
|
|
|
|
|
|
|
|
if (!genl_has_listeners(&mptcp_genl_family, net, MPTCP_PM_EV_GRP_OFFSET))
|
|
|
|
return;
|
|
|
|
|
|
|
|
skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
|
|
|
|
if (!skb)
|
|
|
|
return;
|
|
|
|
|
|
|
|
nlh = genlmsg_put(skb, 0, 0, &mptcp_genl_family, 0, event);
|
|
|
|
if (!nlh)
|
|
|
|
goto nla_put_failure;
|
|
|
|
|
|
|
|
if (nla_put_u16(skb, MPTCP_ATTR_FAMILY, ssk->sk_family))
|
|
|
|
goto nla_put_failure;
|
|
|
|
|
|
|
|
if (nla_put_be16(skb, MPTCP_ATTR_SPORT, issk->inet_sport))
|
|
|
|
goto nla_put_failure;
|
|
|
|
|
|
|
|
switch (ssk->sk_family) {
|
|
|
|
case AF_INET:
|
|
|
|
if (nla_put_in_addr(skb, MPTCP_ATTR_SADDR4, issk->inet_saddr))
|
|
|
|
goto nla_put_failure;
|
|
|
|
break;
|
|
|
|
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
|
|
|
|
case AF_INET6: {
|
|
|
|
const struct ipv6_pinfo *np = inet6_sk(ssk);
|
|
|
|
|
|
|
|
if (nla_put_in6_addr(skb, MPTCP_ATTR_SADDR6, &np->saddr))
|
|
|
|
goto nla_put_failure;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
default:
|
|
|
|
WARN_ON_ONCE(1);
|
|
|
|
goto nla_put_failure;
|
|
|
|
}
|
|
|
|
|
|
|
|
genlmsg_end(skb, nlh);
|
|
|
|
mptcp_nl_mcast_send(net, skb, GFP_KERNEL);
|
|
|
|
return;
|
|
|
|
|
|
|
|
nla_put_failure:
|
2022-12-09 00:44:30 +00:00
|
|
|
nlmsg_free(skb);
|
2022-11-30 14:06:28 +00:00
|
|
|
}
|
|
|
|
|
2021-02-13 00:00:01 +00:00
|
|
|
void mptcp_event(enum mptcp_event_type type, const struct mptcp_sock *msk,
|
|
|
|
const struct sock *ssk, gfp_t gfp)
|
|
|
|
{
|
|
|
|
struct net *net = sock_net((const struct sock *)msk);
|
|
|
|
struct nlmsghdr *nlh;
|
|
|
|
struct sk_buff *skb;
|
|
|
|
|
|
|
|
if (!genl_has_listeners(&mptcp_genl_family, net, MPTCP_PM_EV_GRP_OFFSET))
|
|
|
|
return;
|
|
|
|
|
|
|
|
skb = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
|
|
|
|
if (!skb)
|
|
|
|
return;
|
|
|
|
|
|
|
|
nlh = genlmsg_put(skb, 0, 0, &mptcp_genl_family, 0, type);
|
|
|
|
if (!nlh)
|
|
|
|
goto nla_put_failure;
|
|
|
|
|
|
|
|
switch (type) {
|
|
|
|
case MPTCP_EVENT_UNSPEC:
|
|
|
|
WARN_ON_ONCE(1);
|
|
|
|
break;
|
|
|
|
case MPTCP_EVENT_CREATED:
|
|
|
|
case MPTCP_EVENT_ESTABLISHED:
|
|
|
|
if (mptcp_event_created(skb, msk, ssk) < 0)
|
|
|
|
goto nla_put_failure;
|
|
|
|
break;
|
|
|
|
case MPTCP_EVENT_CLOSED:
|
2024-02-02 11:40:10 +00:00
|
|
|
if (nla_put_u32(skb, MPTCP_ATTR_TOKEN, READ_ONCE(msk->token)) < 0)
|
2021-02-13 00:00:01 +00:00
|
|
|
goto nla_put_failure;
|
|
|
|
break;
|
|
|
|
case MPTCP_EVENT_ANNOUNCED:
|
|
|
|
case MPTCP_EVENT_REMOVED:
|
|
|
|
/* call mptcp_event_addr_announced()/removed instead */
|
|
|
|
WARN_ON_ONCE(1);
|
|
|
|
break;
|
|
|
|
case MPTCP_EVENT_SUB_ESTABLISHED:
|
|
|
|
case MPTCP_EVENT_SUB_PRIORITY:
|
|
|
|
if (mptcp_event_sub_established(skb, msk, ssk) < 0)
|
|
|
|
goto nla_put_failure;
|
|
|
|
break;
|
|
|
|
case MPTCP_EVENT_SUB_CLOSED:
|
|
|
|
if (mptcp_event_sub_closed(skb, msk, ssk) < 0)
|
|
|
|
goto nla_put_failure;
|
|
|
|
break;
|
2022-11-30 14:06:28 +00:00
|
|
|
case MPTCP_EVENT_LISTENER_CREATED:
|
|
|
|
case MPTCP_EVENT_LISTENER_CLOSED:
|
|
|
|
break;
|
2021-02-13 00:00:01 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
genlmsg_end(skb, nlh);
|
|
|
|
mptcp_nl_mcast_send(net, skb, gfp);
|
|
|
|
return;
|
|
|
|
|
|
|
|
nla_put_failure:
|
2022-12-09 00:44:30 +00:00
|
|
|
nlmsg_free(skb);
|
2021-02-13 00:00:01 +00:00
|
|
|
}
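/* Minimal userspace sketch (an illustration, not part of this file)
 * showing how the events emitted above can be consumed with libnl-3;
 * it assumes the uAPI names from <linux/mptcp.h> and the
 * MPTCP_PM_EV_GRP_NAME ("mptcp_pm_events") multicast group, and omits
 * error handling:
 *
 *   #include <stdio.h>
 *   #include <linux/mptcp.h>
 *   #include <netlink/netlink.h>
 *   #include <netlink/msg.h>
 *   #include <netlink/attr.h>
 *   #include <netlink/genl/genl.h>
 *   #include <netlink/genl/ctrl.h>
 *
 *   static int dump_event(struct nl_msg *msg, void *arg)
 *   {
 *           struct genlmsghdr *ghdr = nlmsg_data(nlmsg_hdr(msg));
 *           struct nlattr *tb[MPTCP_ATTR_MAX + 1];
 *
 *           if (genlmsg_parse(nlmsg_hdr(msg), 0, tb, MPTCP_ATTR_MAX, NULL))
 *                   return NL_SKIP;
 *           if (tb[MPTCP_ATTR_TOKEN])
 *                   printf("event %d token %u\n", ghdr->cmd,
 *                          nla_get_u32(tb[MPTCP_ATTR_TOKEN]));
 *           return NL_OK;
 *   }
 *
 *   int main(void)
 *   {
 *           struct nl_sock *sk = nl_socket_alloc();
 *           int grp;
 *
 *           genl_connect(sk);
 *           grp = genl_ctrl_resolve_grp(sk, MPTCP_PM_NAME,
 *                                       MPTCP_PM_EV_GRP_NAME);
 *           nl_socket_add_membership(sk, grp);
 *           nl_socket_disable_seq_check(sk);
 *           nl_socket_modify_cb(sk, NL_CB_VALID, NL_CB_CUSTOM,
 *                               dump_event, NULL);
 *           for (;;)
 *                   nl_recvmsgs_default(sk);
 *   }
 *
 * Build with: gcc listener.c $(pkg-config --cflags --libs libnl-genl-3.0)
 */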
|
|
|
|
|
2024-03-01 18:18:26 +00:00
|
|
|
struct genl_family mptcp_genl_family __ro_after_init = {
|
2020-03-27 21:48:51 +00:00
|
|
|
.name = MPTCP_PM_NAME,
|
|
|
|
.version = MPTCP_PM_VER,
|
|
|
|
.netnsok = true,
|
|
|
|
.module = THIS_MODULE,
|
2023-10-23 18:17:10 +00:00
|
|
|
.ops = mptcp_pm_nl_ops,
|
|
|
|
.n_ops = ARRAY_SIZE(mptcp_pm_nl_ops),
|
2022-08-25 00:18:30 +00:00
|
|
|
.resv_start_op = MPTCP_PM_CMD_SUBFLOW_DESTROY + 1,
|
2020-03-27 21:48:51 +00:00
|
|
|
.mcgrps = mptcp_pm_mcgrps,
|
|
|
|
.n_mcgrps = ARRAY_SIZE(mptcp_pm_mcgrps),
|
|
|
|
};
|
|
|
|
|
|
|
|
static int __net_init pm_nl_init_net(struct net *net)
|
|
|
|
{
|
2022-04-08 19:45:57 +00:00
|
|
|
struct pm_nl_pernet *pernet = pm_nl_get_pernet(net);
|
2020-03-27 21:48:51 +00:00
|
|
|
|
|
|
|
INIT_LIST_HEAD_RCU(&pernet->local_addr_list);
|
2021-10-15 23:05:51 +00:00
|
|
|
|
|
|
|
/* Cit. 2 subflows ought to be enough for anybody. */
|
|
|
|
pernet->subflows_max = 2;
|
2020-03-27 21:48:51 +00:00
|
|
|
pernet->next_id = 1;
|
2021-08-13 22:15:45 +00:00
|
|
|
pernet->stale_loss_cnt = 4;
|
2020-03-27 21:48:51 +00:00
|
|
|
spin_lock_init(&pernet->lock);
|
2021-05-27 23:54:27 +00:00
|
|
|
|
|
|
|
/* No need to initialize other pernet fields, the struct is zeroed at
|
|
|
|
* allocation time.
|
|
|
|
*/
|
|
|
|
|
2020-03-27 21:48:51 +00:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void __net_exit pm_nl_exit_net(struct list_head *net_list)
|
|
|
|
{
|
|
|
|
struct net *net;
|
|
|
|
|
|
|
|
list_for_each_entry(net, net_list, exit_list) {
|
2022-04-08 19:45:57 +00:00
|
|
|
struct pm_nl_pernet *pernet = pm_nl_get_pernet(net);
|
2020-12-10 22:24:59 +00:00
|
|
|
|
2020-03-27 21:48:51 +00:00
|
|
|
/* net is removed from namespace list, can't race with
|
2021-08-18 23:42:36 +00:00
|
|
|
* other modifiers, also netns core already waited for a
|
|
|
|
* RCU grace period.
|
2020-03-27 21:48:51 +00:00
|
|
|
*/
|
2021-03-13 01:16:18 +00:00
|
|
|
__flush_addrs(&pernet->local_addr_list);
|
2020-03-27 21:48:51 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static struct pernet_operations mptcp_pm_pernet_ops = {
|
|
|
|
.init = pm_nl_init_net,
|
|
|
|
.exit_batch = pm_nl_exit_net,
|
|
|
|
.id = &pm_nl_pernet_id,
|
|
|
|
.size = sizeof(struct pm_nl_pernet),
|
|
|
|
};
|
|
|
|
|
2020-06-26 17:29:59 +00:00
|
|
|
void __init mptcp_pm_nl_init(void)
|
2020-03-27 21:48:51 +00:00
|
|
|
{
|
|
|
|
if (register_pernet_subsys(&mptcp_pm_pernet_ops) < 0)
|
|
|
|
panic("Failed to register MPTCP PM pernet subsystem.\n");
|
|
|
|
|
|
|
|
if (genl_register_family(&mptcp_genl_family))
|
|
|
|
panic("Failed to register MPTCP PM netlink family\n");
|
|
|
|
}
|