mirror of
https://github.com/torvalds/linux.git
synced 2024-11-27 14:41:39 +00:00
ed1ad86b85
This patch defines the default packet scheduler mptcp_sched_default. Register it in mptcp_sched_init(), which is invoked in mptcp_proto_init(). Skip deleting this default scheduler in mptcp_unregister_scheduler(). Set msk->sched to the default scheduler when the input parameter of mptcp_init_sched() is NULL. Invoke mptcp_sched_default_get_subflow in get_send() and get_retrans() if the default scheduler is set or msk->sched is NULL. Acked-by: Paolo Abeni <pabeni@redhat.com> Signed-off-by: Geliang Tang <geliang.tang@suse.com> Signed-off-by: Mat Martineau <martineau@kernel.org> Link: https://lore.kernel.org/r/20230821-upstream-net-next-20230818-v1-10-0c860fb256a8@kernel.org Signed-off-by: Jakub Kicinski <kuba@kernel.org>
174 lines
3.7 KiB
C
174 lines
3.7 KiB
C
// SPDX-License-Identifier: GPL-2.0
|
|
/* Multipath TCP
|
|
*
|
|
* Copyright (c) 2022, SUSE.
|
|
*/
|
|
|
|
#define pr_fmt(fmt) "MPTCP: " fmt
|
|
|
|
#include <linux/kernel.h>
|
|
#include <linux/module.h>
|
|
#include <linux/list.h>
|
|
#include <linux/rculist.h>
|
|
#include <linux/spinlock.h>
|
|
#include "protocol.h"
|
|
|
|
static DEFINE_SPINLOCK(mptcp_sched_list_lock);
|
|
static LIST_HEAD(mptcp_sched_list);
|
|
|
|
static int mptcp_sched_default_get_subflow(struct mptcp_sock *msk,
|
|
struct mptcp_sched_data *data)
|
|
{
|
|
struct sock *ssk;
|
|
|
|
ssk = data->reinject ? mptcp_subflow_get_retrans(msk) :
|
|
mptcp_subflow_get_send(msk);
|
|
if (!ssk)
|
|
return -EINVAL;
|
|
|
|
mptcp_subflow_set_scheduled(mptcp_subflow_ctx(ssk), true);
|
|
return 0;
|
|
}
|
|
|
|
/* The built-in default scheduler: registered in mptcp_sched_init() and
 * deliberately never removed (see mptcp_unregister_scheduler()).
 */
static struct mptcp_sched_ops mptcp_sched_default = {
	.get_subflow	= mptcp_sched_default_get_subflow,
	.name		= "default",
	.owner		= THIS_MODULE,
};
|
|
|
|
/* Must be called with rcu read lock held */
|
|
struct mptcp_sched_ops *mptcp_sched_find(const char *name)
|
|
{
|
|
struct mptcp_sched_ops *sched, *ret = NULL;
|
|
|
|
list_for_each_entry_rcu(sched, &mptcp_sched_list, list) {
|
|
if (!strcmp(sched->name, name)) {
|
|
ret = sched;
|
|
break;
|
|
}
|
|
}
|
|
|
|
return ret;
|
|
}
|
|
|
|
int mptcp_register_scheduler(struct mptcp_sched_ops *sched)
|
|
{
|
|
if (!sched->get_subflow)
|
|
return -EINVAL;
|
|
|
|
spin_lock(&mptcp_sched_list_lock);
|
|
if (mptcp_sched_find(sched->name)) {
|
|
spin_unlock(&mptcp_sched_list_lock);
|
|
return -EEXIST;
|
|
}
|
|
list_add_tail_rcu(&sched->list, &mptcp_sched_list);
|
|
spin_unlock(&mptcp_sched_list_lock);
|
|
|
|
pr_debug("%s registered", sched->name);
|
|
return 0;
|
|
}
|
|
|
|
void mptcp_unregister_scheduler(struct mptcp_sched_ops *sched)
|
|
{
|
|
if (sched == &mptcp_sched_default)
|
|
return;
|
|
|
|
spin_lock(&mptcp_sched_list_lock);
|
|
list_del_rcu(&sched->list);
|
|
spin_unlock(&mptcp_sched_list_lock);
|
|
}
|
|
|
|
/* Register the built-in default scheduler; invoked from
 * mptcp_proto_init() at protocol setup time.
 */
void mptcp_sched_init(void)
{
	mptcp_register_scheduler(&mptcp_sched_default);
}
|
|
|
|
int mptcp_init_sched(struct mptcp_sock *msk,
|
|
struct mptcp_sched_ops *sched)
|
|
{
|
|
if (!sched)
|
|
sched = &mptcp_sched_default;
|
|
|
|
if (!bpf_try_module_get(sched, sched->owner))
|
|
return -EBUSY;
|
|
|
|
msk->sched = sched;
|
|
if (msk->sched->init)
|
|
msk->sched->init(msk);
|
|
|
|
pr_debug("sched=%s", msk->sched->name);
|
|
|
|
return 0;
|
|
}
|
|
|
|
void mptcp_release_sched(struct mptcp_sock *msk)
|
|
{
|
|
struct mptcp_sched_ops *sched = msk->sched;
|
|
|
|
if (!sched)
|
|
return;
|
|
|
|
msk->sched = NULL;
|
|
if (sched->release)
|
|
sched->release(msk);
|
|
|
|
bpf_module_put(sched, sched->owner);
|
|
}
|
|
|
|
/* Set or clear the "scheduled" flag on @subflow; paired with the
 * READ_ONCE() readers in mptcp_sched_get_send()/get_retrans().
 */
void mptcp_subflow_set_scheduled(struct mptcp_subflow_context *subflow,
				 bool scheduled)
{
	WRITE_ONCE(subflow->scheduled, scheduled);
}
|
|
|
|
int mptcp_sched_get_send(struct mptcp_sock *msk)
|
|
{
|
|
struct mptcp_subflow_context *subflow;
|
|
struct mptcp_sched_data data;
|
|
|
|
msk_owned_by_me(msk);
|
|
|
|
/* the following check is moved out of mptcp_subflow_get_send */
|
|
if (__mptcp_check_fallback(msk)) {
|
|
if (msk->first &&
|
|
__tcp_can_send(msk->first) &&
|
|
sk_stream_memory_free(msk->first)) {
|
|
mptcp_subflow_set_scheduled(mptcp_subflow_ctx(msk->first), true);
|
|
return 0;
|
|
}
|
|
return -EINVAL;
|
|
}
|
|
|
|
mptcp_for_each_subflow(msk, subflow) {
|
|
if (READ_ONCE(subflow->scheduled))
|
|
return 0;
|
|
}
|
|
|
|
data.reinject = false;
|
|
if (msk->sched == &mptcp_sched_default || !msk->sched)
|
|
return mptcp_sched_default_get_subflow(msk, &data);
|
|
return msk->sched->get_subflow(msk, &data);
|
|
}
|
|
|
|
int mptcp_sched_get_retrans(struct mptcp_sock *msk)
|
|
{
|
|
struct mptcp_subflow_context *subflow;
|
|
struct mptcp_sched_data data;
|
|
|
|
msk_owned_by_me(msk);
|
|
|
|
/* the following check is moved out of mptcp_subflow_get_retrans */
|
|
if (__mptcp_check_fallback(msk))
|
|
return -EINVAL;
|
|
|
|
mptcp_for_each_subflow(msk, subflow) {
|
|
if (READ_ONCE(subflow->scheduled))
|
|
return 0;
|
|
}
|
|
|
|
data.reinject = true;
|
|
if (msk->sched == &mptcp_sched_default || !msk->sched)
|
|
return mptcp_sched_default_get_subflow(msk, &data);
|
|
return msk->sched->get_subflow(msk, &data);
|
|
}
|