mirror of
https://github.com/torvalds/linux.git
synced 2024-11-27 22:51:35 +00:00
net: use flow_indr_dev_setup_offload()
Update existing frontends to use flow_indr_dev_setup_offload(). This new function must be called if ->ndo_setup_tc is unset to deal with tunnel devices. If there is no driver that is subscribed to new tunnel device flow_block bindings, then this function bails out with EOPNOTSUPP. If the driver module is removed, the ->cleanup() callback removes the entries that belong to this tunnel device. This cleanup procedure is triggered when the device unregisters the tunnel device offload handler. Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org> Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
parent
324a823b99
commit
0fdcf78d59
@ -942,6 +942,18 @@ static void nf_flow_table_block_offload_init(struct flow_block_offload *bo,
|
||||
INIT_LIST_HEAD(&bo->cb_list);
|
||||
}
|
||||
|
||||
static void nf_flow_table_indr_cleanup(struct flow_block_cb *block_cb)
|
||||
{
|
||||
struct nf_flowtable *flowtable = block_cb->indr.data;
|
||||
struct net_device *dev = block_cb->indr.dev;
|
||||
|
||||
nf_flow_table_gc_cleanup(flowtable, dev);
|
||||
down_write(&flowtable->flow_block_lock);
|
||||
list_del(&block_cb->list);
|
||||
flow_block_cb_free(block_cb);
|
||||
up_write(&flowtable->flow_block_lock);
|
||||
}
|
||||
|
||||
static int nf_flow_table_indr_offload_cmd(struct flow_block_offload *bo,
|
||||
struct nf_flowtable *flowtable,
|
||||
struct net_device *dev,
|
||||
@ -950,12 +962,9 @@ static int nf_flow_table_indr_offload_cmd(struct flow_block_offload *bo,
|
||||
{
|
||||
nf_flow_table_block_offload_init(bo, dev_net(dev), cmd, flowtable,
|
||||
extack);
|
||||
flow_indr_block_call(dev, bo, cmd, TC_SETUP_FT);
|
||||
|
||||
if (list_empty(&bo->cb_list))
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
return 0;
|
||||
return flow_indr_dev_setup_offload(dev, TC_SETUP_FT, flowtable, bo,
|
||||
nf_flow_table_indr_cleanup);
|
||||
}
|
||||
|
||||
static int nf_flow_table_offload_cmd(struct flow_block_offload *bo,
|
||||
|
@ -304,21 +304,41 @@ static void nft_indr_block_ing_cmd(struct net_device *dev,
|
||||
nft_block_setup(chain, &bo, cmd);
|
||||
}
|
||||
|
||||
static int nft_indr_block_offload_cmd(struct nft_base_chain *chain,
|
||||
static void nft_indr_block_cleanup(struct flow_block_cb *block_cb)
|
||||
{
|
||||
struct nft_base_chain *basechain = block_cb->indr.data;
|
||||
struct net_device *dev = block_cb->indr.dev;
|
||||
struct netlink_ext_ack extack = {};
|
||||
struct net *net = dev_net(dev);
|
||||
struct flow_block_offload bo;
|
||||
|
||||
nft_flow_block_offload_init(&bo, dev_net(dev), FLOW_BLOCK_UNBIND,
|
||||
basechain, &extack);
|
||||
mutex_lock(&net->nft.commit_mutex);
|
||||
list_move(&block_cb->list, &bo.cb_list);
|
||||
nft_flow_offload_unbind(&bo, basechain);
|
||||
mutex_unlock(&net->nft.commit_mutex);
|
||||
}
|
||||
|
||||
static int nft_indr_block_offload_cmd(struct nft_base_chain *basechain,
|
||||
struct net_device *dev,
|
||||
enum flow_block_command cmd)
|
||||
{
|
||||
struct netlink_ext_ack extack = {};
|
||||
struct flow_block_offload bo;
|
||||
int err;
|
||||
|
||||
nft_flow_block_offload_init(&bo, dev_net(dev), cmd, chain, &extack);
|
||||
nft_flow_block_offload_init(&bo, dev_net(dev), cmd, basechain, &extack);
|
||||
|
||||
flow_indr_block_call(dev, &bo, cmd, TC_SETUP_BLOCK);
|
||||
err = flow_indr_dev_setup_offload(dev, TC_SETUP_BLOCK, basechain, &bo,
|
||||
nft_indr_block_cleanup);
|
||||
if (err < 0)
|
||||
return err;
|
||||
|
||||
if (list_empty(&bo.cb_list))
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
return nft_block_setup(chain, &bo, cmd);
|
||||
return nft_block_setup(basechain, &bo, cmd);
|
||||
}
|
||||
|
||||
#define FLOW_SETUP_BLOCK TC_SETUP_BLOCK
|
||||
|
@ -709,24 +709,26 @@ static void tcf_block_offload_init(struct flow_block_offload *bo,
|
||||
INIT_LIST_HEAD(&bo->cb_list);
|
||||
}
|
||||
|
||||
static void tc_indr_block_call(struct tcf_block *block,
|
||||
struct net_device *dev,
|
||||
struct tcf_block_ext_info *ei,
|
||||
enum flow_block_command command,
|
||||
struct netlink_ext_ack *extack)
|
||||
{
|
||||
struct flow_block_offload bo = {
|
||||
.command = command,
|
||||
.binder_type = ei->binder_type,
|
||||
.net = dev_net(dev),
|
||||
.block = &block->flow_block,
|
||||
.block_shared = tcf_block_shared(block),
|
||||
.extack = extack,
|
||||
};
|
||||
INIT_LIST_HEAD(&bo.cb_list);
|
||||
static void tcf_block_unbind(struct tcf_block *block,
|
||||
struct flow_block_offload *bo);
|
||||
|
||||
flow_indr_block_call(dev, &bo, command, TC_SETUP_BLOCK);
|
||||
tcf_block_setup(block, &bo);
|
||||
static void tc_block_indr_cleanup(struct flow_block_cb *block_cb)
|
||||
{
|
||||
struct tcf_block *block = block_cb->indr.data;
|
||||
struct net_device *dev = block_cb->indr.dev;
|
||||
struct netlink_ext_ack extack = {};
|
||||
struct flow_block_offload bo;
|
||||
|
||||
tcf_block_offload_init(&bo, dev, FLOW_BLOCK_UNBIND,
|
||||
block_cb->indr.binder_type,
|
||||
&block->flow_block, tcf_block_shared(block),
|
||||
&extack);
|
||||
down_write(&block->cb_lock);
|
||||
list_move(&block_cb->list, &bo.cb_list);
|
||||
up_write(&block->cb_lock);
|
||||
rtnl_lock();
|
||||
tcf_block_unbind(block, &bo);
|
||||
rtnl_unlock();
|
||||
}
|
||||
|
||||
static bool tcf_block_offload_in_use(struct tcf_block *block)
|
||||
@ -747,7 +749,12 @@ static int tcf_block_offload_cmd(struct tcf_block *block,
|
||||
&block->flow_block, tcf_block_shared(block),
|
||||
extack);
|
||||
|
||||
err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_BLOCK, &bo);
|
||||
if (dev->netdev_ops->ndo_setup_tc)
|
||||
err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_BLOCK, &bo);
|
||||
else
|
||||
err = flow_indr_dev_setup_offload(dev, TC_SETUP_BLOCK, block,
|
||||
&bo, tc_block_indr_cleanup);
|
||||
|
||||
if (err < 0) {
|
||||
if (err != -EOPNOTSUPP)
|
||||
NL_SET_ERR_MSG(extack, "Driver ndo_setup_tc failed");
|
||||
@ -765,13 +772,13 @@ static int tcf_block_offload_bind(struct tcf_block *block, struct Qdisc *q,
|
||||
int err;
|
||||
|
||||
down_write(&block->cb_lock);
|
||||
if (!dev->netdev_ops->ndo_setup_tc)
|
||||
goto no_offload_dev_inc;
|
||||
|
||||
/* If tc offload feature is disabled and the block we try to bind
|
||||
* to already has some offloaded filters, forbid to bind.
|
||||
*/
|
||||
if (!tc_can_offload(dev) && tcf_block_offload_in_use(block)) {
|
||||
if (dev->netdev_ops->ndo_setup_tc &&
|
||||
!tc_can_offload(dev) &&
|
||||
tcf_block_offload_in_use(block)) {
|
||||
NL_SET_ERR_MSG(extack, "Bind to offloaded block failed as dev has offload disabled");
|
||||
err = -EOPNOTSUPP;
|
||||
goto err_unlock;
|
||||
@ -783,18 +790,15 @@ static int tcf_block_offload_bind(struct tcf_block *block, struct Qdisc *q,
|
||||
if (err)
|
||||
goto err_unlock;
|
||||
|
||||
tc_indr_block_call(block, dev, ei, FLOW_BLOCK_BIND, extack);
|
||||
up_write(&block->cb_lock);
|
||||
return 0;
|
||||
|
||||
no_offload_dev_inc:
|
||||
if (tcf_block_offload_in_use(block)) {
|
||||
err = -EOPNOTSUPP;
|
||||
if (tcf_block_offload_in_use(block))
|
||||
goto err_unlock;
|
||||
}
|
||||
|
||||
err = 0;
|
||||
block->nooffloaddevcnt++;
|
||||
tc_indr_block_call(block, dev, ei, FLOW_BLOCK_BIND, extack);
|
||||
err_unlock:
|
||||
up_write(&block->cb_lock);
|
||||
return err;
|
||||
@ -807,10 +811,6 @@ static void tcf_block_offload_unbind(struct tcf_block *block, struct Qdisc *q,
|
||||
int err;
|
||||
|
||||
down_write(&block->cb_lock);
|
||||
tc_indr_block_call(block, dev, ei, FLOW_BLOCK_UNBIND, NULL);
|
||||
|
||||
if (!dev->netdev_ops->ndo_setup_tc)
|
||||
goto no_offload_dev_dec;
|
||||
err = tcf_block_offload_cmd(block, dev, ei, FLOW_BLOCK_UNBIND, NULL);
|
||||
if (err == -EOPNOTSUPP)
|
||||
goto no_offload_dev_dec;
|
||||
|
Loading…
Reference in New Issue
Block a user