Mirror of https://github.com/torvalds/linux.git (synced 2024-12-02 17:11:33 +00:00)
Merge branch 'nf-hw-offload'
Pablo Neira Ayuso says:

====================
netfilter: add hardware offload infrastructure

This patchset adds support for Netfilter hardware offloads. It reuses the
existing block infrastructure, the netdev_ops->ndo_setup_tc() interface,
the TC_SETUP_CLSFLOWER classifier and the flow rule API.

Patch #1 adds flow_block_cb_setup_simple(): most drivers do the same thing
to set up flow blocks, so consolidating that code reduces the number of
changes. The _simple() postfix was requested by Jakub Kicinski. The new
function resides in net/core/flow_offload.c.

Patch #2 renames TC_BLOCK_{UN}BIND to FLOW_BLOCK_{UN}BIND.

Patch #3 renames TCF_BLOCK_BINDER_TYPE_* to FLOW_BLOCK_BINDER_TYPE_*.

Patch #4 adds the flow_block_cb_alloc() and flow_block_cb_free() helper
functions; this is the first patch of the flow block API.

Patch #5 adds the helpers that deal with list operations in the flow block
API: flow_block_cb_lookup(), flow_block_cb_add() and flow_block_cb_remove().

Patch #6 adds flow_block_cb_priv(), flow_block_cb_incref() and
flow_block_cb_decref(), which completes the flow block API.

Patch #7 updates cls_api to use the flow block API from the new
tcf_block_setup(). This infrastructure transports these objects via a list
(through the tc_block_offload object) back to the core for registration:

        CLS_API                          DRIVER
   TC_SETUP_BLOCK  ---------->  setup flow_block_cb object &
                                it adds object to
                                flow_block_offload->cb_list
                                           |
        CLS_API  <-------------------------'
   registers                    list with flow blocks
   flow_block_cb &              travels back to
   calls ->reoffload            the core for registration

Drivers allocate and set up the flow_block_cb objects (configure the
blocks), then registration happens from the core (cls_api and netfilter).

Patch #8 updates drivers to use the flow block API.

Patch #9 removes the tcf block callback API, which is replaced by the flow
block API.

Patch #10 adds the flow_block_cb_is_busy() helper to check whether a block
is already used by another subsystem. This helper is invoked from drivers;
once drivers are updated to support multiple subsystems, they can remove
this check.

Patch #11 renames the tc structures and definitions for the block
bind/unbind path.

Patch #12 introduces basic netfilter hardware offload infrastructure for
the ingress chain. This includes 5-tuple exact matching and accept / drop
rule actions. Only basechains are supported at this stage, and no
.reoffload callback is implemented either. Only a default policy of
"accept" is supported for now.

 table netdev filter {
	chain ingress {
		type filter hook ingress device eth0 priority 0; flags offload;

		ip daddr 192.168.0.10 tcp dport 22 drop
	}
 }

This patchset reuses the existing tcf block callback API and places it in
the flow block callback API in net/core/flow_offload.c.

This series aims to address Jakub and Jiri's feedback; please see the
individual patches in this batch for the v4 changelog.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit c099a40894
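For orientation before the diff: patch #8's per-driver change boils down to the pattern sketched below. Each driver drops its open-coded *_setup_tc_block() bind/unbind handler and lets flow_block_cb_setup_simple() do that bookkeeping, while the block callback now receives a struct flow_cls_offload with FLOW_CLS_* commands. This is a minimal sketch, not code from the patchset: the foo_* names (foo_priv, foo_setup_tc_cls_flower, foo_block_cb_list) are hypothetical placeholders; only flow_block_cb_setup_simple() and the TC_SETUP_* / FLOW_CLS_* symbols come from the series itself.

/* Schematic driver-side sketch; foo_* identifiers are placeholders. */
static LIST_HEAD(foo_block_cb_list);

static int foo_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
				 void *cb_priv)
{
	struct foo_priv *priv = cb_priv;	/* hypothetical driver private data */

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		/* type_data is a struct flow_cls_offload; the handler
		 * dispatches on FLOW_CLS_REPLACE / FLOW_CLS_DESTROY /
		 * FLOW_CLS_STATS, as the converted drivers below do.
		 */
		return foo_setup_tc_cls_flower(priv, type_data);
	default:
		return -EOPNOTSUPP;
	}
}

static int foo_setup_tc(struct net_device *dev, enum tc_setup_type type,
			void *type_data)
{
	struct foo_priv *priv = netdev_priv(dev);

	switch (type) {
	case TC_SETUP_BLOCK:
		/* Replaces the driver's removed foo_setup_tc_block() handler;
		 * the final "true" restricts the block to ingress binding,
		 * matching the conversions in the diff below.
		 */
		return flow_block_cb_setup_simple(type_data,
						  &foo_block_cb_list,
						  foo_setup_tc_block_cb,
						  priv, priv, true);
	default:
		return -EOPNOTSUPP;
	}
}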
@@ -9907,32 +9907,19 @@ static int bnxt_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
 	}
 }
 
-static int bnxt_setup_tc_block(struct net_device *dev,
-			       struct tc_block_offload *f)
-{
-	struct bnxt *bp = netdev_priv(dev);
-
-	if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
-		return -EOPNOTSUPP;
-
-	switch (f->command) {
-	case TC_BLOCK_BIND:
-		return tcf_block_cb_register(f->block, bnxt_setup_tc_block_cb,
-					     bp, bp, f->extack);
-	case TC_BLOCK_UNBIND:
-		tcf_block_cb_unregister(f->block, bnxt_setup_tc_block_cb, bp);
-		return 0;
-	default:
-		return -EOPNOTSUPP;
-	}
-}
+static LIST_HEAD(bnxt_block_cb_list);
 
 static int bnxt_setup_tc(struct net_device *dev, enum tc_setup_type type,
 			 void *type_data)
 {
 	struct bnxt *bp = netdev_priv(dev);
 
 	switch (type) {
 	case TC_SETUP_BLOCK:
-		return bnxt_setup_tc_block(dev, type_data);
+		return flow_block_cb_setup_simple(type_data,
+						  &bnxt_block_cb_list,
+						  bnxt_setup_tc_block_cb,
+						  bp, bp, true);
 	case TC_SETUP_QDISC_MQPRIO: {
 		struct tc_mqprio_qopt *mqprio = type_data;
 
@@ -170,10 +170,10 @@ static int bnxt_tc_parse_actions(struct bnxt *bp,
 }
 
 static int bnxt_tc_parse_flow(struct bnxt *bp,
-			      struct tc_cls_flower_offload *tc_flow_cmd,
+			      struct flow_cls_offload *tc_flow_cmd,
 			      struct bnxt_tc_flow *flow)
 {
-	struct flow_rule *rule = tc_cls_flower_offload_flow_rule(tc_flow_cmd);
+	struct flow_rule *rule = flow_cls_offload_flow_rule(tc_flow_cmd);
 	struct flow_dissector *dissector = rule->match.dissector;
 
 	/* KEY_CONTROL and KEY_BASIC are needed for forming a meaningful key */
@@ -1262,7 +1262,7 @@ static void bnxt_tc_set_src_fid(struct bnxt *bp, struct bnxt_tc_flow *flow,
  * The hash-tables are already protected by the rhashtable API.
  */
 static int bnxt_tc_add_flow(struct bnxt *bp, u16 src_fid,
-			    struct tc_cls_flower_offload *tc_flow_cmd)
+			    struct flow_cls_offload *tc_flow_cmd)
 {
 	struct bnxt_tc_flow_node *new_node, *old_node;
 	struct bnxt_tc_info *tc_info = bp->tc_info;
@@ -1348,7 +1348,7 @@ done:
 }
 
 static int bnxt_tc_del_flow(struct bnxt *bp,
-			    struct tc_cls_flower_offload *tc_flow_cmd)
+			    struct flow_cls_offload *tc_flow_cmd)
 {
 	struct bnxt_tc_info *tc_info = bp->tc_info;
 	struct bnxt_tc_flow_node *flow_node;
@@ -1363,7 +1363,7 @@ static int bnxt_tc_del_flow(struct bnxt *bp,
 }
 
 static int bnxt_tc_get_flow_stats(struct bnxt *bp,
-				  struct tc_cls_flower_offload *tc_flow_cmd)
+				  struct flow_cls_offload *tc_flow_cmd)
 {
 	struct bnxt_tc_flow_stats stats, *curr_stats, *prev_stats;
 	struct bnxt_tc_info *tc_info = bp->tc_info;
@@ -1585,14 +1585,14 @@ void bnxt_tc_flow_stats_work(struct bnxt *bp)
 }
 
 int bnxt_tc_setup_flower(struct bnxt *bp, u16 src_fid,
-			 struct tc_cls_flower_offload *cls_flower)
+			 struct flow_cls_offload *cls_flower)
 {
 	switch (cls_flower->command) {
-	case TC_CLSFLOWER_REPLACE:
+	case FLOW_CLS_REPLACE:
 		return bnxt_tc_add_flow(bp, src_fid, cls_flower);
-	case TC_CLSFLOWER_DESTROY:
+	case FLOW_CLS_DESTROY:
 		return bnxt_tc_del_flow(bp, cls_flower);
-	case TC_CLSFLOWER_STATS:
+	case FLOW_CLS_STATS:
 		return bnxt_tc_get_flow_stats(bp, cls_flower);
 	default:
 		return -EOPNOTSUPP;
@@ -196,7 +196,7 @@ struct bnxt_tc_flow_node {
 };
 
 int bnxt_tc_setup_flower(struct bnxt *bp, u16 src_fid,
-			 struct tc_cls_flower_offload *cls_flower);
+			 struct flow_cls_offload *cls_flower);
 int bnxt_init_tc(struct bnxt *bp);
 void bnxt_shutdown_tc(struct bnxt *bp);
 void bnxt_tc_flow_stats_work(struct bnxt *bp);
@@ -209,7 +209,7 @@ static inline bool bnxt_tc_flower_enabled(struct bnxt *bp)
 #else /* CONFIG_BNXT_FLOWER_OFFLOAD */
 
 static inline int bnxt_tc_setup_flower(struct bnxt *bp, u16 src_fid,
-					struct tc_cls_flower_offload *cls_flower)
+					struct flow_cls_offload *cls_flower)
 {
 	return -EOPNOTSUPP;
 }
@@ -161,34 +161,19 @@ static int bnxt_vf_rep_setup_tc_block_cb(enum tc_setup_type,
 	}
 }
 
-static int bnxt_vf_rep_setup_tc_block(struct net_device *dev,
-				      struct tc_block_offload *f)
-{
-	struct bnxt_vf_rep *vf_rep = netdev_priv(dev);
-
-	if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
-		return -EOPNOTSUPP;
-
-	switch (f->command) {
-	case TC_BLOCK_BIND:
-		return tcf_block_cb_register(f->block,
-					     bnxt_vf_rep_setup_tc_block_cb,
-					     vf_rep, vf_rep, f->extack);
-	case TC_BLOCK_UNBIND:
-		tcf_block_cb_unregister(f->block,
-					bnxt_vf_rep_setup_tc_block_cb, vf_rep);
-		return 0;
-	default:
-		return -EOPNOTSUPP;
-	}
-}
+static LIST_HEAD(bnxt_vf_block_cb_list);
 
 static int bnxt_vf_rep_setup_tc(struct net_device *dev, enum tc_setup_type type,
 				void *type_data)
 {
 	struct bnxt_vf_rep *vf_rep = netdev_priv(dev);
 
 	switch (type) {
 	case TC_SETUP_BLOCK:
-		return bnxt_vf_rep_setup_tc_block(dev, type_data);
+		return flow_block_cb_setup_simple(type_data,
+						  &bnxt_vf_block_cb_list,
+						  bnxt_vf_rep_setup_tc_block_cb,
+						  vf_rep, vf_rep, true);
 	default:
 		return -EOPNOTSUPP;
 	}
@@ -3135,14 +3135,14 @@ static int cxgb_set_tx_maxrate(struct net_device *dev, int index, u32 rate)
 }
 
 static int cxgb_setup_tc_flower(struct net_device *dev,
-				struct tc_cls_flower_offload *cls_flower)
+				struct flow_cls_offload *cls_flower)
 {
 	switch (cls_flower->command) {
-	case TC_CLSFLOWER_REPLACE:
+	case FLOW_CLS_REPLACE:
 		return cxgb4_tc_flower_replace(dev, cls_flower);
-	case TC_CLSFLOWER_DESTROY:
+	case FLOW_CLS_DESTROY:
 		return cxgb4_tc_flower_destroy(dev, cls_flower);
-	case TC_CLSFLOWER_STATS:
+	case FLOW_CLS_STATS:
 		return cxgb4_tc_flower_stats(dev, cls_flower);
 	default:
 		return -EOPNOTSUPP;
@@ -3190,32 +3190,19 @@ static int cxgb_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
 	}
 }
 
-static int cxgb_setup_tc_block(struct net_device *dev,
-			       struct tc_block_offload *f)
-{
-	struct port_info *pi = netdev2pinfo(dev);
-
-	if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
-		return -EOPNOTSUPP;
-
-	switch (f->command) {
-	case TC_BLOCK_BIND:
-		return tcf_block_cb_register(f->block, cxgb_setup_tc_block_cb,
-					     pi, dev, f->extack);
-	case TC_BLOCK_UNBIND:
-		tcf_block_cb_unregister(f->block, cxgb_setup_tc_block_cb, pi);
-		return 0;
-	default:
-		return -EOPNOTSUPP;
-	}
-}
+static LIST_HEAD(cxgb_block_cb_list);
 
 static int cxgb_setup_tc(struct net_device *dev, enum tc_setup_type type,
 			 void *type_data)
 {
 	struct port_info *pi = netdev2pinfo(dev);
 
 	switch (type) {
 	case TC_SETUP_BLOCK:
-		return cxgb_setup_tc_block(dev, type_data);
+		return flow_block_cb_setup_simple(type_data,
+						  &cxgb_block_cb_list,
+						  cxgb_setup_tc_block_cb,
+						  pi, dev, true);
 	default:
 		return -EOPNOTSUPP;
 	}
@@ -80,10 +80,10 @@ static struct ch_tc_flower_entry *ch_flower_lookup(struct adapter *adap,
 }
 
 static void cxgb4_process_flow_match(struct net_device *dev,
-				     struct tc_cls_flower_offload *cls,
+				     struct flow_cls_offload *cls,
 				     struct ch_filter_specification *fs)
 {
-	struct flow_rule *rule = tc_cls_flower_offload_flow_rule(cls);
+	struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
 	u16 addr_type = 0;
 
 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
@@ -223,9 +223,9 @@ static void cxgb4_process_flow_match(struct net_device *dev,
 }
 
 static int cxgb4_validate_flow_match(struct net_device *dev,
-				     struct tc_cls_flower_offload *cls)
+				     struct flow_cls_offload *cls)
 {
-	struct flow_rule *rule = tc_cls_flower_offload_flow_rule(cls);
+	struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
 	struct flow_dissector *dissector = rule->match.dissector;
 	u16 ethtype_mask = 0;
 	u16 ethtype_key = 0;
@@ -378,10 +378,10 @@ static void process_pedit_field(struct ch_filter_specification *fs, u32 val,
 }
 
 static void cxgb4_process_flow_actions(struct net_device *in,
-				       struct tc_cls_flower_offload *cls,
+				       struct flow_cls_offload *cls,
 				       struct ch_filter_specification *fs)
 {
-	struct flow_rule *rule = tc_cls_flower_offload_flow_rule(cls);
+	struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
 	struct flow_action_entry *act;
 	int i;
 
@@ -544,9 +544,9 @@ static bool valid_pedit_action(struct net_device *dev,
 }
 
 static int cxgb4_validate_flow_actions(struct net_device *dev,
-				       struct tc_cls_flower_offload *cls)
+				       struct flow_cls_offload *cls)
 {
-	struct flow_rule *rule = tc_cls_flower_offload_flow_rule(cls);
+	struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
 	struct flow_action_entry *act;
 	bool act_redir = false;
 	bool act_pedit = false;
@@ -633,7 +633,7 @@ static int cxgb4_validate_flow_actions(struct net_device *dev,
 }
 
 int cxgb4_tc_flower_replace(struct net_device *dev,
-			    struct tc_cls_flower_offload *cls)
+			    struct flow_cls_offload *cls)
 {
 	struct adapter *adap = netdev2adap(dev);
 	struct ch_tc_flower_entry *ch_flower;
@@ -709,7 +709,7 @@ free_entry:
 }
 
 int cxgb4_tc_flower_destroy(struct net_device *dev,
-			    struct tc_cls_flower_offload *cls)
+			    struct flow_cls_offload *cls)
 {
 	struct adapter *adap = netdev2adap(dev);
 	struct ch_tc_flower_entry *ch_flower;
@@ -783,7 +783,7 @@ static void ch_flower_stats_cb(struct timer_list *t)
 }
 
 int cxgb4_tc_flower_stats(struct net_device *dev,
-			  struct tc_cls_flower_offload *cls)
+			  struct flow_cls_offload *cls)
 {
 	struct adapter *adap = netdev2adap(dev);
 	struct ch_tc_flower_stats *ofld_stats;
@@ -109,11 +109,11 @@ struct ch_tc_pedit_fields {
 #define PEDIT_UDP_SPORT_DPORT 0x0
 
 int cxgb4_tc_flower_replace(struct net_device *dev,
-			    struct tc_cls_flower_offload *cls);
+			    struct flow_cls_offload *cls);
 int cxgb4_tc_flower_destroy(struct net_device *dev,
-			    struct tc_cls_flower_offload *cls);
+			    struct flow_cls_offload *cls);
 int cxgb4_tc_flower_stats(struct net_device *dev,
-			  struct tc_cls_flower_offload *cls);
+			  struct flow_cls_offload *cls);
 
 int cxgb4_init_tc_flower(struct adapter *adap);
 void cxgb4_cleanup_tc_flower(struct adapter *adap);
@@ -7759,15 +7759,15 @@ int i40e_add_del_cloud_filter_big_buf(struct i40e_vsi *vsi,
 /**
  * i40e_parse_cls_flower - Parse tc flower filters provided by kernel
  * @vsi: Pointer to VSI
- * @cls_flower: Pointer to struct tc_cls_flower_offload
+ * @cls_flower: Pointer to struct flow_cls_offload
  * @filter: Pointer to cloud filter structure
  *
  **/
 static int i40e_parse_cls_flower(struct i40e_vsi *vsi,
-				 struct tc_cls_flower_offload *f,
+				 struct flow_cls_offload *f,
 				 struct i40e_cloud_filter *filter)
 {
-	struct flow_rule *rule = tc_cls_flower_offload_flow_rule(f);
+	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
 	struct flow_dissector *dissector = rule->match.dissector;
 	u16 n_proto_mask = 0, n_proto_key = 0, addr_type = 0;
 	struct i40e_pf *pf = vsi->back;
@@ -8001,11 +8001,11 @@ static int i40e_handle_tclass(struct i40e_vsi *vsi, u32 tc,
 /**
  * i40e_configure_clsflower - Configure tc flower filters
  * @vsi: Pointer to VSI
- * @cls_flower: Pointer to struct tc_cls_flower_offload
+ * @cls_flower: Pointer to struct flow_cls_offload
  *
  **/
 static int i40e_configure_clsflower(struct i40e_vsi *vsi,
-				    struct tc_cls_flower_offload *cls_flower)
+				    struct flow_cls_offload *cls_flower)
 {
 	int tc = tc_classid_to_hwtc(vsi->netdev, cls_flower->classid);
 	struct i40e_cloud_filter *filter = NULL;
@@ -8097,11 +8097,11 @@ static struct i40e_cloud_filter *i40e_find_cloud_filter(struct i40e_vsi *vsi,
 /**
  * i40e_delete_clsflower - Remove tc flower filters
  * @vsi: Pointer to VSI
- * @cls_flower: Pointer to struct tc_cls_flower_offload
+ * @cls_flower: Pointer to struct flow_cls_offload
  *
  **/
 static int i40e_delete_clsflower(struct i40e_vsi *vsi,
-				 struct tc_cls_flower_offload *cls_flower)
+				 struct flow_cls_offload *cls_flower)
 {
 	struct i40e_cloud_filter *filter = NULL;
 	struct i40e_pf *pf = vsi->back;
@@ -8144,16 +8144,16 @@ static int i40e_delete_clsflower(struct i40e_vsi *vsi,
  * @type_data: offload data
  **/
 static int i40e_setup_tc_cls_flower(struct i40e_netdev_priv *np,
-				    struct tc_cls_flower_offload *cls_flower)
+				    struct flow_cls_offload *cls_flower)
 {
 	struct i40e_vsi *vsi = np->vsi;
 
 	switch (cls_flower->command) {
-	case TC_CLSFLOWER_REPLACE:
+	case FLOW_CLS_REPLACE:
 		return i40e_configure_clsflower(vsi, cls_flower);
-	case TC_CLSFLOWER_DESTROY:
+	case FLOW_CLS_DESTROY:
 		return i40e_delete_clsflower(vsi, cls_flower);
-	case TC_CLSFLOWER_STATS:
+	case FLOW_CLS_STATS:
 		return -EOPNOTSUPP;
 	default:
 		return -EOPNOTSUPP;
@@ -8177,34 +8177,21 @@ static int i40e_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
 	}
 }
 
-static int i40e_setup_tc_block(struct net_device *dev,
-			       struct tc_block_offload *f)
-{
-	struct i40e_netdev_priv *np = netdev_priv(dev);
-
-	if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
-		return -EOPNOTSUPP;
-
-	switch (f->command) {
-	case TC_BLOCK_BIND:
-		return tcf_block_cb_register(f->block, i40e_setup_tc_block_cb,
-					     np, np, f->extack);
-	case TC_BLOCK_UNBIND:
-		tcf_block_cb_unregister(f->block, i40e_setup_tc_block_cb, np);
-		return 0;
-	default:
-		return -EOPNOTSUPP;
-	}
-}
+static LIST_HEAD(i40e_block_cb_list);
 
 static int __i40e_setup_tc(struct net_device *netdev, enum tc_setup_type type,
 			   void *type_data)
 {
 	struct i40e_netdev_priv *np = netdev_priv(netdev);
 
 	switch (type) {
 	case TC_SETUP_QDISC_MQPRIO:
 		return i40e_setup_tc(netdev, type_data);
 	case TC_SETUP_BLOCK:
-		return i40e_setup_tc_block(netdev, type_data);
+		return flow_block_cb_setup_simple(type_data,
+						  &i40e_block_cb_list,
+						  i40e_setup_tc_block_cb,
+						  np, np, true);
 	default:
 		return -EOPNOTSUPP;
 	}
@@ -2699,14 +2699,14 @@ exit:
 /**
  * iavf_parse_cls_flower - Parse tc flower filters provided by kernel
  * @adapter: board private structure
- * @cls_flower: pointer to struct tc_cls_flower_offload
+ * @cls_flower: pointer to struct flow_cls_offload
  * @filter: pointer to cloud filter structure
  */
 static int iavf_parse_cls_flower(struct iavf_adapter *adapter,
-				 struct tc_cls_flower_offload *f,
+				 struct flow_cls_offload *f,
 				 struct iavf_cloud_filter *filter)
 {
-	struct flow_rule *rule = tc_cls_flower_offload_flow_rule(f);
+	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
 	struct flow_dissector *dissector = rule->match.dissector;
 	u16 n_proto_mask = 0;
 	u16 n_proto_key = 0;
@@ -2971,10 +2971,10 @@ static int iavf_handle_tclass(struct iavf_adapter *adapter, u32 tc,
 /**
  * iavf_configure_clsflower - Add tc flower filters
  * @adapter: board private structure
- * @cls_flower: Pointer to struct tc_cls_flower_offload
+ * @cls_flower: Pointer to struct flow_cls_offload
  */
 static int iavf_configure_clsflower(struct iavf_adapter *adapter,
-				    struct tc_cls_flower_offload *cls_flower)
+				    struct flow_cls_offload *cls_flower)
 {
 	int tc = tc_classid_to_hwtc(adapter->netdev, cls_flower->classid);
 	struct iavf_cloud_filter *filter = NULL;
@@ -3050,10 +3050,10 @@ static struct iavf_cloud_filter *iavf_find_cf(struct iavf_adapter *adapter,
 /**
  * iavf_delete_clsflower - Remove tc flower filters
  * @adapter: board private structure
- * @cls_flower: Pointer to struct tc_cls_flower_offload
+ * @cls_flower: Pointer to struct flow_cls_offload
 */
 static int iavf_delete_clsflower(struct iavf_adapter *adapter,
-				 struct tc_cls_flower_offload *cls_flower)
+				 struct flow_cls_offload *cls_flower)
 {
 	struct iavf_cloud_filter *filter = NULL;
 	int err = 0;
@@ -3077,17 +3077,17 @@ static int iavf_delete_clsflower(struct iavf_adapter *adapter,
  * @type_data: offload data
 */
 static int iavf_setup_tc_cls_flower(struct iavf_adapter *adapter,
-				    struct tc_cls_flower_offload *cls_flower)
+				    struct flow_cls_offload *cls_flower)
 {
 	if (cls_flower->common.chain_index)
 		return -EOPNOTSUPP;
 
 	switch (cls_flower->command) {
-	case TC_CLSFLOWER_REPLACE:
+	case FLOW_CLS_REPLACE:
 		return iavf_configure_clsflower(adapter, cls_flower);
-	case TC_CLSFLOWER_DESTROY:
+	case FLOW_CLS_DESTROY:
 		return iavf_delete_clsflower(adapter, cls_flower);
-	case TC_CLSFLOWER_STATS:
+	case FLOW_CLS_STATS:
 		return -EOPNOTSUPP;
 	default:
 		return -EOPNOTSUPP;
@@ -3113,34 +3113,7 @@ static int iavf_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
 	}
 }
 
-/**
- * iavf_setup_tc_block - register callbacks for tc
- * @netdev: network interface device structure
- * @f: tc offload data
- *
- * This function registers block callbacks for tc
- * offloads
- **/
-static int iavf_setup_tc_block(struct net_device *dev,
-			       struct tc_block_offload *f)
-{
-	struct iavf_adapter *adapter = netdev_priv(dev);
-
-	if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
-		return -EOPNOTSUPP;
-
-	switch (f->command) {
-	case TC_BLOCK_BIND:
-		return tcf_block_cb_register(f->block, iavf_setup_tc_block_cb,
-					     adapter, adapter, f->extack);
-	case TC_BLOCK_UNBIND:
-		tcf_block_cb_unregister(f->block, iavf_setup_tc_block_cb,
-					adapter);
-		return 0;
-	default:
-		return -EOPNOTSUPP;
-	}
-}
+static LIST_HEAD(iavf_block_cb_list);
 
 /**
  * iavf_setup_tc - configure multiple traffic classes
@@ -3156,11 +3129,16 @@ static int iavf_setup_tc_block(struct net_device *dev,
 static int iavf_setup_tc(struct net_device *netdev, enum tc_setup_type type,
 			 void *type_data)
 {
 	struct iavf_adapter *adapter = netdev_priv(netdev);
 
 	switch (type) {
 	case TC_SETUP_QDISC_MQPRIO:
 		return __iavf_setup_tc(netdev, type_data);
 	case TC_SETUP_BLOCK:
-		return iavf_setup_tc_block(netdev, type_data);
+		return flow_block_cb_setup_simple(type_data,
+						  &iavf_block_cb_list,
+						  iavf_setup_tc_block_cb,
+						  adapter, adapter, true);
 	default:
 		return -EOPNOTSUPP;
 	}
@ -2578,11 +2578,11 @@ static int igb_offload_cbs(struct igb_adapter *adapter,
|
||||
#define VLAN_PRIO_FULL_MASK (0x07)
|
||||
|
||||
static int igb_parse_cls_flower(struct igb_adapter *adapter,
|
||||
struct tc_cls_flower_offload *f,
|
||||
struct flow_cls_offload *f,
|
||||
int traffic_class,
|
||||
struct igb_nfc_filter *input)
|
||||
{
|
||||
struct flow_rule *rule = tc_cls_flower_offload_flow_rule(f);
|
||||
struct flow_rule *rule = flow_cls_offload_flow_rule(f);
|
||||
struct flow_dissector *dissector = rule->match.dissector;
|
||||
struct netlink_ext_ack *extack = f->common.extack;
|
||||
|
||||
@ -2660,7 +2660,7 @@ static int igb_parse_cls_flower(struct igb_adapter *adapter,
|
||||
}
|
||||
|
||||
static int igb_configure_clsflower(struct igb_adapter *adapter,
|
||||
struct tc_cls_flower_offload *cls_flower)
|
||||
struct flow_cls_offload *cls_flower)
|
||||
{
|
||||
struct netlink_ext_ack *extack = cls_flower->common.extack;
|
||||
struct igb_nfc_filter *filter, *f;
|
||||
@ -2722,7 +2722,7 @@ err_parse:
|
||||
}
|
||||
|
||||
static int igb_delete_clsflower(struct igb_adapter *adapter,
|
||||
struct tc_cls_flower_offload *cls_flower)
|
||||
struct flow_cls_offload *cls_flower)
|
||||
{
|
||||
struct igb_nfc_filter *filter;
|
||||
int err;
|
||||
@ -2752,14 +2752,14 @@ out:
|
||||
}
|
||||
|
||||
static int igb_setup_tc_cls_flower(struct igb_adapter *adapter,
|
||||
struct tc_cls_flower_offload *cls_flower)
|
||||
struct flow_cls_offload *cls_flower)
|
||||
{
|
||||
switch (cls_flower->command) {
|
||||
case TC_CLSFLOWER_REPLACE:
|
||||
case FLOW_CLS_REPLACE:
|
||||
return igb_configure_clsflower(adapter, cls_flower);
|
||||
case TC_CLSFLOWER_DESTROY:
|
||||
case FLOW_CLS_DESTROY:
|
||||
return igb_delete_clsflower(adapter, cls_flower);
|
||||
case TC_CLSFLOWER_STATS:
|
||||
case FLOW_CLS_STATS:
|
||||
return -EOPNOTSUPP;
|
||||
default:
|
||||
return -EOPNOTSUPP;
|
||||
@ -2783,25 +2783,6 @@ static int igb_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
|
||||
}
|
||||
}
|
||||
|
||||
static int igb_setup_tc_block(struct igb_adapter *adapter,
|
||||
struct tc_block_offload *f)
|
||||
{
|
||||
if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
switch (f->command) {
|
||||
case TC_BLOCK_BIND:
|
||||
return tcf_block_cb_register(f->block, igb_setup_tc_block_cb,
|
||||
adapter, adapter, f->extack);
|
||||
case TC_BLOCK_UNBIND:
|
||||
tcf_block_cb_unregister(f->block, igb_setup_tc_block_cb,
|
||||
adapter);
|
||||
return 0;
|
||||
default:
|
||||
return -EOPNOTSUPP;
|
||||
}
|
||||
}
|
||||
|
||||
static int igb_offload_txtime(struct igb_adapter *adapter,
|
||||
struct tc_etf_qopt_offload *qopt)
|
||||
{
|
||||
@ -2825,6 +2806,8 @@ static int igb_offload_txtime(struct igb_adapter *adapter,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static LIST_HEAD(igb_block_cb_list);
|
||||
|
||||
static int igb_setup_tc(struct net_device *dev, enum tc_setup_type type,
|
||||
void *type_data)
|
||||
{
|
||||
@ -2834,7 +2817,11 @@ static int igb_setup_tc(struct net_device *dev, enum tc_setup_type type,
|
||||
case TC_SETUP_QDISC_CBS:
|
||||
return igb_offload_cbs(adapter, type_data);
|
||||
case TC_SETUP_BLOCK:
|
||||
return igb_setup_tc_block(adapter, type_data);
|
||||
return flow_block_cb_setup_simple(type_data,
|
||||
&igb_block_cb_list,
|
||||
igb_setup_tc_block_cb,
|
||||
adapter, adapter, true);
|
||||
|
||||
case TC_SETUP_QDISC_ETF:
|
||||
return igb_offload_txtime(adapter, type_data);
|
||||
|
||||
|
@ -9607,27 +9607,6 @@ static int ixgbe_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
|
||||
}
|
||||
}
|
||||
|
||||
static int ixgbe_setup_tc_block(struct net_device *dev,
|
||||
struct tc_block_offload *f)
|
||||
{
|
||||
struct ixgbe_adapter *adapter = netdev_priv(dev);
|
||||
|
||||
if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
switch (f->command) {
|
||||
case TC_BLOCK_BIND:
|
||||
return tcf_block_cb_register(f->block, ixgbe_setup_tc_block_cb,
|
||||
adapter, adapter, f->extack);
|
||||
case TC_BLOCK_UNBIND:
|
||||
tcf_block_cb_unregister(f->block, ixgbe_setup_tc_block_cb,
|
||||
adapter);
|
||||
return 0;
|
||||
default:
|
||||
return -EOPNOTSUPP;
|
||||
}
|
||||
}
|
||||
|
||||
static int ixgbe_setup_tc_mqprio(struct net_device *dev,
|
||||
struct tc_mqprio_qopt *mqprio)
|
||||
{
|
||||
@ -9635,12 +9614,19 @@ static int ixgbe_setup_tc_mqprio(struct net_device *dev,
|
||||
return ixgbe_setup_tc(dev, mqprio->num_tc);
|
||||
}
|
||||
|
||||
static LIST_HEAD(ixgbe_block_cb_list);
|
||||
|
||||
static int __ixgbe_setup_tc(struct net_device *dev, enum tc_setup_type type,
|
||||
void *type_data)
|
||||
{
|
||||
struct ixgbe_adapter *adapter = netdev_priv(dev);
|
||||
|
||||
switch (type) {
|
||||
case TC_SETUP_BLOCK:
|
||||
return ixgbe_setup_tc_block(dev, type_data);
|
||||
return flow_block_cb_setup_simple(type_data,
|
||||
&ixgbe_block_cb_list,
|
||||
ixgbe_setup_tc_block_cb,
|
||||
adapter, adapter, true);
|
||||
case TC_SETUP_QDISC_MQPRIO:
|
||||
return ixgbe_setup_tc_mqprio(dev, type_data);
|
||||
default:
|
||||
|
@ -452,7 +452,7 @@ int mlx5e_tc_tun_init_encap_attr(struct net_device *tunnel_dev,
|
||||
int mlx5e_tc_tun_parse(struct net_device *filter_dev,
|
||||
struct mlx5e_priv *priv,
|
||||
struct mlx5_flow_spec *spec,
|
||||
struct tc_cls_flower_offload *f,
|
||||
struct flow_cls_offload *f,
|
||||
void *headers_c,
|
||||
void *headers_v, u8 *match_level)
|
||||
{
|
||||
@ -489,11 +489,11 @@ out:
|
||||
|
||||
int mlx5e_tc_tun_parse_udp_ports(struct mlx5e_priv *priv,
|
||||
struct mlx5_flow_spec *spec,
|
||||
struct tc_cls_flower_offload *f,
|
||||
struct flow_cls_offload *f,
|
||||
void *headers_c,
|
||||
void *headers_v)
|
||||
{
|
||||
struct flow_rule *rule = tc_cls_flower_offload_flow_rule(f);
|
||||
struct flow_rule *rule = flow_cls_offload_flow_rule(f);
|
||||
struct netlink_ext_ack *extack = f->common.extack;
|
||||
struct flow_match_ports enc_ports;
|
||||
|
||||
|
@ -33,12 +33,12 @@ struct mlx5e_tc_tunnel {
|
||||
struct mlx5e_encap_entry *e);
|
||||
int (*parse_udp_ports)(struct mlx5e_priv *priv,
|
||||
struct mlx5_flow_spec *spec,
|
||||
struct tc_cls_flower_offload *f,
|
||||
struct flow_cls_offload *f,
|
||||
void *headers_c,
|
||||
void *headers_v);
|
||||
int (*parse_tunnel)(struct mlx5e_priv *priv,
|
||||
struct mlx5_flow_spec *spec,
|
||||
struct tc_cls_flower_offload *f,
|
||||
struct flow_cls_offload *f,
|
||||
void *headers_c,
|
||||
void *headers_v);
|
||||
};
|
||||
@ -68,13 +68,13 @@ bool mlx5e_tc_tun_device_to_offload(struct mlx5e_priv *priv,
|
||||
int mlx5e_tc_tun_parse(struct net_device *filter_dev,
|
||||
struct mlx5e_priv *priv,
|
||||
struct mlx5_flow_spec *spec,
|
||||
struct tc_cls_flower_offload *f,
|
||||
struct flow_cls_offload *f,
|
||||
void *headers_c,
|
||||
void *headers_v, u8 *match_level);
|
||||
|
||||
int mlx5e_tc_tun_parse_udp_ports(struct mlx5e_priv *priv,
|
||||
struct mlx5_flow_spec *spec,
|
||||
struct tc_cls_flower_offload *f,
|
||||
struct flow_cls_offload *f,
|
||||
void *headers_c,
|
||||
void *headers_v);
|
||||
|
||||
|
@ -20,9 +20,9 @@ static int mlx5e_tc_tun_calc_hlen_geneve(struct mlx5e_encap_entry *e)
|
||||
}
|
||||
|
||||
static int mlx5e_tc_tun_check_udp_dport_geneve(struct mlx5e_priv *priv,
|
||||
struct tc_cls_flower_offload *f)
|
||||
struct flow_cls_offload *f)
|
||||
{
|
||||
struct flow_rule *rule = tc_cls_flower_offload_flow_rule(f);
|
||||
struct flow_rule *rule = flow_cls_offload_flow_rule(f);
|
||||
struct netlink_ext_ack *extack = f->common.extack;
|
||||
struct flow_match_ports enc_ports;
|
||||
|
||||
@ -48,7 +48,7 @@ static int mlx5e_tc_tun_check_udp_dport_geneve(struct mlx5e_priv *priv,
|
||||
|
||||
static int mlx5e_tc_tun_parse_udp_ports_geneve(struct mlx5e_priv *priv,
|
||||
struct mlx5_flow_spec *spec,
|
||||
struct tc_cls_flower_offload *f,
|
||||
struct flow_cls_offload *f,
|
||||
void *headers_c,
|
||||
void *headers_v)
|
||||
{
|
||||
@ -122,9 +122,9 @@ static int mlx5e_gen_ip_tunnel_header_geneve(char buf[],
|
||||
|
||||
static int mlx5e_tc_tun_parse_geneve_vni(struct mlx5e_priv *priv,
|
||||
struct mlx5_flow_spec *spec,
|
||||
struct tc_cls_flower_offload *f)
|
||||
struct flow_cls_offload *f)
|
||||
{
|
||||
struct flow_rule *rule = tc_cls_flower_offload_flow_rule(f);
|
||||
struct flow_rule *rule = flow_cls_offload_flow_rule(f);
|
||||
struct netlink_ext_ack *extack = f->common.extack;
|
||||
struct flow_match_enc_keyid enc_keyid;
|
||||
void *misc_c, *misc_v;
|
||||
@ -154,11 +154,11 @@ static int mlx5e_tc_tun_parse_geneve_vni(struct mlx5e_priv *priv,
|
||||
|
||||
static int mlx5e_tc_tun_parse_geneve_options(struct mlx5e_priv *priv,
|
||||
struct mlx5_flow_spec *spec,
|
||||
struct tc_cls_flower_offload *f)
|
||||
struct flow_cls_offload *f)
|
||||
{
|
||||
u8 max_tlv_option_data_len = MLX5_CAP_GEN(priv->mdev, max_geneve_tlv_option_data_len);
|
||||
u8 max_tlv_options = MLX5_CAP_GEN(priv->mdev, max_geneve_tlv_options);
|
||||
struct flow_rule *rule = tc_cls_flower_offload_flow_rule(f);
|
||||
struct flow_rule *rule = flow_cls_offload_flow_rule(f);
|
||||
struct netlink_ext_ack *extack = f->common.extack;
|
||||
void *misc_c, *misc_v, *misc_3_c, *misc_3_v;
|
||||
struct geneve_opt *option_key, *option_mask;
|
||||
@ -277,7 +277,7 @@ static int mlx5e_tc_tun_parse_geneve_options(struct mlx5e_priv *priv,
|
||||
|
||||
static int mlx5e_tc_tun_parse_geneve_params(struct mlx5e_priv *priv,
|
||||
struct mlx5_flow_spec *spec,
|
||||
struct tc_cls_flower_offload *f)
|
||||
struct flow_cls_offload *f)
|
||||
{
|
||||
void *misc_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
|
||||
void *misc_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
|
||||
@ -306,7 +306,7 @@ static int mlx5e_tc_tun_parse_geneve_params(struct mlx5e_priv *priv,
|
||||
|
||||
static int mlx5e_tc_tun_parse_geneve(struct mlx5e_priv *priv,
|
||||
struct mlx5_flow_spec *spec,
|
||||
struct tc_cls_flower_offload *f,
|
||||
struct flow_cls_offload *f,
|
||||
void *headers_c,
|
||||
void *headers_v)
|
||||
{
|
||||
|
@ -54,13 +54,13 @@ static int mlx5e_gen_ip_tunnel_header_gretap(char buf[],
|
||||
|
||||
static int mlx5e_tc_tun_parse_gretap(struct mlx5e_priv *priv,
|
||||
struct mlx5_flow_spec *spec,
|
||||
struct tc_cls_flower_offload *f,
|
||||
struct flow_cls_offload *f,
|
||||
void *headers_c,
|
||||
void *headers_v)
|
||||
{
|
||||
void *misc_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
|
||||
void *misc_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
|
||||
struct flow_rule *rule = tc_cls_flower_offload_flow_rule(f);
|
||||
struct flow_rule *rule = flow_cls_offload_flow_rule(f);
|
||||
|
||||
MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ip_protocol);
|
||||
MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_GRE);
|
||||
|
@ -16,9 +16,9 @@ static int mlx5e_tc_tun_calc_hlen_vxlan(struct mlx5e_encap_entry *e)
|
||||
}
|
||||
|
||||
static int mlx5e_tc_tun_check_udp_dport_vxlan(struct mlx5e_priv *priv,
|
||||
struct tc_cls_flower_offload *f)
|
||||
struct flow_cls_offload *f)
|
||||
{
|
||||
struct flow_rule *rule = tc_cls_flower_offload_flow_rule(f);
|
||||
struct flow_rule *rule = flow_cls_offload_flow_rule(f);
|
||||
struct netlink_ext_ack *extack = f->common.extack;
|
||||
struct flow_match_ports enc_ports;
|
||||
|
||||
@ -44,7 +44,7 @@ static int mlx5e_tc_tun_check_udp_dport_vxlan(struct mlx5e_priv *priv,
|
||||
|
||||
static int mlx5e_tc_tun_parse_udp_ports_vxlan(struct mlx5e_priv *priv,
|
||||
struct mlx5_flow_spec *spec,
|
||||
struct tc_cls_flower_offload *f,
|
||||
struct flow_cls_offload *f,
|
||||
void *headers_c,
|
||||
void *headers_v)
|
||||
{
|
||||
@ -100,11 +100,11 @@ static int mlx5e_gen_ip_tunnel_header_vxlan(char buf[],
|
||||
|
||||
static int mlx5e_tc_tun_parse_vxlan(struct mlx5e_priv *priv,
|
||||
struct mlx5_flow_spec *spec,
|
||||
struct tc_cls_flower_offload *f,
|
||||
struct flow_cls_offload *f,
|
||||
void *headers_c,
|
||||
void *headers_v)
|
||||
{
|
||||
struct flow_rule *rule = tc_cls_flower_offload_flow_rule(f);
|
||||
struct flow_rule *rule = flow_cls_offload_flow_rule(f);
|
||||
struct netlink_ext_ack *extack = f->common.extack;
|
||||
struct flow_match_enc_keyid enc_keyid;
|
||||
void *misc_c, *misc_v;
|
||||
|
@ -3426,17 +3426,17 @@ out:
|
||||
|
||||
#ifdef CONFIG_MLX5_ESWITCH
|
||||
static int mlx5e_setup_tc_cls_flower(struct mlx5e_priv *priv,
|
||||
struct tc_cls_flower_offload *cls_flower,
|
||||
struct flow_cls_offload *cls_flower,
|
||||
int flags)
|
||||
{
|
||||
switch (cls_flower->command) {
|
||||
case TC_CLSFLOWER_REPLACE:
|
||||
case FLOW_CLS_REPLACE:
|
||||
return mlx5e_configure_flower(priv->netdev, priv, cls_flower,
|
||||
flags);
|
||||
case TC_CLSFLOWER_DESTROY:
|
||||
case FLOW_CLS_DESTROY:
|
||||
return mlx5e_delete_flower(priv->netdev, priv, cls_flower,
|
||||
flags);
|
||||
case TC_CLSFLOWER_STATS:
|
||||
case FLOW_CLS_STATS:
|
||||
return mlx5e_stats_flower(priv->netdev, priv, cls_flower,
|
||||
flags);
|
||||
default:
|
||||
@ -3457,36 +3457,22 @@ static int mlx5e_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
|
||||
return -EOPNOTSUPP;
|
||||
}
|
||||
}
|
||||
|
||||
static int mlx5e_setup_tc_block(struct net_device *dev,
|
||||
struct tc_block_offload *f)
|
||||
{
|
||||
struct mlx5e_priv *priv = netdev_priv(dev);
|
||||
|
||||
if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
switch (f->command) {
|
||||
case TC_BLOCK_BIND:
|
||||
return tcf_block_cb_register(f->block, mlx5e_setup_tc_block_cb,
|
||||
priv, priv, f->extack);
|
||||
case TC_BLOCK_UNBIND:
|
||||
tcf_block_cb_unregister(f->block, mlx5e_setup_tc_block_cb,
|
||||
priv);
|
||||
return 0;
|
||||
default:
|
||||
return -EOPNOTSUPP;
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
static LIST_HEAD(mlx5e_block_cb_list);
|
||||
|
||||
static int mlx5e_setup_tc(struct net_device *dev, enum tc_setup_type type,
|
||||
void *type_data)
|
||||
{
|
||||
struct mlx5e_priv *priv = netdev_priv(dev);
|
||||
|
||||
switch (type) {
|
||||
#ifdef CONFIG_MLX5_ESWITCH
|
||||
case TC_SETUP_BLOCK:
|
||||
return mlx5e_setup_tc_block(dev, type_data);
|
||||
return flow_block_cb_setup_simple(type_data,
|
||||
&mlx5e_block_cb_list,
|
||||
mlx5e_setup_tc_block_cb,
|
||||
priv, priv, true);
|
||||
#endif
|
||||
case TC_SETUP_QDISC_MQPRIO:
|
||||
return mlx5e_setup_tc_mqprio(dev, type_data);
|
||||
|
@ -656,7 +656,7 @@ static void mlx5e_rep_indr_clean_block_privs(struct mlx5e_rep_priv *rpriv)
|
||||
|
||||
static int
|
||||
mlx5e_rep_indr_offload(struct net_device *netdev,
|
||||
struct tc_cls_flower_offload *flower,
|
||||
struct flow_cls_offload *flower,
|
||||
struct mlx5e_rep_indr_block_priv *indr_priv)
|
||||
{
|
||||
struct mlx5e_priv *priv = netdev_priv(indr_priv->rpriv->netdev);
|
||||
@ -664,13 +664,13 @@ mlx5e_rep_indr_offload(struct net_device *netdev,
|
||||
int err = 0;
|
||||
|
||||
switch (flower->command) {
|
||||
case TC_CLSFLOWER_REPLACE:
|
||||
case FLOW_CLS_REPLACE:
|
||||
err = mlx5e_configure_flower(netdev, priv, flower, flags);
|
||||
break;
|
||||
case TC_CLSFLOWER_DESTROY:
|
||||
case FLOW_CLS_DESTROY:
|
||||
err = mlx5e_delete_flower(netdev, priv, flower, flags);
|
||||
break;
|
||||
case TC_CLSFLOWER_STATS:
|
||||
case FLOW_CLS_STATS:
|
||||
err = mlx5e_stats_flower(netdev, priv, flower, flags);
|
||||
break;
|
||||
default:
|
||||
@ -693,23 +693,39 @@ static int mlx5e_rep_indr_setup_block_cb(enum tc_setup_type type,
|
||||
}
|
||||
}
|
||||
|
||||
static void mlx5e_rep_indr_tc_block_unbind(void *cb_priv)
|
||||
{
|
||||
struct mlx5e_rep_indr_block_priv *indr_priv = cb_priv;
|
||||
|
||||
list_del(&indr_priv->list);
|
||||
kfree(indr_priv);
|
||||
}
|
||||
|
||||
static LIST_HEAD(mlx5e_block_cb_list);
|
||||
|
||||
static int
|
||||
mlx5e_rep_indr_setup_tc_block(struct net_device *netdev,
|
||||
struct mlx5e_rep_priv *rpriv,
|
||||
struct tc_block_offload *f)
|
||||
struct flow_block_offload *f)
|
||||
{
|
||||
struct mlx5e_rep_indr_block_priv *indr_priv;
|
||||
int err = 0;
|
||||
struct flow_block_cb *block_cb;
|
||||
|
||||
if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
|
||||
if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
f->driver_block_list = &mlx5e_block_cb_list;
|
||||
|
||||
switch (f->command) {
|
||||
case TC_BLOCK_BIND:
|
||||
case FLOW_BLOCK_BIND:
|
||||
indr_priv = mlx5e_rep_indr_block_priv_lookup(rpriv, netdev);
|
||||
if (indr_priv)
|
||||
return -EEXIST;
|
||||
|
||||
if (flow_block_cb_is_busy(mlx5e_rep_indr_setup_block_cb,
|
||||
indr_priv, &mlx5e_block_cb_list))
|
||||
return -EBUSY;
|
||||
|
||||
indr_priv = kmalloc(sizeof(*indr_priv), GFP_KERNEL);
|
||||
if (!indr_priv)
|
||||
return -ENOMEM;
|
||||
@ -719,26 +735,32 @@ mlx5e_rep_indr_setup_tc_block(struct net_device *netdev,
|
||||
list_add(&indr_priv->list,
|
||||
&rpriv->uplink_priv.tc_indr_block_priv_list);
|
||||
|
||||
err = tcf_block_cb_register(f->block,
|
||||
mlx5e_rep_indr_setup_block_cb,
|
||||
indr_priv, indr_priv, f->extack);
|
||||
if (err) {
|
||||
block_cb = flow_block_cb_alloc(f->net,
|
||||
mlx5e_rep_indr_setup_block_cb,
|
||||
indr_priv, indr_priv,
|
||||
mlx5e_rep_indr_tc_block_unbind);
|
||||
if (IS_ERR(block_cb)) {
|
||||
list_del(&indr_priv->list);
|
||||
kfree(indr_priv);
|
||||
return PTR_ERR(block_cb);
|
||||
}
|
||||
flow_block_cb_add(block_cb, f);
|
||||
list_add_tail(&block_cb->driver_list, &mlx5e_block_cb_list);
|
||||
|
||||
return err;
|
||||
case TC_BLOCK_UNBIND:
|
||||
return 0;
|
||||
case FLOW_BLOCK_UNBIND:
|
||||
indr_priv = mlx5e_rep_indr_block_priv_lookup(rpriv, netdev);
|
||||
if (!indr_priv)
|
||||
return -ENOENT;
|
||||
|
||||
tcf_block_cb_unregister(f->block,
|
||||
mlx5e_rep_indr_setup_block_cb,
|
||||
indr_priv);
|
||||
list_del(&indr_priv->list);
|
||||
kfree(indr_priv);
|
||||
block_cb = flow_block_cb_lookup(f,
|
||||
mlx5e_rep_indr_setup_block_cb,
|
||||
indr_priv);
|
||||
if (!block_cb)
|
||||
return -ENOENT;
|
||||
|
||||
flow_block_cb_remove(block_cb, f);
|
||||
list_del(&block_cb->driver_list);
|
||||
return 0;
|
||||
default:
|
||||
return -EOPNOTSUPP;
|
||||
@ -1122,16 +1144,16 @@ static int mlx5e_rep_close(struct net_device *dev)
|
||||
|
||||
static int
|
||||
mlx5e_rep_setup_tc_cls_flower(struct mlx5e_priv *priv,
|
||||
struct tc_cls_flower_offload *cls_flower, int flags)
|
||||
struct flow_cls_offload *cls_flower, int flags)
|
||||
{
|
||||
switch (cls_flower->command) {
|
||||
case TC_CLSFLOWER_REPLACE:
|
||||
case FLOW_CLS_REPLACE:
|
||||
return mlx5e_configure_flower(priv->netdev, priv, cls_flower,
|
||||
flags);
|
||||
case TC_CLSFLOWER_DESTROY:
|
||||
case FLOW_CLS_DESTROY:
|
||||
return mlx5e_delete_flower(priv->netdev, priv, cls_flower,
|
||||
flags);
|
||||
case TC_CLSFLOWER_STATS:
|
||||
case FLOW_CLS_STATS:
|
||||
return mlx5e_stats_flower(priv->netdev, priv, cls_flower,
|
||||
flags);
|
||||
default:
|
||||
@ -1153,32 +1175,16 @@ static int mlx5e_rep_setup_tc_cb(enum tc_setup_type type, void *type_data,
|
||||
}
|
||||
}
|
||||
|
||||
static int mlx5e_rep_setup_tc_block(struct net_device *dev,
|
||||
struct tc_block_offload *f)
|
||||
{
|
||||
struct mlx5e_priv *priv = netdev_priv(dev);
|
||||
|
||||
if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
switch (f->command) {
|
||||
case TC_BLOCK_BIND:
|
||||
return tcf_block_cb_register(f->block, mlx5e_rep_setup_tc_cb,
|
||||
priv, priv, f->extack);
|
||||
case TC_BLOCK_UNBIND:
|
||||
tcf_block_cb_unregister(f->block, mlx5e_rep_setup_tc_cb, priv);
|
||||
return 0;
|
||||
default:
|
||||
return -EOPNOTSUPP;
|
||||
}
|
||||
}
|
||||
|
||||
static int mlx5e_rep_setup_tc(struct net_device *dev, enum tc_setup_type type,
|
||||
void *type_data)
|
||||
{
|
||||
struct mlx5e_priv *priv = netdev_priv(dev);
|
||||
|
||||
switch (type) {
|
||||
case TC_SETUP_BLOCK:
|
||||
return mlx5e_rep_setup_tc_block(dev, type_data);
|
||||
return flow_block_cb_setup_simple(type_data, NULL,
|
||||
mlx5e_rep_setup_tc_cb,
|
||||
priv, priv, true);
|
||||
default:
|
||||
return -EOPNOTSUPP;
|
||||
}
|
||||
|
@ -1350,7 +1350,7 @@ static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
|
||||
|
||||
static int parse_tunnel_attr(struct mlx5e_priv *priv,
|
||||
struct mlx5_flow_spec *spec,
|
||||
struct tc_cls_flower_offload *f,
|
||||
struct flow_cls_offload *f,
|
||||
struct net_device *filter_dev, u8 *match_level)
|
||||
{
|
||||
struct netlink_ext_ack *extack = f->common.extack;
|
||||
@ -1358,7 +1358,7 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv,
|
||||
outer_headers);
|
||||
void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
|
||||
outer_headers);
|
||||
struct flow_rule *rule = tc_cls_flower_offload_flow_rule(f);
|
||||
struct flow_rule *rule = flow_cls_offload_flow_rule(f);
|
||||
int err;
|
||||
|
||||
err = mlx5e_tc_tun_parse(filter_dev, priv, spec, f,
|
||||
@ -1478,7 +1478,7 @@ static void *get_match_headers_value(u32 flags,
|
||||
|
||||
static int __parse_cls_flower(struct mlx5e_priv *priv,
|
||||
struct mlx5_flow_spec *spec,
|
||||
struct tc_cls_flower_offload *f,
|
||||
struct flow_cls_offload *f,
|
||||
struct net_device *filter_dev,
|
||||
u8 *match_level, u8 *tunnel_match_level)
|
||||
{
|
||||
@ -1491,7 +1491,7 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
|
||||
misc_parameters);
|
||||
void *misc_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
|
||||
misc_parameters);
|
||||
struct flow_rule *rule = tc_cls_flower_offload_flow_rule(f);
|
||||
struct flow_rule *rule = flow_cls_offload_flow_rule(f);
|
||||
struct flow_dissector *dissector = rule->match.dissector;
|
||||
u16 addr_type = 0;
|
||||
u8 ip_proto = 0;
|
||||
@ -1831,7 +1831,7 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
|
||||
static int parse_cls_flower(struct mlx5e_priv *priv,
|
||||
struct mlx5e_tc_flow *flow,
|
||||
struct mlx5_flow_spec *spec,
|
||||
struct tc_cls_flower_offload *f,
|
||||
struct flow_cls_offload *f,
|
||||
struct net_device *filter_dev)
|
||||
{
|
||||
struct netlink_ext_ack *extack = f->common.extack;
|
||||
@ -3115,7 +3115,7 @@ static bool is_peer_flow_needed(struct mlx5e_tc_flow *flow)
|
||||
|
||||
static int
|
||||
mlx5e_alloc_flow(struct mlx5e_priv *priv, int attr_size,
|
||||
struct tc_cls_flower_offload *f, u16 flow_flags,
|
||||
struct flow_cls_offload *f, u16 flow_flags,
|
||||
struct mlx5e_tc_flow_parse_attr **__parse_attr,
|
||||
struct mlx5e_tc_flow **__flow)
|
||||
{
|
||||
@ -3149,7 +3149,7 @@ static void
|
||||
mlx5e_flow_esw_attr_init(struct mlx5_esw_flow_attr *esw_attr,
|
||||
struct mlx5e_priv *priv,
|
||||
struct mlx5e_tc_flow_parse_attr *parse_attr,
|
||||
struct tc_cls_flower_offload *f,
|
||||
struct flow_cls_offload *f,
|
||||
struct mlx5_eswitch_rep *in_rep,
|
||||
struct mlx5_core_dev *in_mdev)
|
||||
{
|
||||
@ -3171,13 +3171,13 @@ mlx5e_flow_esw_attr_init(struct mlx5_esw_flow_attr *esw_attr,
|
||||
|
||||
static struct mlx5e_tc_flow *
|
||||
__mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
|
||||
struct tc_cls_flower_offload *f,
|
||||
struct flow_cls_offload *f,
|
||||
u16 flow_flags,
|
||||
struct net_device *filter_dev,
|
||||
struct mlx5_eswitch_rep *in_rep,
|
||||
struct mlx5_core_dev *in_mdev)
|
||||
{
|
||||
struct flow_rule *rule = tc_cls_flower_offload_flow_rule(f);
|
||||
struct flow_rule *rule = flow_cls_offload_flow_rule(f);
|
||||
struct netlink_ext_ack *extack = f->common.extack;
|
||||
struct mlx5e_tc_flow_parse_attr *parse_attr;
|
||||
struct mlx5e_tc_flow *flow;
|
||||
@ -3221,7 +3221,7 @@ out:
|
||||
return ERR_PTR(err);
|
||||
}
|
||||
|
||||
static int mlx5e_tc_add_fdb_peer_flow(struct tc_cls_flower_offload *f,
|
||||
static int mlx5e_tc_add_fdb_peer_flow(struct flow_cls_offload *f,
|
||||
struct mlx5e_tc_flow *flow,
|
||||
u16 flow_flags)
|
||||
{
|
||||
@ -3273,7 +3273,7 @@ out:
|
||||
|
||||
static int
|
||||
mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
|
||||
struct tc_cls_flower_offload *f,
|
||||
struct flow_cls_offload *f,
|
||||
u16 flow_flags,
|
||||
struct net_device *filter_dev,
|
||||
struct mlx5e_tc_flow **__flow)
|
||||
@ -3307,12 +3307,12 @@ out:
|
||||
|
||||
static int
|
||||
mlx5e_add_nic_flow(struct mlx5e_priv *priv,
|
||||
struct tc_cls_flower_offload *f,
|
||||
struct flow_cls_offload *f,
|
||||
u16 flow_flags,
|
||||
struct net_device *filter_dev,
|
||||
struct mlx5e_tc_flow **__flow)
|
||||
{
|
||||
struct flow_rule *rule = tc_cls_flower_offload_flow_rule(f);
|
||||
struct flow_rule *rule = flow_cls_offload_flow_rule(f);
|
||||
struct netlink_ext_ack *extack = f->common.extack;
|
||||
struct mlx5e_tc_flow_parse_attr *parse_attr;
|
||||
struct mlx5e_tc_flow *flow;
|
||||
@ -3358,7 +3358,7 @@ out:
|
||||
|
||||
static int
|
||||
mlx5e_tc_add_flow(struct mlx5e_priv *priv,
|
||||
struct tc_cls_flower_offload *f,
|
||||
struct flow_cls_offload *f,
|
||||
int flags,
|
||||
struct net_device *filter_dev,
|
||||
struct mlx5e_tc_flow **flow)
|
||||
@ -3383,7 +3383,7 @@ mlx5e_tc_add_flow(struct mlx5e_priv *priv,
|
||||
}
|
||||
|
||||
int mlx5e_configure_flower(struct net_device *dev, struct mlx5e_priv *priv,
|
||||
struct tc_cls_flower_offload *f, int flags)
|
||||
struct flow_cls_offload *f, int flags)
|
||||
{
|
||||
struct netlink_ext_ack *extack = f->common.extack;
|
||||
struct rhashtable *tc_ht = get_tc_ht(priv, flags);
|
||||
@ -3430,7 +3430,7 @@ static bool same_flow_direction(struct mlx5e_tc_flow *flow, int flags)
|
||||
}
|
||||
|
||||
int mlx5e_delete_flower(struct net_device *dev, struct mlx5e_priv *priv,
|
||||
struct tc_cls_flower_offload *f, int flags)
|
||||
struct flow_cls_offload *f, int flags)
|
||||
{
|
||||
struct rhashtable *tc_ht = get_tc_ht(priv, flags);
|
||||
struct mlx5e_tc_flow *flow;
|
||||
@ -3449,7 +3449,7 @@ int mlx5e_delete_flower(struct net_device *dev, struct mlx5e_priv *priv,
|
||||
}
|
||||
|
||||
int mlx5e_stats_flower(struct net_device *dev, struct mlx5e_priv *priv,
|
||||
struct tc_cls_flower_offload *f, int flags)
|
||||
struct flow_cls_offload *f, int flags)
|
||||
{
|
||||
struct mlx5_devcom *devcom = priv->mdev->priv.devcom;
|
||||
struct rhashtable *tc_ht = get_tc_ht(priv, flags);
|
||||
|
@ -54,12 +54,12 @@ int mlx5e_tc_esw_init(struct rhashtable *tc_ht);
|
||||
void mlx5e_tc_esw_cleanup(struct rhashtable *tc_ht);
|
||||
|
||||
int mlx5e_configure_flower(struct net_device *dev, struct mlx5e_priv *priv,
|
||||
struct tc_cls_flower_offload *f, int flags);
|
||||
struct flow_cls_offload *f, int flags);
|
||||
int mlx5e_delete_flower(struct net_device *dev, struct mlx5e_priv *priv,
|
||||
struct tc_cls_flower_offload *f, int flags);
|
||||
struct flow_cls_offload *f, int flags);
|
||||
|
||||
int mlx5e_stats_flower(struct net_device *dev, struct mlx5e_priv *priv,
|
||||
struct tc_cls_flower_offload *f, int flags);
|
||||
struct flow_cls_offload *f, int flags);
|
||||
|
||||
struct mlx5e_encap_entry;
|
||||
void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
|
||||
|
@ -1508,21 +1508,21 @@ static int mlxsw_sp_setup_tc_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
|
||||
|
||||
static int
|
||||
mlxsw_sp_setup_tc_cls_flower(struct mlxsw_sp_acl_block *acl_block,
|
||||
struct tc_cls_flower_offload *f)
|
||||
struct flow_cls_offload *f)
|
||||
{
|
||||
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_acl_block_mlxsw_sp(acl_block);
|
||||
|
||||
switch (f->command) {
|
||||
case TC_CLSFLOWER_REPLACE:
|
||||
case FLOW_CLS_REPLACE:
|
||||
return mlxsw_sp_flower_replace(mlxsw_sp, acl_block, f);
|
||||
case TC_CLSFLOWER_DESTROY:
|
||||
case FLOW_CLS_DESTROY:
|
||||
mlxsw_sp_flower_destroy(mlxsw_sp, acl_block, f);
|
||||
return 0;
|
||||
case TC_CLSFLOWER_STATS:
|
||||
case FLOW_CLS_STATS:
|
||||
return mlxsw_sp_flower_stats(mlxsw_sp, acl_block, f);
|
||||
case TC_CLSFLOWER_TMPLT_CREATE:
|
||||
case FLOW_CLS_TMPLT_CREATE:
|
||||
return mlxsw_sp_flower_tmplt_create(mlxsw_sp, acl_block, f);
|
||||
case TC_CLSFLOWER_TMPLT_DESTROY:
|
||||
case FLOW_CLS_TMPLT_DESTROY:
|
||||
mlxsw_sp_flower_tmplt_destroy(mlxsw_sp, acl_block, f);
|
||||
return 0;
|
||||
default:
|
||||
@ -1585,33 +1585,45 @@ static int mlxsw_sp_setup_tc_block_cb_flower(enum tc_setup_type type,
|
||||
}
|
||||
}
|
||||
|
||||
static void mlxsw_sp_tc_block_flower_release(void *cb_priv)
|
||||
{
|
||||
struct mlxsw_sp_acl_block *acl_block = cb_priv;
|
||||
|
||||
mlxsw_sp_acl_block_destroy(acl_block);
|
||||
}
|
||||
|
||||
static LIST_HEAD(mlxsw_sp_block_cb_list);
|
||||
|
||||
static int
|
||||
mlxsw_sp_setup_tc_block_flower_bind(struct mlxsw_sp_port *mlxsw_sp_port,
|
||||
struct tcf_block *block, bool ingress,
|
||||
struct netlink_ext_ack *extack)
|
||||
struct flow_block_offload *f, bool ingress)
|
||||
{
|
||||
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
|
||||
struct mlxsw_sp_acl_block *acl_block;
|
||||
struct tcf_block_cb *block_cb;
|
||||
struct flow_block_cb *block_cb;
|
||||
bool register_block = false;
|
||||
int err;
|
||||
|
||||
block_cb = tcf_block_cb_lookup(block, mlxsw_sp_setup_tc_block_cb_flower,
|
||||
mlxsw_sp);
|
||||
block_cb = flow_block_cb_lookup(f, mlxsw_sp_setup_tc_block_cb_flower,
|
||||
mlxsw_sp);
|
||||
if (!block_cb) {
|
||||
acl_block = mlxsw_sp_acl_block_create(mlxsw_sp, block->net);
|
||||
acl_block = mlxsw_sp_acl_block_create(mlxsw_sp, f->net);
|
||||
if (!acl_block)
|
||||
return -ENOMEM;
|
||||
block_cb = __tcf_block_cb_register(block,
|
||||
mlxsw_sp_setup_tc_block_cb_flower,
|
||||
mlxsw_sp, acl_block, extack);
|
||||
block_cb = flow_block_cb_alloc(f->net,
|
||||
mlxsw_sp_setup_tc_block_cb_flower,
|
||||
mlxsw_sp, acl_block,
|
||||
mlxsw_sp_tc_block_flower_release);
|
||||
if (IS_ERR(block_cb)) {
|
||||
mlxsw_sp_acl_block_destroy(acl_block);
|
||||
err = PTR_ERR(block_cb);
|
||||
goto err_cb_register;
|
||||
}
|
||||
register_block = true;
|
||||
} else {
|
||||
acl_block = tcf_block_cb_priv(block_cb);
|
||||
acl_block = flow_block_cb_priv(block_cb);
|
||||
}
|
||||
tcf_block_cb_incref(block_cb);
|
||||
flow_block_cb_incref(block_cb);
|
||||
err = mlxsw_sp_acl_block_bind(mlxsw_sp, acl_block,
mlxsw_sp_port, ingress);
if (err)
@@ -1622,28 +1634,31 @@ mlxsw_sp_setup_tc_block_flower_bind(struct mlxsw_sp_port *mlxsw_sp_port,
else
mlxsw_sp_port->eg_acl_block = acl_block;

if (register_block) {
flow_block_cb_add(block_cb, f);
list_add_tail(&block_cb->driver_list, &mlxsw_sp_block_cb_list);
}

return 0;

err_block_bind:
if (!tcf_block_cb_decref(block_cb)) {
__tcf_block_cb_unregister(block, block_cb);
if (!flow_block_cb_decref(block_cb))
flow_block_cb_free(block_cb);
err_cb_register:
mlxsw_sp_acl_block_destroy(acl_block);
}
return err;
}

static void
mlxsw_sp_setup_tc_block_flower_unbind(struct mlxsw_sp_port *mlxsw_sp_port,
struct tcf_block *block, bool ingress)
struct flow_block_offload *f, bool ingress)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
struct mlxsw_sp_acl_block *acl_block;
struct tcf_block_cb *block_cb;
struct flow_block_cb *block_cb;
int err;

block_cb = tcf_block_cb_lookup(block, mlxsw_sp_setup_tc_block_cb_flower,
mlxsw_sp);
block_cb = flow_block_cb_lookup(f, mlxsw_sp_setup_tc_block_cb_flower,
mlxsw_sp);
if (!block_cb)
return;

@@ -1652,50 +1667,63 @@ mlxsw_sp_setup_tc_block_flower_unbind(struct mlxsw_sp_port *mlxsw_sp_port,
else
mlxsw_sp_port->eg_acl_block = NULL;

acl_block = tcf_block_cb_priv(block_cb);
acl_block = flow_block_cb_priv(block_cb);
err = mlxsw_sp_acl_block_unbind(mlxsw_sp, acl_block,
mlxsw_sp_port, ingress);
if (!err && !tcf_block_cb_decref(block_cb)) {
__tcf_block_cb_unregister(block, block_cb);
mlxsw_sp_acl_block_destroy(acl_block);
if (!err && !flow_block_cb_decref(block_cb)) {
flow_block_cb_remove(block_cb, f);
list_del(&block_cb->driver_list);
}
}

static int mlxsw_sp_setup_tc_block(struct mlxsw_sp_port *mlxsw_sp_port,
struct tc_block_offload *f)
struct flow_block_offload *f)
{
struct flow_block_cb *block_cb;
tc_setup_cb_t *cb;
bool ingress;
int err;

if (f->binder_type == TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS) {
if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS) {
cb = mlxsw_sp_setup_tc_block_cb_matchall_ig;
ingress = true;
} else if (f->binder_type == TCF_BLOCK_BINDER_TYPE_CLSACT_EGRESS) {
} else if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS) {
cb = mlxsw_sp_setup_tc_block_cb_matchall_eg;
ingress = false;
} else {
return -EOPNOTSUPP;
}

f->driver_block_list = &mlxsw_sp_block_cb_list;

switch (f->command) {
case TC_BLOCK_BIND:
err = tcf_block_cb_register(f->block, cb, mlxsw_sp_port,
mlxsw_sp_port, f->extack);
if (err)
return err;
err = mlxsw_sp_setup_tc_block_flower_bind(mlxsw_sp_port,
f->block, ingress,
f->extack);
case FLOW_BLOCK_BIND:
if (flow_block_cb_is_busy(cb, mlxsw_sp_port,
&mlxsw_sp_block_cb_list))
return -EBUSY;

block_cb = flow_block_cb_alloc(f->net, cb, mlxsw_sp_port,
mlxsw_sp_port, NULL);
if (IS_ERR(block_cb))
return PTR_ERR(block_cb);
err = mlxsw_sp_setup_tc_block_flower_bind(mlxsw_sp_port, f,
ingress);
if (err) {
tcf_block_cb_unregister(f->block, cb, mlxsw_sp_port);
flow_block_cb_free(block_cb);
return err;
}
flow_block_cb_add(block_cb, f);
list_add_tail(&block_cb->driver_list, &mlxsw_sp_block_cb_list);
return 0;
case TC_BLOCK_UNBIND:
case FLOW_BLOCK_UNBIND:
mlxsw_sp_setup_tc_block_flower_unbind(mlxsw_sp_port,
f->block, ingress);
tcf_block_cb_unregister(f->block, cb, mlxsw_sp_port);
f, ingress);
block_cb = flow_block_cb_lookup(f, cb, mlxsw_sp_port);
if (!block_cb)
return -ENOENT;

flow_block_cb_remove(block_cb, f);
list_del(&block_cb->driver_list);
return 0;
default:
return -EOPNOTSUPP;

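The flower bind/unbind paths above keep the driver's existing block reference counting, now expressed with flow_block_cb_incref()/flow_block_cb_decref(). Stripped of the mlxsw specifics (the ocelot conversion further down follows the same shape), the pattern is roughly the sketch below; foo_block_cb(), foo_block_cb_list and the void *priv cookie are hypothetical placeholders, only the flow_block_cb_*() helpers come from this series.

/* Sketch only: one flow_block_cb shared by several binders of the same
 * block, kept alive by the refcount helpers added in this series.
 * foo_block_cb() and foo_block_cb_list are placeholders.
 */
static LIST_HEAD(foo_block_cb_list);

static int foo_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
{
        return -EOPNOTSUPP;     /* a real driver dispatches FLOW_CLS_* here */
}

static int foo_block_bind(void *priv, struct flow_block_offload *f)
{
        struct flow_block_cb *block_cb;

        block_cb = flow_block_cb_lookup(f, foo_block_cb, priv);
        if (!block_cb) {
                /* First binder: allocate the callback and expose it. */
                block_cb = flow_block_cb_alloc(f->net, foo_block_cb, priv,
                                               priv, NULL);
                if (IS_ERR(block_cb))
                        return PTR_ERR(block_cb);
                flow_block_cb_add(block_cb, f);
                list_add_tail(&block_cb->driver_list, &foo_block_cb_list);
        }
        flow_block_cb_incref(block_cb);         /* one reference per binder */
        return 0;
}

static void foo_block_unbind(void *priv, struct flow_block_offload *f)
{
        struct flow_block_cb *block_cb;

        block_cb = flow_block_cb_lookup(f, foo_block_cb, priv);
        if (!block_cb)
                return;

        /* Tear down only when the last binder goes away;
         * flow_block_cb_decref() returns the remaining count. */
        if (!flow_block_cb_decref(block_cb)) {
                flow_block_cb_remove(block_cb, f);
                list_del(&block_cb->driver_list);
        }
}
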
@ -807,19 +807,19 @@ extern const struct mlxsw_afk_ops mlxsw_sp2_afk_ops;
|
||||
/* spectrum_flower.c */
|
||||
int mlxsw_sp_flower_replace(struct mlxsw_sp *mlxsw_sp,
|
||||
struct mlxsw_sp_acl_block *block,
|
||||
struct tc_cls_flower_offload *f);
|
||||
struct flow_cls_offload *f);
|
||||
void mlxsw_sp_flower_destroy(struct mlxsw_sp *mlxsw_sp,
|
||||
struct mlxsw_sp_acl_block *block,
|
||||
struct tc_cls_flower_offload *f);
|
||||
struct flow_cls_offload *f);
|
||||
int mlxsw_sp_flower_stats(struct mlxsw_sp *mlxsw_sp,
|
||||
struct mlxsw_sp_acl_block *block,
|
||||
struct tc_cls_flower_offload *f);
|
||||
struct flow_cls_offload *f);
|
||||
int mlxsw_sp_flower_tmplt_create(struct mlxsw_sp *mlxsw_sp,
|
||||
struct mlxsw_sp_acl_block *block,
|
||||
struct tc_cls_flower_offload *f);
|
||||
struct flow_cls_offload *f);
|
||||
void mlxsw_sp_flower_tmplt_destroy(struct mlxsw_sp *mlxsw_sp,
|
||||
struct mlxsw_sp_acl_block *block,
|
||||
struct tc_cls_flower_offload *f);
|
||||
struct flow_cls_offload *f);
|
||||
|
||||
/* spectrum_qdisc.c */
|
||||
int mlxsw_sp_tc_qdisc_init(struct mlxsw_sp_port *mlxsw_sp_port);
|
||||
|
@ -121,10 +121,10 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
|
||||
}
|
||||
|
||||
static int mlxsw_sp_flower_parse_meta(struct mlxsw_sp_acl_rule_info *rulei,
|
||||
struct tc_cls_flower_offload *f,
|
||||
struct flow_cls_offload *f,
|
||||
struct mlxsw_sp_acl_block *block)
|
||||
{
|
||||
struct flow_rule *rule = tc_cls_flower_offload_flow_rule(f);
|
||||
struct flow_rule *rule = flow_cls_offload_flow_rule(f);
|
||||
struct mlxsw_sp_port *mlxsw_sp_port;
|
||||
struct net_device *ingress_dev;
|
||||
struct flow_match_meta match;
|
||||
@ -164,7 +164,7 @@ static int mlxsw_sp_flower_parse_meta(struct mlxsw_sp_acl_rule_info *rulei,
|
||||
}
|
||||
|
||||
static void mlxsw_sp_flower_parse_ipv4(struct mlxsw_sp_acl_rule_info *rulei,
|
||||
struct tc_cls_flower_offload *f)
|
||||
struct flow_cls_offload *f)
|
||||
{
|
||||
struct flow_match_ipv4_addrs match;
|
||||
|
||||
@ -179,7 +179,7 @@ static void mlxsw_sp_flower_parse_ipv4(struct mlxsw_sp_acl_rule_info *rulei,
|
||||
}
|
||||
|
||||
static void mlxsw_sp_flower_parse_ipv6(struct mlxsw_sp_acl_rule_info *rulei,
|
||||
struct tc_cls_flower_offload *f)
|
||||
struct flow_cls_offload *f)
|
||||
{
|
||||
struct flow_match_ipv6_addrs match;
|
||||
|
||||
@ -213,10 +213,10 @@ static void mlxsw_sp_flower_parse_ipv6(struct mlxsw_sp_acl_rule_info *rulei,
|
||||
|
||||
static int mlxsw_sp_flower_parse_ports(struct mlxsw_sp *mlxsw_sp,
|
||||
struct mlxsw_sp_acl_rule_info *rulei,
|
||||
struct tc_cls_flower_offload *f,
|
||||
struct flow_cls_offload *f,
|
||||
u8 ip_proto)
|
||||
{
|
||||
const struct flow_rule *rule = tc_cls_flower_offload_flow_rule(f);
|
||||
const struct flow_rule *rule = flow_cls_offload_flow_rule(f);
|
||||
struct flow_match_ports match;
|
||||
|
||||
if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS))
|
||||
@ -240,10 +240,10 @@ static int mlxsw_sp_flower_parse_ports(struct mlxsw_sp *mlxsw_sp,
|
||||
|
||||
static int mlxsw_sp_flower_parse_tcp(struct mlxsw_sp *mlxsw_sp,
|
||||
struct mlxsw_sp_acl_rule_info *rulei,
|
||||
struct tc_cls_flower_offload *f,
|
||||
struct flow_cls_offload *f,
|
||||
u8 ip_proto)
|
||||
{
|
||||
const struct flow_rule *rule = tc_cls_flower_offload_flow_rule(f);
|
||||
const struct flow_rule *rule = flow_cls_offload_flow_rule(f);
|
||||
struct flow_match_tcp match;
|
||||
|
||||
if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_TCP))
|
||||
@ -265,10 +265,10 @@ static int mlxsw_sp_flower_parse_tcp(struct mlxsw_sp *mlxsw_sp,
|
||||
|
||||
static int mlxsw_sp_flower_parse_ip(struct mlxsw_sp *mlxsw_sp,
|
||||
struct mlxsw_sp_acl_rule_info *rulei,
|
||||
struct tc_cls_flower_offload *f,
|
||||
struct flow_cls_offload *f,
|
||||
u16 n_proto)
|
||||
{
|
||||
const struct flow_rule *rule = tc_cls_flower_offload_flow_rule(f);
|
||||
const struct flow_rule *rule = flow_cls_offload_flow_rule(f);
|
||||
struct flow_match_ip match;
|
||||
|
||||
if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP))
|
||||
@ -299,9 +299,9 @@ static int mlxsw_sp_flower_parse_ip(struct mlxsw_sp *mlxsw_sp,
|
||||
static int mlxsw_sp_flower_parse(struct mlxsw_sp *mlxsw_sp,
|
||||
struct mlxsw_sp_acl_block *block,
|
||||
struct mlxsw_sp_acl_rule_info *rulei,
|
||||
struct tc_cls_flower_offload *f)
|
||||
struct flow_cls_offload *f)
|
||||
{
|
||||
struct flow_rule *rule = tc_cls_flower_offload_flow_rule(f);
|
||||
struct flow_rule *rule = flow_cls_offload_flow_rule(f);
|
||||
struct flow_dissector *dissector = rule->match.dissector;
|
||||
u16 n_proto_mask = 0;
|
||||
u16 n_proto_key = 0;
|
||||
@ -426,7 +426,7 @@ static int mlxsw_sp_flower_parse(struct mlxsw_sp *mlxsw_sp,
|
||||
|
||||
int mlxsw_sp_flower_replace(struct mlxsw_sp *mlxsw_sp,
|
||||
struct mlxsw_sp_acl_block *block,
|
||||
struct tc_cls_flower_offload *f)
|
||||
struct flow_cls_offload *f)
|
||||
{
|
||||
struct mlxsw_sp_acl_rule_info *rulei;
|
||||
struct mlxsw_sp_acl_ruleset *ruleset;
|
||||
@ -473,7 +473,7 @@ err_rule_create:
|
||||
|
||||
void mlxsw_sp_flower_destroy(struct mlxsw_sp *mlxsw_sp,
|
||||
struct mlxsw_sp_acl_block *block,
|
||||
struct tc_cls_flower_offload *f)
|
||||
struct flow_cls_offload *f)
|
||||
{
|
||||
struct mlxsw_sp_acl_ruleset *ruleset;
|
||||
struct mlxsw_sp_acl_rule *rule;
|
||||
@ -495,7 +495,7 @@ void mlxsw_sp_flower_destroy(struct mlxsw_sp *mlxsw_sp,
|
||||
|
||||
int mlxsw_sp_flower_stats(struct mlxsw_sp *mlxsw_sp,
|
||||
struct mlxsw_sp_acl_block *block,
|
||||
struct tc_cls_flower_offload *f)
|
||||
struct flow_cls_offload *f)
|
||||
{
|
||||
struct mlxsw_sp_acl_ruleset *ruleset;
|
||||
struct mlxsw_sp_acl_rule *rule;
|
||||
@ -531,7 +531,7 @@ err_rule_get_stats:
|
||||
|
||||
int mlxsw_sp_flower_tmplt_create(struct mlxsw_sp *mlxsw_sp,
|
||||
struct mlxsw_sp_acl_block *block,
|
||||
struct tc_cls_flower_offload *f)
|
||||
struct flow_cls_offload *f)
|
||||
{
|
||||
struct mlxsw_sp_acl_ruleset *ruleset;
|
||||
struct mlxsw_sp_acl_rule_info rulei;
|
||||
@ -552,7 +552,7 @@ int mlxsw_sp_flower_tmplt_create(struct mlxsw_sp *mlxsw_sp,
|
||||
|
||||
void mlxsw_sp_flower_tmplt_destroy(struct mlxsw_sp *mlxsw_sp,
|
||||
struct mlxsw_sp_acl_block *block,
|
||||
struct tc_cls_flower_offload *f)
|
||||
struct flow_cls_offload *f)
|
||||
{
|
||||
struct mlxsw_sp_acl_ruleset *ruleset;
|
||||
|
||||
|
@ -225,8 +225,8 @@ int ocelot_ace_init(struct ocelot *ocelot);
|
||||
void ocelot_ace_deinit(void);
|
||||
|
||||
int ocelot_setup_tc_block_flower_bind(struct ocelot_port *port,
|
||||
struct tc_block_offload *f);
|
||||
struct flow_block_offload *f);
|
||||
void ocelot_setup_tc_block_flower_unbind(struct ocelot_port *port,
|
||||
struct tc_block_offload *f);
|
||||
struct flow_block_offload *f);
|
||||
|
||||
#endif /* _MSCC_OCELOT_ACE_H_ */
|
||||
|
@ -19,7 +19,7 @@ static u16 get_prio(u32 prio)
|
||||
return prio >> 16;
|
||||
}
|
||||
|
||||
static int ocelot_flower_parse_action(struct tc_cls_flower_offload *f,
|
||||
static int ocelot_flower_parse_action(struct flow_cls_offload *f,
|
||||
struct ocelot_ace_rule *rule)
|
||||
{
|
||||
const struct flow_action_entry *a;
|
||||
@ -44,10 +44,10 @@ static int ocelot_flower_parse_action(struct tc_cls_flower_offload *f,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int ocelot_flower_parse(struct tc_cls_flower_offload *f,
|
||||
static int ocelot_flower_parse(struct flow_cls_offload *f,
|
||||
struct ocelot_ace_rule *ocelot_rule)
|
||||
{
|
||||
struct flow_rule *rule = tc_cls_flower_offload_flow_rule(f);
|
||||
struct flow_rule *rule = flow_cls_offload_flow_rule(f);
|
||||
struct flow_dissector *dissector = rule->match.dissector;
|
||||
|
||||
if (dissector->used_keys &
|
||||
@ -174,7 +174,7 @@ finished_key_parsing:
|
||||
}
|
||||
|
||||
static
|
||||
struct ocelot_ace_rule *ocelot_ace_rule_create(struct tc_cls_flower_offload *f,
|
||||
struct ocelot_ace_rule *ocelot_ace_rule_create(struct flow_cls_offload *f,
|
||||
struct ocelot_port_block *block)
|
||||
{
|
||||
struct ocelot_ace_rule *rule;
|
||||
@ -188,7 +188,7 @@ struct ocelot_ace_rule *ocelot_ace_rule_create(struct tc_cls_flower_offload *f,
|
||||
return rule;
|
||||
}
|
||||
|
||||
static int ocelot_flower_replace(struct tc_cls_flower_offload *f,
|
||||
static int ocelot_flower_replace(struct flow_cls_offload *f,
|
||||
struct ocelot_port_block *port_block)
|
||||
{
|
||||
struct ocelot_ace_rule *rule;
|
||||
@ -212,7 +212,7 @@ static int ocelot_flower_replace(struct tc_cls_flower_offload *f,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int ocelot_flower_destroy(struct tc_cls_flower_offload *f,
|
||||
static int ocelot_flower_destroy(struct flow_cls_offload *f,
|
||||
struct ocelot_port_block *port_block)
|
||||
{
|
||||
struct ocelot_ace_rule rule;
|
||||
@ -230,7 +230,7 @@ static int ocelot_flower_destroy(struct tc_cls_flower_offload *f,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int ocelot_flower_stats_update(struct tc_cls_flower_offload *f,
|
||||
static int ocelot_flower_stats_update(struct flow_cls_offload *f,
|
||||
struct ocelot_port_block *port_block)
|
||||
{
|
||||
struct ocelot_ace_rule rule;
|
||||
@ -247,15 +247,15 @@ static int ocelot_flower_stats_update(struct tc_cls_flower_offload *f,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int ocelot_setup_tc_cls_flower(struct tc_cls_flower_offload *f,
|
||||
static int ocelot_setup_tc_cls_flower(struct flow_cls_offload *f,
|
||||
struct ocelot_port_block *port_block)
|
||||
{
|
||||
switch (f->command) {
|
||||
case TC_CLSFLOWER_REPLACE:
|
||||
case FLOW_CLS_REPLACE:
|
||||
return ocelot_flower_replace(f, port_block);
|
||||
case TC_CLSFLOWER_DESTROY:
|
||||
case FLOW_CLS_DESTROY:
|
||||
return ocelot_flower_destroy(f, port_block);
|
||||
case TC_CLSFLOWER_STATS:
|
||||
case FLOW_CLS_STATS:
|
||||
return ocelot_flower_stats_update(f, port_block);
|
||||
default:
|
||||
return -EOPNOTSUPP;
|
||||
@ -299,36 +299,45 @@ static void ocelot_port_block_destroy(struct ocelot_port_block *block)
|
||||
kfree(block);
|
||||
}
|
||||
|
||||
static void ocelot_tc_block_unbind(void *cb_priv)
|
||||
{
|
||||
struct ocelot_port_block *port_block = cb_priv;
|
||||
|
||||
ocelot_port_block_destroy(port_block);
|
||||
}
|
||||
|
||||
int ocelot_setup_tc_block_flower_bind(struct ocelot_port *port,
|
||||
struct tc_block_offload *f)
|
||||
struct flow_block_offload *f)
|
||||
{
|
||||
struct ocelot_port_block *port_block;
|
||||
struct tcf_block_cb *block_cb;
|
||||
struct flow_block_cb *block_cb;
|
||||
int ret;
|
||||
|
||||
if (f->binder_type == TCF_BLOCK_BINDER_TYPE_CLSACT_EGRESS)
|
||||
if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS)
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
block_cb = tcf_block_cb_lookup(f->block,
|
||||
ocelot_setup_tc_block_cb_flower, port);
|
||||
block_cb = flow_block_cb_lookup(f, ocelot_setup_tc_block_cb_flower,
|
||||
port);
|
||||
if (!block_cb) {
|
||||
port_block = ocelot_port_block_create(port);
|
||||
if (!port_block)
|
||||
return -ENOMEM;
|
||||
|
||||
block_cb =
|
||||
__tcf_block_cb_register(f->block,
|
||||
ocelot_setup_tc_block_cb_flower,
|
||||
port, port_block, f->extack);
|
||||
block_cb = flow_block_cb_alloc(f->net,
|
||||
ocelot_setup_tc_block_cb_flower,
|
||||
port, port_block,
|
||||
ocelot_tc_block_unbind);
|
||||
if (IS_ERR(block_cb)) {
|
||||
ret = PTR_ERR(block_cb);
|
||||
goto err_cb_register;
|
||||
}
|
||||
flow_block_cb_add(block_cb, f);
|
||||
list_add_tail(&block_cb->driver_list, f->driver_block_list);
|
||||
} else {
|
||||
port_block = tcf_block_cb_priv(block_cb);
|
||||
port_block = flow_block_cb_priv(block_cb);
|
||||
}
|
||||
|
||||
tcf_block_cb_incref(block_cb);
|
||||
flow_block_cb_incref(block_cb);
|
||||
return 0;
|
||||
|
||||
err_cb_register:
|
||||
@ -338,20 +347,17 @@ err_cb_register:
|
||||
}
|
||||
|
||||
void ocelot_setup_tc_block_flower_unbind(struct ocelot_port *port,
|
||||
struct tc_block_offload *f)
|
||||
struct flow_block_offload *f)
|
||||
{
|
||||
struct ocelot_port_block *port_block;
|
||||
struct tcf_block_cb *block_cb;
|
||||
struct flow_block_cb *block_cb;
|
||||
|
||||
block_cb = tcf_block_cb_lookup(f->block,
|
||||
ocelot_setup_tc_block_cb_flower, port);
|
||||
block_cb = flow_block_cb_lookup(f, ocelot_setup_tc_block_cb_flower,
|
||||
port);
|
||||
if (!block_cb)
|
||||
return;
|
||||
|
||||
port_block = tcf_block_cb_priv(block_cb);
|
||||
if (!tcf_block_cb_decref(block_cb)) {
|
||||
tcf_block_cb_unregister(f->block,
|
||||
ocelot_setup_tc_block_cb_flower, port);
|
||||
ocelot_port_block_destroy(port_block);
|
||||
if (!flow_block_cb_decref(block_cb)) {
|
||||
flow_block_cb_remove(block_cb, f);
|
||||
list_del(&block_cb->driver_list);
|
||||
}
|
||||
}
|
||||
|
@ -128,35 +128,54 @@ static int ocelot_setup_tc_block_cb_eg(enum tc_setup_type type,
|
||||
cb_priv, false);
|
||||
}
|
||||
|
||||
static LIST_HEAD(ocelot_block_cb_list);
|
||||
|
||||
static int ocelot_setup_tc_block(struct ocelot_port *port,
|
||||
struct tc_block_offload *f)
|
||||
struct flow_block_offload *f)
|
||||
{
|
||||
struct flow_block_cb *block_cb;
|
||||
tc_setup_cb_t *cb;
|
||||
int ret;
|
||||
int err;
|
||||
|
||||
netdev_dbg(port->dev, "tc_block command %d, binder_type %d\n",
|
||||
f->command, f->binder_type);
|
||||
|
||||
if (f->binder_type == TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS) {
|
||||
if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS) {
|
||||
cb = ocelot_setup_tc_block_cb_ig;
|
||||
port->tc.block_shared = tcf_block_shared(f->block);
|
||||
} else if (f->binder_type == TCF_BLOCK_BINDER_TYPE_CLSACT_EGRESS) {
|
||||
port->tc.block_shared = f->block_shared;
|
||||
} else if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS) {
|
||||
cb = ocelot_setup_tc_block_cb_eg;
|
||||
} else {
|
||||
return -EOPNOTSUPP;
|
||||
}
|
||||
|
||||
switch (f->command) {
|
||||
case TC_BLOCK_BIND:
|
||||
ret = tcf_block_cb_register(f->block, cb, port,
|
||||
port, f->extack);
|
||||
if (ret)
|
||||
return ret;
|
||||
f->driver_block_list = &ocelot_block_cb_list;
|
||||
|
||||
switch (f->command) {
|
||||
case FLOW_BLOCK_BIND:
|
||||
if (flow_block_cb_is_busy(cb, port, &ocelot_block_cb_list))
|
||||
return -EBUSY;
|
||||
|
||||
block_cb = flow_block_cb_alloc(f->net, cb, port, port, NULL);
|
||||
if (IS_ERR(block_cb))
|
||||
return PTR_ERR(block_cb);
|
||||
|
||||
err = ocelot_setup_tc_block_flower_bind(port, f);
|
||||
if (err < 0) {
|
||||
flow_block_cb_free(block_cb);
|
||||
return err;
|
||||
}
|
||||
flow_block_cb_add(block_cb, f);
|
||||
list_add_tail(&block_cb->driver_list, f->driver_block_list);
|
||||
return 0;
|
||||
case FLOW_BLOCK_UNBIND:
|
||||
block_cb = flow_block_cb_lookup(f, cb, port);
|
||||
if (!block_cb)
|
||||
return -ENOENT;
|
||||
|
||||
return ocelot_setup_tc_block_flower_bind(port, f);
|
||||
case TC_BLOCK_UNBIND:
|
||||
ocelot_setup_tc_block_flower_unbind(port, f);
|
||||
tcf_block_cb_unregister(f->block, cb, port);
|
||||
flow_block_cb_remove(block_cb, f);
|
||||
list_del(&block_cb->driver_list);
|
||||
return 0;
|
||||
default:
|
||||
return -EOPNOTSUPP;
|
||||
|
@ -262,22 +262,12 @@ static int nfp_abm_setup_tc_block_cb(enum tc_setup_type type,
|
||||
}
|
||||
}
|
||||
|
||||
int nfp_abm_setup_cls_block(struct net_device *netdev, struct nfp_repr *repr,
|
||||
struct tc_block_offload *f)
|
||||
{
|
||||
if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_EGRESS)
|
||||
return -EOPNOTSUPP;
|
||||
static LIST_HEAD(nfp_abm_block_cb_list);
|
||||
|
||||
switch (f->command) {
|
||||
case TC_BLOCK_BIND:
|
||||
return tcf_block_cb_register(f->block,
|
||||
nfp_abm_setup_tc_block_cb,
|
||||
repr, repr, f->extack);
|
||||
case TC_BLOCK_UNBIND:
|
||||
tcf_block_cb_unregister(f->block, nfp_abm_setup_tc_block_cb,
|
||||
repr);
|
||||
return 0;
|
||||
default:
|
||||
return -EOPNOTSUPP;
|
||||
}
|
||||
int nfp_abm_setup_cls_block(struct net_device *netdev, struct nfp_repr *repr,
|
||||
struct flow_block_offload *f)
|
||||
{
|
||||
return flow_block_cb_setup_simple(f, &nfp_abm_block_cb_list,
|
||||
nfp_abm_setup_tc_block_cb,
|
||||
repr, repr, true);
|
||||
}
|
||||
|
@ -247,7 +247,7 @@ int nfp_abm_setup_tc_mq(struct net_device *netdev, struct nfp_abm_link *alink,
|
||||
int nfp_abm_setup_tc_gred(struct net_device *netdev, struct nfp_abm_link *alink,
|
||||
struct tc_gred_qopt_offload *opt);
|
||||
int nfp_abm_setup_cls_block(struct net_device *netdev, struct nfp_repr *repr,
|
||||
struct tc_block_offload *opt);
|
||||
struct flow_block_offload *opt);
|
||||
|
||||
int nfp_abm_ctrl_read_params(struct nfp_abm_link *alink);
|
||||
int nfp_abm_ctrl_find_addrs(struct nfp_abm *abm);
|
||||
|
@ -160,35 +160,19 @@ static int nfp_bpf_setup_tc_block_cb(enum tc_setup_type type,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int nfp_bpf_setup_tc_block(struct net_device *netdev,
|
||||
struct tc_block_offload *f)
|
||||
{
|
||||
struct nfp_net *nn = netdev_priv(netdev);
|
||||
|
||||
if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
switch (f->command) {
|
||||
case TC_BLOCK_BIND:
|
||||
return tcf_block_cb_register(f->block,
|
||||
nfp_bpf_setup_tc_block_cb,
|
||||
nn, nn, f->extack);
|
||||
case TC_BLOCK_UNBIND:
|
||||
tcf_block_cb_unregister(f->block,
|
||||
nfp_bpf_setup_tc_block_cb,
|
||||
nn);
|
||||
return 0;
|
||||
default:
|
||||
return -EOPNOTSUPP;
|
||||
}
|
||||
}
|
||||
static LIST_HEAD(nfp_bpf_block_cb_list);
|
||||
|
||||
static int nfp_bpf_setup_tc(struct nfp_app *app, struct net_device *netdev,
|
||||
enum tc_setup_type type, void *type_data)
|
||||
{
|
||||
struct nfp_net *nn = netdev_priv(netdev);
|
||||
|
||||
switch (type) {
|
||||
case TC_SETUP_BLOCK:
|
||||
return nfp_bpf_setup_tc_block(netdev, type_data);
|
||||
return flow_block_cb_setup_simple(type_data,
|
||||
&nfp_bpf_block_cb_list,
|
||||
nfp_bpf_setup_tc_block_cb,
|
||||
nn, nn, true);
|
||||
default:
|
||||
return -EOPNOTSUPP;
|
||||
}
|
||||
|
@ -171,7 +171,7 @@ nfp_fl_output(struct nfp_app *app, struct nfp_fl_output *output,
|
||||
}
|
||||
|
||||
static bool
|
||||
nfp_flower_tun_is_gre(struct tc_cls_flower_offload *flow, int start_idx)
|
||||
nfp_flower_tun_is_gre(struct flow_cls_offload *flow, int start_idx)
|
||||
{
|
||||
struct flow_action_entry *act = flow->rule->action.entries;
|
||||
int num_act = flow->rule->action.num_entries;
|
||||
@ -188,7 +188,7 @@ nfp_flower_tun_is_gre(struct tc_cls_flower_offload *flow, int start_idx)
|
||||
|
||||
static enum nfp_flower_tun_type
|
||||
nfp_fl_get_tun_from_act(struct nfp_app *app,
|
||||
struct tc_cls_flower_offload *flow,
|
||||
struct flow_cls_offload *flow,
|
||||
const struct flow_action_entry *act, int act_idx)
|
||||
{
|
||||
const struct ip_tunnel_info *tun = act->tunnel;
|
||||
@ -669,11 +669,11 @@ struct nfp_flower_pedit_acts {
|
||||
};
|
||||
|
||||
static int
|
||||
nfp_fl_commit_mangle(struct tc_cls_flower_offload *flow, char *nfp_action,
|
||||
nfp_fl_commit_mangle(struct flow_cls_offload *flow, char *nfp_action,
|
||||
int *a_len, struct nfp_flower_pedit_acts *set_act,
|
||||
u32 *csum_updated)
|
||||
{
|
||||
struct flow_rule *rule = tc_cls_flower_offload_flow_rule(flow);
|
||||
struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
|
||||
size_t act_size = 0;
|
||||
u8 ip_proto = 0;
|
||||
|
||||
@ -771,7 +771,7 @@ nfp_fl_commit_mangle(struct tc_cls_flower_offload *flow, char *nfp_action,
|
||||
|
||||
static int
|
||||
nfp_fl_pedit(const struct flow_action_entry *act,
|
||||
struct tc_cls_flower_offload *flow, char *nfp_action, int *a_len,
|
||||
struct flow_cls_offload *flow, char *nfp_action, int *a_len,
|
||||
u32 *csum_updated, struct nfp_flower_pedit_acts *set_act,
|
||||
struct netlink_ext_ack *extack)
|
||||
{
|
||||
@ -858,7 +858,7 @@ nfp_flower_output_action(struct nfp_app *app,
|
||||
|
||||
static int
|
||||
nfp_flower_loop_action(struct nfp_app *app, const struct flow_action_entry *act,
|
||||
struct tc_cls_flower_offload *flow,
|
||||
struct flow_cls_offload *flow,
|
||||
struct nfp_fl_payload *nfp_fl, int *a_len,
|
||||
struct net_device *netdev,
|
||||
enum nfp_flower_tun_type *tun_type, int *tun_out_cnt,
|
||||
@ -1021,7 +1021,7 @@ static bool nfp_fl_check_mangle_end(struct flow_action *flow_act,
|
||||
}
|
||||
|
||||
int nfp_flower_compile_action(struct nfp_app *app,
|
||||
struct tc_cls_flower_offload *flow,
|
||||
struct flow_cls_offload *flow,
|
||||
struct net_device *netdev,
|
||||
struct nfp_fl_payload *nfp_flow,
|
||||
struct netlink_ext_ack *extack)
|
||||
|
@ -343,19 +343,19 @@ int nfp_flower_merge_offloaded_flows(struct nfp_app *app,
|
||||
struct nfp_fl_payload *sub_flow1,
|
||||
struct nfp_fl_payload *sub_flow2);
|
||||
int nfp_flower_compile_flow_match(struct nfp_app *app,
|
||||
struct tc_cls_flower_offload *flow,
|
||||
struct flow_cls_offload *flow,
|
||||
struct nfp_fl_key_ls *key_ls,
|
||||
struct net_device *netdev,
|
||||
struct nfp_fl_payload *nfp_flow,
|
||||
enum nfp_flower_tun_type tun_type,
|
||||
struct netlink_ext_ack *extack);
|
||||
int nfp_flower_compile_action(struct nfp_app *app,
|
||||
struct tc_cls_flower_offload *flow,
|
||||
struct flow_cls_offload *flow,
|
||||
struct net_device *netdev,
|
||||
struct nfp_fl_payload *nfp_flow,
|
||||
struct netlink_ext_ack *extack);
|
||||
int nfp_compile_flow_metadata(struct nfp_app *app,
|
||||
struct tc_cls_flower_offload *flow,
|
||||
struct flow_cls_offload *flow,
|
||||
struct nfp_fl_payload *nfp_flow,
|
||||
struct net_device *netdev,
|
||||
struct netlink_ext_ack *extack);
|
||||
|
@ -10,9 +10,9 @@
|
||||
static void
|
||||
nfp_flower_compile_meta_tci(struct nfp_flower_meta_tci *ext,
|
||||
struct nfp_flower_meta_tci *msk,
|
||||
struct tc_cls_flower_offload *flow, u8 key_type)
|
||||
struct flow_cls_offload *flow, u8 key_type)
|
||||
{
|
||||
struct flow_rule *rule = tc_cls_flower_offload_flow_rule(flow);
|
||||
struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
|
||||
u16 tmp_tci;
|
||||
|
||||
memset(ext, 0, sizeof(struct nfp_flower_meta_tci));
|
||||
@ -78,9 +78,9 @@ nfp_flower_compile_port(struct nfp_flower_in_port *frame, u32 cmsg_port,
|
||||
static void
|
||||
nfp_flower_compile_mac(struct nfp_flower_mac_mpls *ext,
|
||||
struct nfp_flower_mac_mpls *msk,
|
||||
struct tc_cls_flower_offload *flow)
|
||||
struct flow_cls_offload *flow)
|
||||
{
|
||||
struct flow_rule *rule = tc_cls_flower_offload_flow_rule(flow);
|
||||
struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
|
||||
|
||||
memset(ext, 0, sizeof(struct nfp_flower_mac_mpls));
|
||||
memset(msk, 0, sizeof(struct nfp_flower_mac_mpls));
|
||||
@ -130,9 +130,9 @@ nfp_flower_compile_mac(struct nfp_flower_mac_mpls *ext,
|
||||
static void
|
||||
nfp_flower_compile_tport(struct nfp_flower_tp_ports *ext,
|
||||
struct nfp_flower_tp_ports *msk,
|
||||
struct tc_cls_flower_offload *flow)
|
||||
struct flow_cls_offload *flow)
|
||||
{
|
||||
struct flow_rule *rule = tc_cls_flower_offload_flow_rule(flow);
|
||||
struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
|
||||
|
||||
memset(ext, 0, sizeof(struct nfp_flower_tp_ports));
|
||||
memset(msk, 0, sizeof(struct nfp_flower_tp_ports));
|
||||
@ -151,9 +151,9 @@ nfp_flower_compile_tport(struct nfp_flower_tp_ports *ext,
|
||||
static void
|
||||
nfp_flower_compile_ip_ext(struct nfp_flower_ip_ext *ext,
|
||||
struct nfp_flower_ip_ext *msk,
|
||||
struct tc_cls_flower_offload *flow)
|
||||
struct flow_cls_offload *flow)
|
||||
{
|
||||
struct flow_rule *rule = tc_cls_flower_offload_flow_rule(flow);
|
||||
struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
|
||||
|
||||
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
|
||||
struct flow_match_basic match;
|
||||
@ -225,9 +225,9 @@ nfp_flower_compile_ip_ext(struct nfp_flower_ip_ext *ext,
|
||||
static void
|
||||
nfp_flower_compile_ipv4(struct nfp_flower_ipv4 *ext,
|
||||
struct nfp_flower_ipv4 *msk,
|
||||
struct tc_cls_flower_offload *flow)
|
||||
struct flow_cls_offload *flow)
|
||||
{
|
||||
struct flow_rule *rule = tc_cls_flower_offload_flow_rule(flow);
|
||||
struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
|
||||
struct flow_match_ipv4_addrs match;
|
||||
|
||||
memset(ext, 0, sizeof(struct nfp_flower_ipv4));
|
||||
@ -247,9 +247,9 @@ nfp_flower_compile_ipv4(struct nfp_flower_ipv4 *ext,
|
||||
static void
|
||||
nfp_flower_compile_ipv6(struct nfp_flower_ipv6 *ext,
|
||||
struct nfp_flower_ipv6 *msk,
|
||||
struct tc_cls_flower_offload *flow)
|
||||
struct flow_cls_offload *flow)
|
||||
{
|
||||
struct flow_rule *rule = tc_cls_flower_offload_flow_rule(flow);
|
||||
struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
|
||||
|
||||
memset(ext, 0, sizeof(struct nfp_flower_ipv6));
|
||||
memset(msk, 0, sizeof(struct nfp_flower_ipv6));
|
||||
@ -269,7 +269,7 @@ nfp_flower_compile_ipv6(struct nfp_flower_ipv6 *ext,
|
||||
|
||||
static int
|
||||
nfp_flower_compile_geneve_opt(void *ext, void *msk,
|
||||
struct tc_cls_flower_offload *flow)
|
||||
struct flow_cls_offload *flow)
|
||||
{
|
||||
struct flow_match_enc_opts match;
|
||||
|
||||
@ -283,9 +283,9 @@ nfp_flower_compile_geneve_opt(void *ext, void *msk,
|
||||
static void
|
||||
nfp_flower_compile_tun_ipv4_addrs(struct nfp_flower_tun_ipv4 *ext,
|
||||
struct nfp_flower_tun_ipv4 *msk,
|
||||
struct tc_cls_flower_offload *flow)
|
||||
struct flow_cls_offload *flow)
|
||||
{
|
||||
struct flow_rule *rule = tc_cls_flower_offload_flow_rule(flow);
|
||||
struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
|
||||
|
||||
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS)) {
|
||||
struct flow_match_ipv4_addrs match;
|
||||
@ -301,9 +301,9 @@ nfp_flower_compile_tun_ipv4_addrs(struct nfp_flower_tun_ipv4 *ext,
|
||||
static void
|
||||
nfp_flower_compile_tun_ip_ext(struct nfp_flower_tun_ip_ext *ext,
|
||||
struct nfp_flower_tun_ip_ext *msk,
|
||||
struct tc_cls_flower_offload *flow)
|
||||
struct flow_cls_offload *flow)
|
||||
{
|
||||
struct flow_rule *rule = tc_cls_flower_offload_flow_rule(flow);
|
||||
struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
|
||||
|
||||
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IP)) {
|
||||
struct flow_match_ip match;
|
||||
@ -319,9 +319,9 @@ nfp_flower_compile_tun_ip_ext(struct nfp_flower_tun_ip_ext *ext,
|
||||
static void
|
||||
nfp_flower_compile_ipv4_gre_tun(struct nfp_flower_ipv4_gre_tun *ext,
|
||||
struct nfp_flower_ipv4_gre_tun *msk,
|
||||
struct tc_cls_flower_offload *flow)
|
||||
struct flow_cls_offload *flow)
|
||||
{
|
||||
struct flow_rule *rule = tc_cls_flower_offload_flow_rule(flow);
|
||||
struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
|
||||
|
||||
memset(ext, 0, sizeof(struct nfp_flower_ipv4_gre_tun));
|
||||
memset(msk, 0, sizeof(struct nfp_flower_ipv4_gre_tun));
|
||||
@ -348,9 +348,9 @@ nfp_flower_compile_ipv4_gre_tun(struct nfp_flower_ipv4_gre_tun *ext,
|
||||
static void
|
||||
nfp_flower_compile_ipv4_udp_tun(struct nfp_flower_ipv4_udp_tun *ext,
|
||||
struct nfp_flower_ipv4_udp_tun *msk,
|
||||
struct tc_cls_flower_offload *flow)
|
||||
struct flow_cls_offload *flow)
|
||||
{
|
||||
struct flow_rule *rule = tc_cls_flower_offload_flow_rule(flow);
|
||||
struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
|
||||
|
||||
memset(ext, 0, sizeof(struct nfp_flower_ipv4_udp_tun));
|
||||
memset(msk, 0, sizeof(struct nfp_flower_ipv4_udp_tun));
|
||||
@ -371,7 +371,7 @@ nfp_flower_compile_ipv4_udp_tun(struct nfp_flower_ipv4_udp_tun *ext,
|
||||
}
|
||||
|
||||
int nfp_flower_compile_flow_match(struct nfp_app *app,
|
||||
struct tc_cls_flower_offload *flow,
|
||||
struct flow_cls_offload *flow,
|
||||
struct nfp_fl_key_ls *key_ls,
|
||||
struct net_device *netdev,
|
||||
struct nfp_fl_payload *nfp_flow,
|
||||
|
@ -290,7 +290,7 @@ nfp_check_mask_remove(struct nfp_app *app, char *mask_data, u32 mask_len,
|
||||
}
|
||||
|
||||
int nfp_compile_flow_metadata(struct nfp_app *app,
|
||||
struct tc_cls_flower_offload *flow,
|
||||
struct flow_cls_offload *flow,
|
||||
struct nfp_fl_payload *nfp_flow,
|
||||
struct net_device *netdev,
|
||||
struct netlink_ext_ack *extack)
|
||||
|
@ -121,9 +121,9 @@ nfp_flower_xmit_flow(struct nfp_app *app, struct nfp_fl_payload *nfp_flow,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static bool nfp_flower_check_higher_than_mac(struct tc_cls_flower_offload *f)
|
||||
static bool nfp_flower_check_higher_than_mac(struct flow_cls_offload *f)
|
||||
{
|
||||
struct flow_rule *rule = tc_cls_flower_offload_flow_rule(f);
|
||||
struct flow_rule *rule = flow_cls_offload_flow_rule(f);
|
||||
|
||||
return flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS) ||
|
||||
flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS) ||
|
||||
@ -131,9 +131,9 @@ static bool nfp_flower_check_higher_than_mac(struct tc_cls_flower_offload *f)
|
||||
flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ICMP);
|
||||
}
|
||||
|
||||
static bool nfp_flower_check_higher_than_l3(struct tc_cls_flower_offload *f)
|
||||
static bool nfp_flower_check_higher_than_l3(struct flow_cls_offload *f)
|
||||
{
|
||||
struct flow_rule *rule = tc_cls_flower_offload_flow_rule(f);
|
||||
struct flow_rule *rule = flow_cls_offload_flow_rule(f);
|
||||
|
||||
return flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS) ||
|
||||
flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ICMP);
|
||||
@ -212,11 +212,11 @@ static int
|
||||
nfp_flower_calculate_key_layers(struct nfp_app *app,
|
||||
struct net_device *netdev,
|
||||
struct nfp_fl_key_ls *ret_key_ls,
|
||||
struct tc_cls_flower_offload *flow,
|
||||
struct flow_cls_offload *flow,
|
||||
enum nfp_flower_tun_type *tun_type,
|
||||
struct netlink_ext_ack *extack)
|
||||
{
|
||||
struct flow_rule *rule = tc_cls_flower_offload_flow_rule(flow);
|
||||
struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
|
||||
struct flow_dissector *dissector = rule->match.dissector;
|
||||
struct flow_match_basic basic = { NULL, NULL};
|
||||
struct nfp_flower_priv *priv = app->priv;
|
||||
@ -866,7 +866,7 @@ int nfp_flower_merge_offloaded_flows(struct nfp_app *app,
|
||||
struct nfp_fl_payload *sub_flow1,
|
||||
struct nfp_fl_payload *sub_flow2)
|
||||
{
|
||||
struct tc_cls_flower_offload merge_tc_off;
|
||||
struct flow_cls_offload merge_tc_off;
|
||||
struct nfp_flower_priv *priv = app->priv;
|
||||
struct netlink_ext_ack *extack = NULL;
|
||||
struct nfp_fl_payload *merge_flow;
|
||||
@ -962,7 +962,7 @@ err_destroy_merge_flow:
|
||||
*/
|
||||
static int
|
||||
nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
|
||||
struct tc_cls_flower_offload *flow)
|
||||
struct flow_cls_offload *flow)
|
||||
{
|
||||
enum nfp_flower_tun_type tun_type = NFP_FL_TUNNEL_NONE;
|
||||
struct nfp_flower_priv *priv = app->priv;
|
||||
@ -1125,7 +1125,7 @@ nfp_flower_del_linked_merge_flows(struct nfp_app *app,
|
||||
*/
|
||||
static int
|
||||
nfp_flower_del_offload(struct nfp_app *app, struct net_device *netdev,
|
||||
struct tc_cls_flower_offload *flow)
|
||||
struct flow_cls_offload *flow)
|
||||
{
|
||||
struct nfp_flower_priv *priv = app->priv;
|
||||
struct netlink_ext_ack *extack = NULL;
|
||||
@ -1232,7 +1232,7 @@ nfp_flower_update_merge_stats(struct nfp_app *app,
|
||||
*/
|
||||
static int
|
||||
nfp_flower_get_stats(struct nfp_app *app, struct net_device *netdev,
|
||||
struct tc_cls_flower_offload *flow)
|
||||
struct flow_cls_offload *flow)
|
||||
{
|
||||
struct nfp_flower_priv *priv = app->priv;
|
||||
struct netlink_ext_ack *extack = NULL;
|
||||
@ -1265,17 +1265,17 @@ nfp_flower_get_stats(struct nfp_app *app, struct net_device *netdev,
|
||||
|
||||
static int
|
||||
nfp_flower_repr_offload(struct nfp_app *app, struct net_device *netdev,
|
||||
struct tc_cls_flower_offload *flower)
|
||||
struct flow_cls_offload *flower)
|
||||
{
|
||||
if (!eth_proto_is_802_3(flower->common.protocol))
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
switch (flower->command) {
|
||||
case TC_CLSFLOWER_REPLACE:
|
||||
case FLOW_CLS_REPLACE:
|
||||
return nfp_flower_add_offload(app, netdev, flower);
|
||||
case TC_CLSFLOWER_DESTROY:
|
||||
case FLOW_CLS_DESTROY:
|
||||
return nfp_flower_del_offload(app, netdev, flower);
|
||||
case TC_CLSFLOWER_STATS:
|
||||
case FLOW_CLS_STATS:
|
||||
return nfp_flower_get_stats(app, netdev, flower);
|
||||
default:
|
||||
return -EOPNOTSUPP;
|
||||
@ -1302,27 +1302,45 @@ static int nfp_flower_setup_tc_block_cb(enum tc_setup_type type,
|
||||
}
|
||||
}
|
||||
|
||||
static LIST_HEAD(nfp_block_cb_list);
|
||||
|
||||
static int nfp_flower_setup_tc_block(struct net_device *netdev,
|
||||
struct tc_block_offload *f)
|
||||
struct flow_block_offload *f)
|
||||
{
|
||||
struct nfp_repr *repr = netdev_priv(netdev);
|
||||
struct nfp_flower_repr_priv *repr_priv;
|
||||
struct flow_block_cb *block_cb;
|
||||
|
||||
if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
|
||||
if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
repr_priv = repr->app_priv;
|
||||
repr_priv->block_shared = tcf_block_shared(f->block);
|
||||
repr_priv->block_shared = f->block_shared;
|
||||
f->driver_block_list = &nfp_block_cb_list;
|
||||
|
||||
switch (f->command) {
|
||||
case TC_BLOCK_BIND:
|
||||
return tcf_block_cb_register(f->block,
|
||||
nfp_flower_setup_tc_block_cb,
|
||||
repr, repr, f->extack);
|
||||
case TC_BLOCK_UNBIND:
|
||||
tcf_block_cb_unregister(f->block,
|
||||
nfp_flower_setup_tc_block_cb,
|
||||
repr);
|
||||
case FLOW_BLOCK_BIND:
|
||||
if (flow_block_cb_is_busy(nfp_flower_setup_tc_block_cb, repr,
|
||||
&nfp_block_cb_list))
|
||||
return -EBUSY;
|
||||
|
||||
block_cb = flow_block_cb_alloc(f->net,
|
||||
nfp_flower_setup_tc_block_cb,
|
||||
repr, repr, NULL);
|
||||
if (IS_ERR(block_cb))
|
||||
return PTR_ERR(block_cb);
|
||||
|
||||
flow_block_cb_add(block_cb, f);
|
||||
list_add_tail(&block_cb->driver_list, &nfp_block_cb_list);
|
||||
return 0;
|
||||
case FLOW_BLOCK_UNBIND:
|
||||
block_cb = flow_block_cb_lookup(f, nfp_flower_setup_tc_block_cb,
|
||||
repr);
|
||||
if (!block_cb)
|
||||
return -ENOENT;
|
||||
|
||||
flow_block_cb_remove(block_cb, f);
|
||||
list_del(&block_cb->driver_list);
|
||||
return 0;
|
||||
default:
|
||||
return -EOPNOTSUPP;
|
||||
@ -1367,7 +1385,7 @@ static int nfp_flower_setup_indr_block_cb(enum tc_setup_type type,
|
||||
void *type_data, void *cb_priv)
|
||||
{
|
||||
struct nfp_flower_indr_block_cb_priv *priv = cb_priv;
|
||||
struct tc_cls_flower_offload *flower = type_data;
|
||||
struct flow_cls_offload *flower = type_data;
|
||||
|
||||
if (flower->common.chain_index)
|
||||
return -EOPNOTSUPP;
|
||||
@ -1381,21 +1399,29 @@ static int nfp_flower_setup_indr_block_cb(enum tc_setup_type type,
|
||||
}
|
||||
}
|
||||
|
||||
static void nfp_flower_setup_indr_tc_release(void *cb_priv)
|
||||
{
|
||||
struct nfp_flower_indr_block_cb_priv *priv = cb_priv;
|
||||
|
||||
list_del(&priv->list);
|
||||
kfree(priv);
|
||||
}
|
||||
|
||||
static int
|
||||
nfp_flower_setup_indr_tc_block(struct net_device *netdev, struct nfp_app *app,
|
||||
struct tc_block_offload *f)
|
||||
struct flow_block_offload *f)
|
||||
{
|
||||
struct nfp_flower_indr_block_cb_priv *cb_priv;
|
||||
struct nfp_flower_priv *priv = app->priv;
|
||||
int err;
|
||||
struct flow_block_cb *block_cb;
|
||||
|
||||
if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS &&
|
||||
!(f->binder_type == TCF_BLOCK_BINDER_TYPE_CLSACT_EGRESS &&
|
||||
if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS &&
|
||||
!(f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS &&
|
||||
nfp_flower_internal_port_can_offload(app, netdev)))
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
switch (f->command) {
|
||||
case TC_BLOCK_BIND:
|
||||
case FLOW_BLOCK_BIND:
|
||||
cb_priv = kmalloc(sizeof(*cb_priv), GFP_KERNEL);
|
||||
if (!cb_priv)
|
||||
return -ENOMEM;
|
||||
@ -1404,26 +1430,32 @@ nfp_flower_setup_indr_tc_block(struct net_device *netdev, struct nfp_app *app,
|
||||
cb_priv->app = app;
|
||||
list_add(&cb_priv->list, &priv->indr_block_cb_priv);
|
||||
|
||||
err = tcf_block_cb_register(f->block,
|
||||
nfp_flower_setup_indr_block_cb,
|
||||
cb_priv, cb_priv, f->extack);
|
||||
if (err) {
|
||||
block_cb = flow_block_cb_alloc(f->net,
|
||||
nfp_flower_setup_indr_block_cb,
|
||||
cb_priv, cb_priv,
|
||||
nfp_flower_setup_indr_tc_release);
|
||||
if (IS_ERR(block_cb)) {
|
||||
list_del(&cb_priv->list);
|
||||
kfree(cb_priv);
|
||||
return PTR_ERR(block_cb);
|
||||
}
|
||||
|
||||
return err;
|
||||
case TC_BLOCK_UNBIND:
|
||||
flow_block_cb_add(block_cb, f);
|
||||
list_add_tail(&block_cb->driver_list, &nfp_block_cb_list);
|
||||
return 0;
|
||||
case FLOW_BLOCK_UNBIND:
|
||||
cb_priv = nfp_flower_indr_block_cb_priv_lookup(app, netdev);
|
||||
if (!cb_priv)
|
||||
return -ENOENT;
|
||||
|
||||
tcf_block_cb_unregister(f->block,
|
||||
nfp_flower_setup_indr_block_cb,
|
||||
cb_priv);
|
||||
list_del(&cb_priv->list);
|
||||
kfree(cb_priv);
|
||||
block_cb = flow_block_cb_lookup(f,
|
||||
nfp_flower_setup_indr_block_cb,
|
||||
cb_priv);
|
||||
if (!block_cb)
|
||||
return -ENOENT;
|
||||
|
||||
flow_block_cb_remove(block_cb, f);
|
||||
list_del(&block_cb->driver_list);
|
||||
return 0;
|
||||
default:
|
||||
return -EOPNOTSUPP;
|
||||
|
@ -551,7 +551,7 @@ int qede_txq_has_work(struct qede_tx_queue *txq);
|
||||
void qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq, u8 count);
|
||||
void qede_update_rx_prod(struct qede_dev *edev, struct qede_rx_queue *rxq);
|
||||
int qede_add_tc_flower_fltr(struct qede_dev *edev, __be16 proto,
|
||||
struct tc_cls_flower_offload *f);
|
||||
struct flow_cls_offload *f);
|
||||
|
||||
#define RX_RING_SIZE_POW 13
|
||||
#define RX_RING_SIZE ((u16)BIT(RX_RING_SIZE_POW))
|
||||
|
@ -1943,7 +1943,7 @@ qede_parse_flow_attr(struct qede_dev *edev, __be16 proto,
|
||||
}
|
||||
|
||||
int qede_add_tc_flower_fltr(struct qede_dev *edev, __be16 proto,
|
||||
struct tc_cls_flower_offload *f)
|
||||
struct flow_cls_offload *f)
|
||||
{
|
||||
struct qede_arfs_fltr_node *n;
|
||||
int min_hlen, rc = -EINVAL;
|
||||
|
@ -548,13 +548,13 @@ static int qede_setup_tc(struct net_device *ndev, u8 num_tc)
|
||||
}
|
||||
|
||||
static int
|
||||
qede_set_flower(struct qede_dev *edev, struct tc_cls_flower_offload *f,
|
||||
qede_set_flower(struct qede_dev *edev, struct flow_cls_offload *f,
|
||||
__be16 proto)
|
||||
{
|
||||
switch (f->command) {
|
||||
case TC_CLSFLOWER_REPLACE:
|
||||
case FLOW_CLS_REPLACE:
|
||||
return qede_add_tc_flower_fltr(edev, proto, f);
|
||||
case TC_CLSFLOWER_DESTROY:
|
||||
case FLOW_CLS_DESTROY:
|
||||
return qede_delete_flow_filter(edev, f->cookie);
|
||||
default:
|
||||
return -EOPNOTSUPP;
|
||||
@ -564,7 +564,7 @@ qede_set_flower(struct qede_dev *edev, struct tc_cls_flower_offload *f,
|
||||
static int qede_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
|
||||
void *cb_priv)
|
||||
{
|
||||
struct tc_cls_flower_offload *f;
|
||||
struct flow_cls_offload *f;
|
||||
struct qede_dev *edev = cb_priv;
|
||||
|
||||
if (!tc_cls_can_offload_and_chain0(edev->ndev, type_data))
|
||||
@@ -579,24 +579,7 @@ static int qede_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
}
}

static int qede_setup_tc_block(struct qede_dev *edev,
struct tc_block_offload *f)
{
if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
return -EOPNOTSUPP;

switch (f->command) {
case TC_BLOCK_BIND:
return tcf_block_cb_register(f->block,
qede_setup_tc_block_cb,
edev, edev, f->extack);
case TC_BLOCK_UNBIND:
tcf_block_cb_unregister(f->block, qede_setup_tc_block_cb, edev);
return 0;
default:
return -EOPNOTSUPP;
}
}
static LIST_HEAD(qede_block_cb_list);

static int
qede_setup_tc_offload(struct net_device *dev, enum tc_setup_type type,
@@ -607,7 +590,10 @@ qede_setup_tc_offload(struct net_device *dev, enum tc_setup_type type,

switch (type) {
case TC_SETUP_BLOCK:
return qede_setup_tc_block(edev, type_data);
return flow_block_cb_setup_simple(type_data,
&qede_block_cb_list,
qede_setup_tc_block_cb,
edev, edev, true);
case TC_SETUP_QDISC_MQPRIO:
mqprio = type_data;

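The _simple() helper used here, and by several of the other single-block drivers converted in this series (nfp, stmmac, netdevsim below), is only declared in the include/net/flow_offload.h hunk of this diff; its body is not part of the hunks shown. As a rough, non-authoritative sketch inferred from the call sites and from the declared API, it is expected to behave like this:

/* Inferred sketch of flow_block_cb_setup_simple(); not copied from the patch.
 * It centralises the bind/unbind boilerplate that the open-coded drivers
 * (mlxsw, ocelot, nfp flower) still do by hand.
 */
int flow_block_cb_setup_simple(struct flow_block_offload *f,
                               struct list_head *driver_list,
                               tc_setup_cb_t *cb, void *cb_ident,
                               void *cb_priv, bool ingress_only)
{
        struct flow_block_cb *block_cb;

        if (ingress_only &&
            f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
                return -EOPNOTSUPP;

        /* Lookups walk f->driver_block_list, so publish the driver's list. */
        f->driver_block_list = driver_list;

        switch (f->command) {
        case FLOW_BLOCK_BIND:
                if (flow_block_cb_is_busy(cb, cb_ident, driver_list))
                        return -EBUSY;

                block_cb = flow_block_cb_alloc(f->net, cb, cb_ident,
                                               cb_priv, NULL);
                if (IS_ERR(block_cb))
                        return PTR_ERR(block_cb);

                flow_block_cb_add(block_cb, f);
                list_add_tail(&block_cb->driver_list, driver_list);
                return 0;
        case FLOW_BLOCK_UNBIND:
                block_cb = flow_block_cb_lookup(f, cb, cb_ident);
                if (!block_cb)
                        return -ENOENT;

                flow_block_cb_remove(block_cb, f);
                list_del(&block_cb->driver_list);
                return 0;
        default:
                return -EOPNOTSUPP;
        }
}
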
@ -3769,23 +3769,7 @@ static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int stmmac_setup_tc_block(struct stmmac_priv *priv,
|
||||
struct tc_block_offload *f)
|
||||
{
|
||||
if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
switch (f->command) {
|
||||
case TC_BLOCK_BIND:
|
||||
return tcf_block_cb_register(f->block, stmmac_setup_tc_block_cb,
|
||||
priv, priv, f->extack);
|
||||
case TC_BLOCK_UNBIND:
|
||||
tcf_block_cb_unregister(f->block, stmmac_setup_tc_block_cb, priv);
|
||||
return 0;
|
||||
default:
|
||||
return -EOPNOTSUPP;
|
||||
}
|
||||
}
|
||||
static LIST_HEAD(stmmac_block_cb_list);
|
||||
|
||||
static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
|
||||
void *type_data)
|
||||
@ -3794,7 +3778,10 @@ static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
|
||||
|
||||
switch (type) {
|
||||
case TC_SETUP_BLOCK:
|
||||
return stmmac_setup_tc_block(priv, type_data);
|
||||
return flow_block_cb_setup_simple(type_data,
|
||||
&stmmac_block_cb_list,
|
||||
stmmac_setup_tc_block_cb,
|
||||
priv, priv, true);
|
||||
case TC_SETUP_QDISC_CBS:
|
||||
return stmmac_tc_setup_cbs(priv, priv, type_data);
|
||||
default:
|
||||
|
@ -78,26 +78,6 @@ nsim_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
|
||||
return nsim_bpf_setup_tc_block_cb(type, type_data, cb_priv);
|
||||
}
|
||||
|
||||
static int
|
||||
nsim_setup_tc_block(struct net_device *dev, struct tc_block_offload *f)
|
||||
{
|
||||
struct netdevsim *ns = netdev_priv(dev);
|
||||
|
||||
if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
switch (f->command) {
|
||||
case TC_BLOCK_BIND:
|
||||
return tcf_block_cb_register(f->block, nsim_setup_tc_block_cb,
|
||||
ns, ns, f->extack);
|
||||
case TC_BLOCK_UNBIND:
|
||||
tcf_block_cb_unregister(f->block, nsim_setup_tc_block_cb, ns);
|
||||
return 0;
|
||||
default:
|
||||
return -EOPNOTSUPP;
|
||||
}
|
||||
}
|
||||
|
||||
static int nsim_set_vf_mac(struct net_device *dev, int vf, u8 *mac)
|
||||
{
|
||||
struct netdevsim *ns = netdev_priv(dev);
|
||||
@ -223,12 +203,19 @@ static int nsim_set_vf_link_state(struct net_device *dev, int vf, int state)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static LIST_HEAD(nsim_block_cb_list);
|
||||
|
||||
static int
|
||||
nsim_setup_tc(struct net_device *dev, enum tc_setup_type type, void *type_data)
|
||||
{
|
||||
struct netdevsim *ns = netdev_priv(dev);
|
||||
|
||||
switch (type) {
|
||||
case TC_SETUP_BLOCK:
|
||||
return nsim_setup_tc_block(dev, type_data);
|
||||
return flow_block_cb_setup_simple(type_data,
|
||||
&nsim_block_cb_list,
|
||||
nsim_setup_tc_block_cb,
|
||||
ns, ns, true);
|
||||
default:
|
||||
return -EOPNOTSUPP;
|
||||
}
|
||||
|
@@ -3,6 +3,7 @@

#include <linux/kernel.h>
#include <net/flow_dissector.h>
#include <net/sch_generic.h>

struct flow_match {
struct flow_dissector *dissector;
@@ -237,4 +238,99 @@ static inline void flow_stats_update(struct flow_stats *flow_stats,
flow_stats->lastused = max_t(u64, flow_stats->lastused, lastused);
}

enum flow_block_command {
FLOW_BLOCK_BIND,
FLOW_BLOCK_UNBIND,
};

enum flow_block_binder_type {
FLOW_BLOCK_BINDER_TYPE_UNSPEC,
FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS,
FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS,
};

struct netlink_ext_ack;

struct flow_block_offload {
enum flow_block_command command;
enum flow_block_binder_type binder_type;
bool block_shared;
struct net *net;
struct list_head cb_list;
struct list_head *driver_block_list;
struct netlink_ext_ack *extack;
};

struct flow_block_cb {
struct list_head driver_list;
struct list_head list;
struct net *net;
tc_setup_cb_t *cb;
void *cb_ident;
void *cb_priv;
void (*release)(void *cb_priv);
unsigned int refcnt;
};

struct flow_block_cb *flow_block_cb_alloc(struct net *net, tc_setup_cb_t *cb,
void *cb_ident, void *cb_priv,
void (*release)(void *cb_priv));
void flow_block_cb_free(struct flow_block_cb *block_cb);

struct flow_block_cb *flow_block_cb_lookup(struct flow_block_offload *offload,
tc_setup_cb_t *cb, void *cb_ident);

void *flow_block_cb_priv(struct flow_block_cb *block_cb);
void flow_block_cb_incref(struct flow_block_cb *block_cb);
unsigned int flow_block_cb_decref(struct flow_block_cb *block_cb);

static inline void flow_block_cb_add(struct flow_block_cb *block_cb,
struct flow_block_offload *offload)
{
list_add_tail(&block_cb->list, &offload->cb_list);
}

static inline void flow_block_cb_remove(struct flow_block_cb *block_cb,
struct flow_block_offload *offload)
{
list_move(&block_cb->list, &offload->cb_list);
}

bool flow_block_cb_is_busy(tc_setup_cb_t *cb, void *cb_ident,
struct list_head *driver_block_list);

int flow_block_cb_setup_simple(struct flow_block_offload *f,
struct list_head *driver_list, tc_setup_cb_t *cb,
void *cb_ident, void *cb_priv, bool ingress_only);

enum flow_cls_command {
FLOW_CLS_REPLACE,
FLOW_CLS_DESTROY,
FLOW_CLS_STATS,
FLOW_CLS_TMPLT_CREATE,
FLOW_CLS_TMPLT_DESTROY,
};

struct flow_cls_common_offload {
u32 chain_index;
__be16 protocol;
u32 prio;
struct netlink_ext_ack *extack;
};

struct flow_cls_offload {
struct flow_cls_common_offload common;
enum flow_cls_command command;
unsigned long cookie;
struct flow_rule *rule;
struct flow_stats stats;
u32 classid;
};

static inline struct flow_rule *
flow_cls_offload_flow_rule(struct flow_cls_offload *flow_cmd)
{
return flow_cmd->rule;
}

#endif /* _NET_FLOW_OFFLOAD_H */

@@ -161,6 +161,7 @@ struct nft_ctx {
const struct nlattr * const *nla;
u32 portid;
u32 seq;
u16 flags;
u8 family;
u8 level;
bool report;
@@ -735,6 +736,9 @@ enum nft_trans_phase {
NFT_TRANS_RELEASE
};

struct nft_flow_rule;
struct nft_offload_ctx;

/**
* struct nft_expr_ops - nf_tables expression operations
*
@@ -777,6 +781,10 @@ struct nft_expr_ops {
const struct nft_data **data);
bool (*gc)(struct net *net,
const struct nft_expr *expr);
int (*offload)(struct nft_offload_ctx *ctx,
struct nft_flow_rule *flow,
const struct nft_expr *expr);
u32 offload_flags;
const struct nft_expr_type *type;
void *data;
};
@@ -859,6 +867,7 @@ static inline struct nft_userdata *nft_userdata(const struct nft_rule *rule)

enum nft_chain_flags {
NFT_BASE_CHAIN = 0x1,
NFT_CHAIN_HW_OFFLOAD = 0x2,
};

/**
@@ -942,6 +951,7 @@ struct nft_stats {
* @stats: per-cpu chain stats
* @chain: the chain
* @dev_name: device name that this base chain is attached to (if any)
* @cb_list: list of flow block callbacks (for hardware offload)
*/
struct nft_base_chain {
struct nf_hook_ops ops;
@@ -951,6 +961,7 @@ struct nft_base_chain {
struct nft_stats __percpu *stats;
struct nft_chain chain;
char dev_name[IFNAMSIZ];
struct list_head cb_list;
};

static inline struct nft_base_chain *nft_base_chain(const struct nft_chain *chain)
@@ -1322,11 +1333,14 @@ struct nft_trans {

struct nft_trans_rule {
struct nft_rule *rule;
struct nft_flow_rule *flow;
u32 rule_id;
};

#define nft_trans_rule(trans) \
(((struct nft_trans_rule *)trans->data)->rule)
#define nft_trans_flow_rule(trans) \
(((struct nft_trans_rule *)trans->data)->flow)
#define nft_trans_rule_id(trans) \
(((struct nft_trans_rule *)trans->data)->rule_id)

include/net/netfilter/nf_tables_offload.h (new file, 76 lines)
@@ -0,0 +1,76 @@
#ifndef _NET_NF_TABLES_OFFLOAD_H
#define _NET_NF_TABLES_OFFLOAD_H

#include <net/flow_offload.h>
#include <net/netfilter/nf_tables.h>

struct nft_offload_reg {
u32 key;
u32 len;
u32 base_offset;
u32 offset;
struct nft_data mask;
};

enum nft_offload_dep_type {
NFT_OFFLOAD_DEP_UNSPEC = 0,
NFT_OFFLOAD_DEP_NETWORK,
NFT_OFFLOAD_DEP_TRANSPORT,
};

struct nft_offload_ctx {
struct {
enum nft_offload_dep_type type;
__be16 l3num;
u8 protonum;
} dep;
unsigned int num_actions;
struct nft_offload_reg regs[NFT_REG32_15 + 1];
};

void nft_offload_set_dependency(struct nft_offload_ctx *ctx,
enum nft_offload_dep_type type);
void nft_offload_update_dependency(struct nft_offload_ctx *ctx,
const void *data, u32 len);

struct nft_flow_key {
struct flow_dissector_key_basic basic;
union {
struct flow_dissector_key_ipv4_addrs ipv4;
struct flow_dissector_key_ipv6_addrs ipv6;
};
struct flow_dissector_key_ports tp;
struct flow_dissector_key_ip ip;
struct flow_dissector_key_vlan vlan;
struct flow_dissector_key_eth_addrs eth_addrs;
} __aligned(BITS_PER_LONG / 8); /* Ensure that we can do comparisons as longs. */

struct nft_flow_match {
struct flow_dissector dissector;
struct nft_flow_key key;
struct nft_flow_key mask;
};

struct nft_flow_rule {
__be16 proto;
struct nft_flow_match match;
struct flow_rule *rule;
};

#define NFT_OFFLOAD_F_ACTION (1 << 0)

struct nft_rule;
struct nft_flow_rule *nft_flow_rule_create(const struct nft_rule *rule);
void nft_flow_rule_destroy(struct nft_flow_rule *flow);
int nft_flow_rule_offload_commit(struct net *net);

#define NFT_OFFLOAD_MATCH(__key, __base, __field, __len, __reg) \
(__reg)->base_offset = \
offsetof(struct nft_flow_key, __base); \
(__reg)->offset = \
offsetof(struct nft_flow_key, __base.__field); \
(__reg)->len = __len; \
(__reg)->key = __key; \
memset(&(__reg)->mask, 0xff, (__reg)->len);

#endif
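As a quick illustration of how the NFT_OFFLOAD_MATCH() macro at the end of this header is meant to be used, here is a hedged sketch of an expression ->offload() callback describing an IPv4 destination-address match. The expression (nft_foo) and the register index are hypothetical; the macro, struct nft_offload_ctx and the flow dissector key are the ones defined above.

/* Hypothetical ->offload() implementation for an expression matching the
 * IPv4 destination address; only NFT_OFFLOAD_MATCH(), nft_offload_ctx and
 * nft_offload_set_dependency() come from this header.
 */
static int nft_foo_offload(struct nft_offload_ctx *ctx,
                           struct nft_flow_rule *flow,
                           const struct nft_expr *expr)
{
        /* A real expression derives the register index from its private
         * data; 1 is just a placeholder here. */
        struct nft_offload_reg *reg = &ctx->regs[1];

        /* Point the register at nft_flow_key.ipv4.dst, record the dissector
         * key and length, and set an all-ones mask of that length. */
        NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4, dst,
                          sizeof(__be32), reg);

        nft_offload_set_dependency(ctx, NFT_OFFLOAD_DEP_NETWORK);
        return 0;
}
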
@ -26,14 +26,8 @@ struct tcf_walker {
|
||||
int register_tcf_proto_ops(struct tcf_proto_ops *ops);
|
||||
int unregister_tcf_proto_ops(struct tcf_proto_ops *ops);
|
||||
|
||||
enum tcf_block_binder_type {
|
||||
TCF_BLOCK_BINDER_TYPE_UNSPEC,
|
||||
TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS,
|
||||
TCF_BLOCK_BINDER_TYPE_CLSACT_EGRESS,
|
||||
};
|
||||
|
||||
struct tcf_block_ext_info {
|
||||
enum tcf_block_binder_type binder_type;
|
||||
enum flow_block_binder_type binder_type;
|
||||
tcf_chain_head_change_t *chain_head_change;
|
||||
void *chain_head_change_priv;
|
||||
u32 block_index;
|
||||
@ -72,22 +66,6 @@ static inline struct Qdisc *tcf_block_q(struct tcf_block *block)
|
||||
return block->q;
|
||||
}
|
||||
|
||||
void *tcf_block_cb_priv(struct tcf_block_cb *block_cb);
|
||||
struct tcf_block_cb *tcf_block_cb_lookup(struct tcf_block *block,
|
||||
tc_setup_cb_t *cb, void *cb_ident);
|
||||
void tcf_block_cb_incref(struct tcf_block_cb *block_cb);
|
||||
unsigned int tcf_block_cb_decref(struct tcf_block_cb *block_cb);
|
||||
struct tcf_block_cb *__tcf_block_cb_register(struct tcf_block *block,
|
||||
tc_setup_cb_t *cb, void *cb_ident,
|
||||
void *cb_priv,
|
||||
struct netlink_ext_ack *extack);
|
||||
int tcf_block_cb_register(struct tcf_block *block,
|
||||
tc_setup_cb_t *cb, void *cb_ident,
|
||||
void *cb_priv, struct netlink_ext_ack *extack);
|
||||
void __tcf_block_cb_unregister(struct tcf_block *block,
|
||||
struct tcf_block_cb *block_cb);
|
||||
void tcf_block_cb_unregister(struct tcf_block *block,
|
||||
tc_setup_cb_t *cb, void *cb_ident);
|
||||
int __tc_indr_block_cb_register(struct net_device *dev, void *cb_priv,
|
||||
tc_indr_block_bind_cb_t *cb, void *cb_ident);
|
||||
int tc_indr_block_cb_register(struct net_device *dev, void *cb_priv,
|
||||
@ -150,59 +128,6 @@ void tc_setup_cb_block_unregister(struct tcf_block *block, tc_setup_cb_t *cb,
|
||||
{
|
||||
}
|
||||
|
||||
static inline
|
||||
void *tcf_block_cb_priv(struct tcf_block_cb *block_cb)
|
||||
{
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static inline
|
||||
struct tcf_block_cb *tcf_block_cb_lookup(struct tcf_block *block,
|
||||
tc_setup_cb_t *cb, void *cb_ident)
|
||||
{
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static inline
|
||||
void tcf_block_cb_incref(struct tcf_block_cb *block_cb)
|
||||
{
|
||||
}
|
||||
|
||||
static inline
|
||||
unsigned int tcf_block_cb_decref(struct tcf_block_cb *block_cb)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline
|
||||
struct tcf_block_cb *__tcf_block_cb_register(struct tcf_block *block,
|
||||
tc_setup_cb_t *cb, void *cb_ident,
|
||||
void *cb_priv,
|
||||
struct netlink_ext_ack *extack)
|
||||
{
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static inline
|
||||
int tcf_block_cb_register(struct tcf_block *block,
|
||||
tc_setup_cb_t *cb, void *cb_ident,
|
||||
void *cb_priv, struct netlink_ext_ack *extack)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline
|
||||
void __tcf_block_cb_unregister(struct tcf_block *block,
|
||||
struct tcf_block_cb *block_cb)
|
||||
{
|
||||
}
|
||||
|
||||
static inline
|
||||
void tcf_block_cb_unregister(struct tcf_block *block,
|
||||
tc_setup_cb_t *cb, void *cb_ident)
|
||||
{
|
||||
}
|
||||
|
||||
static inline
|
||||
int __tc_indr_block_cb_register(struct net_device *dev, void *cb_priv,
|
||||
tc_indr_block_bind_cb_t *cb, void *cb_ident)
|
||||
@ -610,25 +535,6 @@ int tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type,
|
||||
void *type_data, bool err_stop);
|
||||
unsigned int tcf_exts_num_actions(struct tcf_exts *exts);
|
||||
|
||||
enum tc_block_command {
|
||||
TC_BLOCK_BIND,
|
||||
TC_BLOCK_UNBIND,
|
||||
};
|
||||
|
||||
struct tc_block_offload {
|
||||
enum tc_block_command command;
|
||||
enum tcf_block_binder_type binder_type;
|
||||
struct tcf_block *block;
|
||||
struct netlink_ext_ack *extack;
|
||||
};
|
||||
|
||||
struct tc_cls_common_offload {
|
||||
u32 chain_index;
|
||||
__be16 protocol;
|
||||
u32 prio;
|
||||
struct netlink_ext_ack *extack;
|
||||
};
|
||||
|
||||
struct tc_cls_u32_knode {
|
||||
struct tcf_exts *exts;
|
||||
struct tcf_result *res;
|
||||
@ -656,7 +562,7 @@ enum tc_clsu32_command {
|
||||
};
|
||||
|
||||
struct tc_cls_u32_offload {
|
||||
struct tc_cls_common_offload common;
|
||||
struct flow_cls_common_offload common;
|
||||
/* knode values */
|
||||
enum tc_clsu32_command command;
|
||||
union {
|
||||
@ -683,7 +589,7 @@ static inline bool tc_can_offload_extack(const struct net_device *dev,
|
||||
|
||||
static inline bool
|
||||
tc_cls_can_offload_and_chain0(const struct net_device *dev,
|
||||
struct tc_cls_common_offload *common)
|
||||
struct flow_cls_common_offload *common)
|
||||
{
|
||||
if (!tc_can_offload_extack(dev, common->extack))
|
||||
return false;
|
||||
@ -725,7 +631,7 @@ static inline bool tc_in_hw(u32 flags)
|
||||
}
|
||||
|
||||
static inline void
|
||||
tc_cls_common_offload_init(struct tc_cls_common_offload *cls_common,
|
||||
tc_cls_common_offload_init(struct flow_cls_common_offload *cls_common,
|
||||
const struct tcf_proto *tp, u32 flags,
|
||||
struct netlink_ext_ack *extack)
|
||||
{
|
||||
@ -736,29 +642,6 @@ tc_cls_common_offload_init(struct tc_cls_common_offload *cls_common,
|
||||
cls_common->extack = extack;
|
||||
}
|
||||
|
||||
enum tc_fl_command {
|
||||
TC_CLSFLOWER_REPLACE,
|
||||
TC_CLSFLOWER_DESTROY,
|
||||
TC_CLSFLOWER_STATS,
|
||||
TC_CLSFLOWER_TMPLT_CREATE,
|
||||
TC_CLSFLOWER_TMPLT_DESTROY,
|
||||
};
|
||||
|
||||
struct tc_cls_flower_offload {
|
||||
struct tc_cls_common_offload common;
|
||||
enum tc_fl_command command;
|
||||
unsigned long cookie;
|
||||
struct flow_rule *rule;
|
||||
struct flow_stats stats;
|
||||
u32 classid;
|
||||
};
|
||||
|
||||
static inline struct flow_rule *
|
||||
tc_cls_flower_offload_flow_rule(struct tc_cls_flower_offload *tc_flow_cmd)
|
||||
{
|
||||
return tc_flow_cmd->rule;
|
||||
}
|
||||
|
||||
enum tc_matchall_command {
|
||||
TC_CLSMATCHALL_REPLACE,
|
||||
TC_CLSMATCHALL_DESTROY,
|
||||
@ -766,7 +649,7 @@ enum tc_matchall_command {
|
||||
};
|
||||
|
||||
struct tc_cls_matchall_offload {
|
||||
struct tc_cls_common_offload common;
|
||||
struct flow_cls_common_offload common;
|
||||
enum tc_matchall_command command;
|
||||
struct flow_rule *rule;
|
||||
struct flow_stats stats;
|
||||
@ -779,7 +662,7 @@ enum tc_clsbpf_command {
|
||||
};
|
||||
|
||||
struct tc_cls_bpf_offload {
|
||||
struct tc_cls_common_offload common;
|
||||
struct flow_cls_common_offload common;
|
||||
enum tc_clsbpf_command command;
|
||||
struct tcf_exts *exts;
|
||||
struct bpf_prog *prog;
|
||||
|
@ -192,6 +192,7 @@ enum nft_table_attributes {
|
||||
* @NFTA_CHAIN_USE: number of references to this chain (NLA_U32)
|
||||
* @NFTA_CHAIN_TYPE: type name of the string (NLA_NUL_STRING)
|
||||
* @NFTA_CHAIN_COUNTERS: counter specification of the chain (NLA_NESTED: nft_counter_attributes)
|
||||
* @NFTA_CHAIN_FLAGS: chain flags
|
||||
*/
|
||||
enum nft_chain_attributes {
|
||||
NFTA_CHAIN_UNSPEC,
|
||||
@ -204,6 +205,7 @@ enum nft_chain_attributes {
|
||||
NFTA_CHAIN_TYPE,
|
||||
NFTA_CHAIN_COUNTERS,
|
||||
NFTA_CHAIN_PAD,
|
||||
NFTA_CHAIN_FLAGS,
|
||||
__NFTA_CHAIN_MAX
|
||||
};
|
||||
#define NFTA_CHAIN_MAX (__NFTA_CHAIN_MAX - 1)
|
||||
|
@ -164,3 +164,121 @@ void flow_rule_match_enc_opts(const struct flow_rule *rule,
|
||||
FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_OPTS, out);
|
||||
}
|
||||
EXPORT_SYMBOL(flow_rule_match_enc_opts);
|
||||
|
||||
struct flow_block_cb *flow_block_cb_alloc(struct net *net, tc_setup_cb_t *cb,
|
||||
void *cb_ident, void *cb_priv,
|
||||
void (*release)(void *cb_priv))
|
||||
{
|
||||
struct flow_block_cb *block_cb;
|
||||
|
||||
block_cb = kzalloc(sizeof(*block_cb), GFP_KERNEL);
|
||||
if (!block_cb)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
|
||||
block_cb->net = net;
|
||||
block_cb->cb = cb;
|
||||
block_cb->cb_ident = cb_ident;
|
||||
block_cb->cb_priv = cb_priv;
|
||||
block_cb->release = release;
|
||||
|
||||
return block_cb;
|
||||
}
|
||||
EXPORT_SYMBOL(flow_block_cb_alloc);
|
||||
|
||||
void flow_block_cb_free(struct flow_block_cb *block_cb)
|
||||
{
|
||||
if (block_cb->release)
|
||||
block_cb->release(block_cb->cb_priv);
|
||||
|
||||
kfree(block_cb);
|
||||
}
|
||||
EXPORT_SYMBOL(flow_block_cb_free);
|
||||
|
||||
struct flow_block_cb *flow_block_cb_lookup(struct flow_block_offload *f,
|
||||
tc_setup_cb_t *cb, void *cb_ident)
|
||||
{
|
||||
struct flow_block_cb *block_cb;
|
||||
|
||||
list_for_each_entry(block_cb, f->driver_block_list, driver_list) {
|
||||
if (block_cb->net == f->net &&
|
||||
block_cb->cb == cb &&
|
||||
block_cb->cb_ident == cb_ident)
|
||||
return block_cb;
|
||||
}
|
||||
|
||||
return NULL;
|
||||
}
|
||||
EXPORT_SYMBOL(flow_block_cb_lookup);
|
||||
|
||||
void *flow_block_cb_priv(struct flow_block_cb *block_cb)
|
||||
{
|
||||
return block_cb->cb_priv;
|
||||
}
|
||||
EXPORT_SYMBOL(flow_block_cb_priv);
|
||||
|
||||
void flow_block_cb_incref(struct flow_block_cb *block_cb)
|
||||
{
|
||||
block_cb->refcnt++;
|
||||
}
|
||||
EXPORT_SYMBOL(flow_block_cb_incref);
|
||||
|
||||
unsigned int flow_block_cb_decref(struct flow_block_cb *block_cb)
|
||||
{
|
||||
return --block_cb->refcnt;
|
||||
}
|
||||
EXPORT_SYMBOL(flow_block_cb_decref);
|
||||
|
||||
bool flow_block_cb_is_busy(tc_setup_cb_t *cb, void *cb_ident,
|
||||
struct list_head *driver_block_list)
|
||||
{
|
||||
struct flow_block_cb *block_cb;
|
||||
|
||||
list_for_each_entry(block_cb, driver_block_list, driver_list) {
|
||||
if (block_cb->cb == cb &&
|
||||
block_cb->cb_ident == cb_ident)
|
||||
return true;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
EXPORT_SYMBOL(flow_block_cb_is_busy);
|
||||
|
||||
int flow_block_cb_setup_simple(struct flow_block_offload *f,
			       struct list_head *driver_block_list,
			       tc_setup_cb_t *cb, void *cb_ident, void *cb_priv,
			       bool ingress_only)
{
	struct flow_block_cb *block_cb;

	if (ingress_only &&
	    f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
		return -EOPNOTSUPP;

	f->driver_block_list = driver_block_list;

	switch (f->command) {
	case FLOW_BLOCK_BIND:
		if (flow_block_cb_is_busy(cb, cb_ident, driver_block_list))
			return -EBUSY;

		block_cb = flow_block_cb_alloc(f->net, cb, cb_ident,
					       cb_priv, NULL);
		if (IS_ERR(block_cb))
			return PTR_ERR(block_cb);

		flow_block_cb_add(block_cb, f);
		list_add_tail(&block_cb->driver_list, driver_block_list);
		return 0;
	case FLOW_BLOCK_UNBIND:
		block_cb = flow_block_cb_lookup(f, cb, cb_ident);
		if (!block_cb)
			return -ENOENT;

		flow_block_cb_remove(block_cb, f);
		list_del(&block_cb->driver_list);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}
EXPORT_SYMBOL(flow_block_cb_setup_simple);
|
||||
|
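flow_block_cb_setup_simple() is the whole driver-side boilerplate for the common single-subsystem case. A minimal sketch of how a driver could wire it up from ->ndo_setup_tc() follows; the foo_* names and the foo_flower_offload() handler are hypothetical, not part of this series:

/* Hypothetical driver glue: dispatch TC_SETUP_BLOCK to the consolidated
 * helper; classifier commands then arrive in foo_setup_tc_block_cb().
 */
static LIST_HEAD(foo_block_cb_list);

static int foo_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
				 void *cb_priv)
{
	struct foo_priv *priv = cb_priv;	/* driver-private state (assumed) */

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		return foo_flower_offload(priv, type_data);
	default:
		return -EOPNOTSUPP;
	}
}

static int foo_setup_tc(struct net_device *dev, enum tc_setup_type type,
			void *type_data)
{
	switch (type) {
	case TC_SETUP_BLOCK:
		return flow_block_cb_setup_simple(type_data,
						  &foo_block_cb_list,
						  foo_setup_tc_block_cb,
						  netdev_priv(dev),	/* cb_ident */
						  netdev_priv(dev),	/* cb_priv */
						  true);		/* ingress only */
	default:
		return -EOPNOTSUPP;
	}
}

Both a TC ingress block and, with this series, a netfilter ingress basechain flagged for offload end up invoking foo_setup_tc_block_cb() through the flow_block_cb registered here.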
@ -942,23 +942,42 @@ static int dsa_slave_setup_tc_block_cb_eg(enum tc_setup_type type,
|
||||
return dsa_slave_setup_tc_block_cb(type, type_data, cb_priv, false);
|
||||
}
|
||||
|
||||
static LIST_HEAD(dsa_slave_block_cb_list);
|
||||
|
||||
static int dsa_slave_setup_tc_block(struct net_device *dev,
|
||||
struct tc_block_offload *f)
|
||||
struct flow_block_offload *f)
|
||||
{
|
||||
struct flow_block_cb *block_cb;
|
||||
tc_setup_cb_t *cb;
|
||||
|
||||
if (f->binder_type == TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
|
||||
if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
|
||||
cb = dsa_slave_setup_tc_block_cb_ig;
|
||||
else if (f->binder_type == TCF_BLOCK_BINDER_TYPE_CLSACT_EGRESS)
|
||||
else if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS)
|
||||
cb = dsa_slave_setup_tc_block_cb_eg;
|
||||
else
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
f->driver_block_list = &dsa_slave_block_cb_list;
|
||||
|
||||
switch (f->command) {
|
||||
case TC_BLOCK_BIND:
|
||||
return tcf_block_cb_register(f->block, cb, dev, dev, f->extack);
|
||||
case TC_BLOCK_UNBIND:
|
||||
tcf_block_cb_unregister(f->block, cb, dev);
|
||||
case FLOW_BLOCK_BIND:
|
||||
if (flow_block_cb_is_busy(cb, dev, &dsa_slave_block_cb_list))
|
||||
return -EBUSY;
|
||||
|
||||
block_cb = flow_block_cb_alloc(f->net, cb, dev, dev, NULL);
|
||||
if (IS_ERR(block_cb))
|
||||
return PTR_ERR(block_cb);
|
||||
|
||||
flow_block_cb_add(block_cb, f);
|
||||
list_add_tail(&block_cb->driver_list, &dsa_slave_block_cb_list);
|
||||
return 0;
|
||||
case FLOW_BLOCK_UNBIND:
|
||||
block_cb = flow_block_cb_lookup(f, cb, dev);
|
||||
if (!block_cb)
|
||||
return -ENOENT;
|
||||
|
||||
flow_block_cb_remove(block_cb, f);
|
||||
list_del(&block_cb->driver_list);
|
||||
return 0;
|
||||
default:
|
||||
return -EOPNOTSUPP;
|
||||
|
@ -78,7 +78,7 @@ nf_tables-objs := nf_tables_core.o nf_tables_api.o nft_chain_filter.o \
|
||||
nf_tables_trace.o nft_immediate.o nft_cmp.o nft_range.o \
|
||||
nft_bitwise.o nft_byteorder.o nft_payload.o nft_lookup.o \
|
||||
nft_dynset.o nft_meta.o nft_rt.o nft_exthdr.o \
|
||||
nft_chain_route.o
|
||||
nft_chain_route.o nf_tables_offload.o
|
||||
|
||||
nf_tables_set-objs := nf_tables_set_core.o \
|
||||
nft_set_hash.o nft_set_bitmap.o nft_set_rbtree.o
|
||||
|
@ -18,6 +18,7 @@
|
||||
#include <net/netfilter/nf_flow_table.h>
|
||||
#include <net/netfilter/nf_tables_core.h>
|
||||
#include <net/netfilter/nf_tables.h>
|
||||
#include <net/netfilter/nf_tables_offload.h>
|
||||
#include <net/net_namespace.h>
|
||||
#include <net/sock.h>
|
||||
|
||||
@ -97,6 +98,7 @@ static void nft_ctx_init(struct nft_ctx *ctx,
|
||||
ctx->nla = nla;
|
||||
ctx->portid = NETLINK_CB(skb).portid;
|
||||
ctx->report = nlmsg_report(nlh);
|
||||
ctx->flags = nlh->nlmsg_flags;
|
||||
ctx->seq = nlh->nlmsg_seq;
|
||||
}
|
||||
|
||||
@ -1169,6 +1171,7 @@ static const struct nla_policy nft_chain_policy[NFTA_CHAIN_MAX + 1] = {
|
||||
[NFTA_CHAIN_POLICY] = { .type = NLA_U32 },
|
||||
[NFTA_CHAIN_TYPE] = { .type = NLA_STRING },
|
||||
[NFTA_CHAIN_COUNTERS] = { .type = NLA_NESTED },
|
||||
[NFTA_CHAIN_FLAGS] = { .type = NLA_U32 },
|
||||
};
|
||||
|
||||
static const struct nla_policy nft_hook_policy[NFTA_HOOK_MAX + 1] = {
|
||||
@ -1603,7 +1606,7 @@ static struct nft_rule **nf_tables_chain_alloc_rules(const struct nft_chain *cha
|
||||
}
|
||||
|
||||
static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask,
|
||||
u8 policy)
|
||||
u8 policy, u32 flags)
|
||||
{
|
||||
const struct nlattr * const *nla = ctx->nla;
|
||||
struct nft_table *table = ctx->table;
|
||||
@ -1657,8 +1660,9 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask,
|
||||
ops->hook = hook.type->hooks[ops->hooknum];
|
||||
ops->dev = hook.dev;
|
||||
|
||||
chain->flags |= NFT_BASE_CHAIN;
|
||||
chain->flags |= NFT_BASE_CHAIN | flags;
|
||||
basechain->policy = NF_ACCEPT;
|
||||
INIT_LIST_HEAD(&basechain->cb_list);
|
||||
} else {
|
||||
chain = kzalloc(sizeof(*chain), GFP_KERNEL);
|
||||
if (chain == NULL)
|
||||
@ -1718,7 +1722,8 @@ err1:
|
||||
return err;
|
||||
}
|
||||
|
||||
static int nf_tables_updchain(struct nft_ctx *ctx, u8 genmask, u8 policy)
|
||||
static int nf_tables_updchain(struct nft_ctx *ctx, u8 genmask, u8 policy,
|
||||
u32 flags)
|
||||
{
|
||||
const struct nlattr * const *nla = ctx->nla;
|
||||
struct nft_table *table = ctx->table;
|
||||
@ -1730,6 +1735,9 @@ static int nf_tables_updchain(struct nft_ctx *ctx, u8 genmask, u8 policy)
|
||||
struct nft_trans *trans;
|
||||
int err;
|
||||
|
||||
if (chain->flags ^ flags)
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
if (nla[NFTA_CHAIN_HOOK]) {
|
||||
if (!nft_is_base_chain(chain))
|
||||
return -EBUSY;
|
||||
@ -1835,6 +1843,7 @@ static int nf_tables_newchain(struct net *net, struct sock *nlsk,
|
||||
u8 policy = NF_ACCEPT;
|
||||
struct nft_ctx ctx;
|
||||
u64 handle = 0;
|
||||
u32 flags = 0;
|
||||
|
||||
lockdep_assert_held(&net->nft.commit_mutex);
|
||||
|
||||
@ -1889,6 +1898,9 @@ static int nf_tables_newchain(struct net *net, struct sock *nlsk,
|
||||
}
|
||||
}
|
||||
|
||||
if (nla[NFTA_CHAIN_FLAGS])
|
||||
flags = ntohl(nla_get_be32(nla[NFTA_CHAIN_FLAGS]));
|
||||
|
||||
nft_ctx_init(&ctx, net, skb, nlh, family, table, chain, nla);
|
||||
|
||||
if (chain != NULL) {
|
||||
@ -1899,10 +1911,10 @@ static int nf_tables_newchain(struct net *net, struct sock *nlsk,
|
||||
if (nlh->nlmsg_flags & NLM_F_REPLACE)
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
return nf_tables_updchain(&ctx, genmask, policy);
|
||||
return nf_tables_updchain(&ctx, genmask, policy, flags);
|
||||
}
|
||||
|
||||
return nf_tables_addchain(&ctx, family, genmask, policy);
|
||||
return nf_tables_addchain(&ctx, family, genmask, policy, flags);
|
||||
}
|
||||
|
||||
static int nf_tables_delchain(struct net *net, struct sock *nlsk,
|
||||
@ -2658,6 +2670,7 @@ static int nf_tables_newrule(struct net *net, struct sock *nlsk,
|
||||
u8 genmask = nft_genmask_next(net);
|
||||
struct nft_expr_info *info = NULL;
|
||||
int family = nfmsg->nfgen_family;
|
||||
struct nft_flow_rule *flow;
|
||||
struct nft_table *table;
|
||||
struct nft_chain *chain;
|
||||
struct nft_rule *rule, *old_rule = NULL;
|
||||
@ -2804,7 +2817,8 @@ static int nf_tables_newrule(struct net *net, struct sock *nlsk,
|
||||
|
||||
list_add_tail_rcu(&rule->list, &old_rule->list);
|
||||
} else {
|
||||
if (nft_trans_rule_add(&ctx, NFT_MSG_NEWRULE, rule) == NULL) {
|
||||
trans = nft_trans_rule_add(&ctx, NFT_MSG_NEWRULE, rule);
|
||||
if (!trans) {
|
||||
err = -ENOMEM;
|
||||
goto err2;
|
||||
}
|
||||
@ -2827,6 +2841,14 @@ static int nf_tables_newrule(struct net *net, struct sock *nlsk,
|
||||
if (net->nft.validate_state == NFT_VALIDATE_DO)
|
||||
return nft_table_validate(net, table);
|
||||
|
||||
if (chain->flags & NFT_CHAIN_HW_OFFLOAD) {
|
||||
flow = nft_flow_rule_create(rule);
|
||||
if (IS_ERR(flow))
|
||||
return PTR_ERR(flow);
|
||||
|
||||
nft_trans_flow_rule(trans) = flow;
|
||||
}
|
||||
|
||||
return 0;
|
||||
err2:
|
||||
nf_tables_rule_release(&ctx, rule);
|
||||
@ -6624,6 +6646,7 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
|
||||
struct nft_trans_elem *te;
|
||||
struct nft_chain *chain;
|
||||
struct nft_table *table;
|
||||
int err;
|
||||
|
||||
if (list_empty(&net->nft.commit_list)) {
|
||||
mutex_unlock(&net->nft.commit_mutex);
|
||||
@ -6634,6 +6657,10 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
|
||||
if (nf_tables_validate(net) < 0)
|
||||
return -EAGAIN;
|
||||
|
||||
err = nft_flow_rule_offload_commit(net);
|
||||
if (err < 0)
|
||||
return err;
|
||||
|
||||
/* 1. Allocate space for next generation rules_gen_X[] */
|
||||
list_for_each_entry_safe(trans, next, &net->nft.commit_list, list) {
|
||||
int ret;
|
||||
|
net/netfilter/nf_tables_offload.c (new file, 267 lines)
@ -0,0 +1,267 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0 */
|
||||
#include <linux/init.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/netfilter.h>
|
||||
#include <net/flow_offload.h>
|
||||
#include <net/netfilter/nf_tables.h>
|
||||
#include <net/netfilter/nf_tables_offload.h>
|
||||
#include <net/pkt_cls.h>
|
||||
|
||||
static struct nft_flow_rule *nft_flow_rule_alloc(int num_actions)
|
||||
{
|
||||
struct nft_flow_rule *flow;
|
||||
|
||||
flow = kzalloc(sizeof(struct nft_flow_rule), GFP_KERNEL);
|
||||
if (!flow)
|
||||
return NULL;
|
||||
|
||||
flow->rule = flow_rule_alloc(num_actions);
|
||||
if (!flow->rule) {
|
||||
kfree(flow);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
flow->rule->match.dissector = &flow->match.dissector;
|
||||
flow->rule->match.mask = &flow->match.mask;
|
||||
flow->rule->match.key = &flow->match.key;
|
||||
|
||||
return flow;
|
||||
}
|
||||
|
||||
struct nft_flow_rule *nft_flow_rule_create(const struct nft_rule *rule)
|
||||
{
|
||||
struct nft_offload_ctx ctx = {
|
||||
.dep = {
|
||||
.type = NFT_OFFLOAD_DEP_UNSPEC,
|
||||
},
|
||||
};
|
||||
struct nft_flow_rule *flow;
|
||||
int num_actions = 0, err;
|
||||
struct nft_expr *expr;
|
||||
|
||||
expr = nft_expr_first(rule);
|
||||
while (expr->ops && expr != nft_expr_last(rule)) {
|
||||
if (expr->ops->offload_flags & NFT_OFFLOAD_F_ACTION)
|
||||
num_actions++;
|
||||
|
||||
expr = nft_expr_next(expr);
|
||||
}
|
||||
|
||||
flow = nft_flow_rule_alloc(num_actions);
|
||||
if (!flow)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
|
||||
expr = nft_expr_first(rule);
|
||||
while (expr->ops && expr != nft_expr_last(rule)) {
|
||||
if (!expr->ops->offload) {
|
||||
err = -EOPNOTSUPP;
|
||||
goto err_out;
|
||||
}
|
||||
err = expr->ops->offload(&ctx, flow, expr);
|
||||
if (err < 0)
|
||||
goto err_out;
|
||||
|
||||
expr = nft_expr_next(expr);
|
||||
}
|
||||
flow->proto = ctx.dep.l3num;
|
||||
|
||||
return flow;
|
||||
err_out:
|
||||
nft_flow_rule_destroy(flow);
|
||||
|
||||
return ERR_PTR(err);
|
||||
}
|
||||
|
||||
void nft_flow_rule_destroy(struct nft_flow_rule *flow)
|
||||
{
|
||||
kfree(flow->rule);
|
||||
kfree(flow);
|
||||
}
|
||||
|
||||
void nft_offload_set_dependency(struct nft_offload_ctx *ctx,
|
||||
enum nft_offload_dep_type type)
|
||||
{
|
||||
ctx->dep.type = type;
|
||||
}
|
||||
|
||||
void nft_offload_update_dependency(struct nft_offload_ctx *ctx,
|
||||
const void *data, u32 len)
|
||||
{
|
||||
switch (ctx->dep.type) {
|
||||
case NFT_OFFLOAD_DEP_NETWORK:
|
||||
WARN_ON(len != sizeof(__u16));
|
||||
memcpy(&ctx->dep.l3num, data, sizeof(__u16));
|
||||
break;
|
||||
case NFT_OFFLOAD_DEP_TRANSPORT:
|
||||
WARN_ON(len != sizeof(__u8));
|
||||
memcpy(&ctx->dep.protonum, data, sizeof(__u8));
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
ctx->dep.type = NFT_OFFLOAD_DEP_UNSPEC;
|
||||
}
|
||||
|
||||
static void nft_flow_offload_common_init(struct flow_cls_common_offload *common,
|
||||
__be16 proto,
|
||||
struct netlink_ext_ack *extack)
|
||||
{
|
||||
common->protocol = proto;
|
||||
common->extack = extack;
|
||||
}
|
||||
|
||||
static int nft_setup_cb_call(struct nft_base_chain *basechain,
|
||||
enum tc_setup_type type, void *type_data)
|
||||
{
|
||||
struct flow_block_cb *block_cb;
|
||||
int err;
|
||||
|
||||
list_for_each_entry(block_cb, &basechain->cb_list, list) {
|
||||
err = block_cb->cb(type, type_data, block_cb->cb_priv);
|
||||
if (err < 0)
|
||||
return err;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int nft_flow_offload_rule(struct nft_trans *trans,
|
||||
enum flow_cls_command command)
|
||||
{
|
||||
struct nft_flow_rule *flow = nft_trans_flow_rule(trans);
|
||||
struct nft_rule *rule = nft_trans_rule(trans);
|
||||
struct flow_cls_offload cls_flow = {};
|
||||
struct nft_base_chain *basechain;
|
||||
struct netlink_ext_ack extack;
|
||||
__be16 proto = ETH_P_ALL;
|
||||
|
||||
if (!nft_is_base_chain(trans->ctx.chain))
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
basechain = nft_base_chain(trans->ctx.chain);
|
||||
|
||||
if (flow)
|
||||
proto = flow->proto;
|
||||
|
||||
nft_flow_offload_common_init(&cls_flow.common, proto, &extack);
|
||||
cls_flow.command = command;
|
||||
cls_flow.cookie = (unsigned long) rule;
|
||||
if (flow)
|
||||
cls_flow.rule = flow->rule;
|
||||
|
||||
return nft_setup_cb_call(basechain, TC_SETUP_CLSFLOWER, &cls_flow);
|
||||
}
|
||||
|
||||
static int nft_flow_offload_bind(struct flow_block_offload *bo,
|
||||
struct nft_base_chain *basechain)
|
||||
{
|
||||
list_splice(&bo->cb_list, &basechain->cb_list);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int nft_flow_offload_unbind(struct flow_block_offload *bo,
|
||||
struct nft_base_chain *basechain)
|
||||
{
|
||||
struct flow_block_cb *block_cb, *next;
|
||||
|
||||
list_for_each_entry_safe(block_cb, next, &bo->cb_list, list) {
|
||||
list_del(&block_cb->list);
|
||||
flow_block_cb_free(block_cb);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
#define FLOW_SETUP_BLOCK TC_SETUP_BLOCK
|
||||
|
||||
static int nft_flow_offload_chain(struct nft_trans *trans,
|
||||
enum flow_block_command cmd)
|
||||
{
|
||||
struct nft_chain *chain = trans->ctx.chain;
|
||||
struct netlink_ext_ack extack = {};
|
||||
struct flow_block_offload bo = {};
|
||||
struct nft_base_chain *basechain;
|
||||
struct net_device *dev;
|
||||
int err;
|
||||
|
||||
if (!nft_is_base_chain(chain))
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
basechain = nft_base_chain(chain);
|
||||
dev = basechain->ops.dev;
|
||||
if (!dev || !dev->netdev_ops->ndo_setup_tc)
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
/* Only default policy to accept is supported for now. */
|
||||
if (cmd == FLOW_BLOCK_BIND &&
|
||||
nft_trans_chain_policy(trans) != -1 &&
|
||||
nft_trans_chain_policy(trans) != NF_ACCEPT)
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
bo.command = cmd;
|
||||
bo.binder_type = FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS;
|
||||
bo.extack = &extack;
|
||||
INIT_LIST_HEAD(&bo.cb_list);
|
||||
|
||||
err = dev->netdev_ops->ndo_setup_tc(dev, FLOW_SETUP_BLOCK, &bo);
|
||||
if (err < 0)
|
||||
return err;
|
||||
|
||||
switch (cmd) {
|
||||
case FLOW_BLOCK_BIND:
|
||||
err = nft_flow_offload_bind(&bo, basechain);
|
||||
break;
|
||||
case FLOW_BLOCK_UNBIND:
|
||||
err = nft_flow_offload_unbind(&bo, basechain);
|
||||
break;
|
||||
}
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
int nft_flow_rule_offload_commit(struct net *net)
|
||||
{
|
||||
struct nft_trans *trans;
|
||||
int err = 0;
|
||||
|
||||
list_for_each_entry(trans, &net->nft.commit_list, list) {
|
||||
if (trans->ctx.family != NFPROTO_NETDEV)
|
||||
continue;
|
||||
|
||||
switch (trans->msg_type) {
|
||||
case NFT_MSG_NEWCHAIN:
|
||||
if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD))
|
||||
continue;
|
||||
|
||||
err = nft_flow_offload_chain(trans, FLOW_BLOCK_BIND);
|
||||
break;
|
||||
case NFT_MSG_DELCHAIN:
|
||||
if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD))
|
||||
continue;
|
||||
|
||||
err = nft_flow_offload_chain(trans, FLOW_BLOCK_UNBIND);
|
||||
break;
|
||||
case NFT_MSG_NEWRULE:
|
||||
if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD))
|
||||
continue;
|
||||
|
||||
if (trans->ctx.flags & NLM_F_REPLACE ||
|
||||
!(trans->ctx.flags & NLM_F_APPEND))
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
err = nft_flow_offload_rule(trans, FLOW_CLS_REPLACE);
|
||||
nft_flow_rule_destroy(nft_trans_flow_rule(trans));
|
||||
break;
|
||||
case NFT_MSG_DELRULE:
|
||||
if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD))
|
||||
continue;
|
||||
|
||||
err = nft_flow_offload_rule(trans, FLOW_CLS_DESTROY);
|
||||
break;
|
||||
}
|
||||
|
||||
if (err)
|
||||
return err;
|
||||
}
|
||||
|
||||
return err;
|
||||
}
|
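Because nft_setup_cb_call() reuses TC_SETUP_CLSFLOWER and struct flow_cls_offload, the driver-side classifier callback needs no netfilter-specific code. A rough sketch of such a handler, with hypothetical foo_* helpers standing in for the actual hardware programming:

/* Sketch only: consumes FLOW_CLS_REPLACE/DESTROY whether the rule came from
 * cls_flower or from an offloaded netfilter ingress basechain. foo_priv,
 * foo_program_match() and foo_remove_rule() are assumed driver internals.
 */
static int foo_flower_offload(struct foo_priv *priv,
			      struct flow_cls_offload *f)
{
	struct flow_rule *rule = f->rule;
	struct flow_match_ipv4_addrs addrs;

	switch (f->command) {
	case FLOW_CLS_REPLACE:
		if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
			flow_rule_match_ipv4_addrs(rule, &addrs);
			/* addrs.key / addrs.mask carry the exact-match addresses */
		}
		/* rule->action.entries[] carries FLOW_ACTION_ACCEPT / _DROP */
		return foo_program_match(priv, rule, f->cookie);
	case FLOW_CLS_DESTROY:
		return foo_remove_rule(priv, f->cookie);
	default:
		return -EOPNOTSUPP;
	}
}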
@ -12,6 +12,7 @@
|
||||
#include <linux/netfilter.h>
|
||||
#include <linux/netfilter/nf_tables.h>
|
||||
#include <net/netfilter/nf_tables_core.h>
|
||||
#include <net/netfilter/nf_tables_offload.h>
|
||||
#include <net/netfilter/nf_tables.h>
|
||||
|
||||
struct nft_cmp_expr {
|
||||
@ -107,12 +108,44 @@ nla_put_failure:
|
||||
return -1;
|
||||
}
|
||||
|
||||
static int __nft_cmp_offload(struct nft_offload_ctx *ctx,
|
||||
struct nft_flow_rule *flow,
|
||||
const struct nft_cmp_expr *priv)
|
||||
{
|
||||
struct nft_offload_reg *reg = &ctx->regs[priv->sreg];
|
||||
u8 *mask = (u8 *)&flow->match.mask;
|
||||
u8 *key = (u8 *)&flow->match.key;
|
||||
|
||||
if (priv->op != NFT_CMP_EQ)
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
memcpy(key + reg->offset, &priv->data, priv->len);
|
||||
memcpy(mask + reg->offset, &reg->mask, priv->len);
|
||||
|
||||
flow->match.dissector.used_keys |= BIT(reg->key);
|
||||
flow->match.dissector.offset[reg->key] = reg->base_offset;
|
||||
|
||||
nft_offload_update_dependency(ctx, &priv->data, priv->len);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int nft_cmp_offload(struct nft_offload_ctx *ctx,
|
||||
struct nft_flow_rule *flow,
|
||||
const struct nft_expr *expr)
|
||||
{
|
||||
const struct nft_cmp_expr *priv = nft_expr_priv(expr);
|
||||
|
||||
return __nft_cmp_offload(ctx, flow, priv);
|
||||
}
|
||||
|
||||
static const struct nft_expr_ops nft_cmp_ops = {
|
||||
.type = &nft_cmp_type,
|
||||
.size = NFT_EXPR_SIZE(sizeof(struct nft_cmp_expr)),
|
||||
.eval = nft_cmp_eval,
|
||||
.init = nft_cmp_init,
|
||||
.dump = nft_cmp_dump,
|
||||
.offload = nft_cmp_offload,
|
||||
};
|
||||
|
||||
static int nft_cmp_fast_init(const struct nft_ctx *ctx,
|
||||
@ -143,6 +176,25 @@ static int nft_cmp_fast_init(const struct nft_ctx *ctx,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int nft_cmp_fast_offload(struct nft_offload_ctx *ctx,
|
||||
struct nft_flow_rule *flow,
|
||||
const struct nft_expr *expr)
|
||||
{
|
||||
const struct nft_cmp_fast_expr *priv = nft_expr_priv(expr);
|
||||
struct nft_cmp_expr cmp = {
|
||||
.data = {
|
||||
.data = {
|
||||
[0] = priv->data,
|
||||
},
|
||||
},
|
||||
.sreg = priv->sreg,
|
||||
.len = priv->len / BITS_PER_BYTE,
|
||||
.op = NFT_CMP_EQ,
|
||||
};
|
||||
|
||||
return __nft_cmp_offload(ctx, flow, &cmp);
|
||||
}
|
||||
|
||||
static int nft_cmp_fast_dump(struct sk_buff *skb, const struct nft_expr *expr)
|
||||
{
|
||||
const struct nft_cmp_fast_expr *priv = nft_expr_priv(expr);
|
||||
@ -169,6 +221,7 @@ const struct nft_expr_ops nft_cmp_fast_ops = {
|
||||
.eval = NULL, /* inlined */
|
||||
.init = nft_cmp_fast_init,
|
||||
.dump = nft_cmp_fast_dump,
|
||||
.offload = nft_cmp_fast_offload,
|
||||
};
|
||||
|
||||
static const struct nft_expr_ops *
|
||||
|
@ -13,6 +13,7 @@
|
||||
#include <linux/netfilter/nf_tables.h>
|
||||
#include <net/netfilter/nf_tables_core.h>
|
||||
#include <net/netfilter/nf_tables.h>
|
||||
#include <net/netfilter/nf_tables_offload.h>
|
||||
|
||||
void nft_immediate_eval(const struct nft_expr *expr,
|
||||
struct nft_regs *regs,
|
||||
@ -124,6 +125,34 @@ static int nft_immediate_validate(const struct nft_ctx *ctx,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int nft_immediate_offload(struct nft_offload_ctx *ctx,
|
||||
struct nft_flow_rule *flow,
|
||||
const struct nft_expr *expr)
|
||||
{
|
||||
const struct nft_immediate_expr *priv = nft_expr_priv(expr);
|
||||
struct flow_action_entry *entry;
|
||||
const struct nft_data *data;
|
||||
|
||||
if (priv->dreg != NFT_REG_VERDICT)
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
entry = &flow->rule->action.entries[ctx->num_actions++];
|
||||
|
||||
data = &priv->data;
|
||||
switch (data->verdict.code) {
|
||||
case NF_ACCEPT:
|
||||
entry->id = FLOW_ACTION_ACCEPT;
|
||||
break;
|
||||
case NF_DROP:
|
||||
entry->id = FLOW_ACTION_DROP;
|
||||
break;
|
||||
default:
|
||||
return -EOPNOTSUPP;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static const struct nft_expr_ops nft_imm_ops = {
|
||||
.type = &nft_imm_type,
|
||||
.size = NFT_EXPR_SIZE(sizeof(struct nft_immediate_expr)),
|
||||
@ -133,6 +162,8 @@ static const struct nft_expr_ops nft_imm_ops = {
|
||||
.deactivate = nft_immediate_deactivate,
|
||||
.dump = nft_immediate_dump,
|
||||
.validate = nft_immediate_validate,
|
||||
.offload = nft_immediate_offload,
|
||||
.offload_flags = NFT_OFFLOAD_F_ACTION,
|
||||
};
|
||||
|
||||
struct nft_expr_type nft_imm_type __read_mostly = {
|
||||
|
@ -22,6 +22,7 @@
|
||||
#include <net/netfilter/nf_tables.h>
|
||||
#include <net/netfilter/nf_tables_core.h>
|
||||
#include <net/netfilter/nft_meta.h>
|
||||
#include <net/netfilter/nf_tables_offload.h>
|
||||
|
||||
#include <uapi/linux/netfilter_bridge.h> /* NF_BR_PRE_ROUTING */
|
||||
|
||||
@ -490,6 +491,31 @@ void nft_meta_set_destroy(const struct nft_ctx *ctx,
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(nft_meta_set_destroy);
|
||||
|
||||
static int nft_meta_get_offload(struct nft_offload_ctx *ctx,
|
||||
struct nft_flow_rule *flow,
|
||||
const struct nft_expr *expr)
|
||||
{
|
||||
const struct nft_meta *priv = nft_expr_priv(expr);
|
||||
struct nft_offload_reg *reg = &ctx->regs[priv->dreg];
|
||||
|
||||
switch (priv->key) {
|
||||
case NFT_META_PROTOCOL:
|
||||
NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_BASIC, basic, n_proto,
|
||||
sizeof(__u16), reg);
|
||||
nft_offload_set_dependency(ctx, NFT_OFFLOAD_DEP_NETWORK);
|
||||
break;
|
||||
case NFT_META_L4PROTO:
|
||||
NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_BASIC, basic, ip_proto,
|
||||
sizeof(__u8), reg);
|
||||
nft_offload_set_dependency(ctx, NFT_OFFLOAD_DEP_TRANSPORT);
|
||||
break;
|
||||
default:
|
||||
return -EOPNOTSUPP;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static const struct nft_expr_ops nft_meta_get_ops = {
|
||||
.type = &nft_meta_type,
|
||||
.size = NFT_EXPR_SIZE(sizeof(struct nft_meta)),
|
||||
@ -497,6 +523,7 @@ static const struct nft_expr_ops nft_meta_get_ops = {
|
||||
.init = nft_meta_get_init,
|
||||
.dump = nft_meta_get_dump,
|
||||
.validate = nft_meta_get_validate,
|
||||
.offload = nft_meta_get_offload,
|
||||
};
|
||||
|
||||
static const struct nft_expr_ops nft_meta_set_ops = {
|
||||
|
@ -15,10 +15,13 @@
|
||||
#include <linux/netfilter/nf_tables.h>
|
||||
#include <net/netfilter/nf_tables_core.h>
|
||||
#include <net/netfilter/nf_tables.h>
|
||||
#include <net/netfilter/nf_tables_offload.h>
|
||||
/* For layer 4 checksum field offset. */
|
||||
#include <linux/tcp.h>
|
||||
#include <linux/udp.h>
|
||||
#include <linux/icmpv6.h>
|
||||
#include <linux/ip.h>
|
||||
#include <linux/ipv6.h>
|
||||
|
||||
/* add vlan header into the user buffer for if tag was removed by offloads */
|
||||
static bool
|
||||
@ -150,12 +153,195 @@ nla_put_failure:
|
||||
return -1;
|
||||
}
|
||||
|
||||
static int nft_payload_offload_ll(struct nft_offload_ctx *ctx,
|
||||
struct nft_flow_rule *flow,
|
||||
const struct nft_payload *priv)
|
||||
{
|
||||
struct nft_offload_reg *reg = &ctx->regs[priv->dreg];
|
||||
|
||||
switch (priv->offset) {
|
||||
case offsetof(struct ethhdr, h_source):
|
||||
NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_ETH_ADDRS, eth_addrs,
|
||||
src, ETH_ALEN, reg);
|
||||
break;
|
||||
case offsetof(struct ethhdr, h_dest):
|
||||
NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_ETH_ADDRS, eth_addrs,
|
||||
dst, ETH_ALEN, reg);
|
||||
break;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int nft_payload_offload_ip(struct nft_offload_ctx *ctx,
|
||||
struct nft_flow_rule *flow,
|
||||
const struct nft_payload *priv)
|
||||
{
|
||||
struct nft_offload_reg *reg = &ctx->regs[priv->dreg];
|
||||
|
||||
switch (priv->offset) {
|
||||
case offsetof(struct iphdr, saddr):
|
||||
NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4, src,
|
||||
sizeof(struct in_addr), reg);
|
||||
break;
|
||||
case offsetof(struct iphdr, daddr):
|
||||
NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4, dst,
|
||||
sizeof(struct in_addr), reg);
|
||||
break;
|
||||
case offsetof(struct iphdr, protocol):
|
||||
NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_BASIC, basic, ip_proto,
|
||||
sizeof(__u8), reg);
|
||||
nft_offload_set_dependency(ctx, NFT_OFFLOAD_DEP_TRANSPORT);
|
||||
break;
|
||||
default:
|
||||
return -EOPNOTSUPP;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int nft_payload_offload_ip6(struct nft_offload_ctx *ctx,
|
||||
struct nft_flow_rule *flow,
|
||||
const struct nft_payload *priv)
|
||||
{
|
||||
struct nft_offload_reg *reg = &ctx->regs[priv->dreg];
|
||||
|
||||
switch (priv->offset) {
|
||||
case offsetof(struct ipv6hdr, saddr):
|
||||
NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6, src,
|
||||
sizeof(struct in6_addr), reg);
|
||||
break;
|
||||
case offsetof(struct ipv6hdr, daddr):
|
||||
NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6, dst,
|
||||
sizeof(struct in6_addr), reg);
|
||||
break;
|
||||
case offsetof(struct ipv6hdr, nexthdr):
|
||||
NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_BASIC, basic, ip_proto,
|
||||
sizeof(__u8), reg);
|
||||
nft_offload_set_dependency(ctx, NFT_OFFLOAD_DEP_TRANSPORT);
|
||||
break;
|
||||
default:
|
||||
return -EOPNOTSUPP;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int nft_payload_offload_nh(struct nft_offload_ctx *ctx,
|
||||
struct nft_flow_rule *flow,
|
||||
const struct nft_payload *priv)
|
||||
{
|
||||
int err;
|
||||
|
||||
switch (ctx->dep.l3num) {
|
||||
case htons(ETH_P_IP):
|
||||
err = nft_payload_offload_ip(ctx, flow, priv);
|
||||
break;
|
||||
case htons(ETH_P_IPV6):
|
||||
err = nft_payload_offload_ip6(ctx, flow, priv);
|
||||
break;
|
||||
default:
|
||||
return -EOPNOTSUPP;
|
||||
}
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
static int nft_payload_offload_tcp(struct nft_offload_ctx *ctx,
|
||||
struct nft_flow_rule *flow,
|
||||
const struct nft_payload *priv)
|
||||
{
|
||||
struct nft_offload_reg *reg = &ctx->regs[priv->dreg];
|
||||
|
||||
switch (priv->offset) {
|
||||
case offsetof(struct tcphdr, source):
|
||||
NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_PORTS, tp, src,
|
||||
sizeof(__be16), reg);
|
||||
break;
|
||||
case offsetof(struct tcphdr, dest):
|
||||
NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_PORTS, tp, dst,
|
||||
sizeof(__be16), reg);
|
||||
break;
|
||||
default:
|
||||
return -EOPNOTSUPP;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int nft_payload_offload_udp(struct nft_offload_ctx *ctx,
|
||||
struct nft_flow_rule *flow,
|
||||
const struct nft_payload *priv)
|
||||
{
|
||||
struct nft_offload_reg *reg = &ctx->regs[priv->dreg];
|
||||
|
||||
switch (priv->offset) {
|
||||
case offsetof(struct udphdr, source):
|
||||
NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_PORTS, tp, src,
|
||||
sizeof(__be16), reg);
|
||||
break;
|
||||
case offsetof(struct udphdr, dest):
|
||||
NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_PORTS, tp, dst,
|
||||
sizeof(__be16), reg);
|
||||
break;
|
||||
default:
|
||||
return -EOPNOTSUPP;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int nft_payload_offload_th(struct nft_offload_ctx *ctx,
|
||||
struct nft_flow_rule *flow,
|
||||
const struct nft_payload *priv)
|
||||
{
|
||||
int err;
|
||||
|
||||
switch (ctx->dep.protonum) {
|
||||
case IPPROTO_TCP:
|
||||
err = nft_payload_offload_tcp(ctx, flow, priv);
|
||||
break;
|
||||
case IPPROTO_UDP:
|
||||
err = nft_payload_offload_udp(ctx, flow, priv);
|
||||
break;
|
||||
default:
|
||||
return -EOPNOTSUPP;
|
||||
}
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
static int nft_payload_offload(struct nft_offload_ctx *ctx,
|
||||
struct nft_flow_rule *flow,
|
||||
const struct nft_expr *expr)
|
||||
{
|
||||
const struct nft_payload *priv = nft_expr_priv(expr);
|
||||
int err;
|
||||
|
||||
switch (priv->base) {
|
||||
case NFT_PAYLOAD_LL_HEADER:
|
||||
err = nft_payload_offload_ll(ctx, flow, priv);
|
||||
break;
|
||||
case NFT_PAYLOAD_NETWORK_HEADER:
|
||||
err = nft_payload_offload_nh(ctx, flow, priv);
|
||||
break;
|
||||
case NFT_PAYLOAD_TRANSPORT_HEADER:
|
||||
err = nft_payload_offload_th(ctx, flow, priv);
|
||||
break;
|
||||
default:
|
||||
err = -EOPNOTSUPP;
|
||||
break;
|
||||
}
|
||||
return err;
|
||||
}
|
||||
|
||||
static const struct nft_expr_ops nft_payload_ops = {
|
||||
.type = &nft_payload_type,
|
||||
.size = NFT_EXPR_SIZE(sizeof(struct nft_payload)),
|
||||
.eval = nft_payload_eval,
|
||||
.init = nft_payload_init,
|
||||
.dump = nft_payload_dump,
|
||||
.offload = nft_payload_offload,
|
||||
};
|
||||
|
||||
const struct nft_expr_ops nft_payload_fast_ops = {
|
||||
@ -164,6 +350,7 @@ const struct nft_expr_ops nft_payload_fast_ops = {
|
||||
.eval = nft_payload_eval,
|
||||
.init = nft_payload_init,
|
||||
.dump = nft_payload_dump,
|
||||
.offload = nft_payload_offload,
|
||||
};
|
||||
|
||||
static inline void nft_csum_replace(__sum16 *sum, __wsum fsum, __wsum tsum)
|
||||
|
@ -673,21 +673,27 @@ static void tc_indr_block_cb_del(struct tc_indr_block_cb *indr_block_cb)
|
||||
kfree(indr_block_cb);
|
||||
}
|
||||
|
||||
static int tcf_block_setup(struct tcf_block *block,
|
||||
struct flow_block_offload *bo);
|
||||
|
||||
static void tc_indr_block_ing_cmd(struct tc_indr_block_dev *indr_dev,
|
||||
struct tc_indr_block_cb *indr_block_cb,
|
||||
enum tc_block_command command)
|
||||
enum flow_block_command command)
|
||||
{
|
||||
struct tc_block_offload bo = {
|
||||
struct flow_block_offload bo = {
|
||||
.command = command,
|
||||
.binder_type = TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS,
|
||||
.block = indr_dev->block,
|
||||
.binder_type = FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS,
|
||||
.net = dev_net(indr_dev->dev),
|
||||
.block_shared = tcf_block_shared(indr_dev->block),
|
||||
};
|
||||
INIT_LIST_HEAD(&bo.cb_list);
|
||||
|
||||
if (!indr_dev->block)
|
||||
return;
|
||||
|
||||
indr_block_cb->cb(indr_dev->dev, indr_block_cb->cb_priv, TC_SETUP_BLOCK,
|
||||
&bo);
|
||||
tcf_block_setup(indr_dev->block, &bo);
|
||||
}
|
||||
|
||||
int __tc_indr_block_cb_register(struct net_device *dev, void *cb_priv,
|
||||
@ -706,7 +712,7 @@ int __tc_indr_block_cb_register(struct net_device *dev, void *cb_priv,
|
||||
if (err)
|
||||
goto err_dev_put;
|
||||
|
||||
tc_indr_block_ing_cmd(indr_dev, indr_block_cb, TC_BLOCK_BIND);
|
||||
tc_indr_block_ing_cmd(indr_dev, indr_block_cb, FLOW_BLOCK_BIND);
|
||||
return 0;
|
||||
|
||||
err_dev_put:
|
||||
@ -743,7 +749,7 @@ void __tc_indr_block_cb_unregister(struct net_device *dev,
|
||||
return;
|
||||
|
||||
/* Send unbind message if required to free any block cbs. */
|
||||
tc_indr_block_ing_cmd(indr_dev, indr_block_cb, TC_BLOCK_UNBIND);
|
||||
tc_indr_block_ing_cmd(indr_dev, indr_block_cb, FLOW_BLOCK_UNBIND);
|
||||
tc_indr_block_cb_del(indr_block_cb);
|
||||
tc_indr_block_dev_put(indr_dev);
|
||||
}
|
||||
@ -760,27 +766,31 @@ EXPORT_SYMBOL_GPL(tc_indr_block_cb_unregister);
|
||||
|
||||
static void tc_indr_block_call(struct tcf_block *block, struct net_device *dev,
|
||||
struct tcf_block_ext_info *ei,
|
||||
enum tc_block_command command,
|
||||
enum flow_block_command command,
|
||||
struct netlink_ext_ack *extack)
|
||||
{
|
||||
struct tc_indr_block_cb *indr_block_cb;
|
||||
struct tc_indr_block_dev *indr_dev;
|
||||
struct tc_block_offload bo = {
|
||||
struct flow_block_offload bo = {
|
||||
.command = command,
|
||||
.binder_type = ei->binder_type,
|
||||
.block = block,
|
||||
.net = dev_net(dev),
|
||||
.block_shared = tcf_block_shared(block),
|
||||
.extack = extack,
|
||||
};
|
||||
INIT_LIST_HEAD(&bo.cb_list);
|
||||
|
||||
indr_dev = tc_indr_block_dev_lookup(dev);
|
||||
if (!indr_dev)
|
||||
return;
|
||||
|
||||
indr_dev->block = command == TC_BLOCK_BIND ? block : NULL;
|
||||
indr_dev->block = command == FLOW_BLOCK_BIND ? block : NULL;
|
||||
|
||||
list_for_each_entry(indr_block_cb, &indr_dev->cb_list, list)
|
||||
indr_block_cb->cb(dev, indr_block_cb->cb_priv, TC_SETUP_BLOCK,
|
||||
&bo);
|
||||
|
||||
tcf_block_setup(block, &bo);
|
||||
}
|
||||
|
||||
static bool tcf_block_offload_in_use(struct tcf_block *block)
|
||||
@ -791,16 +801,24 @@ static bool tcf_block_offload_in_use(struct tcf_block *block)
|
||||
static int tcf_block_offload_cmd(struct tcf_block *block,
|
||||
struct net_device *dev,
|
||||
struct tcf_block_ext_info *ei,
|
||||
enum tc_block_command command,
|
||||
enum flow_block_command command,
|
||||
struct netlink_ext_ack *extack)
|
||||
{
|
||||
struct tc_block_offload bo = {};
|
||||
struct flow_block_offload bo = {};
|
||||
int err;
|
||||
|
||||
bo.net = dev_net(dev);
|
||||
bo.command = command;
|
||||
bo.binder_type = ei->binder_type;
|
||||
bo.block = block;
|
||||
bo.block_shared = tcf_block_shared(block);
|
||||
bo.extack = extack;
|
||||
return dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_BLOCK, &bo);
|
||||
INIT_LIST_HEAD(&bo.cb_list);
|
||||
|
||||
err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_BLOCK, &bo);
|
||||
if (err < 0)
|
||||
return err;
|
||||
|
||||
return tcf_block_setup(block, &bo);
|
||||
}
|
||||
|
||||
static int tcf_block_offload_bind(struct tcf_block *block, struct Qdisc *q,
|
||||
@ -821,20 +839,20 @@ static int tcf_block_offload_bind(struct tcf_block *block, struct Qdisc *q,
|
||||
return -EOPNOTSUPP;
|
||||
}
|
||||
|
||||
err = tcf_block_offload_cmd(block, dev, ei, TC_BLOCK_BIND, extack);
|
||||
err = tcf_block_offload_cmd(block, dev, ei, FLOW_BLOCK_BIND, extack);
|
||||
if (err == -EOPNOTSUPP)
|
||||
goto no_offload_dev_inc;
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
tc_indr_block_call(block, dev, ei, TC_BLOCK_BIND, extack);
|
||||
tc_indr_block_call(block, dev, ei, FLOW_BLOCK_BIND, extack);
|
||||
return 0;
|
||||
|
||||
no_offload_dev_inc:
|
||||
if (tcf_block_offload_in_use(block))
|
||||
return -EOPNOTSUPP;
|
||||
block->nooffloaddevcnt++;
|
||||
tc_indr_block_call(block, dev, ei, TC_BLOCK_BIND, extack);
|
||||
tc_indr_block_call(block, dev, ei, FLOW_BLOCK_BIND, extack);
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -844,11 +862,11 @@ static void tcf_block_offload_unbind(struct tcf_block *block, struct Qdisc *q,
|
||||
struct net_device *dev = q->dev_queue->dev;
|
||||
int err;
|
||||
|
||||
tc_indr_block_call(block, dev, ei, TC_BLOCK_UNBIND, NULL);
|
||||
tc_indr_block_call(block, dev, ei, FLOW_BLOCK_UNBIND, NULL);
|
||||
|
||||
if (!dev->netdev_ops->ndo_setup_tc)
|
||||
goto no_offload_dev_dec;
|
||||
err = tcf_block_offload_cmd(block, dev, ei, TC_BLOCK_UNBIND, NULL);
|
||||
err = tcf_block_offload_cmd(block, dev, ei, FLOW_BLOCK_UNBIND, NULL);
|
||||
if (err == -EOPNOTSUPP)
|
||||
goto no_offload_dev_dec;
|
||||
return;
|
||||
@ -1341,17 +1359,17 @@ static void tcf_block_release(struct Qdisc *q, struct tcf_block *block,
|
||||
struct tcf_block_owner_item {
|
||||
struct list_head list;
|
||||
struct Qdisc *q;
|
||||
enum tcf_block_binder_type binder_type;
|
||||
enum flow_block_binder_type binder_type;
|
||||
};
|
||||
|
||||
static void
|
||||
tcf_block_owner_netif_keep_dst(struct tcf_block *block,
|
||||
struct Qdisc *q,
|
||||
enum tcf_block_binder_type binder_type)
|
||||
enum flow_block_binder_type binder_type)
|
||||
{
|
||||
if (block->keep_dst &&
|
||||
binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS &&
|
||||
binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_EGRESS)
|
||||
binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS &&
|
||||
binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS)
|
||||
netif_keep_dst(qdisc_dev(q));
|
||||
}
|
||||
|
||||
@ -1368,7 +1386,7 @@ EXPORT_SYMBOL(tcf_block_netif_keep_dst);
|
||||
|
||||
static int tcf_block_owner_add(struct tcf_block *block,
|
||||
struct Qdisc *q,
|
||||
enum tcf_block_binder_type binder_type)
|
||||
enum flow_block_binder_type binder_type)
|
||||
{
|
||||
struct tcf_block_owner_item *item;
|
||||
|
||||
@ -1383,7 +1401,7 @@ static int tcf_block_owner_add(struct tcf_block *block,
|
||||
|
||||
static void tcf_block_owner_del(struct tcf_block *block,
|
||||
struct Qdisc *q,
|
||||
enum tcf_block_binder_type binder_type)
|
||||
enum flow_block_binder_type binder_type)
|
||||
{
|
||||
struct tcf_block_owner_item *item;
|
||||
|
||||
@ -1495,43 +1513,6 @@ void tcf_block_put(struct tcf_block *block)
|
||||
|
||||
EXPORT_SYMBOL(tcf_block_put);
|
||||
|
||||
struct tcf_block_cb {
|
||||
struct list_head list;
|
||||
tc_setup_cb_t *cb;
|
||||
void *cb_ident;
|
||||
void *cb_priv;
|
||||
unsigned int refcnt;
|
||||
};
|
||||
|
||||
void *tcf_block_cb_priv(struct tcf_block_cb *block_cb)
|
||||
{
|
||||
return block_cb->cb_priv;
|
||||
}
|
||||
EXPORT_SYMBOL(tcf_block_cb_priv);
|
||||
|
||||
struct tcf_block_cb *tcf_block_cb_lookup(struct tcf_block *block,
|
||||
tc_setup_cb_t *cb, void *cb_ident)
|
||||
{
	struct tcf_block_cb *block_cb;
|
||||
|
||||
list_for_each_entry(block_cb, &block->cb_list, list)
|
||||
if (block_cb->cb == cb && block_cb->cb_ident == cb_ident)
|
||||
return block_cb;
|
||||
return NULL;
|
||||
}
|
||||
EXPORT_SYMBOL(tcf_block_cb_lookup);
|
||||
|
||||
void tcf_block_cb_incref(struct tcf_block_cb *block_cb)
|
||||
{
|
||||
block_cb->refcnt++;
|
||||
}
|
||||
EXPORT_SYMBOL(tcf_block_cb_incref);
|
||||
|
||||
unsigned int tcf_block_cb_decref(struct tcf_block_cb *block_cb)
|
||||
{
|
||||
return --block_cb->refcnt;
|
||||
}
|
||||
EXPORT_SYMBOL(tcf_block_cb_decref);
|
||||
|
||||
static int
|
||||
tcf_block_playback_offloads(struct tcf_block *block, tc_setup_cb_t *cb,
|
||||
void *cb_priv, bool add, bool offload_in_use,
|
||||
@ -1573,66 +1554,76 @@ err_playback_remove:
|
||||
return err;
|
||||
}
|
||||
|
||||
struct tcf_block_cb *__tcf_block_cb_register(struct tcf_block *block,
|
||||
tc_setup_cb_t *cb, void *cb_ident,
|
||||
void *cb_priv,
|
||||
struct netlink_ext_ack *extack)
|
||||
static int tcf_block_bind(struct tcf_block *block,
|
||||
struct flow_block_offload *bo)
|
||||
{
|
||||
struct flow_block_cb *block_cb, *next;
|
||||
int err, i = 0;
|
||||
|
||||
list_for_each_entry(block_cb, &bo->cb_list, list) {
|
||||
err = tcf_block_playback_offloads(block, block_cb->cb,
|
||||
block_cb->cb_priv, true,
|
||||
tcf_block_offload_in_use(block),
|
||||
bo->extack);
|
||||
if (err)
|
||||
goto err_unroll;
|
||||
|
||||
i++;
|
||||
}
|
||||
list_splice(&bo->cb_list, &block->cb_list);
|
||||
|
||||
return 0;
|
||||
|
||||
err_unroll:
|
||||
list_for_each_entry_safe(block_cb, next, &bo->cb_list, list) {
|
||||
if (i-- > 0) {
|
||||
list_del(&block_cb->list);
|
||||
tcf_block_playback_offloads(block, block_cb->cb,
|
||||
block_cb->cb_priv, false,
|
||||
tcf_block_offload_in_use(block),
|
||||
NULL);
|
||||
}
|
||||
flow_block_cb_free(block_cb);
|
||||
}
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
static void tcf_block_unbind(struct tcf_block *block,
|
||||
struct flow_block_offload *bo)
|
||||
{
|
||||
struct flow_block_cb *block_cb, *next;
|
||||
|
||||
list_for_each_entry_safe(block_cb, next, &bo->cb_list, list) {
|
||||
tcf_block_playback_offloads(block, block_cb->cb,
|
||||
block_cb->cb_priv, false,
|
||||
tcf_block_offload_in_use(block),
|
||||
NULL);
|
||||
list_del(&block_cb->list);
|
||||
flow_block_cb_free(block_cb);
|
||||
}
|
||||
}
|
||||
|
||||
static int tcf_block_setup(struct tcf_block *block,
|
||||
struct flow_block_offload *bo)
|
||||
{
|
||||
struct tcf_block_cb *block_cb;
|
||||
int err;
|
||||
|
||||
/* Replay any already present rules */
|
||||
err = tcf_block_playback_offloads(block, cb, cb_priv, true,
|
||||
tcf_block_offload_in_use(block),
|
||||
extack);
|
||||
if (err)
|
||||
return ERR_PTR(err);
|
||||
switch (bo->command) {
|
||||
case FLOW_BLOCK_BIND:
|
||||
err = tcf_block_bind(block, bo);
|
||||
break;
|
||||
case FLOW_BLOCK_UNBIND:
|
||||
err = 0;
|
||||
tcf_block_unbind(block, bo);
|
||||
break;
|
||||
default:
|
||||
WARN_ON_ONCE(1);
|
||||
err = -EOPNOTSUPP;
|
||||
}
|
||||
|
||||
block_cb = kzalloc(sizeof(*block_cb), GFP_KERNEL);
|
||||
if (!block_cb)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
block_cb->cb = cb;
|
||||
block_cb->cb_ident = cb_ident;
|
||||
block_cb->cb_priv = cb_priv;
|
||||
list_add(&block_cb->list, &block->cb_list);
|
||||
return block_cb;
|
||||
return err;
|
||||
}
|
||||
EXPORT_SYMBOL(__tcf_block_cb_register);
|
||||
|
||||
int tcf_block_cb_register(struct tcf_block *block,
|
||||
tc_setup_cb_t *cb, void *cb_ident,
|
||||
void *cb_priv, struct netlink_ext_ack *extack)
|
||||
{
|
||||
struct tcf_block_cb *block_cb;
|
||||
|
||||
block_cb = __tcf_block_cb_register(block, cb, cb_ident, cb_priv,
|
||||
extack);
|
||||
return PTR_ERR_OR_ZERO(block_cb);
|
||||
}
|
||||
EXPORT_SYMBOL(tcf_block_cb_register);
|
||||
|
||||
void __tcf_block_cb_unregister(struct tcf_block *block,
|
||||
struct tcf_block_cb *block_cb)
|
||||
{
|
||||
tcf_block_playback_offloads(block, block_cb->cb, block_cb->cb_priv,
|
||||
false, tcf_block_offload_in_use(block),
|
||||
NULL);
|
||||
list_del(&block_cb->list);
|
||||
kfree(block_cb);
|
||||
}
|
||||
EXPORT_SYMBOL(__tcf_block_cb_unregister);
|
||||
|
||||
void tcf_block_cb_unregister(struct tcf_block *block,
|
||||
tc_setup_cb_t *cb, void *cb_ident)
|
||||
{
|
||||
struct tcf_block_cb *block_cb;
|
||||
|
||||
block_cb = tcf_block_cb_lookup(block, cb, cb_ident);
|
||||
if (!block_cb)
|
||||
return;
|
||||
__tcf_block_cb_unregister(block, block_cb);
|
||||
}
|
||||
EXPORT_SYMBOL(tcf_block_cb_unregister);
|
||||
|
||||
/* Main classifier routine: scans classifier chain attached
|
||||
* to this qdisc, (optionally) tests for protocol and asks
|
||||
@ -3156,7 +3147,7 @@ EXPORT_SYMBOL(tcf_exts_dump_stats);
|
||||
int tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type,
|
||||
void *type_data, bool err_stop)
|
||||
{
|
||||
struct tcf_block_cb *block_cb;
|
||||
struct flow_block_cb *block_cb;
|
||||
int ok_count = 0;
|
||||
int err;
|
||||
|
||||
|
@ -409,14 +409,14 @@ static void fl_destroy_filter_work(struct work_struct *work)
|
||||
static void fl_hw_destroy_filter(struct tcf_proto *tp, struct cls_fl_filter *f,
|
||||
bool rtnl_held, struct netlink_ext_ack *extack)
|
||||
{
|
||||
struct tc_cls_flower_offload cls_flower = {};
|
||||
struct tcf_block *block = tp->chain->block;
|
||||
struct flow_cls_offload cls_flower = {};
|
||||
|
||||
if (!rtnl_held)
|
||||
rtnl_lock();
|
||||
|
||||
tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, extack);
|
||||
cls_flower.command = TC_CLSFLOWER_DESTROY;
|
||||
cls_flower.command = FLOW_CLS_DESTROY;
|
||||
cls_flower.cookie = (unsigned long) f;
|
||||
|
||||
tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false);
|
||||
@ -434,8 +434,8 @@ static int fl_hw_replace_filter(struct tcf_proto *tp,
|
||||
struct netlink_ext_ack *extack)
|
||||
{
|
||||
struct cls_fl_head *head = fl_head_dereference(tp);
|
||||
struct tc_cls_flower_offload cls_flower = {};
|
||||
struct tcf_block *block = tp->chain->block;
|
||||
struct flow_cls_offload cls_flower = {};
|
||||
bool skip_sw = tc_skip_sw(f->flags);
|
||||
int err = 0;
|
||||
|
||||
@ -449,7 +449,7 @@ static int fl_hw_replace_filter(struct tcf_proto *tp,
|
||||
}
|
||||
|
||||
tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, extack);
|
||||
cls_flower.command = TC_CLSFLOWER_REPLACE;
|
||||
cls_flower.command = FLOW_CLS_REPLACE;
|
||||
cls_flower.cookie = (unsigned long) f;
|
||||
cls_flower.rule->match.dissector = &f->mask->dissector;
|
||||
cls_flower.rule->match.mask = &f->mask->key;
|
||||
@ -498,14 +498,14 @@ errout:
|
||||
static void fl_hw_update_stats(struct tcf_proto *tp, struct cls_fl_filter *f,
|
||||
bool rtnl_held)
|
||||
{
|
||||
struct tc_cls_flower_offload cls_flower = {};
|
||||
struct tcf_block *block = tp->chain->block;
|
||||
struct flow_cls_offload cls_flower = {};
|
||||
|
||||
if (!rtnl_held)
|
||||
rtnl_lock();
|
||||
|
||||
tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, NULL);
|
||||
cls_flower.command = TC_CLSFLOWER_STATS;
|
||||
cls_flower.command = FLOW_CLS_STATS;
|
||||
cls_flower.cookie = (unsigned long) f;
|
||||
cls_flower.classid = f->res.classid;
|
||||
|
||||
@ -1803,8 +1803,8 @@ fl_get_next_hw_filter(struct tcf_proto *tp, struct cls_fl_filter *f, bool add)
|
||||
static int fl_reoffload(struct tcf_proto *tp, bool add, tc_setup_cb_t *cb,
|
||||
void *cb_priv, struct netlink_ext_ack *extack)
|
||||
{
|
||||
struct tc_cls_flower_offload cls_flower = {};
|
||||
struct tcf_block *block = tp->chain->block;
|
||||
struct flow_cls_offload cls_flower = {};
|
||||
struct cls_fl_filter *f = NULL;
|
||||
int err;
|
||||
|
||||
@ -1825,7 +1825,7 @@ static int fl_reoffload(struct tcf_proto *tp, bool add, tc_setup_cb_t *cb,
|
||||
tc_cls_common_offload_init(&cls_flower.common, tp, f->flags,
|
||||
extack);
|
||||
cls_flower.command = add ?
|
||||
TC_CLSFLOWER_REPLACE : TC_CLSFLOWER_DESTROY;
|
||||
FLOW_CLS_REPLACE : FLOW_CLS_DESTROY;
|
||||
cls_flower.cookie = (unsigned long)f;
|
||||
cls_flower.rule->match.dissector = &f->mask->dissector;
|
||||
cls_flower.rule->match.mask = &f->mask->key;
|
||||
@ -1869,7 +1869,7 @@ next_flow:
|
||||
static int fl_hw_create_tmplt(struct tcf_chain *chain,
|
||||
struct fl_flow_tmplt *tmplt)
|
||||
{
|
||||
struct tc_cls_flower_offload cls_flower = {};
|
||||
struct flow_cls_offload cls_flower = {};
|
||||
struct tcf_block *block = chain->block;
|
||||
|
||||
cls_flower.rule = flow_rule_alloc(0);
|
||||
@ -1877,7 +1877,7 @@ static int fl_hw_create_tmplt(struct tcf_chain *chain,
|
||||
return -ENOMEM;
|
||||
|
||||
cls_flower.common.chain_index = chain->index;
|
||||
cls_flower.command = TC_CLSFLOWER_TMPLT_CREATE;
|
||||
cls_flower.command = FLOW_CLS_TMPLT_CREATE;
|
||||
cls_flower.cookie = (unsigned long) tmplt;
|
||||
cls_flower.rule->match.dissector = &tmplt->dissector;
|
||||
cls_flower.rule->match.mask = &tmplt->mask;
|
||||
@ -1895,11 +1895,11 @@ static int fl_hw_create_tmplt(struct tcf_chain *chain,
|
||||
static void fl_hw_destroy_tmplt(struct tcf_chain *chain,
|
||||
struct fl_flow_tmplt *tmplt)
|
||||
{
|
||||
struct tc_cls_flower_offload cls_flower = {};
|
||||
struct flow_cls_offload cls_flower = {};
|
||||
struct tcf_block *block = chain->block;
|
||||
|
||||
cls_flower.common.chain_index = chain->index;
|
||||
cls_flower.command = TC_CLSFLOWER_TMPLT_DESTROY;
|
||||
cls_flower.command = FLOW_CLS_TMPLT_DESTROY;
|
||||
cls_flower.cookie = (unsigned long) tmplt;
|
||||
|
||||
tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false);
|
||||
|
@ -83,7 +83,7 @@ static int ingress_init(struct Qdisc *sch, struct nlattr *opt,
|
||||
|
||||
mini_qdisc_pair_init(&q->miniqp, sch, &dev->miniq_ingress);
|
||||
|
||||
q->block_info.binder_type = TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS;
|
||||
q->block_info.binder_type = FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS;
|
||||
q->block_info.chain_head_change = clsact_chain_head_change;
|
||||
q->block_info.chain_head_change_priv = &q->miniqp;
|
||||
|
||||
@ -217,7 +217,7 @@ static int clsact_init(struct Qdisc *sch, struct nlattr *opt,
|
||||
|
||||
mini_qdisc_pair_init(&q->miniqp_ingress, sch, &dev->miniq_ingress);
|
||||
|
||||
q->ingress_block_info.binder_type = TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS;
|
||||
q->ingress_block_info.binder_type = FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS;
|
||||
q->ingress_block_info.chain_head_change = clsact_chain_head_change;
|
||||
q->ingress_block_info.chain_head_change_priv = &q->miniqp_ingress;
|
||||
|
||||
@ -228,7 +228,7 @@ static int clsact_init(struct Qdisc *sch, struct nlattr *opt,
|
||||
|
||||
mini_qdisc_pair_init(&q->miniqp_egress, sch, &dev->miniq_egress);
|
||||
|
||||
q->egress_block_info.binder_type = TCF_BLOCK_BINDER_TYPE_CLSACT_EGRESS;
|
||||
q->egress_block_info.binder_type = FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS;
|
||||
q->egress_block_info.chain_head_change = clsact_chain_head_change;
|
||||
q->egress_block_info.chain_head_change_priv = &q->miniqp_egress;
|
||||
|
||||
|