Merge branch 'cxgb4-add-support-for-ethtool-n-tuple-filters'
Vishal Kulkarni says:

====================
cxgb4: add support for ethtool n-tuple filters

Patch 1: Adds data structure to maintain list of filters and handles
init/deinit of the same.
Patch 2: Handles addition of filters via ETHTOOL_SRXCLSRLINS.
Patch 3: Handles deletion of filters via ETHTOOL_SRXCLSRLDEL.
Patch 4: Handles viewing of added filters.
Patch 5: Adds FLOW_ACTION_QUEUE support.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
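Editorial note, not part of the series: the ETHTOOL_SRXCLSRLINS/ETHTOOL_SRXCLSRLDEL handlers added in patches 2 and 3 are reached from userspace through the standard SIOCETHTOOL ioctl (the ethtool utility issues the same request via its n-tuple options). The sketch below shows one way to drive that path directly; the interface name, port number, queue index and rule location are made-up illustration values.

/* Illustrative only: reaching the new ETHTOOL_SRXCLSRLINS handler
 * directly through the SIOCETHTOOL ioctl.  "eth0" and the match/steer
 * values are hypothetical.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <arpa/inet.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ethtool_rxnfc nfc;
	struct ifreq ifr;
	int fd;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0) {
		perror("socket");
		return 1;
	}

	memset(&nfc, 0, sizeof(nfc));
	nfc.cmd = ETHTOOL_SRXCLSRLINS;			/* patch 2: add a filter */
	nfc.fs.flow_type = TCP_V4_FLOW;
	nfc.fs.h_u.tcp_ip4_spec.pdst = htons(8000);	/* match TCP dport 8000 */
	nfc.fs.m_u.tcp_ip4_spec.pdst = htons(0xffff);	/* exact port match */
	nfc.fs.ring_cookie = 1;				/* patch 5: steer to RX queue 1 */
	nfc.fs.location = 0;				/* ethtool rule location */

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* hypothetical netdev */
	ifr.ifr_data = (char *)&nfc;

	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
		perror("ETHTOOL_SRXCLSRLINS");

	close(fd);
	return 0;
}

On the driver side such a request lands in set_rxnfc() below, which converts the ethtool_rx_flow_spec through ethtool_rx_flow_rule_create() and programs the hardware via cxgb4_flow_rule_replace(), the same path the tc-flower offload uses.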
commit e9f0809fb9
@@ -1066,6 +1066,17 @@ struct mps_entries_ref {
	refcount_t refcnt;
};

struct cxgb4_ethtool_filter_info {
	u32 *loc_array; /* Array holding the actual TIDs set to filters */
	unsigned long *bmap; /* Bitmap for managing filters in use */
	u32 in_use; /* # of filters in use */
};

struct cxgb4_ethtool_filter {
	u32 nentries; /* Adapter wide number of supported filters */
	struct cxgb4_ethtool_filter_info *port; /* Per port entry */
};

struct adapter {
	void __iomem *regs;
	void __iomem *bar2;
@@ -1191,6 +1202,9 @@ struct adapter {

	/* TC MATCHALL classifier offload */
	struct cxgb4_tc_matchall *tc_matchall;

	/* Ethtool n-tuple */
	struct cxgb4_ethtool_filter *ethtool_filters;
};

/* Support for "sched-class" command to allow a TX Scheduling Class to be
@@ -10,6 +10,8 @@
#include "t4_regs.h"
#include "t4fw_api.h"
#include "cxgb4_cudbg.h"
#include "cxgb4_filter.h"
#include "cxgb4_tc_flower.h"

#define EEPROM_MAGIC 0x38E2F10C

@@ -1571,10 +1573,120 @@ static int set_rss_table(struct net_device *dev, const u32 *p, const u8 *key,
	return -EPERM;
}

static struct filter_entry *cxgb4_get_filter_entry(struct adapter *adap,
						   u32 ftid)
{
	struct tid_info *t = &adap->tids;
	struct filter_entry *f;

	if (ftid < t->nhpftids)
		f = &adap->tids.hpftid_tab[ftid];
	else if (ftid < t->nftids)
		f = &adap->tids.ftid_tab[ftid - t->nhpftids];
	else
		f = lookup_tid(&adap->tids, ftid);

	return f;
}

static void cxgb4_fill_filter_rule(struct ethtool_rx_flow_spec *fs,
				   struct ch_filter_specification *dfs)
{
	switch (dfs->val.proto) {
	case IPPROTO_TCP:
		if (dfs->type)
			fs->flow_type = TCP_V6_FLOW;
		else
			fs->flow_type = TCP_V4_FLOW;
		break;
	case IPPROTO_UDP:
		if (dfs->type)
			fs->flow_type = UDP_V6_FLOW;
		else
			fs->flow_type = UDP_V4_FLOW;
		break;
	}

	if (dfs->type) {
		fs->h_u.tcp_ip6_spec.psrc = cpu_to_be16(dfs->val.fport);
		fs->m_u.tcp_ip6_spec.psrc = cpu_to_be16(dfs->mask.fport);
		fs->h_u.tcp_ip6_spec.pdst = cpu_to_be16(dfs->val.lport);
		fs->m_u.tcp_ip6_spec.pdst = cpu_to_be16(dfs->mask.lport);
		memcpy(&fs->h_u.tcp_ip6_spec.ip6src, &dfs->val.fip[0],
		       sizeof(fs->h_u.tcp_ip6_spec.ip6src));
		memcpy(&fs->m_u.tcp_ip6_spec.ip6src, &dfs->mask.fip[0],
		       sizeof(fs->m_u.tcp_ip6_spec.ip6src));
		memcpy(&fs->h_u.tcp_ip6_spec.ip6dst, &dfs->val.lip[0],
		       sizeof(fs->h_u.tcp_ip6_spec.ip6dst));
		memcpy(&fs->m_u.tcp_ip6_spec.ip6dst, &dfs->mask.lip[0],
		       sizeof(fs->m_u.tcp_ip6_spec.ip6dst));
		fs->h_u.tcp_ip6_spec.tclass = dfs->val.tos;
		fs->m_u.tcp_ip6_spec.tclass = dfs->mask.tos;
	} else {
		fs->h_u.tcp_ip4_spec.psrc = cpu_to_be16(dfs->val.fport);
		fs->m_u.tcp_ip4_spec.psrc = cpu_to_be16(dfs->mask.fport);
		fs->h_u.tcp_ip4_spec.pdst = cpu_to_be16(dfs->val.lport);
		fs->m_u.tcp_ip4_spec.pdst = cpu_to_be16(dfs->mask.lport);
		memcpy(&fs->h_u.tcp_ip4_spec.ip4src, &dfs->val.fip[0],
		       sizeof(fs->h_u.tcp_ip4_spec.ip4src));
		memcpy(&fs->m_u.tcp_ip4_spec.ip4src, &dfs->mask.fip[0],
		       sizeof(fs->m_u.tcp_ip4_spec.ip4src));
		memcpy(&fs->h_u.tcp_ip4_spec.ip4dst, &dfs->val.lip[0],
		       sizeof(fs->h_u.tcp_ip4_spec.ip4dst));
		memcpy(&fs->m_u.tcp_ip4_spec.ip4dst, &dfs->mask.lip[0],
		       sizeof(fs->m_u.tcp_ip4_spec.ip4dst));
		fs->h_u.tcp_ip4_spec.tos = dfs->val.tos;
		fs->m_u.tcp_ip4_spec.tos = dfs->mask.tos;
	}
	fs->h_ext.vlan_tci = cpu_to_be16(dfs->val.ivlan);
	fs->m_ext.vlan_tci = cpu_to_be16(dfs->mask.ivlan);
	fs->flow_type |= FLOW_EXT;

	if (dfs->action == FILTER_DROP)
		fs->ring_cookie = RX_CLS_FLOW_DISC;
	else
		fs->ring_cookie = dfs->iq;
}

static int cxgb4_ntuple_get_filter(struct net_device *dev,
				   struct ethtool_rxnfc *cmd,
				   unsigned int loc)
{
	const struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = netdev2adap(dev);
	struct filter_entry *f;
	int ftid;

	if (!(adap->flags & CXGB4_FULL_INIT_DONE))
		return -EAGAIN;

	/* Check for maximum filter range */
	if (!adap->ethtool_filters)
		return -EOPNOTSUPP;

	if (loc >= adap->ethtool_filters->nentries)
		return -ERANGE;

	if (!test_bit(loc, adap->ethtool_filters->port[pi->port_id].bmap))
		return -ENOENT;

	ftid = adap->ethtool_filters->port[pi->port_id].loc_array[loc];

	/* Fetch filter_entry */
	f = cxgb4_get_filter_entry(adap, ftid);

	cxgb4_fill_filter_rule(&cmd->fs, &f->fs);

	return 0;
}

static int get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
		     u32 *rules)
{
	const struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = netdev2adap(dev);
	unsigned int count = 0, index = 0;
	int ret = 0;

	switch (info->cmd) {
	case ETHTOOL_GRXFH: {
@@ -1630,10 +1742,144 @@ static int get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
	case ETHTOOL_GRXRINGS:
		info->data = pi->nqsets;
		return 0;
	case ETHTOOL_GRXCLSRLCNT:
		info->rule_cnt =
			adap->ethtool_filters->port[pi->port_id].in_use;
		return 0;
	case ETHTOOL_GRXCLSRULE:
		return cxgb4_ntuple_get_filter(dev, info, info->fs.location);
	case ETHTOOL_GRXCLSRLALL:
		info->data = adap->ethtool_filters->nentries;
		while (count < info->rule_cnt) {
			ret = cxgb4_ntuple_get_filter(dev, info, index);
			if (!ret)
				rules[count++] = index;
			index++;
		}
		return 0;
	}

	return -EOPNOTSUPP;
}

static int cxgb4_ntuple_del_filter(struct net_device *dev,
				   struct ethtool_rxnfc *cmd)
{
	struct cxgb4_ethtool_filter_info *filter_info;
	struct adapter *adapter = netdev2adap(dev);
	struct port_info *pi = netdev_priv(dev);
	struct filter_entry *f;
	u32 filter_id;
	int ret;

	if (!(adapter->flags & CXGB4_FULL_INIT_DONE))
		return -EAGAIN; /* can still change nfilters */

	if (!adapter->ethtool_filters)
		return -EOPNOTSUPP;

	if (cmd->fs.location >= adapter->ethtool_filters->nentries) {
		dev_err(adapter->pdev_dev,
			"Location must be < %u",
			adapter->ethtool_filters->nentries);
		return -ERANGE;
	}

	filter_info = &adapter->ethtool_filters->port[pi->port_id];

	if (!test_bit(cmd->fs.location, filter_info->bmap))
		return -ENOENT;

	filter_id = filter_info->loc_array[cmd->fs.location];
	f = cxgb4_get_filter_entry(adapter, filter_id);

	ret = cxgb4_flow_rule_destroy(dev, f->fs.tc_prio, &f->fs, filter_id);
	if (ret)
		goto err;

	clear_bit(cmd->fs.location, filter_info->bmap);
	filter_info->in_use--;

err:
	return ret;
}

/* Add Ethtool n-tuple filters. */
static int cxgb4_ntuple_set_filter(struct net_device *netdev,
				   struct ethtool_rxnfc *cmd)
{
	struct ethtool_rx_flow_spec_input input = {};
	struct cxgb4_ethtool_filter_info *filter_info;
	struct adapter *adapter = netdev2adap(netdev);
	struct port_info *pi = netdev_priv(netdev);
	struct ch_filter_specification fs;
	struct ethtool_rx_flow_rule *flow;
	u32 tid;
	int ret;

	if (!(adapter->flags & CXGB4_FULL_INIT_DONE))
		return -EAGAIN; /* can still change nfilters */

	if (!adapter->ethtool_filters)
		return -EOPNOTSUPP;

	if (cmd->fs.location >= adapter->ethtool_filters->nentries) {
		dev_err(adapter->pdev_dev,
			"Location must be < %u",
			adapter->ethtool_filters->nentries);
		return -ERANGE;
	}

	if (test_bit(cmd->fs.location,
		     adapter->ethtool_filters->port[pi->port_id].bmap))
		return -EEXIST;

	memset(&fs, 0, sizeof(fs));

	input.fs = &cmd->fs;
	flow = ethtool_rx_flow_rule_create(&input);
	if (IS_ERR(flow)) {
		ret = PTR_ERR(flow);
		goto exit;
	}

	fs.hitcnts = 1;

	ret = cxgb4_flow_rule_replace(netdev, flow->rule, cmd->fs.location,
				      NULL, &fs, &tid);
	if (ret)
		goto free;

	filter_info = &adapter->ethtool_filters->port[pi->port_id];

	filter_info->loc_array[cmd->fs.location] = tid;
	set_bit(cmd->fs.location, filter_info->bmap);
	filter_info->in_use++;

free:
	ethtool_rx_flow_rule_destroy(flow);
exit:
	return ret;
}

static int set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
{
	int ret = -EOPNOTSUPP;

	switch (cmd->cmd) {
	case ETHTOOL_SRXCLSRLINS:
		ret = cxgb4_ntuple_set_filter(dev, cmd);
		break;
	case ETHTOOL_SRXCLSRLDEL:
		ret = cxgb4_ntuple_del_filter(dev, cmd);
		break;
	default:
		break;
	}

	return ret;
}

static int set_dump(struct net_device *dev, struct ethtool_dump *eth_dump)
{
	struct adapter *adapter = netdev2adap(dev);
@@ -1839,6 +2085,7 @@ static const struct ethtool_ops cxgb_ethtool_ops = {
	.get_regs_len = get_regs_len,
	.get_regs = get_regs,
	.get_rxnfc = get_rxnfc,
	.set_rxnfc = set_rxnfc,
	.get_rxfh_indir_size = get_rss_table_size,
	.get_rxfh = get_rss_table,
	.set_rxfh = set_rss_table,
@@ -1853,6 +2100,87 @@ static const struct ethtool_ops cxgb_ethtool_ops = {
	.set_priv_flags = cxgb4_set_priv_flags,
};

void cxgb4_cleanup_ethtool_filters(struct adapter *adap)
{
	struct cxgb4_ethtool_filter_info *eth_filter_info;
	u8 i;

	if (!adap->ethtool_filters)
		return;

	eth_filter_info = adap->ethtool_filters->port;

	if (eth_filter_info) {
		for (i = 0; i < adap->params.nports; i++) {
			kvfree(eth_filter_info[i].loc_array);
			kfree(eth_filter_info[i].bmap);
		}
		kfree(eth_filter_info);
	}

	kfree(adap->ethtool_filters);
}

int cxgb4_init_ethtool_filters(struct adapter *adap)
{
	struct cxgb4_ethtool_filter_info *eth_filter_info;
	struct cxgb4_ethtool_filter *eth_filter;
	struct tid_info *tids = &adap->tids;
	u32 nentries, i;
	int ret;

	eth_filter = kzalloc(sizeof(*eth_filter), GFP_KERNEL);
	if (!eth_filter)
		return -ENOMEM;

	eth_filter_info = kcalloc(adap->params.nports,
				  sizeof(*eth_filter_info),
				  GFP_KERNEL);
	if (!eth_filter_info) {
		ret = -ENOMEM;
		goto free_eth_filter;
	}

	eth_filter->port = eth_filter_info;

	nentries = tids->nhpftids + tids->nftids;
	if (is_hashfilter(adap))
		nentries += tids->nhash +
			    (adap->tids.stid_base - adap->tids.tid_base);
	eth_filter->nentries = nentries;

	for (i = 0; i < adap->params.nports; i++) {
		eth_filter->port[i].loc_array = kvzalloc(nentries, GFP_KERNEL);
		if (!eth_filter->port[i].loc_array) {
			ret = -ENOMEM;
			goto free_eth_finfo;
		}

		eth_filter->port[i].bmap = kcalloc(BITS_TO_LONGS(nentries),
						   sizeof(unsigned long),
						   GFP_KERNEL);
		if (!eth_filter->port[i].bmap) {
			ret = -ENOMEM;
			goto free_eth_finfo;
		}
	}

	adap->ethtool_filters = eth_filter;
	return 0;

free_eth_finfo:
	while (i-- > 0) {
		kfree(eth_filter->port[i].bmap);
		kvfree(eth_filter->port[i].loc_array);
	}
	kfree(eth_filter_info);

free_eth_filter:
	kfree(eth_filter);

	return ret;
}

void cxgb4_set_ethtool_ops(struct net_device *netdev)
{
	netdev->ethtool_ops = &cxgb_ethtool_ops;
@@ -1152,6 +1152,11 @@ bool is_filter_exact_match(struct adapter *adap,
	if (!is_hashfilter(adap))
		return false;

	if ((atomic_read(&adap->tids.hash_tids_in_use) +
	     atomic_read(&adap->tids.tids_in_use)) >=
	    (adap->tids.nhash + (adap->tids.stid_base - adap->tids.tid_base)))
		return false;

	/* Keep tunnel VNI match disabled for hash-filters for now */
	if (fs->mask.encap_vld)
		return false;
@@ -53,4 +53,6 @@ void clear_all_filters(struct adapter *adapter);
void init_hash_filter(struct adapter *adap);
bool is_filter_exact_match(struct adapter *adap,
			   struct ch_filter_specification *fs);
void cxgb4_cleanup_ethtool_filters(struct adapter *adap);
int cxgb4_init_ethtool_filters(struct adapter *adap);
#endif /* __CXGB4_FILTER_H */
@@ -5860,6 +5860,7 @@ static void free_some_resources(struct adapter *adapter)
	cxgb4_cleanup_tc_mqprio(adapter);
	cxgb4_cleanup_tc_flower(adapter);
	cxgb4_cleanup_tc_u32(adapter);
	cxgb4_cleanup_ethtool_filters(adapter);
	kfree(adapter->sge.egr_map);
	kfree(adapter->sge.ingr_map);
	kfree(adapter->sge.starving_fl);
@@ -6370,7 +6371,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
			NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
			NETIF_F_RXCSUM | NETIF_F_RXHASH | NETIF_F_GRO |
			NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
			NETIF_F_HW_TC;
			NETIF_F_HW_TC | NETIF_F_NTUPLE;

		if (chip_ver > CHELSIO_T5) {
			netdev->hw_enc_features |= NETIF_F_IP_CSUM |
@@ -6493,6 +6494,24 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
			 i);
	}

	if (is_offload(adapter) || is_hashfilter(adapter)) {
		if (t4_read_reg(adapter, LE_DB_CONFIG_A) & HASHEN_F) {
			u32 v;

			v = t4_read_reg(adapter, LE_DB_HASH_CONFIG_A);
			if (chip_ver <= CHELSIO_T5) {
				adapter->tids.nhash = 1 << HASHTIDSIZE_G(v);
				v = t4_read_reg(adapter, LE_DB_TID_HASHBASE_A);
				adapter->tids.hash_base = v / 4;
			} else {
				adapter->tids.nhash = HASHTBLSIZE_G(v) << 3;
				v = t4_read_reg(adapter,
						T6_LE_DB_HASH_TID_BASE_A);
				adapter->tids.hash_base = v;
			}
		}
	}

	if (tid_init(&adapter->tids) < 0) {
		dev_warn(&pdev->dev, "could not allocate TID table, "
			 "continuing\n");
@@ -6514,22 +6533,9 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
		if (cxgb4_init_tc_matchall(adapter))
			dev_warn(&pdev->dev,
				 "could not offload tc matchall, continuing\n");
	}

	if (is_offload(adapter) || is_hashfilter(adapter)) {
		if (t4_read_reg(adapter, LE_DB_CONFIG_A) & HASHEN_F) {
			u32 hash_base, hash_reg;

			if (chip_ver <= CHELSIO_T5) {
				hash_reg = LE_DB_TID_HASHBASE_A;
				hash_base = t4_read_reg(adapter, hash_reg);
				adapter->tids.hash_base = hash_base / 4;
			} else {
				hash_reg = T6_LE_DB_HASH_TID_BASE_A;
				hash_base = t4_read_reg(adapter, hash_reg);
				adapter->tids.hash_base = hash_base;
			}
		}
		if (cxgb4_init_ethtool_filters(adapter))
			dev_warn(&pdev->dev,
				 "could not initialize ethtool filters, continuing\n");
	}

	/* See what interrupts we'll be using */
@@ -81,19 +81,9 @@ static struct ch_tc_flower_entry *ch_flower_lookup(struct adapter *adap,
}

static void cxgb4_process_flow_match(struct net_device *dev,
				     struct flow_cls_offload *cls,
				     struct flow_rule *rule,
				     struct ch_filter_specification *fs)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
	u16 addr_type = 0;

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_match_control match;

		flow_rule_match_control(rule, &match);
		addr_type = match.key->addr_type;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_match_basic match;
		u16 ethtype_key, ethtype_mask;
@@ -116,7 +106,7 @@ static void cxgb4_process_flow_match(struct net_device *dev,
		fs->mask.proto = match.mask->ip_proto;
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
		struct flow_match_ipv4_addrs match;

		flow_rule_match_ipv4_addrs(rule, &match);
@@ -131,7 +121,7 @@ static void cxgb4_process_flow_match(struct net_device *dev,
		memcpy(&fs->nat_fip[0], &match.key->src, sizeof(match.key->src));
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
		struct flow_match_ipv6_addrs match;

		flow_rule_match_ipv6_addrs(rule, &match);
@@ -224,9 +214,8 @@ static void cxgb4_process_flow_match(struct net_device *dev,
}

static int cxgb4_validate_flow_match(struct net_device *dev,
				     struct flow_cls_offload *cls)
				     struct flow_rule *rule)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
	struct flow_dissector *dissector = rule->match.dissector;
	u16 ethtype_mask = 0;
	u16 ethtype_key = 0;
@@ -436,6 +425,11 @@ void cxgb4_process_flow_actions(struct net_device *in,
				process_pedit_field(fs, val, mask, offset, htype);
			}
			break;
		case FLOW_ACTION_QUEUE:
			fs->action = FILTER_PASS;
			fs->dirsteer = 1;
			fs->iq = act->queue.index;
			break;
		default:
			break;
		}
@@ -620,6 +614,9 @@ int cxgb4_validate_flow_actions(struct net_device *dev,
				act_pedit = true;
			}
			break;
		case FLOW_ACTION_QUEUE:
			/* Do nothing. cxgb4_set_filter will validate */
			break;
		default:
			netdev_err(dev, "%s: Unsupported action\n", __func__);
			return -EOPNOTSUPP;
@@ -693,14 +690,11 @@ out_unlock:
	spin_unlock_bh(&t->ftid_lock);
}

int cxgb4_tc_flower_replace(struct net_device *dev,
			    struct flow_cls_offload *cls)
int cxgb4_flow_rule_replace(struct net_device *dev, struct flow_rule *rule,
			    u32 tc_prio, struct netlink_ext_ack *extack,
			    struct ch_filter_specification *fs, u32 *tid)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
	struct netlink_ext_ack *extack = cls->common.extack;
	struct adapter *adap = netdev2adap(dev);
	struct ch_tc_flower_entry *ch_flower;
	struct ch_filter_specification *fs;
	struct filter_ctx ctx;
	u8 inet_family;
	int fidx, ret;
@@ -708,18 +702,10 @@ int cxgb4_tc_flower_replace(struct net_device *dev,
	if (cxgb4_validate_flow_actions(dev, &rule->action, extack))
		return -EOPNOTSUPP;

	if (cxgb4_validate_flow_match(dev, cls))
	if (cxgb4_validate_flow_match(dev, rule))
		return -EOPNOTSUPP;

	ch_flower = allocate_flower_entry();
	if (!ch_flower) {
		netdev_err(dev, "%s: ch_flower alloc failed.\n", __func__);
		return -ENOMEM;
	}

	fs = &ch_flower->fs;
	fs->hitcnts = 1;
	cxgb4_process_flow_match(dev, cls, fs);
	cxgb4_process_flow_match(dev, rule, fs);
	cxgb4_process_flow_actions(dev, &rule->action, fs);

	fs->hash = is_filter_exact_match(adap, fs);
@@ -730,12 +716,11 @@ int cxgb4_tc_flower_replace(struct net_device *dev,
	 * existing rules.
	 */
	fidx = cxgb4_get_free_ftid(dev, inet_family, fs->hash,
				   cls->common.prio);
				   tc_prio);
	if (fidx < 0) {
		NL_SET_ERR_MSG_MOD(extack,
				   "No free LETCAM index available");
		ret = -ENOMEM;
		goto free_entry;
		return -ENOMEM;
	}

	if (fidx < adap->tids.nhpftids) {
@@ -749,42 +734,70 @@ int cxgb4_tc_flower_replace(struct net_device *dev,
	if (fs->hash)
		fidx = 0;

	fs->tc_prio = cls->common.prio;
	fs->tc_cookie = cls->cookie;
	fs->tc_prio = tc_prio;

	init_completion(&ctx.completion);
	ret = __cxgb4_set_filter(dev, fidx, fs, &ctx);
	if (ret) {
		netdev_err(dev, "%s: filter creation err %d\n",
			   __func__, ret);
		goto free_entry;
		return ret;
	}

	/* Wait for reply */
	ret = wait_for_completion_timeout(&ctx.completion, 10 * HZ);
	if (!ret) {
		ret = -ETIMEDOUT;
		goto free_entry;
	if (!ret)
		return -ETIMEDOUT;

	/* Check if hw returned error for filter creation */
	if (ctx.result)
		return ctx.result;

	*tid = ctx.tid;

	if (fs->hash)
		cxgb4_tc_flower_hash_prio_add(adap, tc_prio);

	return 0;
}

int cxgb4_tc_flower_replace(struct net_device *dev,
			    struct flow_cls_offload *cls)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
	struct netlink_ext_ack *extack = cls->common.extack;
	struct adapter *adap = netdev2adap(dev);
	struct ch_tc_flower_entry *ch_flower;
	struct ch_filter_specification *fs;
	int ret;

	ch_flower = allocate_flower_entry();
	if (!ch_flower) {
		netdev_err(dev, "%s: ch_flower alloc failed.\n", __func__);
		return -ENOMEM;
	}

	ret = ctx.result;
	/* Check if hw returned error for filter creation */
	fs = &ch_flower->fs;
	fs->hitcnts = 1;
	fs->tc_cookie = cls->cookie;

	ret = cxgb4_flow_rule_replace(dev, rule, cls->common.prio, extack, fs,
				      &ch_flower->filter_id);
	if (ret)
		goto free_entry;

	ch_flower->tc_flower_cookie = cls->cookie;
	ch_flower->filter_id = ctx.tid;
	ret = rhashtable_insert_fast(&adap->flower_tbl, &ch_flower->node,
				     adap->flower_ht_params);
	if (ret)
		goto del_filter;

	if (fs->hash)
		cxgb4_tc_flower_hash_prio_add(adap, cls->common.prio);

	return 0;

del_filter:
	if (fs->hash)
		cxgb4_tc_flower_hash_prio_del(adap, cls->common.prio);

	cxgb4_del_filter(dev, ch_flower->filter_id, &ch_flower->fs);

free_entry:
@@ -792,23 +805,38 @@ free_entry:
	return ret;
}

int cxgb4_flow_rule_destroy(struct net_device *dev, u32 tc_prio,
			    struct ch_filter_specification *fs, int tid)
{
	struct adapter *adap = netdev2adap(dev);
	u8 hash;
	int ret;

	hash = fs->hash;

	ret = cxgb4_del_filter(dev, tid, fs);
	if (ret)
		return ret;

	if (hash)
		cxgb4_tc_flower_hash_prio_del(adap, tc_prio);

	return ret;
}

int cxgb4_tc_flower_destroy(struct net_device *dev,
			    struct flow_cls_offload *cls)
{
	struct adapter *adap = netdev2adap(dev);
	struct ch_tc_flower_entry *ch_flower;
	u32 tc_prio;
	bool hash;
	int ret;

	ch_flower = ch_flower_lookup(adap, cls->cookie);
	if (!ch_flower)
		return -ENOENT;

	hash = ch_flower->fs.hash;
	tc_prio = ch_flower->fs.tc_prio;

	ret = cxgb4_del_filter(dev, ch_flower->filter_id, &ch_flower->fs);
	ret = cxgb4_flow_rule_destroy(dev, ch_flower->fs.tc_prio,
				      &ch_flower->fs, ch_flower->filter_id);
	if (ret)
		goto err;

@@ -820,9 +848,6 @@ int cxgb4_tc_flower_destroy(struct net_device *dev,
	}
	kfree_rcu(ch_flower, rcu);

	if (hash)
		cxgb4_tc_flower_hash_prio_del(adap, tc_prio);

err:
	return ret;
}
@@ -121,6 +121,11 @@ int cxgb4_tc_flower_destroy(struct net_device *dev,
			    struct flow_cls_offload *cls);
int cxgb4_tc_flower_stats(struct net_device *dev,
			  struct flow_cls_offload *cls);
int cxgb4_flow_rule_replace(struct net_device *dev, struct flow_rule *rule,
			    u32 tc_prio, struct netlink_ext_ack *extack,
			    struct ch_filter_specification *fs, u32 *tid);
int cxgb4_flow_rule_destroy(struct net_device *dev, u32 tc_prio,
			    struct ch_filter_specification *fs, int tid);

int cxgb4_init_tc_flower(struct adapter *adap);
void cxgb4_cleanup_tc_flower(struct adapter *adap);
@@ -106,6 +106,8 @@ struct tid_info {
	unsigned long *stid_bmap;
	unsigned int nstids;
	unsigned int stid_base;

	unsigned int nhash;
	unsigned int hash_base;

	union aopen_entry *atid_tab;
@@ -3044,6 +3044,10 @@
#define HASHTIDSIZE_M 0x3fU
#define HASHTIDSIZE_G(x) (((x) >> HASHTIDSIZE_S) & HASHTIDSIZE_M)

#define HASHTBLSIZE_S 3
#define HASHTBLSIZE_M 0x1ffffU
#define HASHTBLSIZE_G(x) (((x) >> HASHTBLSIZE_S) & HASHTBLSIZE_M)

#define LE_DB_HASH_TID_BASE_A 0x19c30
#define LE_DB_HASH_TBL_BASE_ADDR_A 0x19c30
#define LE_DB_INT_CAUSE_A 0x19c3c
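Editorial note: the bookkeeping the series adds in the diff above is intentionally small. Each port keeps an array mapping an ethtool rule "location" to the hardware filter TID returned on insertion, plus a bitmap marking which locations are occupied (struct cxgb4_ethtool_filter_info, cxgb4_init_ethtool_filters). The standalone C sketch below models only that idea with hypothetical struct and helper names; it is not driver code.

/* Standalone model of the per-port location bookkeeping (hypothetical names).
 * loc_insert() mirrors what the ETHTOOL_SRXCLSRLINS handler records once the
 * hardware accepts a rule; loc_delete() mirrors the ETHTOOL_SRXCLSRLDEL path.
 */
#include <errno.h>
#include <limits.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define BITS_PER_LONG (CHAR_BIT * sizeof(unsigned long))

struct filter_info {
	uint32_t *loc_array;	/* location -> hardware filter TID */
	unsigned long *bmap;	/* one bit per location */
	uint32_t in_use;
	uint32_t nentries;
};

static int filter_info_init(struct filter_info *fi, uint32_t nentries)
{
	size_t nlongs = (nentries + BITS_PER_LONG - 1) / BITS_PER_LONG;

	fi->loc_array = calloc(nentries, sizeof(*fi->loc_array));
	fi->bmap = calloc(nlongs, sizeof(*fi->bmap));
	if (!fi->loc_array || !fi->bmap)
		return -ENOMEM;
	fi->nentries = nentries;
	fi->in_use = 0;
	return 0;
}

static int loc_busy(const struct filter_info *fi, uint32_t loc)
{
	return (fi->bmap[loc / BITS_PER_LONG] >> (loc % BITS_PER_LONG)) & 1UL;
}

static int loc_insert(struct filter_info *fi, uint32_t loc, uint32_t tid)
{
	if (loc >= fi->nentries)
		return -ERANGE;		/* "Location must be < nentries" */
	if (loc_busy(fi, loc))
		return -EEXIST;		/* location already programmed */
	fi->loc_array[loc] = tid;
	fi->bmap[loc / BITS_PER_LONG] |= 1UL << (loc % BITS_PER_LONG);
	fi->in_use++;
	return 0;
}

static int loc_delete(struct filter_info *fi, uint32_t loc)
{
	if (loc >= fi->nentries)
		return -ERANGE;
	if (!loc_busy(fi, loc))
		return -ENOENT;		/* nothing at this location */
	fi->bmap[loc / BITS_PER_LONG] &= ~(1UL << (loc % BITS_PER_LONG));
	fi->in_use--;
	return 0;
}

int main(void)
{
	struct filter_info fi;

	if (filter_info_init(&fi, 64))
		return 1;
	loc_insert(&fi, 5, 0x123);	/* rule at location 5 -> TID 0x123 */
	printf("in_use=%u tid@5=0x%x\n", (unsigned)fi.in_use,
	       (unsigned)fi.loc_array[5]);
	loc_delete(&fi, 5);
	printf("in_use=%u\n", (unsigned)fi.in_use);
	free(fi.loc_array);
	free(fi.bmap);
	return 0;
}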