netfilter: nf_tables: move nft_cmp_fast_mask to where it's used

... and cast the result to u32 so sparse won't complain anymore.

Signed-off-by: Florian Westphal <fw@strlen.de>
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
Author:    Florian Westphal
Date:      2022-06-23 15:05:14 +02:00
Committer: Pablo Neira Ayuso
parent ffb3d9a30c
commit 6b77205374
2 changed files with 12 additions and 10 deletions

include/net/netfilter/nf_tables_core.h

@@ -56,16 +56,6 @@ struct nft_immediate_expr {
 	u8			dlen;
 };
 
-/* Calculate the mask for the nft_cmp_fast expression. On big endian the
- * mask needs to include the *upper* bytes when interpreting that data as
- * something smaller than the full u32, therefore a cpu_to_le32 is done.
- */
-static inline u32 nft_cmp_fast_mask(unsigned int len)
-{
-	return cpu_to_le32(~0U >> (sizeof_field(struct nft_cmp_fast_expr,
-			  data) * BITS_PER_BYTE - len));
-}
-
 extern const struct nft_expr_ops nft_cmp_fast_ops;
 extern const struct nft_expr_ops nft_cmp16_fast_ops;
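
To make the comment above concrete, here is a minimal user-space sketch (not kernel code; the helper name, the len = 16 value and the printed numbers are only illustrative): the data field is a u32, so the host-order mask is ~0U >> (32 - len), and cpu_to_le32() leaves that value alone on little endian but byte-swaps it on big endian, which is how the mask ends up covering the *upper* bytes there.

#include <stdint.h>
#include <stdio.h>

/* stand-in for what cpu_to_le32() does on a big endian CPU;
 * on little endian cpu_to_le32() is a no-op */
static uint32_t bswap32(uint32_t x)
{
	return ((x & 0x000000ffu) << 24) | ((x & 0x0000ff00u) << 8) |
	       ((x & 0x00ff0000u) >> 8)  | ((x & 0xff000000u) >> 24);
}

int main(void)
{
	unsigned int len = 16;				/* compare only 16 bits of the u32 data */
	uint32_t host_mask = ~0U >> (32 - len);		/* 0x0000ffff in host byte order */

	printf("mask on little endian: 0x%08x\n", host_mask);		/* 0x0000ffff */
	printf("mask on big endian:    0x%08x\n", bswap32(host_mask));	/* 0xffff0000: upper bytes */
	return 0;
}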

net/netfilter/nft_cmp.c

@@ -197,6 +197,18 @@ static const struct nft_expr_ops nft_cmp_ops = {
 	.offload	= nft_cmp_offload,
 };
 
+/* Calculate the mask for the nft_cmp_fast expression. On big endian the
+ * mask needs to include the *upper* bytes when interpreting that data as
+ * something smaller than the full u32, therefore a cpu_to_le32 is done.
+ */
+static u32 nft_cmp_fast_mask(unsigned int len)
+{
+	__le32 mask = cpu_to_le32(~0U >> (sizeof_field(struct nft_cmp_fast_expr,
+						       data) * BITS_PER_BYTE - len));
+
+	return (__force u32)mask;
+}
+
 static int nft_cmp_fast_init(const struct nft_ctx *ctx,
 			     const struct nft_expr *expr,
 			     const struct nlattr * const tb[])
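
For reference, the sparse complaint that the __force cast silences comes from __le32 being a __bitwise type, which sparse treats as distinct from plain u32. The sketch below is a hypothetical standalone illustration, not the kernel's headers: the stub macros, typedefs, cpu_to_le32_stub() and fast_mask_stub() merely mimic the kernel's __bitwise/__force annotations to show the pattern of storing the little-endian value in a __le32 and returning it through an explicit __force cast.

#include <stdio.h>

#ifdef __CHECKER__				/* defined when sparse runs */
#define __bitwise	__attribute__((bitwise))
#define __force		__attribute__((force))
#else
#define __bitwise
#define __force
#endif

typedef unsigned int u32;
typedef u32 __bitwise __le32;

/* stand-in for cpu_to_le32(); identity on a little endian host */
static __le32 cpu_to_le32_stub(u32 v)
{
	return (__force __le32)v;
}

static u32 fast_mask_stub(unsigned int len)
{
	__le32 mask = cpu_to_le32_stub(~0U >> (32 - len));

	return (__force u32)mask;	/* without __force, sparse warns about the type mismatch */
}

int main(void)
{
	printf("fast_mask_stub(16) = 0x%08x\n", fast_mask_stub(16));
	return 0;
}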