Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/kaber/nf-next-2.6

commit 749f621e20
@@ -114,15 +114,17 @@ struct nf_sockopt_ops {
int set_optmin;
int set_optmax;
int (*set)(struct sock *sk, int optval, void __user *user, unsigned int len);
#ifdef CONFIG_COMPAT
int (*compat_set)(struct sock *sk, int optval,
void __user *user, unsigned int len);

#endif
int get_optmin;
int get_optmax;
int (*get)(struct sock *sk, int optval, void __user *user, int *len);
#ifdef CONFIG_COMPAT
int (*compat_get)(struct sock *sk, int optval,
void __user *user, int *len);

#endif
/* Use the module struct to lock set/get code in place */
struct module *owner;
};

@@ -161,11 +163,8 @@ static inline int nf_hook_thresh(u_int8_t pf, unsigned int hook,
struct sk_buff *skb,
struct net_device *indev,
struct net_device *outdev,
int (*okfn)(struct sk_buff *), int thresh,
int cond)
int (*okfn)(struct sk_buff *), int thresh)
{
if (!cond)
return 1;
#ifndef CONFIG_NETFILTER_DEBUG
if (list_empty(&nf_hooks[pf][hook]))
return 1;

@@ -177,7 +176,7 @@ static inline int nf_hook(u_int8_t pf, unsigned int hook, struct sk_buff *skb,
struct net_device *indev, struct net_device *outdev,
int (*okfn)(struct sk_buff *))
{
return nf_hook_thresh(pf, hook, skb, indev, outdev, okfn, INT_MIN, 1);
return nf_hook_thresh(pf, hook, skb, indev, outdev, okfn, INT_MIN);
}

/* Activate hook; either okfn or kfree_skb called, unless a hook

@@ -197,36 +196,48 @@ static inline int nf_hook(u_int8_t pf, unsigned int hook, struct sk_buff *skb,
coders :)
*/

/* This is gross, but inline doesn't cut it for avoiding the function
call in fast path: gcc doesn't inline (needs value tracking?). --RR */
static inline int
NF_HOOK_THRESH(uint8_t pf, unsigned int hook, struct sk_buff *skb,
struct net_device *in, struct net_device *out,
int (*okfn)(struct sk_buff *), int thresh)
{
int ret = nf_hook_thresh(pf, hook, skb, in, out, okfn, thresh);
if (ret == 1)
ret = okfn(skb);
return ret;
}

/* HX: It's slightly less gross now. */
static inline int
NF_HOOK_COND(uint8_t pf, unsigned int hook, struct sk_buff *skb,
struct net_device *in, struct net_device *out,
int (*okfn)(struct sk_buff *), bool cond)
{
int ret = 1;
if (cond ||
(ret = nf_hook_thresh(pf, hook, skb, in, out, okfn, INT_MIN) == 1))
ret = okfn(skb);
return ret;
}

#define NF_HOOK_THRESH(pf, hook, skb, indev, outdev, okfn, thresh) \
({int __ret; \
if ((__ret=nf_hook_thresh(pf, hook, (skb), indev, outdev, okfn, thresh, 1)) == 1)\
__ret = (okfn)(skb); \
__ret;})

#define NF_HOOK_COND(pf, hook, skb, indev, outdev, okfn, cond) \
({int __ret; \
if ((__ret=nf_hook_thresh(pf, hook, (skb), indev, outdev, okfn, INT_MIN, cond)) == 1)\
__ret = (okfn)(skb); \
__ret;})

#define NF_HOOK(pf, hook, skb, indev, outdev, okfn) \
NF_HOOK_THRESH(pf, hook, skb, indev, outdev, okfn, INT_MIN)
static inline int
NF_HOOK(uint8_t pf, unsigned int hook, struct sk_buff *skb,
struct net_device *in, struct net_device *out,
int (*okfn)(struct sk_buff *))
{
return NF_HOOK_THRESH(pf, hook, skb, in, out, okfn, INT_MIN);
}

/* Call setsockopt() */
int nf_setsockopt(struct sock *sk, u_int8_t pf, int optval, char __user *opt,
unsigned int len);
int nf_getsockopt(struct sock *sk, u_int8_t pf, int optval, char __user *opt,
int *len);

#ifdef CONFIG_COMPAT
int compat_nf_setsockopt(struct sock *sk, u_int8_t pf, int optval,
char __user *opt, unsigned int len);
int compat_nf_getsockopt(struct sock *sk, u_int8_t pf, int optval,
char __user *opt, int *len);
#endif

/* Call this before modifying an existing packet: ensures it is
modifiable and linear to the point you care about (writable_len).

@@ -325,8 +336,7 @@ static inline int nf_hook_thresh(u_int8_t pf, unsigned int hook,
struct sk_buff *skb,
struct net_device *indev,
struct net_device *outdev,
int (*okfn)(struct sk_buff *), int thresh,
int cond)
int (*okfn)(struct sk_buff *), int thresh)
{
return okfn(skb);
}
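Note: after this hunk NF_HOOK, NF_HOOK_THRESH and NF_HOOK_COND are typed static inline functions rather than statement-expression macros, so callers get argument type checking while the call syntax stays the same. A minimal caller sketch follows; the output routine and the surrounding transmit path are assumed for illustration and are not part of the commit.

/* Illustrative sketch only: run the IPv4 POST_ROUTING hook chain and, if
 * the packet is accepted, continue with the okfn continuation. */
static int example_finish_output(struct sk_buff *skb)
{
        /* a real caller would hand the skb to the device layer here */
        return 0;
}

static int example_output(struct sk_buff *skb, struct net_device *dev)
{
        return NF_HOOK(NFPROTO_IPV4, NF_INET_POST_ROUTING, skb,
                       NULL, dev, example_finish_output);
}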
@@ -6,6 +6,7 @@ header-y += nfnetlink_queue.h
header-y += xt_CLASSIFY.h
header-y += xt_CONNMARK.h
header-y += xt_CONNSECMARK.h
header-y += xt_CT.h
header-y += xt_DSCP.h
header-y += xt_LED.h
header-y += xt_MARK.h

@@ -72,6 +72,28 @@ enum ip_conntrack_status {
/* Connection has fixed timeout. */
IPS_FIXED_TIMEOUT_BIT = 10,
IPS_FIXED_TIMEOUT = (1 << IPS_FIXED_TIMEOUT_BIT),

/* Conntrack is a template */
IPS_TEMPLATE_BIT = 11,
IPS_TEMPLATE = (1 << IPS_TEMPLATE_BIT),
};

/* Connection tracking event types */
enum ip_conntrack_events {
IPCT_NEW, /* new conntrack */
IPCT_RELATED, /* related conntrack */
IPCT_DESTROY, /* destroyed conntrack */
IPCT_REPLY, /* connection has seen two-way traffic */
IPCT_ASSURED, /* connection status has changed to assured */
IPCT_PROTOINFO, /* protocol information has changed */
IPCT_HELPER, /* new helper has been set */
IPCT_MARK, /* new mark has been set */
IPCT_NATSEQADJ, /* NAT is doing sequence adjustment */
IPCT_SECMARK, /* new security mark has been set */
};

enum ip_conntrack_expect_events {
IPEXP_NEW, /* new expectation */
};

#ifdef __KERNEL__

@@ -14,6 +14,7 @@ enum sip_expectation_classes {
SIP_EXPECT_SIGNALLING,
SIP_EXPECT_AUDIO,
SIP_EXPECT_VIDEO,
SIP_EXPECT_IMAGE,
__SIP_EXPECT_MAX
};
#define SIP_EXPECT_MAX (__SIP_EXPECT_MAX - 1)

@@ -34,10 +35,10 @@ struct sdp_media_type {
struct sip_handler {
const char *method;
unsigned int len;
int (*request)(struct sk_buff *skb,
int (*request)(struct sk_buff *skb, unsigned int dataoff,
const char **dptr, unsigned int *datalen,
unsigned int cseq);
int (*response)(struct sk_buff *skb,
int (*response)(struct sk_buff *skb, unsigned int dataoff,
const char **dptr, unsigned int *datalen,
unsigned int cseq, unsigned int code);
};

@@ -84,7 +85,8 @@ enum sip_header_types {
SIP_HDR_FROM,
SIP_HDR_TO,
SIP_HDR_CONTACT,
SIP_HDR_VIA,
SIP_HDR_VIA_UDP,
SIP_HDR_VIA_TCP,
SIP_HDR_EXPIRES,
SIP_HDR_CONTENT_LENGTH,
};

@@ -100,33 +102,40 @@ enum sdp_header_types {
};

extern unsigned int (*nf_nat_sip_hook)(struct sk_buff *skb,
unsigned int dataoff,
const char **dptr,
unsigned int *datalen);
extern void (*nf_nat_sip_seq_adjust_hook)(struct sk_buff *skb, s16 off);
extern unsigned int (*nf_nat_sip_expect_hook)(struct sk_buff *skb,
unsigned int dataoff,
const char **dptr,
unsigned int *datalen,
struct nf_conntrack_expect *exp,
unsigned int matchoff,
unsigned int matchlen);
extern unsigned int (*nf_nat_sdp_addr_hook)(struct sk_buff *skb,
const char **dptr,
unsigned int dataoff,
const char **dptr,
unsigned int *datalen,
unsigned int sdpoff,
enum sdp_header_types type,
enum sdp_header_types term,
const union nf_inet_addr *addr);
extern unsigned int (*nf_nat_sdp_port_hook)(struct sk_buff *skb,
unsigned int dataoff,
const char **dptr,
unsigned int *datalen,
unsigned int matchoff,
unsigned int matchlen,
u_int16_t port);
extern unsigned int (*nf_nat_sdp_session_hook)(struct sk_buff *skb,
const char **dptr,
unsigned int dataoff,
const char **dptr,
unsigned int *datalen,
unsigned int sdpoff,
const union nf_inet_addr *addr);
extern unsigned int (*nf_nat_sdp_media_hook)(struct sk_buff *skb,
unsigned int dataoff,
const char **dptr,
unsigned int *datalen,
struct nf_conntrack_expect *rtp_exp,

@@ -73,11 +73,11 @@ struct nfnetlink_subsystem {
extern int nfnetlink_subsys_register(const struct nfnetlink_subsystem *n);
extern int nfnetlink_subsys_unregister(const struct nfnetlink_subsystem *n);

extern int nfnetlink_has_listeners(unsigned int group);
extern int nfnetlink_send(struct sk_buff *skb, u32 pid, unsigned group,
extern int nfnetlink_has_listeners(struct net *net, unsigned int group);
extern int nfnetlink_send(struct sk_buff *skb, struct net *net, u32 pid, unsigned group,
int echo, gfp_t flags);
extern void nfnetlink_set_err(u32 pid, u32 group, int error);
extern int nfnetlink_unicast(struct sk_buff *skb, u_int32_t pid, int flags);
extern void nfnetlink_set_err(struct net *net, u32 pid, u32 group, int error);
extern int nfnetlink_unicast(struct sk_buff *skb, struct net *net, u_int32_t pid, int flags);

extern void nfnl_lock(void);
extern void nfnl_unlock(void);

@@ -40,6 +40,7 @@ enum ctattr_type {
CTA_NAT_SEQ_ADJ_ORIG,
CTA_NAT_SEQ_ADJ_REPLY,
CTA_SECMARK,
CTA_ZONE,
__CTA_MAX
};
#define CTA_MAX (__CTA_MAX - 1)

@@ -159,6 +160,7 @@ enum ctattr_expect {
CTA_EXPECT_TIMEOUT,
CTA_EXPECT_ID,
CTA_EXPECT_HELP_NAME,
CTA_EXPECT_ZONE,
__CTA_EXPECT_MAX
};
#define CTA_EXPECT_MAX (__CTA_EXPECT_MAX - 1)

@@ -93,8 +93,7 @@ struct _xt_align {
__u64 u64;
};

#define XT_ALIGN(s) (((s) + (__alignof__(struct _xt_align)-1)) \
& ~(__alignof__(struct _xt_align)-1))
#define XT_ALIGN(s) ALIGN((s), __alignof__(struct _xt_align))

/* Standard return verdict, or do jump. */
#define XT_STANDARD_TARGET ""
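Note: the open-coded round-up in XT_ALIGN() is replaced with the generic ALIGN() helper; for a power-of-two alignment the two forms compute the same value. The sketch below is illustrative only (it is not part of the commit) and just spells out that equivalence.

/* Sketch: both expressions round s up to a multiple of
 * __alignof__(struct _xt_align); e.g. with an 8-byte alignof,
 * 13 -> 16 and 16 -> 16. */
static inline unsigned int xt_align_example(unsigned int s)
{
        unsigned int old = (s + (__alignof__(struct _xt_align) - 1)) &
                           ~(__alignof__(struct _xt_align) - 1);
        unsigned int cur = XT_ALIGN(s);   /* ALIGN(s, __alignof__(struct _xt_align)) */

        WARN_ON(old != cur);              /* the two formulations agree */
        return cur;
}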
@@ -205,6 +204,7 @@ struct xt_match_param {
* @hook_mask: via which hooks the new rule is reachable
*/
struct xt_mtchk_param {
struct net *net;
const char *table;
const void *entryinfo;
const struct xt_match *match;

@@ -215,6 +215,7 @@ struct xt_mtchk_param {

/* Match destructor parameters */
struct xt_mtdtor_param {
struct net *net;
const struct xt_match *match;
void *matchinfo;
u_int8_t family;

@@ -247,6 +248,7 @@ struct xt_target_param {
* Other fields see above.
*/
struct xt_tgchk_param {
struct net *net;
const char *table;
const void *entryinfo;
const struct xt_target *target;

@@ -257,6 +259,7 @@ struct xt_tgchk_param {

/* Target destructor parameters */
struct xt_tgdtor_param {
struct net *net;
const struct xt_target *target;
void *targinfo;
u_int8_t family;

@@ -281,11 +284,11 @@ struct xt_match {

/* Called when entry of this type deleted. */
void (*destroy)(const struct xt_mtdtor_param *);

#ifdef CONFIG_COMPAT
/* Called when userspace align differs from kernel space one */
void (*compat_from_user)(void *dst, void *src);
int (*compat_to_user)(void __user *dst, void *src);

void (*compat_from_user)(void *dst, const void *src);
int (*compat_to_user)(void __user *dst, const void *src);
#endif
/* Set this to THIS_MODULE if you are a module, otherwise NULL */
struct module *me;

@@ -294,7 +297,9 @@ struct xt_match {

const char *table;
unsigned int matchsize;
#ifdef CONFIG_COMPAT
unsigned int compatsize;
#endif
unsigned int hooks;
unsigned short proto;

@@ -321,17 +326,19 @@ struct xt_target {

/* Called when entry of this type deleted. */
void (*destroy)(const struct xt_tgdtor_param *);

#ifdef CONFIG_COMPAT
/* Called when userspace align differs from kernel space one */
void (*compat_from_user)(void *dst, void *src);
int (*compat_to_user)(void __user *dst, void *src);

void (*compat_from_user)(void *dst, const void *src);
int (*compat_to_user)(void __user *dst, const void *src);
#endif
/* Set this to THIS_MODULE if you are a module, otherwise NULL */
struct module *me;

const char *table;
unsigned int targetsize;
#ifdef CONFIG_COMPAT
unsigned int compatsize;
#endif
unsigned int hooks;
unsigned short proto;

@@ -353,6 +360,7 @@ struct xt_table {
struct module *me;

u_int8_t af; /* address/protocol family */
int priority; /* hook order */

/* A unique name... */
const char name[XT_TABLE_MAXNAMELEN];

@@ -514,6 +522,9 @@ static inline unsigned long ifname_compare_aligned(const char *_a,
return ret;
}

extern struct nf_hook_ops *xt_hook_link(const struct xt_table *, nf_hookfn *);
extern void xt_hook_unlink(const struct xt_table *, struct nf_hook_ops *);

#ifdef CONFIG_COMPAT
#include <net/compat.h>

@@ -554,11 +565,7 @@ struct compat_xt_entry_target {
* current task alignment */

struct compat_xt_counters {
#if defined(CONFIG_X86_64) || defined(CONFIG_IA64)
u_int32_t cnt[4];
#else
u_int64_t cnt[2];
#endif
compat_u64 pcnt, bcnt; /* Packet and byte counters */
};

struct compat_xt_counters_info {

@@ -567,26 +574,32 @@ struct compat_xt_counters_info {
struct compat_xt_counters counters[0];
};

#define COMPAT_XT_ALIGN(s) (((s) + (__alignof__(struct compat_xt_counters)-1)) \
& ~(__alignof__(struct compat_xt_counters)-1))
struct _compat_xt_align {
__u8 u8;
__u16 u16;
__u32 u32;
compat_u64 u64;
};

#define COMPAT_XT_ALIGN(s) ALIGN((s), __alignof__(struct _compat_xt_align))

extern void xt_compat_lock(u_int8_t af);
extern void xt_compat_unlock(u_int8_t af);

extern int xt_compat_add_offset(u_int8_t af, unsigned int offset, short delta);
extern void xt_compat_flush_offsets(u_int8_t af);
extern short xt_compat_calc_jump(u_int8_t af, unsigned int offset);
extern int xt_compat_calc_jump(u_int8_t af, unsigned int offset);

extern int xt_compat_match_offset(const struct xt_match *match);
extern int xt_compat_match_from_user(struct xt_entry_match *m,
void **dstptr, unsigned int *size);
extern int xt_compat_match_to_user(struct xt_entry_match *m,
extern int xt_compat_match_to_user(const struct xt_entry_match *m,
void __user **dstptr, unsigned int *size);

extern int xt_compat_target_offset(const struct xt_target *target);
extern void xt_compat_target_from_user(struct xt_entry_target *t,
void **dstptr, unsigned int *size);
extern int xt_compat_target_to_user(struct xt_entry_target *t,
extern int xt_compat_target_to_user(const struct xt_entry_target *t,
void __user **dstptr, unsigned int *size);

#endif /* CONFIG_COMPAT */

include/linux/netfilter/xt_CT.h (new file, 17 lines)
@@ -0,0 +1,17 @@
#ifndef _XT_CT_H
#define _XT_CT_H

#define XT_CT_NOTRACK 0x1

struct xt_ct_target_info {
u_int16_t flags;
u_int16_t zone;
u_int32_t ct_events;
u_int32_t exp_events;
char helper[16];

/* Used internally by the kernel */
struct nf_conn *ct __attribute__((aligned(8)));
};

#endif /* _XT_CT_H */
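The new xt_CT.h header defines the user-visible info structure for the CT target: a zone, optional event masks, and a helper name that together describe the conntrack template attached to matching packets. A hedged sketch of how a ruleset generator might fill it in; the surrounding rule-building code and the function name are assumptions, not part of the commit.

/* Hypothetical illustration: populate the CT target info for zone 1 with
 * the "ftp" helper and default event masks. */
static void example_fill_ct_info(struct xt_ct_target_info *info)
{
        memset(info, 0, sizeof(*info));
        info->zone = 1;                 /* conntrack zone for the template */
        info->ct_events = 0;            /* 0 = default conntrack event mask */
        info->exp_events = 0;           /* 0 = default expectation event mask */
        strncpy(info->helper, "ftp", sizeof(info->helper) - 1);
        /* set info->flags = XT_CT_NOTRACK instead to skip tracking entirely */
}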
@@ -258,6 +258,7 @@ struct arpt_error {
.target.errorname = "ERROR", \
}

extern void *arpt_alloc_initial_table(const struct xt_table *);
extern struct xt_table *arpt_register_table(struct net *net,
const struct xt_table *table,
const struct arpt_replace *repl);

@@ -289,7 +289,7 @@ struct ebt_table {
~(__alignof__(struct ebt_replace)-1))
extern struct ebt_table *ebt_register_table(struct net *net,
const struct ebt_table *table);
extern void ebt_unregister_table(struct ebt_table *table);
extern void ebt_unregister_table(struct net *net, struct ebt_table *table);
extern unsigned int ebt_do_table(unsigned int hook, struct sk_buff *skb,
const struct net_device *in, const struct net_device *out,
struct ebt_table *table);

@@ -242,7 +242,7 @@ extern void ipt_init(void) __init;
extern struct xt_table *ipt_register_table(struct net *net,
const struct xt_table *table,
const struct ipt_replace *repl);
extern void ipt_unregister_table(struct xt_table *table);
extern void ipt_unregister_table(struct net *net, struct xt_table *table);

/* Standard entry. */
struct ipt_standard {

@@ -282,6 +282,7 @@ struct ipt_error {
.target.errorname = "ERROR", \
}

extern void *ipt_alloc_initial_table(const struct xt_table *);
extern unsigned int ipt_do_table(struct sk_buff *skb,
unsigned int hook,
const struct net_device *in,

@@ -297,10 +297,11 @@ ip6t_get_target(struct ip6t_entry *e)
#include <linux/init.h>
extern void ip6t_init(void) __init;

extern void *ip6t_alloc_initial_table(const struct xt_table *);
extern struct xt_table *ip6t_register_table(struct net *net,
const struct xt_table *table,
const struct ip6t_replace *repl);
extern void ip6t_unregister_table(struct xt_table *table);
extern void ip6t_unregister_table(struct net *net, struct xt_table *table);
extern unsigned int ip6t_do_table(struct sk_buff *skb,
unsigned int hook,
const struct net_device *in,

@@ -352,8 +352,11 @@ enum ip_defrag_users {
IP_DEFRAG_LOCAL_DELIVER,
IP_DEFRAG_CALL_RA_CHAIN,
IP_DEFRAG_CONNTRACK_IN,
__IP_DEFRAG_CONNTRACK_IN_END = IP_DEFRAG_CONNTRACK_IN + USHORT_MAX,
IP_DEFRAG_CONNTRACK_OUT,
__IP_DEFRAG_CONNTRACK_OUT_END = IP_DEFRAG_CONNTRACK_OUT + USHORT_MAX,
IP_DEFRAG_CONNTRACK_BRIDGE_IN,
__IP_DEFRAG_CONNTRACK_BRIDGE_IN = IP_DEFRAG_CONNTRACK_BRIDGE_IN + USHORT_MAX,
IP_DEFRAG_VS_IN,
IP_DEFRAG_VS_OUT,
IP_DEFRAG_VS_FWD

@@ -26,6 +26,11 @@
#include <linux/ipv6.h> /* for struct ipv6hdr */
#include <net/ipv6.h> /* for ipv6_addr_copy */

/* Connections' size value needed by ip_vs_ctl.c */
extern int ip_vs_conn_tab_size;

struct ip_vs_iphdr {
int len;
__u8 protocol;

@@ -592,17 +597,6 @@ extern void ip_vs_init_hash_table(struct list_head *table, int rows);
* (from ip_vs_conn.c)
*/

/*
* IPVS connection entry hash table
*/
#ifndef CONFIG_IP_VS_TAB_BITS
#define CONFIG_IP_VS_TAB_BITS 12
#endif

#define IP_VS_CONN_TAB_BITS CONFIG_IP_VS_TAB_BITS
#define IP_VS_CONN_TAB_SIZE (1 << IP_VS_CONN_TAB_BITS)
#define IP_VS_CONN_TAB_MASK (IP_VS_CONN_TAB_SIZE - 1)

enum {
IP_VS_DIR_INPUT = 0,
IP_VS_DIR_OUTPUT,

@@ -246,6 +246,8 @@ extern int ipv6_opt_accepted(struct sock *sk, struct sk_buff *skb);
int ip6_frag_nqueues(struct net *net);
int ip6_frag_mem(struct net *net);

#define IPV6_FRAG_HIGH_THRESH 262144 /* == 256*1024 */
#define IPV6_FRAG_LOW_THRESH 196608 /* == 192*1024 */
#define IPV6_FRAG_TIMEOUT (60*HZ) /* 60 seconds */

extern int __ipv6_addr_type(const struct in6_addr *addr);

@@ -353,8 +355,11 @@ struct inet_frag_queue;
enum ip6_defrag_users {
IP6_DEFRAG_LOCAL_DELIVER,
IP6_DEFRAG_CONNTRACK_IN,
__IP6_DEFRAG_CONNTRACK_IN = IP6_DEFRAG_CONNTRACK_IN + USHORT_MAX,
IP6_DEFRAG_CONNTRACK_OUT,
__IP6_DEFRAG_CONNTRACK_OUT = IP6_DEFRAG_CONNTRACK_OUT + USHORT_MAX,
IP6_DEFRAG_CONNTRACK_BRIDGE_IN,
__IP6_DEFRAG_CONNTRACK_BRIDGE_IN = IP6_DEFRAG_CONNTRACK_BRIDGE_IN + USHORT_MAX,
};

struct ip6_create_arg {

@@ -81,6 +81,8 @@ struct net {
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
struct netns_ct ct;
#endif
struct sock *nfnl;
struct sock *nfnl_stash;
#endif
#ifdef CONFIG_XFRM
struct netns_xfrm xfrm;

@@ -70,7 +70,7 @@ union nf_conntrack_help {
struct nf_conntrack_helper;

/* Must be kept in sync with the classes defined by helpers */
#define NF_CT_MAX_EXPECT_CLASSES 3
#define NF_CT_MAX_EXPECT_CLASSES 4

/* nf_conn feature for connections that have a helper */
struct nf_conn_help {

@@ -198,7 +198,8 @@ extern void *nf_ct_alloc_hashtable(unsigned int *sizep, int *vmalloced, int null
extern void nf_ct_free_hashtable(void *hash, int vmalloced, unsigned int size);

extern struct nf_conntrack_tuple_hash *
__nf_conntrack_find(struct net *net, const struct nf_conntrack_tuple *tuple);
__nf_conntrack_find(struct net *net, u16 zone,
const struct nf_conntrack_tuple *tuple);

extern void nf_conntrack_hash_insert(struct nf_conn *ct);
extern void nf_ct_delete_from_lists(struct nf_conn *ct);

@@ -267,11 +268,16 @@ extern void
nf_ct_iterate_cleanup(struct net *net, int (*iter)(struct nf_conn *i, void *data), void *data);
extern void nf_conntrack_free(struct nf_conn *ct);
extern struct nf_conn *
nf_conntrack_alloc(struct net *net,
nf_conntrack_alloc(struct net *net, u16 zone,
const struct nf_conntrack_tuple *orig,
const struct nf_conntrack_tuple *repl,
gfp_t gfp);

static inline int nf_ct_is_template(const struct nf_conn *ct)
{
return test_bit(IPS_TEMPLATE_BIT, &ct->status);
}

/* It's confirmed if it is, or has been in the hash table. */
static inline int nf_ct_is_confirmed(struct nf_conn *ct)
{

@@ -49,7 +49,8 @@ nf_ct_invert_tuple(struct nf_conntrack_tuple *inverse,

/* Find a connection corresponding to a tuple. */
extern struct nf_conntrack_tuple_hash *
nf_conntrack_find_get(struct net *net, const struct nf_conntrack_tuple *tuple);
nf_conntrack_find_get(struct net *net, u16 zone,
const struct nf_conntrack_tuple *tuple);

extern int __nf_conntrack_confirm(struct sk_buff *skb);

@@ -12,27 +12,12 @@
#include <linux/netfilter/nf_conntrack_tuple_common.h>
#include <net/netfilter/nf_conntrack_extend.h>

/* Connection tracking event types */
enum ip_conntrack_events {
IPCT_NEW = 0, /* new conntrack */
IPCT_RELATED = 1, /* related conntrack */
IPCT_DESTROY = 2, /* destroyed conntrack */
IPCT_STATUS = 3, /* status has changed */
IPCT_PROTOINFO = 4, /* protocol information has changed */
IPCT_HELPER = 5, /* new helper has been set */
IPCT_MARK = 6, /* new mark has been set */
IPCT_NATSEQADJ = 7, /* NAT is doing sequence adjustment */
IPCT_SECMARK = 8, /* new security mark has been set */
};

enum ip_conntrack_expect_events {
IPEXP_NEW = 0, /* new expectation */
};

struct nf_conntrack_ecache {
unsigned long cache; /* bitops want long */
unsigned long missed; /* missed events */
u32 pid; /* netlink pid of destroyer */
unsigned long cache; /* bitops want long */
unsigned long missed; /* missed events */
u16 ctmask; /* bitmask of ct events to be delivered */
u16 expmask; /* bitmask of expect events to be delivered */
u32 pid; /* netlink pid of destroyer */
};

static inline struct nf_conntrack_ecache *

@@ -42,14 +27,24 @@ nf_ct_ecache_find(const struct nf_conn *ct)
}

static inline struct nf_conntrack_ecache *
nf_ct_ecache_ext_add(struct nf_conn *ct, gfp_t gfp)
nf_ct_ecache_ext_add(struct nf_conn *ct, u16 ctmask, u16 expmask, gfp_t gfp)
{
struct net *net = nf_ct_net(ct);
struct nf_conntrack_ecache *e;

if (!net->ct.sysctl_events)
if (!ctmask && !expmask && net->ct.sysctl_events) {
ctmask = ~0;
expmask = ~0;
}
if (!ctmask && !expmask)
return NULL;

return nf_ct_ext_add(ct, NF_CT_EXT_ECACHE, gfp);
e = nf_ct_ext_add(ct, NF_CT_EXT_ECACHE, gfp);
if (e) {
e->ctmask = ctmask;
e->expmask = expmask;
}
return e;
};

#ifdef CONFIG_NF_CONNTRACK_EVENTS

@@ -82,6 +77,9 @@ nf_conntrack_event_cache(enum ip_conntrack_events event, struct nf_conn *ct)
if (e == NULL)
return;

if (!(e->ctmask & (1 << event)))
return;

set_bit(event, &e->cache);
}

@@ -92,7 +90,6 @@ nf_conntrack_eventmask_report(unsigned int eventmask,
int report)
{
int ret = 0;
struct net *net = nf_ct_net(ct);
struct nf_ct_event_notifier *notify;
struct nf_conntrack_ecache *e;

@@ -101,9 +98,6 @@ nf_conntrack_eventmask_report(unsigned int eventmask,
if (notify == NULL)
goto out_unlock;

if (!net->ct.sysctl_events)
goto out_unlock;

e = nf_ct_ecache_find(ct);
if (e == NULL)
goto out_unlock;

@@ -117,6 +111,9 @@ nf_conntrack_eventmask_report(unsigned int eventmask,
/* This is a resent of a destroy event? If so, skip missed */
unsigned long missed = e->pid ? 0 : e->missed;

if (!((eventmask | missed) & e->ctmask))
goto out_unlock;

ret = notify->fcn(eventmask | missed, &item);
if (unlikely(ret < 0 || missed)) {
spin_lock_bh(&ct->lock);

@@ -172,18 +169,19 @@ nf_ct_expect_event_report(enum ip_conntrack_expect_events event,
u32 pid,
int report)
{
struct net *net = nf_ct_exp_net(exp);
struct nf_exp_event_notifier *notify;
struct nf_conntrack_ecache *e;

rcu_read_lock();
notify = rcu_dereference(nf_expect_event_cb);
if (notify == NULL)
goto out_unlock;

if (!net->ct.sysctl_events)
e = nf_ct_ecache_find(exp->master);
if (e == NULL)
goto out_unlock;

{
if (e->expmask & (1 << event)) {
struct nf_exp_event item = {
.exp = exp,
.pid = pid,
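Note: nf_ct_ecache_ext_add() now carries per-conntrack event masks; passing 0/0 falls back to the global sysctl default, and events missing from ctmask are filtered out in nf_conntrack_event_cache(). A brief illustrative sketch of a caller that restricts delivery to new/destroy events; the caller itself is assumed and not part of this commit.

/* Sketch: attach an event-cache extension that only delivers "new" and
 * "destroy" conntrack events for this entry. */
static int example_restrict_events(struct nf_conn *ct)
{
        u16 ctmask = (1 << IPCT_NEW) | (1 << IPCT_DESTROY);
        struct nf_conntrack_ecache *e;

        e = nf_ct_ecache_ext_add(ct, ctmask, 0, GFP_ATOMIC);
        if (e == NULL)
                return -ENOMEM;   /* extension allocation failed */
        return 0;
}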
@@ -56,16 +56,13 @@ struct nf_conntrack_expect {

static inline struct net *nf_ct_exp_net(struct nf_conntrack_expect *exp)
{
#ifdef CONFIG_NET_NS
return exp->master->ct_net; /* by definition */
#else
return &init_net;
#endif
return nf_ct_net(exp->master);
}

struct nf_conntrack_expect_policy {
unsigned int max_expected;
unsigned int timeout;
const char *name;
};

#define NF_CT_EXPECT_CLASS_DEFAULT 0

@@ -77,13 +74,16 @@ int nf_conntrack_expect_init(struct net *net);
void nf_conntrack_expect_fini(struct net *net);

struct nf_conntrack_expect *
__nf_ct_expect_find(struct net *net, const struct nf_conntrack_tuple *tuple);
__nf_ct_expect_find(struct net *net, u16 zone,
const struct nf_conntrack_tuple *tuple);

struct nf_conntrack_expect *
nf_ct_expect_find_get(struct net *net, const struct nf_conntrack_tuple *tuple);
nf_ct_expect_find_get(struct net *net, u16 zone,
const struct nf_conntrack_tuple *tuple);

struct nf_conntrack_expect *
nf_ct_find_expectation(struct net *net, const struct nf_conntrack_tuple *tuple);
nf_ct_find_expectation(struct net *net, u16 zone,
const struct nf_conntrack_tuple *tuple);

void nf_ct_unlink_expect(struct nf_conntrack_expect *exp);
void nf_ct_remove_expectations(struct nf_conn *ct);

@@ -8,6 +8,7 @@ enum nf_ct_ext_id {
NF_CT_EXT_NAT,
NF_CT_EXT_ACCT,
NF_CT_EXT_ECACHE,
NF_CT_EXT_ZONE,
NF_CT_EXT_NUM,
};

@@ -15,6 +16,7 @@ enum nf_ct_ext_id {
#define NF_CT_EXT_NAT_TYPE struct nf_conn_nat
#define NF_CT_EXT_ACCT_TYPE struct nf_conn_counter
#define NF_CT_EXT_ECACHE_TYPE struct nf_conntrack_ecache
#define NF_CT_EXT_ZONE_TYPE struct nf_conntrack_zone

/* Extensions: optional stuff which isn't permanently in struct. */
struct nf_ct_ext {

@@ -40,14 +40,18 @@ struct nf_conntrack_helper {
};

extern struct nf_conntrack_helper *
__nf_conntrack_helper_find_byname(const char *name);
__nf_conntrack_helper_find(const char *name, u16 l3num, u8 protonum);

extern struct nf_conntrack_helper *
nf_conntrack_helper_try_module_get(const char *name, u16 l3num, u8 protonum);

extern int nf_conntrack_helper_register(struct nf_conntrack_helper *);
extern void nf_conntrack_helper_unregister(struct nf_conntrack_helper *);

extern struct nf_conn_help *nf_ct_helper_ext_add(struct nf_conn *ct, gfp_t gfp);

extern int __nf_ct_try_assign_helper(struct nf_conn *ct, gfp_t flags);
extern int __nf_ct_try_assign_helper(struct nf_conn *ct, struct nf_conn *tmpl,
gfp_t flags);

extern void nf_ct_helper_destroy(struct nf_conn *ct);

@@ -49,8 +49,8 @@ struct nf_conntrack_l4proto {
/* Called when a conntrack entry is destroyed */
void (*destroy)(struct nf_conn *ct);

int (*error)(struct net *net, struct sk_buff *skb, unsigned int dataoff,
enum ip_conntrack_info *ctinfo,
int (*error)(struct net *net, struct nf_conn *tmpl, struct sk_buff *skb,
unsigned int dataoff, enum ip_conntrack_info *ctinfo,
u_int8_t pf, unsigned int hooknum);

/* Print out the per-protocol part of the tuple. Return like seq_* */

include/net/netfilter/nf_conntrack_zones.h (new file, 23 lines)
@@ -0,0 +1,23 @@
#ifndef _NF_CONNTRACK_ZONES_H
#define _NF_CONNTRACK_ZONES_H

#include <net/netfilter/nf_conntrack_extend.h>

#define NF_CT_DEFAULT_ZONE 0

struct nf_conntrack_zone {
u16 id;
};

static inline u16 nf_ct_zone(const struct nf_conn *ct)
{
#ifdef CONFIG_NF_CONNTRACK_ZONES
struct nf_conntrack_zone *nf_ct_zone;
nf_ct_zone = nf_ct_ext_find(ct, NF_CT_EXT_ZONE);
if (nf_ct_zone)
return nf_ct_zone->id;
#endif
return NF_CT_DEFAULT_ZONE;
}

#endif /* _NF_CONNTRACK_ZONES_H */
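Note: the zone id lives in a conntrack extension; nf_ct_zone() falls back to NF_CT_DEFAULT_ZONE when the extension is absent or zones are compiled out, and the lookup helpers above now take the zone explicitly. A minimal illustrative sketch follows; the caller and its tuple-building code are assumptions, not part of the commit.

/* Sketch: look up a conntrack entry in the same zone as an existing
 * template/master connection. */
static struct nf_conntrack_tuple_hash *
example_lookup_in_zone(struct net *net, const struct nf_conn *tmpl,
                       const struct nf_conntrack_tuple *tuple)
{
        u16 zone = nf_ct_zone(tmpl);   /* NF_CT_DEFAULT_ZONE if no extension */

        return nf_conntrack_find_get(net, zone, tuple);
}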
@@ -7,13 +7,27 @@
struct sk_buff;

/* These return true or false. */
extern int nf_nat_mangle_tcp_packet(struct sk_buff *skb,
struct nf_conn *ct,
enum ip_conntrack_info ctinfo,
unsigned int match_offset,
unsigned int match_len,
const char *rep_buffer,
unsigned int rep_len);
extern int __nf_nat_mangle_tcp_packet(struct sk_buff *skb,
struct nf_conn *ct,
enum ip_conntrack_info ctinfo,
unsigned int match_offset,
unsigned int match_len,
const char *rep_buffer,
unsigned int rep_len, bool adjust);

static inline int nf_nat_mangle_tcp_packet(struct sk_buff *skb,
struct nf_conn *ct,
enum ip_conntrack_info ctinfo,
unsigned int match_offset,
unsigned int match_len,
const char *rep_buffer,
unsigned int rep_len)
{
return __nf_nat_mangle_tcp_packet(skb, ct, ctinfo,
match_offset, match_len,
rep_buffer, rep_len, true);
}

extern int nf_nat_mangle_udp_packet(struct sk_buff *skb,
struct nf_conn *ct,
enum ip_conntrack_info ctinfo,

@@ -21,6 +35,10 @@ extern int nf_nat_mangle_udp_packet(struct sk_buff *skb,
unsigned int match_len,
const char *rep_buffer,
unsigned int rep_len);

extern void nf_nat_set_seq_adjust(struct nf_conn *ct,
enum ip_conntrack_info ctinfo,
__be32 seq, s16 off);
extern int nf_nat_seq_adjust(struct sk_buff *skb,
struct nf_conn *ct,
enum ip_conntrack_info ctinfo);
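Note: nf_nat_mangle_tcp_packet() is now an inline wrapper around __nf_nat_mangle_tcp_packet() with adjust=true; a helper that wants to defer TCP sequence adjustment can pass adjust=false and record the offset later with nf_nat_set_seq_adjust(). The sketch below is an assumed illustration of that pattern (names and the surrounding helper logic are not from this commit); it presumes the skb's transport header already points at the TCP header.

/* Sketch: rewrite part of a TCP payload without immediate sequence
 * adjustment, then record the size delta for a later adjustment pass. */
static int example_mangle_deferred(struct sk_buff *skb, struct nf_conn *ct,
                                   enum ip_conntrack_info ctinfo,
                                   unsigned int matchoff, unsigned int matchlen,
                                   const char *repl, unsigned int repl_len)
{
        if (!__nf_nat_mangle_tcp_packet(skb, ct, ctinfo, matchoff, matchlen,
                                        repl, repl_len, false))
                return 0;               /* these return true or false */

        nf_nat_set_seq_adjust(ct, ctinfo, tcp_hdr(skb)->seq,
                              (int)repl_len - (int)matchlen);
        return 1;
}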
@ -37,7 +37,9 @@ struct netns_ipv4 {
|
||||
struct xt_table *iptable_mangle;
|
||||
struct xt_table *iptable_raw;
|
||||
struct xt_table *arptable_filter;
|
||||
#ifdef CONFIG_SECURITY
|
||||
struct xt_table *iptable_security;
|
||||
#endif
|
||||
struct xt_table *nat_table;
|
||||
struct hlist_head *nat_bysource;
|
||||
unsigned int nat_htable_size;
|
||||
|
@ -36,7 +36,9 @@ struct netns_ipv6 {
|
||||
struct xt_table *ip6table_filter;
|
||||
struct xt_table *ip6table_mangle;
|
||||
struct xt_table *ip6table_raw;
|
||||
#ifdef CONFIG_SECURITY
|
||||
struct xt_table *ip6table_security;
|
||||
#endif
|
||||
#endif
|
||||
struct rt6_info *ip6_null_entry;
|
||||
struct rt6_statistics *rt6_stats;
|
||||
|
@ -52,7 +52,7 @@ static struct xt_match ebt_802_3_mt_reg __read_mostly = {
|
||||
.family = NFPROTO_BRIDGE,
|
||||
.match = ebt_802_3_mt,
|
||||
.checkentry = ebt_802_3_mt_check,
|
||||
.matchsize = XT_ALIGN(sizeof(struct ebt_802_3_info)),
|
||||
.matchsize = sizeof(struct ebt_802_3_info),
|
||||
.me = THIS_MODULE,
|
||||
};
|
||||
|
||||
|
@ -120,7 +120,7 @@ static struct xt_match ebt_arp_mt_reg __read_mostly = {
|
||||
.family = NFPROTO_BRIDGE,
|
||||
.match = ebt_arp_mt,
|
||||
.checkentry = ebt_arp_mt_check,
|
||||
.matchsize = XT_ALIGN(sizeof(struct ebt_arp_info)),
|
||||
.matchsize = sizeof(struct ebt_arp_info),
|
||||
.me = THIS_MODULE,
|
||||
};
|
||||
|
||||
|
@ -78,7 +78,7 @@ static struct xt_target ebt_arpreply_tg_reg __read_mostly = {
|
||||
.hooks = (1 << NF_BR_NUMHOOKS) | (1 << NF_BR_PRE_ROUTING),
|
||||
.target = ebt_arpreply_tg,
|
||||
.checkentry = ebt_arpreply_tg_check,
|
||||
.targetsize = XT_ALIGN(sizeof(struct ebt_arpreply_info)),
|
||||
.targetsize = sizeof(struct ebt_arpreply_info),
|
||||
.me = THIS_MODULE,
|
||||
};
|
||||
|
||||
|
@ -54,7 +54,7 @@ static struct xt_target ebt_dnat_tg_reg __read_mostly = {
|
||||
(1 << NF_BR_LOCAL_OUT) | (1 << NF_BR_BROUTING),
|
||||
.target = ebt_dnat_tg,
|
||||
.checkentry = ebt_dnat_tg_check,
|
||||
.targetsize = XT_ALIGN(sizeof(struct ebt_nat_info)),
|
||||
.targetsize = sizeof(struct ebt_nat_info),
|
||||
.me = THIS_MODULE,
|
||||
};
|
||||
|
||||
|
@ -110,7 +110,7 @@ static struct xt_match ebt_ip_mt_reg __read_mostly = {
|
||||
.family = NFPROTO_BRIDGE,
|
||||
.match = ebt_ip_mt,
|
||||
.checkentry = ebt_ip_mt_check,
|
||||
.matchsize = XT_ALIGN(sizeof(struct ebt_ip_info)),
|
||||
.matchsize = sizeof(struct ebt_ip_info),
|
||||
.me = THIS_MODULE,
|
||||
};
|
||||
|
||||
|
@ -122,7 +122,7 @@ static struct xt_match ebt_ip6_mt_reg __read_mostly = {
|
||||
.family = NFPROTO_BRIDGE,
|
||||
.match = ebt_ip6_mt,
|
||||
.checkentry = ebt_ip6_mt_check,
|
||||
.matchsize = XT_ALIGN(sizeof(struct ebt_ip6_info)),
|
||||
.matchsize = sizeof(struct ebt_ip6_info),
|
||||
.me = THIS_MODULE,
|
||||
};
|
||||
|
||||
|
@ -90,7 +90,7 @@ static struct xt_match ebt_limit_mt_reg __read_mostly = {
|
||||
.family = NFPROTO_BRIDGE,
|
||||
.match = ebt_limit_mt,
|
||||
.checkentry = ebt_limit_mt_check,
|
||||
.matchsize = XT_ALIGN(sizeof(struct ebt_limit_info)),
|
||||
.matchsize = sizeof(struct ebt_limit_info),
|
||||
.me = THIS_MODULE,
|
||||
};
|
||||
|
||||
|
@ -195,7 +195,7 @@ static struct xt_target ebt_log_tg_reg __read_mostly = {
|
||||
.family = NFPROTO_BRIDGE,
|
||||
.target = ebt_log_tg,
|
||||
.checkentry = ebt_log_tg_check,
|
||||
.targetsize = XT_ALIGN(sizeof(struct ebt_log_info)),
|
||||
.targetsize = sizeof(struct ebt_log_info),
|
||||
.me = THIS_MODULE,
|
||||
};
|
||||
|
||||
|
@ -59,7 +59,7 @@ static struct xt_target ebt_mark_tg_reg __read_mostly = {
|
||||
.family = NFPROTO_BRIDGE,
|
||||
.target = ebt_mark_tg,
|
||||
.checkentry = ebt_mark_tg_check,
|
||||
.targetsize = XT_ALIGN(sizeof(struct ebt_mark_t_info)),
|
||||
.targetsize = sizeof(struct ebt_mark_t_info),
|
||||
.me = THIS_MODULE,
|
||||
};
|
||||
|
||||
|
@ -41,7 +41,7 @@ static struct xt_match ebt_mark_mt_reg __read_mostly = {
|
||||
.family = NFPROTO_BRIDGE,
|
||||
.match = ebt_mark_mt,
|
||||
.checkentry = ebt_mark_mt_check,
|
||||
.matchsize = XT_ALIGN(sizeof(struct ebt_mark_m_info)),
|
||||
.matchsize = sizeof(struct ebt_mark_m_info),
|
||||
.me = THIS_MODULE,
|
||||
};
|
||||
|
||||
|
@ -51,7 +51,7 @@ static struct xt_target ebt_nflog_tg_reg __read_mostly = {
|
||||
.family = NFPROTO_BRIDGE,
|
||||
.target = ebt_nflog_tg,
|
||||
.checkentry = ebt_nflog_tg_check,
|
||||
.targetsize = XT_ALIGN(sizeof(struct ebt_nflog_info)),
|
||||
.targetsize = sizeof(struct ebt_nflog_info),
|
||||
.me = THIS_MODULE,
|
||||
};
|
||||
|
||||
|
@ -36,7 +36,7 @@ static struct xt_match ebt_pkttype_mt_reg __read_mostly = {
|
||||
.family = NFPROTO_BRIDGE,
|
||||
.match = ebt_pkttype_mt,
|
||||
.checkentry = ebt_pkttype_mt_check,
|
||||
.matchsize = XT_ALIGN(sizeof(struct ebt_pkttype_info)),
|
||||
.matchsize = sizeof(struct ebt_pkttype_info),
|
||||
.me = THIS_MODULE,
|
||||
};
|
||||
|
||||
|
@ -59,7 +59,7 @@ static struct xt_target ebt_redirect_tg_reg __read_mostly = {
|
||||
(1 << NF_BR_BROUTING),
|
||||
.target = ebt_redirect_tg,
|
||||
.checkentry = ebt_redirect_tg_check,
|
||||
.targetsize = XT_ALIGN(sizeof(struct ebt_redirect_info)),
|
||||
.targetsize = sizeof(struct ebt_redirect_info),
|
||||
.me = THIS_MODULE,
|
||||
};
|
||||
|
||||
|
@ -67,7 +67,7 @@ static struct xt_target ebt_snat_tg_reg __read_mostly = {
|
||||
.hooks = (1 << NF_BR_NUMHOOKS) | (1 << NF_BR_POST_ROUTING),
|
||||
.target = ebt_snat_tg,
|
||||
.checkentry = ebt_snat_tg_check,
|
||||
.targetsize = XT_ALIGN(sizeof(struct ebt_nat_info)),
|
||||
.targetsize = sizeof(struct ebt_nat_info),
|
||||
.me = THIS_MODULE,
|
||||
};
|
||||
|
||||
|
@ -177,7 +177,7 @@ static struct xt_match ebt_stp_mt_reg __read_mostly = {
|
||||
.family = NFPROTO_BRIDGE,
|
||||
.match = ebt_stp_mt,
|
||||
.checkentry = ebt_stp_mt_check,
|
||||
.matchsize = XT_ALIGN(sizeof(struct ebt_stp_info)),
|
||||
.matchsize = sizeof(struct ebt_stp_info),
|
||||
.me = THIS_MODULE,
|
||||
};
|
||||
|
||||
|
@ -275,7 +275,7 @@ static struct xt_target ebt_ulog_tg_reg __read_mostly = {
|
||||
.family = NFPROTO_BRIDGE,
|
||||
.target = ebt_ulog_tg,
|
||||
.checkentry = ebt_ulog_tg_check,
|
||||
.targetsize = XT_ALIGN(sizeof(struct ebt_ulog_info)),
|
||||
.targetsize = sizeof(struct ebt_ulog_info),
|
||||
.me = THIS_MODULE,
|
||||
};
|
||||
|
||||
|
@ -163,7 +163,7 @@ static struct xt_match ebt_vlan_mt_reg __read_mostly = {
|
||||
.family = NFPROTO_BRIDGE,
|
||||
.match = ebt_vlan_mt,
|
||||
.checkentry = ebt_vlan_mt_check,
|
||||
.matchsize = XT_ALIGN(sizeof(struct ebt_vlan_info)),
|
||||
.matchsize = sizeof(struct ebt_vlan_info),
|
||||
.me = THIS_MODULE,
|
||||
};
|
||||
|
||||
|
@ -71,7 +71,7 @@ static int __net_init broute_net_init(struct net *net)
|
||||
|
||||
static void __net_exit broute_net_exit(struct net *net)
|
||||
{
|
||||
ebt_unregister_table(net->xt.broute_table);
|
||||
ebt_unregister_table(net, net->xt.broute_table);
|
||||
}
|
||||
|
||||
static struct pernet_operations broute_net_ops = {
|
||||
|
@ -107,7 +107,7 @@ static int __net_init frame_filter_net_init(struct net *net)
|
||||
|
||||
static void __net_exit frame_filter_net_exit(struct net *net)
|
||||
{
|
||||
ebt_unregister_table(net->xt.frame_filter);
|
||||
ebt_unregister_table(net, net->xt.frame_filter);
|
||||
}
|
||||
|
||||
static struct pernet_operations frame_filter_net_ops = {
|
||||
|
@ -107,7 +107,7 @@ static int __net_init frame_nat_net_init(struct net *net)
|
||||
|
||||
static void __net_exit frame_nat_net_exit(struct net *net)
|
||||
{
|
||||
ebt_unregister_table(net->xt.frame_nat);
|
||||
ebt_unregister_table(net, net->xt.frame_nat);
|
||||
}
|
||||
|
||||
static struct pernet_operations frame_nat_net_ops = {
|
||||
|
@ -82,7 +82,8 @@ static inline int ebt_do_match (struct ebt_entry_match *m,
|
||||
return m->u.match->match(skb, par) ? EBT_MATCH : EBT_NOMATCH;
|
||||
}
|
||||
|
||||
static inline int ebt_dev_check(char *entry, const struct net_device *device)
|
||||
static inline int
|
||||
ebt_dev_check(const char *entry, const struct net_device *device)
|
||||
{
|
||||
int i = 0;
|
||||
const char *devname;
|
||||
@ -100,8 +101,9 @@ static inline int ebt_dev_check(char *entry, const struct net_device *device)
|
||||
|
||||
#define FWINV2(bool,invflg) ((bool) ^ !!(e->invflags & invflg))
|
||||
/* process standard matches */
|
||||
static inline int ebt_basic_match(struct ebt_entry *e, struct ethhdr *h,
|
||||
const struct net_device *in, const struct net_device *out)
|
||||
static inline int
|
||||
ebt_basic_match(const struct ebt_entry *e, const struct ethhdr *h,
|
||||
const struct net_device *in, const struct net_device *out)
|
||||
{
|
||||
int verdict, i;
|
||||
|
||||
@ -156,12 +158,12 @@ unsigned int ebt_do_table (unsigned int hook, struct sk_buff *skb,
|
||||
int i, nentries;
|
||||
struct ebt_entry *point;
|
||||
struct ebt_counter *counter_base, *cb_base;
|
||||
struct ebt_entry_target *t;
|
||||
const struct ebt_entry_target *t;
|
||||
int verdict, sp = 0;
|
||||
struct ebt_chainstack *cs;
|
||||
struct ebt_entries *chaininfo;
|
||||
char *base;
|
||||
struct ebt_table_info *private;
|
||||
const char *base;
|
||||
const struct ebt_table_info *private;
|
||||
bool hotdrop = false;
|
||||
struct xt_match_param mtpar;
|
||||
struct xt_target_param tgpar;
|
||||
@ -395,7 +397,7 @@ ebt_check_watcher(struct ebt_entry_watcher *w, struct xt_tgchk_param *par,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int ebt_verify_pointers(struct ebt_replace *repl,
|
||||
static int ebt_verify_pointers(const struct ebt_replace *repl,
|
||||
struct ebt_table_info *newinfo)
|
||||
{
|
||||
unsigned int limit = repl->entries_size;
|
||||
@ -442,6 +444,8 @@ static int ebt_verify_pointers(struct ebt_replace *repl,
|
||||
break;
|
||||
if (left < e->next_offset)
|
||||
break;
|
||||
if (e->next_offset < sizeof(struct ebt_entry))
|
||||
return -EINVAL;
|
||||
offset += e->next_offset;
|
||||
}
|
||||
}
|
||||
@ -466,8 +470,8 @@ static int ebt_verify_pointers(struct ebt_replace *repl,
|
||||
* to parse the userspace data
|
||||
*/
|
||||
static inline int
|
||||
ebt_check_entry_size_and_hooks(struct ebt_entry *e,
|
||||
struct ebt_table_info *newinfo,
|
||||
ebt_check_entry_size_and_hooks(const struct ebt_entry *e,
|
||||
const struct ebt_table_info *newinfo,
|
||||
unsigned int *n, unsigned int *cnt,
|
||||
unsigned int *totalcnt, unsigned int *udc_cnt)
|
||||
{
|
||||
@ -561,13 +565,14 @@ ebt_get_udc_positions(struct ebt_entry *e, struct ebt_table_info *newinfo,
|
||||
}
|
||||
|
||||
static inline int
|
||||
ebt_cleanup_match(struct ebt_entry_match *m, unsigned int *i)
|
||||
ebt_cleanup_match(struct ebt_entry_match *m, struct net *net, unsigned int *i)
|
||||
{
|
||||
struct xt_mtdtor_param par;
|
||||
|
||||
if (i && (*i)-- == 0)
|
||||
return 1;
|
||||
|
||||
par.net = net;
|
||||
par.match = m->u.match;
|
||||
par.matchinfo = m->data;
|
||||
par.family = NFPROTO_BRIDGE;
|
||||
@ -578,13 +583,14 @@ ebt_cleanup_match(struct ebt_entry_match *m, unsigned int *i)
|
||||
}
|
||||
|
||||
static inline int
|
||||
ebt_cleanup_watcher(struct ebt_entry_watcher *w, unsigned int *i)
|
||||
ebt_cleanup_watcher(struct ebt_entry_watcher *w, struct net *net, unsigned int *i)
|
||||
{
|
||||
struct xt_tgdtor_param par;
|
||||
|
||||
if (i && (*i)-- == 0)
|
||||
return 1;
|
||||
|
||||
par.net = net;
|
||||
par.target = w->u.watcher;
|
||||
par.targinfo = w->data;
|
||||
par.family = NFPROTO_BRIDGE;
|
||||
@ -595,7 +601,7 @@ ebt_cleanup_watcher(struct ebt_entry_watcher *w, unsigned int *i)
|
||||
}
|
||||
|
||||
static inline int
|
||||
ebt_cleanup_entry(struct ebt_entry *e, unsigned int *cnt)
|
||||
ebt_cleanup_entry(struct ebt_entry *e, struct net *net, unsigned int *cnt)
|
||||
{
|
||||
struct xt_tgdtor_param par;
|
||||
struct ebt_entry_target *t;
|
||||
@ -605,10 +611,11 @@ ebt_cleanup_entry(struct ebt_entry *e, unsigned int *cnt)
|
||||
/* we're done */
|
||||
if (cnt && (*cnt)-- == 0)
|
||||
return 1;
|
||||
EBT_WATCHER_ITERATE(e, ebt_cleanup_watcher, NULL);
|
||||
EBT_MATCH_ITERATE(e, ebt_cleanup_match, NULL);
|
||||
EBT_WATCHER_ITERATE(e, ebt_cleanup_watcher, net, NULL);
|
||||
EBT_MATCH_ITERATE(e, ebt_cleanup_match, net, NULL);
|
||||
t = (struct ebt_entry_target *)(((char *)e) + e->target_offset);
|
||||
|
||||
par.net = net;
|
||||
par.target = t->u.target;
|
||||
par.targinfo = t->data;
|
||||
par.family = NFPROTO_BRIDGE;
|
||||
@ -619,7 +626,8 @@ ebt_cleanup_entry(struct ebt_entry *e, unsigned int *cnt)
|
||||
}
|
||||
|
||||
static inline int
|
||||
ebt_check_entry(struct ebt_entry *e, struct ebt_table_info *newinfo,
|
||||
ebt_check_entry(struct ebt_entry *e, struct net *net,
|
||||
const struct ebt_table_info *newinfo,
|
||||
const char *name, unsigned int *cnt,
|
||||
struct ebt_cl_stack *cl_s, unsigned int udc_cnt)
|
||||
{
|
||||
@ -671,6 +679,7 @@ ebt_check_entry(struct ebt_entry *e, struct ebt_table_info *newinfo,
|
||||
}
|
||||
i = 0;
|
||||
|
||||
mtpar.net = tgpar.net = net;
|
||||
mtpar.table = tgpar.table = name;
|
||||
mtpar.entryinfo = tgpar.entryinfo = e;
|
||||
mtpar.hook_mask = tgpar.hook_mask = hookmask;
|
||||
@ -726,9 +735,9 @@ ebt_check_entry(struct ebt_entry *e, struct ebt_table_info *newinfo,
|
||||
(*cnt)++;
|
||||
return 0;
|
||||
cleanup_watchers:
|
||||
EBT_WATCHER_ITERATE(e, ebt_cleanup_watcher, &j);
|
||||
EBT_WATCHER_ITERATE(e, ebt_cleanup_watcher, net, &j);
|
||||
cleanup_matches:
|
||||
EBT_MATCH_ITERATE(e, ebt_cleanup_match, &i);
|
||||
EBT_MATCH_ITERATE(e, ebt_cleanup_match, net, &i);
|
||||
return ret;
|
||||
}
|
||||
|
||||
@ -737,12 +746,12 @@ cleanup_matches:
|
||||
* the hook mask for udc tells us from which base chains the udc can be
|
||||
* accessed. This mask is a parameter to the check() functions of the extensions
|
||||
*/
|
||||
static int check_chainloops(struct ebt_entries *chain, struct ebt_cl_stack *cl_s,
|
||||
static int check_chainloops(const struct ebt_entries *chain, struct ebt_cl_stack *cl_s,
|
||||
unsigned int udc_cnt, unsigned int hooknr, char *base)
|
||||
{
|
||||
int i, chain_nr = -1, pos = 0, nentries = chain->nentries, verdict;
|
||||
struct ebt_entry *e = (struct ebt_entry *)chain->data;
|
||||
struct ebt_entry_target *t;
|
||||
const struct ebt_entry *e = (struct ebt_entry *)chain->data;
|
||||
const struct ebt_entry_target *t;
|
||||
|
||||
while (pos < nentries || chain_nr != -1) {
|
||||
/* end of udc, go back one 'recursion' step */
|
||||
@ -808,7 +817,8 @@ letscontinue:
|
||||
}
|
||||
|
||||
/* do the parsing of the table/chains/entries/matches/watchers/targets, heh */
|
||||
static int translate_table(char *name, struct ebt_table_info *newinfo)
|
||||
static int translate_table(struct net *net, const char *name,
|
||||
struct ebt_table_info *newinfo)
|
||||
{
|
||||
unsigned int i, j, k, udc_cnt;
|
||||
int ret;
|
||||
@ -917,17 +927,17 @@ static int translate_table(char *name, struct ebt_table_info *newinfo)
|
||||
/* used to know what we need to clean up if something goes wrong */
|
||||
i = 0;
|
||||
ret = EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,
|
||||
ebt_check_entry, newinfo, name, &i, cl_s, udc_cnt);
|
||||
ebt_check_entry, net, newinfo, name, &i, cl_s, udc_cnt);
|
||||
if (ret != 0) {
|
||||
EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,
|
||||
ebt_cleanup_entry, &i);
|
||||
ebt_cleanup_entry, net, &i);
|
||||
}
|
||||
vfree(cl_s);
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* called under write_lock */
|
||||
static void get_counters(struct ebt_counter *oldcounters,
|
||||
static void get_counters(const struct ebt_counter *oldcounters,
|
||||
struct ebt_counter *counters, unsigned int nentries)
|
||||
{
|
||||
int i, cpu;
|
||||
@ -950,7 +960,8 @@ static void get_counters(struct ebt_counter *oldcounters,
|
||||
}
|
||||
|
||||
/* replace the table */
|
||||
static int do_replace(struct net *net, void __user *user, unsigned int len)
|
||||
static int do_replace(struct net *net, const void __user *user,
|
||||
unsigned int len)
|
||||
{
|
||||
int ret, i, countersize;
|
||||
struct ebt_table_info *newinfo;
|
||||
@ -1017,7 +1028,7 @@ static int do_replace(struct net *net, void __user *user, unsigned int len)
|
||||
if (ret != 0)
|
||||
goto free_counterstmp;
|
||||
|
||||
ret = translate_table(tmp.name, newinfo);
|
||||
ret = translate_table(net, tmp.name, newinfo);
|
||||
|
||||
if (ret != 0)
|
||||
goto free_counterstmp;
|
||||
@ -1070,7 +1081,7 @@ static int do_replace(struct net *net, void __user *user, unsigned int len)
|
||||
|
||||
/* decrease module count and free resources */
|
||||
EBT_ENTRY_ITERATE(table->entries, table->entries_size,
|
||||
ebt_cleanup_entry, NULL);
|
||||
ebt_cleanup_entry, net, NULL);
|
||||
|
||||
vfree(table->entries);
|
||||
if (table->chainstack) {
|
||||
@ -1087,7 +1098,7 @@ free_unlock:
|
||||
mutex_unlock(&ebt_mutex);
|
||||
free_iterate:
|
||||
EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,
|
||||
ebt_cleanup_entry, NULL);
|
||||
ebt_cleanup_entry, net, NULL);
|
||||
free_counterstmp:
|
||||
vfree(counterstmp);
|
||||
/* can be initialized in translate_table() */
|
||||
@ -1154,7 +1165,7 @@ ebt_register_table(struct net *net, const struct ebt_table *input_table)
|
||||
newinfo->hook_entry[i] = p +
|
||||
((char *)repl->hook_entry[i] - repl->entries);
|
||||
}
|
||||
ret = translate_table(repl->name, newinfo);
|
||||
ret = translate_table(net, repl->name, newinfo);
|
||||
if (ret != 0) {
|
||||
BUGPRINT("Translate_table failed\n");
|
||||
goto free_chainstack;
|
||||
@ -1204,7 +1215,7 @@ out:
|
||||
return ERR_PTR(ret);
|
||||
}
|
||||
|
||||
void ebt_unregister_table(struct ebt_table *table)
|
||||
void ebt_unregister_table(struct net *net, struct ebt_table *table)
|
||||
{
|
||||
int i;
|
||||
|
||||
@ -1216,7 +1227,7 @@ void ebt_unregister_table(struct ebt_table *table)
|
||||
list_del(&table->list);
|
||||
mutex_unlock(&ebt_mutex);
|
||||
EBT_ENTRY_ITERATE(table->private->entries, table->private->entries_size,
|
||||
ebt_cleanup_entry, NULL);
|
||||
ebt_cleanup_entry, net, NULL);
|
||||
if (table->private->nentries)
|
||||
module_put(table->me);
|
||||
vfree(table->private->entries);
|
||||
@ -1230,7 +1241,8 @@ void ebt_unregister_table(struct ebt_table *table)
|
||||
}
|
||||
|
||||
/* userspace just supplied us with counters */
|
||||
static int update_counters(struct net *net, void __user *user, unsigned int len)
|
||||
static int update_counters(struct net *net, const void __user *user,
|
||||
unsigned int len)
|
||||
{
|
||||
int i, ret;
|
||||
struct ebt_counter *tmp;
|
||||
@ -1285,8 +1297,8 @@ free_tmp:
|
||||
return ret;
|
||||
}
|
||||
|
||||
static inline int ebt_make_matchname(struct ebt_entry_match *m,
|
||||
char *base, char __user *ubase)
|
||||
static inline int ebt_make_matchname(const struct ebt_entry_match *m,
|
||||
const char *base, char __user *ubase)
|
||||
{
|
||||
char __user *hlp = ubase + ((char *)m - base);
|
||||
if (copy_to_user(hlp, m->u.match->name, EBT_FUNCTION_MAXNAMELEN))
|
||||
@ -1294,8 +1306,8 @@ static inline int ebt_make_matchname(struct ebt_entry_match *m,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline int ebt_make_watchername(struct ebt_entry_watcher *w,
|
||||
char *base, char __user *ubase)
|
||||
static inline int ebt_make_watchername(const struct ebt_entry_watcher *w,
|
||||
const char *base, char __user *ubase)
|
||||
{
|
||||
char __user *hlp = ubase + ((char *)w - base);
|
||||
if (copy_to_user(hlp , w->u.watcher->name, EBT_FUNCTION_MAXNAMELEN))
|
||||
@ -1303,11 +1315,12 @@ static inline int ebt_make_watchername(struct ebt_entry_watcher *w,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline int ebt_make_names(struct ebt_entry *e, char *base, char __user *ubase)
|
||||
static inline int
|
||||
ebt_make_names(struct ebt_entry *e, const char *base, char __user *ubase)
|
||||
{
|
||||
int ret;
|
||||
char __user *hlp;
|
||||
struct ebt_entry_target *t;
|
||||
const struct ebt_entry_target *t;
|
||||
|
||||
if (e->bitmask == 0)
|
||||
return 0;
|
||||
@ -1328,10 +1341,11 @@ static inline int ebt_make_names(struct ebt_entry *e, char *base, char __user *u
|
||||
|
||||
/* called with ebt_mutex locked */
|
||||
static int copy_everything_to_user(struct ebt_table *t, void __user *user,
|
||||
int *len, int cmd)
|
||||
const int *len, int cmd)
|
||||
{
|
||||
struct ebt_replace tmp;
|
||||
struct ebt_counter *counterstmp, *oldcounters;
|
||||
struct ebt_counter *counterstmp;
|
||||
const struct ebt_counter *oldcounters;
|
||||
unsigned int entries_size, nentries;
|
||||
char *entries;
|
||||
|
||||
|
@ -27,6 +27,7 @@

#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_arp/arp_tables.h>
#include "../../netfilter/xt_repldata.h"

MODULE_LICENSE("GPL");
MODULE_AUTHOR("David S. Miller <davem@redhat.com>");
@ -58,6 +59,12 @@ do { \
#define ARP_NF_ASSERT(x)
#endif

void *arpt_alloc_initial_table(const struct xt_table *info)
{
return xt_alloc_initial_table(arpt, ARPT);
}
EXPORT_SYMBOL_GPL(arpt_alloc_initial_table);

static inline int arp_devaddr_compare(const struct arpt_devaddr_info *ap,
const char *hdr_addr, int len)
{
@ -226,7 +233,14 @@ arpt_error(struct sk_buff *skb, const struct xt_target_param *par)
return NF_DROP;
}

static inline struct arpt_entry *get_entry(void *base, unsigned int offset)
static inline const struct arpt_entry_target *
arpt_get_target_c(const struct arpt_entry *e)
{
return arpt_get_target((struct arpt_entry *)e);
}

static inline struct arpt_entry *
get_entry(const void *base, unsigned int offset)
{
return (struct arpt_entry *)(base + offset);
}
@ -273,7 +287,7 @@ unsigned int arpt_do_table(struct sk_buff *skb,

arp = arp_hdr(skb);
do {
struct arpt_entry_target *t;
const struct arpt_entry_target *t;
int hdr_len;

if (!arp_packet_match(arp, skb->dev, indev, outdev, &e->arp)) {
@ -285,7 +299,7 @@ unsigned int arpt_do_table(struct sk_buff *skb,
(2 * skb->dev->addr_len);
ADD_COUNTER(e->counters, hdr_len, 1);

t = arpt_get_target(e);
t = arpt_get_target_c(e);

/* Standard target? */
if (!t->u.kernel.target->target) {
@ -351,7 +365,7 @@ static inline bool unconditional(const struct arpt_arp *arp)
/* Figures out from what hook each rule can be called: returns 0 if
* there are loops. Puts hook bitmask in comefrom.
*/
static int mark_source_chains(struct xt_table_info *newinfo,
static int mark_source_chains(const struct xt_table_info *newinfo,
unsigned int valid_hooks, void *entry0)
{
unsigned int hook;
@ -372,7 +386,7 @@ static int mark_source_chains(struct xt_table_info *newinfo,

for (;;) {
const struct arpt_standard_target *t
= (void *)arpt_get_target(e);
= (void *)arpt_get_target_c(e);
int visited = e->comefrom & (1 << hook);

if (e->comefrom & (1 << NF_ARP_NUMHOOKS)) {
@ -456,7 +470,7 @@ static int mark_source_chains(struct xt_table_info *newinfo,
return 1;
}

static inline int check_entry(struct arpt_entry *e, const char *name)
static inline int check_entry(const struct arpt_entry *e, const char *name)
{
const struct arpt_entry_target *t;

@ -468,7 +482,7 @@ static inline int check_entry(struct arpt_entry *e, const char *name)
if (e->target_offset + sizeof(struct arpt_entry_target) > e->next_offset)
return -EINVAL;

t = arpt_get_target(e);
t = arpt_get_target_c(e);
if (e->target_offset + t->u.target_size > e->next_offset)
return -EINVAL;

@ -533,14 +547,14 @@ out:
return ret;
}

static bool check_underflow(struct arpt_entry *e)
static bool check_underflow(const struct arpt_entry *e)
{
const struct arpt_entry_target *t;
unsigned int verdict;

if (!unconditional(&e->arp))
return false;
t = arpt_get_target(e);
t = arpt_get_target_c(e);
if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0)
return false;
verdict = ((struct arpt_standard_target *)t)->verdict;
@ -550,8 +564,8 @@ static bool check_underflow(struct arpt_entry *e)

static inline int check_entry_size_and_hooks(struct arpt_entry *e,
struct xt_table_info *newinfo,
unsigned char *base,
unsigned char *limit,
const unsigned char *base,
const unsigned char *limit,
const unsigned int *hook_entries,
const unsigned int *underflows,
unsigned int valid_hooks,
@ -761,11 +775,11 @@ static void get_counters(const struct xt_table_info *t,
local_bh_enable();
}

static struct xt_counters *alloc_counters(struct xt_table *table)
static struct xt_counters *alloc_counters(const struct xt_table *table)
{
unsigned int countersize;
struct xt_counters *counters;
struct xt_table_info *private = table->private;
const struct xt_table_info *private = table->private;

/* We need atomic snapshot of counters: rest doesn't change
* (other than comefrom, which userspace doesn't care
@ -783,11 +797,11 @@ static struct xt_counters *alloc_counters(struct xt_table *table)
}

static int copy_entries_to_user(unsigned int total_size,
struct xt_table *table,
const struct xt_table *table,
void __user *userptr)
{
unsigned int off, num;
struct arpt_entry *e;
const struct arpt_entry *e;
struct xt_counters *counters;
struct xt_table_info *private = table->private;
int ret = 0;
@ -807,7 +821,7 @@ static int copy_entries_to_user(unsigned int total_size,
/* FIXME: use iterator macros --RR */
/* ... then go back and fix counters and names */
for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){
struct arpt_entry_target *t;
const struct arpt_entry_target *t;

e = (struct arpt_entry *)(loc_cpu_entry + off);
if (copy_to_user(userptr + off
@ -818,7 +832,7 @@ static int copy_entries_to_user(unsigned int total_size,
goto free_counters;
}

t = arpt_get_target(e);
t = arpt_get_target_c(e);
if (copy_to_user(userptr + off + e->target_offset
+ offsetof(struct arpt_entry_target,
u.user.name),
@ -835,7 +849,7 @@ static int copy_entries_to_user(unsigned int total_size,
}

#ifdef CONFIG_COMPAT
static void compat_standard_from_user(void *dst, void *src)
static void compat_standard_from_user(void *dst, const void *src)
{
int v = *(compat_int_t *)src;

@ -844,7 +858,7 @@ static void compat_standard_from_user(void *dst, void *src)
memcpy(dst, &v, sizeof(v));
}

static int compat_standard_to_user(void __user *dst, void *src)
static int compat_standard_to_user(void __user *dst, const void *src)
{
compat_int_t cv = *(int *)src;

@ -853,18 +867,18 @@ static int compat_standard_to_user(void __user *dst, void *src)
return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
}

static int compat_calc_entry(struct arpt_entry *e,
static int compat_calc_entry(const struct arpt_entry *e,
const struct xt_table_info *info,
void *base, struct xt_table_info *newinfo)
const void *base, struct xt_table_info *newinfo)
{
struct arpt_entry_target *t;
const struct arpt_entry_target *t;
unsigned int entry_offset;
int off, i, ret;

off = sizeof(struct arpt_entry) - sizeof(struct compat_arpt_entry);
entry_offset = (void *)e - base;

t = arpt_get_target(e);
t = arpt_get_target_c(e);
off += xt_compat_target_offset(t->u.kernel.target);
newinfo->size -= off;
ret = xt_compat_add_offset(NFPROTO_ARP, entry_offset, off);
@ -900,7 +914,8 @@ static int compat_table_info(const struct xt_table_info *info,
}
#endif

static int get_info(struct net *net, void __user *user, int *len, int compat)
static int get_info(struct net *net, void __user *user,
const int *len, int compat)
{
char name[ARPT_TABLE_MAXNAMELEN];
struct xt_table *t;
@ -959,7 +974,7 @@ static int get_info(struct net *net, void __user *user, int *len, int compat)
}

static int get_entries(struct net *net, struct arpt_get_entries __user *uptr,
int *len)
const int *len)
{
int ret;
struct arpt_get_entries get;
@ -1073,7 +1088,8 @@ static int __do_replace(struct net *net, const char *name,
return ret;
}

static int do_replace(struct net *net, void __user *user, unsigned int len)
static int do_replace(struct net *net, const void __user *user,
unsigned int len)
{
int ret;
struct arpt_replace tmp;
@ -1133,8 +1149,8 @@ add_counter_to_entry(struct arpt_entry *e,
return 0;
}

static int do_add_counters(struct net *net, void __user *user, unsigned int len,
int compat)
static int do_add_counters(struct net *net, const void __user *user,
unsigned int len, int compat)
{
unsigned int i, curcpu;
struct xt_counters_info tmp;
@ -1238,10 +1254,10 @@ static inline int
check_compat_entry_size_and_hooks(struct compat_arpt_entry *e,
struct xt_table_info *newinfo,
unsigned int *size,
unsigned char *base,
unsigned char *limit,
unsigned int *hook_entries,
unsigned int *underflows,
const unsigned char *base,
const unsigned char *limit,
const unsigned int *hook_entries,
const unsigned int *underflows,
unsigned int *i,
const char *name)
{

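The arpt_get_target_c() accessor introduced above exists so that read-only code paths can walk a table without casting away const at every call site. A minimal sketch of such a walker, assuming only the entry layout visible in the hunks above (the counting function itself is hypothetical and not part of the patch):

/* Hypothetical helper, for illustration only: counts standard targets in a
 * rule blob using the const-safe accessor from the patch. */
static unsigned int count_standard_targets(const void *entries,
					   unsigned int total_size)
{
	const struct arpt_entry *e;
	unsigned int off, n = 0;

	for (off = 0; off < total_size; off += e->next_offset) {
		const struct arpt_entry_target *t;

		e = (const struct arpt_entry *)(entries + off);
		t = arpt_get_target_c(e);
		if (strcmp(t->u.user.name, XT_STANDARD_TARGET) == 0)
			n++;
	}
	return n;
}
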
@ -6,6 +6,7 @@
*/

#include <linux/module.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_arp/arp_tables.h>

MODULE_LICENSE("GPL");
@ -15,93 +16,37 @@ MODULE_DESCRIPTION("arptables filter table");
#define FILTER_VALID_HOOKS ((1 << NF_ARP_IN) | (1 << NF_ARP_OUT) | \
(1 << NF_ARP_FORWARD))

static const struct
{
struct arpt_replace repl;
struct arpt_standard entries[3];
struct arpt_error term;
} initial_table __net_initdata = {
.repl = {
.name = "filter",
.valid_hooks = FILTER_VALID_HOOKS,
.num_entries = 4,
.size = sizeof(struct arpt_standard) * 3 + sizeof(struct arpt_error),
.hook_entry = {
[NF_ARP_IN] = 0,
[NF_ARP_OUT] = sizeof(struct arpt_standard),
[NF_ARP_FORWARD] = 2 * sizeof(struct arpt_standard),
},
.underflow = {
[NF_ARP_IN] = 0,
[NF_ARP_OUT] = sizeof(struct arpt_standard),
[NF_ARP_FORWARD] = 2 * sizeof(struct arpt_standard),
},
},
.entries = {
ARPT_STANDARD_INIT(NF_ACCEPT), /* ARP_IN */
ARPT_STANDARD_INIT(NF_ACCEPT), /* ARP_OUT */
ARPT_STANDARD_INIT(NF_ACCEPT), /* ARP_FORWARD */
},
.term = ARPT_ERROR_INIT,
};

static const struct xt_table packet_filter = {
.name = "filter",
.valid_hooks = FILTER_VALID_HOOKS,
.me = THIS_MODULE,
.af = NFPROTO_ARP,
.priority = NF_IP_PRI_FILTER,
};

/* The work comes in here from netfilter.c */
static unsigned int arpt_in_hook(unsigned int hook,
struct sk_buff *skb,
const struct net_device *in,
const struct net_device *out,
int (*okfn)(struct sk_buff *))
static unsigned int
arptable_filter_hook(unsigned int hook, struct sk_buff *skb,
const struct net_device *in, const struct net_device *out,
int (*okfn)(struct sk_buff *))
{
return arpt_do_table(skb, hook, in, out,
dev_net(in)->ipv4.arptable_filter);
const struct net *net = dev_net((in != NULL) ? in : out);

return arpt_do_table(skb, hook, in, out, net->ipv4.arptable_filter);
}

static unsigned int arpt_out_hook(unsigned int hook,
struct sk_buff *skb,
const struct net_device *in,
const struct net_device *out,
int (*okfn)(struct sk_buff *))
{
return arpt_do_table(skb, hook, in, out,
dev_net(out)->ipv4.arptable_filter);
}

static struct nf_hook_ops arpt_ops[] __read_mostly = {
{
.hook = arpt_in_hook,
.owner = THIS_MODULE,
.pf = NFPROTO_ARP,
.hooknum = NF_ARP_IN,
.priority = NF_IP_PRI_FILTER,
},
{
.hook = arpt_out_hook,
.owner = THIS_MODULE,
.pf = NFPROTO_ARP,
.hooknum = NF_ARP_OUT,
.priority = NF_IP_PRI_FILTER,
},
{
.hook = arpt_in_hook,
.owner = THIS_MODULE,
.pf = NFPROTO_ARP,
.hooknum = NF_ARP_FORWARD,
.priority = NF_IP_PRI_FILTER,
},
};
static struct nf_hook_ops *arpfilter_ops __read_mostly;

static int __net_init arptable_filter_net_init(struct net *net)
{
/* Register table */
struct arpt_replace *repl;

repl = arpt_alloc_initial_table(&packet_filter);
if (repl == NULL)
return -ENOMEM;
net->ipv4.arptable_filter =
arpt_register_table(net, &packet_filter, &initial_table.repl);
arpt_register_table(net, &packet_filter, repl);
kfree(repl);
if (IS_ERR(net->ipv4.arptable_filter))
return PTR_ERR(net->ipv4.arptable_filter);
return 0;
@ -125,9 +70,11 @@ static int __init arptable_filter_init(void)
if (ret < 0)
return ret;

ret = nf_register_hooks(arpt_ops, ARRAY_SIZE(arpt_ops));
if (ret < 0)
arpfilter_ops = xt_hook_link(&packet_filter, arptable_filter_hook);
if (IS_ERR(arpfilter_ops)) {
ret = PTR_ERR(arpfilter_ops);
goto cleanup_table;
}
return ret;

cleanup_table:
@ -137,7 +84,7 @@ cleanup_table:

static void __exit arptable_filter_fini(void)
{
nf_unregister_hooks(arpt_ops, ARRAY_SIZE(arpt_ops));
xt_hook_unlink(&packet_filter, arpfilter_ops);
unregister_pernet_subsys(&arptable_filter_net_ops);
}

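The same registration pattern recurs in every table module touched below: allocate the initial ruleset from the xt_table template, hand it to the per-net register function, free the temporary blob, then attach one combined hook function with xt_hook_link(). A condensed sketch of that flow, using the calls exactly as they appear in the arptable_filter hunks above (my_net_ops and my_table_hook are assumed to be defined elsewhere; this is an illustration, not part of the patch):

static struct nf_hook_ops *my_ops __read_mostly;

static int __net_init my_table_net_init(struct net *net)
{
	struct arpt_replace *repl;

	repl = arpt_alloc_initial_table(&packet_filter);	/* one ACCEPT rule per valid hook */
	if (repl == NULL)
		return -ENOMEM;
	net->ipv4.arptable_filter = arpt_register_table(net, &packet_filter, repl);
	kfree(repl);	/* the table keeps its own copy of the entries */
	if (IS_ERR(net->ipv4.arptable_filter))
		return PTR_ERR(net->ipv4.arptable_filter);
	return 0;
}

static int __init my_table_init(void)
{
	int ret = register_pernet_subsys(&my_net_ops);

	if (ret < 0)
		return ret;
	/* one hook function serves every bit set in packet_filter.valid_hooks */
	my_ops = xt_hook_link(&packet_filter, my_table_hook);
	if (IS_ERR(my_ops)) {
		unregister_pernet_subsys(&my_net_ops);
		return PTR_ERR(my_ops);
	}
	return 0;
}
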
@ -28,6 +28,7 @@
|
||||
#include <linux/netfilter/x_tables.h>
|
||||
#include <linux/netfilter_ipv4/ip_tables.h>
|
||||
#include <net/netfilter/nf_log.h>
|
||||
#include "../../netfilter/xt_repldata.h"
|
||||
|
||||
MODULE_LICENSE("GPL");
|
||||
MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
|
||||
@ -66,6 +67,12 @@ do { \
|
||||
#define inline
|
||||
#endif
|
||||
|
||||
void *ipt_alloc_initial_table(const struct xt_table *info)
|
||||
{
|
||||
return xt_alloc_initial_table(ipt, IPT);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(ipt_alloc_initial_table);
|
||||
|
||||
/*
|
||||
We keep a set of rules for each CPU, so we can avoid write-locking
|
||||
them in the softirq when updating the counters and therefore
|
||||
@ -169,7 +176,7 @@ ipt_error(struct sk_buff *skb, const struct xt_target_param *par)
|
||||
|
||||
/* Performance critical - called for every packet */
|
||||
static inline bool
|
||||
do_match(struct ipt_entry_match *m, const struct sk_buff *skb,
|
||||
do_match(const struct ipt_entry_match *m, const struct sk_buff *skb,
|
||||
struct xt_match_param *par)
|
||||
{
|
||||
par->match = m->u.kernel.match;
|
||||
@ -184,7 +191,7 @@ do_match(struct ipt_entry_match *m, const struct sk_buff *skb,
|
||||
|
||||
/* Performance critical */
|
||||
static inline struct ipt_entry *
|
||||
get_entry(void *base, unsigned int offset)
|
||||
get_entry(const void *base, unsigned int offset)
|
||||
{
|
||||
return (struct ipt_entry *)(base + offset);
|
||||
}
|
||||
@ -199,6 +206,13 @@ static inline bool unconditional(const struct ipt_ip *ip)
|
||||
#undef FWINV
|
||||
}
|
||||
|
||||
/* for const-correctness */
|
||||
static inline const struct ipt_entry_target *
|
||||
ipt_get_target_c(const struct ipt_entry *e)
|
||||
{
|
||||
return ipt_get_target((struct ipt_entry *)e);
|
||||
}
|
||||
|
||||
#if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
|
||||
defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
|
||||
static const char *const hooknames[] = {
|
||||
@ -233,11 +247,11 @@ static struct nf_loginfo trace_loginfo = {
|
||||
|
||||
/* Mildly perf critical (only if packet tracing is on) */
|
||||
static inline int
|
||||
get_chainname_rulenum(struct ipt_entry *s, struct ipt_entry *e,
|
||||
get_chainname_rulenum(const struct ipt_entry *s, const struct ipt_entry *e,
|
||||
const char *hookname, const char **chainname,
|
||||
const char **comment, unsigned int *rulenum)
|
||||
{
|
||||
struct ipt_standard_target *t = (void *)ipt_get_target(s);
|
||||
const struct ipt_standard_target *t = (void *)ipt_get_target_c(s);
|
||||
|
||||
if (strcmp(t->target.u.kernel.target->name, IPT_ERROR_TARGET) == 0) {
|
||||
/* Head of user chain: ERROR target with chainname */
|
||||
@ -263,15 +277,15 @@ get_chainname_rulenum(struct ipt_entry *s, struct ipt_entry *e,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void trace_packet(struct sk_buff *skb,
|
||||
static void trace_packet(const struct sk_buff *skb,
|
||||
unsigned int hook,
|
||||
const struct net_device *in,
|
||||
const struct net_device *out,
|
||||
const char *tablename,
|
||||
struct xt_table_info *private,
|
||||
struct ipt_entry *e)
|
||||
const struct xt_table_info *private,
|
||||
const struct ipt_entry *e)
|
||||
{
|
||||
void *table_base;
|
||||
const void *table_base;
|
||||
const struct ipt_entry *root;
|
||||
const char *hookname, *chainname, *comment;
|
||||
unsigned int rulenum = 0;
|
||||
@ -315,9 +329,9 @@ ipt_do_table(struct sk_buff *skb,
|
||||
/* Initializing verdict to NF_DROP keeps gcc happy. */
|
||||
unsigned int verdict = NF_DROP;
|
||||
const char *indev, *outdev;
|
||||
void *table_base;
|
||||
const void *table_base;
|
||||
struct ipt_entry *e, *back;
|
||||
struct xt_table_info *private;
|
||||
const struct xt_table_info *private;
|
||||
struct xt_match_param mtpar;
|
||||
struct xt_target_param tgpar;
|
||||
|
||||
@ -350,7 +364,7 @@ ipt_do_table(struct sk_buff *skb,
|
||||
back = get_entry(table_base, private->underflow[hook]);
|
||||
|
||||
do {
|
||||
struct ipt_entry_target *t;
|
||||
const struct ipt_entry_target *t;
|
||||
|
||||
IP_NF_ASSERT(e);
|
||||
IP_NF_ASSERT(back);
|
||||
@ -443,7 +457,7 @@ ipt_do_table(struct sk_buff *skb,
|
||||
/* Figures out from what hook each rule can be called: returns 0 if
|
||||
there are loops. Puts hook bitmask in comefrom. */
|
||||
static int
|
||||
mark_source_chains(struct xt_table_info *newinfo,
|
||||
mark_source_chains(const struct xt_table_info *newinfo,
|
||||
unsigned int valid_hooks, void *entry0)
|
||||
{
|
||||
unsigned int hook;
|
||||
@ -461,8 +475,8 @@ mark_source_chains(struct xt_table_info *newinfo,
|
||||
e->counters.pcnt = pos;
|
||||
|
||||
for (;;) {
|
||||
struct ipt_standard_target *t
|
||||
= (void *)ipt_get_target(e);
|
||||
const struct ipt_standard_target *t
|
||||
= (void *)ipt_get_target_c(e);
|
||||
int visited = e->comefrom & (1 << hook);
|
||||
|
||||
if (e->comefrom & (1 << NF_INET_NUMHOOKS)) {
|
||||
@ -553,13 +567,14 @@ mark_source_chains(struct xt_table_info *newinfo,
|
||||
}
|
||||
|
||||
static int
|
||||
cleanup_match(struct ipt_entry_match *m, unsigned int *i)
|
||||
cleanup_match(struct ipt_entry_match *m, struct net *net, unsigned int *i)
|
||||
{
|
||||
struct xt_mtdtor_param par;
|
||||
|
||||
if (i && (*i)-- == 0)
|
||||
return 1;
|
||||
|
||||
par.net = net;
|
||||
par.match = m->u.kernel.match;
|
||||
par.matchinfo = m->data;
|
||||
par.family = NFPROTO_IPV4;
|
||||
@ -570,9 +585,9 @@ cleanup_match(struct ipt_entry_match *m, unsigned int *i)
|
||||
}
|
||||
|
||||
static int
|
||||
check_entry(struct ipt_entry *e, const char *name)
|
||||
check_entry(const struct ipt_entry *e, const char *name)
|
||||
{
|
||||
struct ipt_entry_target *t;
|
||||
const struct ipt_entry_target *t;
|
||||
|
||||
if (!ip_checkentry(&e->ip)) {
|
||||
duprintf("ip_tables: ip check failed %p %s.\n", e, name);
|
||||
@ -583,7 +598,7 @@ check_entry(struct ipt_entry *e, const char *name)
|
||||
e->next_offset)
|
||||
return -EINVAL;
|
||||
|
||||
t = ipt_get_target(e);
|
||||
t = ipt_get_target_c(e);
|
||||
if (e->target_offset + t->u.target_size > e->next_offset)
|
||||
return -EINVAL;
|
||||
|
||||
@ -637,10 +652,11 @@ err:
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int check_target(struct ipt_entry *e, const char *name)
|
||||
static int check_target(struct ipt_entry *e, struct net *net, const char *name)
|
||||
{
|
||||
struct ipt_entry_target *t = ipt_get_target(e);
|
||||
struct xt_tgchk_param par = {
|
||||
.net = net,
|
||||
.table = name,
|
||||
.entryinfo = e,
|
||||
.target = t->u.kernel.target,
|
||||
@ -661,8 +677,8 @@ static int check_target(struct ipt_entry *e, const char *name)
|
||||
}
|
||||
|
||||
static int
|
||||
find_check_entry(struct ipt_entry *e, const char *name, unsigned int size,
|
||||
unsigned int *i)
|
||||
find_check_entry(struct ipt_entry *e, struct net *net, const char *name,
|
||||
unsigned int size, unsigned int *i)
|
||||
{
|
||||
struct ipt_entry_target *t;
|
||||
struct xt_target *target;
|
||||
@ -675,6 +691,7 @@ find_check_entry(struct ipt_entry *e, const char *name, unsigned int size,
|
||||
return ret;
|
||||
|
||||
j = 0;
|
||||
mtpar.net = net;
|
||||
mtpar.table = name;
|
||||
mtpar.entryinfo = &e->ip;
|
||||
mtpar.hook_mask = e->comefrom;
|
||||
@ -695,7 +712,7 @@ find_check_entry(struct ipt_entry *e, const char *name, unsigned int size,
|
||||
}
|
||||
t->u.kernel.target = target;
|
||||
|
||||
ret = check_target(e, name);
|
||||
ret = check_target(e, net, name);
|
||||
if (ret)
|
||||
goto err;
|
||||
|
||||
@ -704,18 +721,18 @@ find_check_entry(struct ipt_entry *e, const char *name, unsigned int size,
|
||||
err:
|
||||
module_put(t->u.kernel.target->me);
|
||||
cleanup_matches:
|
||||
IPT_MATCH_ITERATE(e, cleanup_match, &j);
|
||||
IPT_MATCH_ITERATE(e, cleanup_match, net, &j);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static bool check_underflow(struct ipt_entry *e)
|
||||
static bool check_underflow(const struct ipt_entry *e)
|
||||
{
|
||||
const struct ipt_entry_target *t;
|
||||
unsigned int verdict;
|
||||
|
||||
if (!unconditional(&e->ip))
|
||||
return false;
|
||||
t = ipt_get_target(e);
|
||||
t = ipt_get_target_c(e);
|
||||
if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0)
|
||||
return false;
|
||||
verdict = ((struct ipt_standard_target *)t)->verdict;
|
||||
@ -726,8 +743,8 @@ static bool check_underflow(struct ipt_entry *e)
|
||||
static int
|
||||
check_entry_size_and_hooks(struct ipt_entry *e,
|
||||
struct xt_table_info *newinfo,
|
||||
unsigned char *base,
|
||||
unsigned char *limit,
|
||||
const unsigned char *base,
|
||||
const unsigned char *limit,
|
||||
const unsigned int *hook_entries,
|
||||
const unsigned int *underflows,
|
||||
unsigned int valid_hooks,
|
||||
@ -774,7 +791,7 @@ check_entry_size_and_hooks(struct ipt_entry *e,
|
||||
}
|
||||
|
||||
static int
|
||||
cleanup_entry(struct ipt_entry *e, unsigned int *i)
|
||||
cleanup_entry(struct ipt_entry *e, struct net *net, unsigned int *i)
|
||||
{
|
||||
struct xt_tgdtor_param par;
|
||||
struct ipt_entry_target *t;
|
||||
@ -783,9 +800,10 @@ cleanup_entry(struct ipt_entry *e, unsigned int *i)
|
||||
return 1;
|
||||
|
||||
/* Cleanup all matches */
|
||||
IPT_MATCH_ITERATE(e, cleanup_match, NULL);
|
||||
IPT_MATCH_ITERATE(e, cleanup_match, net, NULL);
|
||||
t = ipt_get_target(e);
|
||||
|
||||
par.net = net;
|
||||
par.target = t->u.kernel.target;
|
||||
par.targinfo = t->data;
|
||||
par.family = NFPROTO_IPV4;
|
||||
@ -798,7 +816,8 @@ cleanup_entry(struct ipt_entry *e, unsigned int *i)
|
||||
/* Checks and translates the user-supplied table segment (held in
|
||||
newinfo) */
|
||||
static int
|
||||
translate_table(const char *name,
|
||||
translate_table(struct net *net,
|
||||
const char *name,
|
||||
unsigned int valid_hooks,
|
||||
struct xt_table_info *newinfo,
|
||||
void *entry0,
|
||||
@ -860,11 +879,11 @@ translate_table(const char *name,
|
||||
/* Finally, each sanity check must pass */
|
||||
i = 0;
|
||||
ret = IPT_ENTRY_ITERATE(entry0, newinfo->size,
|
||||
find_check_entry, name, size, &i);
|
||||
find_check_entry, net, name, size, &i);
|
||||
|
||||
if (ret != 0) {
|
||||
IPT_ENTRY_ITERATE(entry0, newinfo->size,
|
||||
cleanup_entry, &i);
|
||||
cleanup_entry, net, &i);
|
||||
return ret;
|
||||
}
|
||||
|
||||
@ -940,11 +959,11 @@ get_counters(const struct xt_table_info *t,
|
||||
local_bh_enable();
|
||||
}
|
||||
|
||||
static struct xt_counters * alloc_counters(struct xt_table *table)
|
||||
static struct xt_counters *alloc_counters(const struct xt_table *table)
|
||||
{
|
||||
unsigned int countersize;
|
||||
struct xt_counters *counters;
|
||||
struct xt_table_info *private = table->private;
|
||||
const struct xt_table_info *private = table->private;
|
||||
|
||||
/* We need atomic snapshot of counters: rest doesn't change
|
||||
(other than comefrom, which userspace doesn't care
|
||||
@ -962,11 +981,11 @@ static struct xt_counters * alloc_counters(struct xt_table *table)
|
||||
|
||||
static int
|
||||
copy_entries_to_user(unsigned int total_size,
|
||||
struct xt_table *table,
|
||||
const struct xt_table *table,
|
||||
void __user *userptr)
|
||||
{
|
||||
unsigned int off, num;
|
||||
struct ipt_entry *e;
|
||||
const struct ipt_entry *e;
|
||||
struct xt_counters *counters;
|
||||
const struct xt_table_info *private = table->private;
|
||||
int ret = 0;
|
||||
@ -1018,7 +1037,7 @@ copy_entries_to_user(unsigned int total_size,
|
||||
}
|
||||
}
|
||||
|
||||
t = ipt_get_target(e);
|
||||
t = ipt_get_target_c(e);
|
||||
if (copy_to_user(userptr + off + e->target_offset
|
||||
+ offsetof(struct ipt_entry_target,
|
||||
u.user.name),
|
||||
@ -1035,7 +1054,7 @@ copy_entries_to_user(unsigned int total_size,
|
||||
}
|
||||
|
||||
#ifdef CONFIG_COMPAT
|
||||
static void compat_standard_from_user(void *dst, void *src)
|
||||
static void compat_standard_from_user(void *dst, const void *src)
|
||||
{
|
||||
int v = *(compat_int_t *)src;
|
||||
|
||||
@ -1044,7 +1063,7 @@ static void compat_standard_from_user(void *dst, void *src)
|
||||
memcpy(dst, &v, sizeof(v));
|
||||
}
|
||||
|
||||
static int compat_standard_to_user(void __user *dst, void *src)
|
||||
static int compat_standard_to_user(void __user *dst, const void *src)
|
||||
{
|
||||
compat_int_t cv = *(int *)src;
|
||||
|
||||
@ -1054,24 +1073,24 @@ static int compat_standard_to_user(void __user *dst, void *src)
|
||||
}
|
||||
|
||||
static inline int
|
||||
compat_calc_match(struct ipt_entry_match *m, int *size)
|
||||
compat_calc_match(const struct ipt_entry_match *m, int *size)
|
||||
{
|
||||
*size += xt_compat_match_offset(m->u.kernel.match);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int compat_calc_entry(struct ipt_entry *e,
|
||||
static int compat_calc_entry(const struct ipt_entry *e,
|
||||
const struct xt_table_info *info,
|
||||
void *base, struct xt_table_info *newinfo)
|
||||
const void *base, struct xt_table_info *newinfo)
|
||||
{
|
||||
struct ipt_entry_target *t;
|
||||
const struct ipt_entry_target *t;
|
||||
unsigned int entry_offset;
|
||||
int off, i, ret;
|
||||
|
||||
off = sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);
|
||||
entry_offset = (void *)e - base;
|
||||
IPT_MATCH_ITERATE(e, compat_calc_match, &off);
|
||||
t = ipt_get_target(e);
|
||||
t = ipt_get_target_c(e);
|
||||
off += xt_compat_target_offset(t->u.kernel.target);
|
||||
newinfo->size -= off;
|
||||
ret = xt_compat_add_offset(AF_INET, entry_offset, off);
|
||||
@ -1107,7 +1126,8 @@ static int compat_table_info(const struct xt_table_info *info,
|
||||
}
|
||||
#endif
|
||||
|
||||
static int get_info(struct net *net, void __user *user, int *len, int compat)
|
||||
static int get_info(struct net *net, void __user *user,
|
||||
const int *len, int compat)
|
||||
{
|
||||
char name[IPT_TABLE_MAXNAMELEN];
|
||||
struct xt_table *t;
|
||||
@ -1167,7 +1187,8 @@ static int get_info(struct net *net, void __user *user, int *len, int compat)
|
||||
}
|
||||
|
||||
static int
|
||||
get_entries(struct net *net, struct ipt_get_entries __user *uptr, int *len)
|
||||
get_entries(struct net *net, struct ipt_get_entries __user *uptr,
|
||||
const int *len)
|
||||
{
|
||||
int ret;
|
||||
struct ipt_get_entries get;
|
||||
@ -1258,7 +1279,7 @@ __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
|
||||
/* Decrease module usage counts and free resource */
|
||||
loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()];
|
||||
IPT_ENTRY_ITERATE(loc_cpu_old_entry, oldinfo->size, cleanup_entry,
|
||||
NULL);
|
||||
net, NULL);
|
||||
xt_free_table_info(oldinfo);
|
||||
if (copy_to_user(counters_ptr, counters,
|
||||
sizeof(struct xt_counters) * num_counters) != 0)
|
||||
@ -1277,7 +1298,7 @@ __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
|
||||
}
|
||||
|
||||
static int
|
||||
do_replace(struct net *net, void __user *user, unsigned int len)
|
||||
do_replace(struct net *net, const void __user *user, unsigned int len)
|
||||
{
|
||||
int ret;
|
||||
struct ipt_replace tmp;
|
||||
@ -1303,7 +1324,7 @@ do_replace(struct net *net, void __user *user, unsigned int len)
|
||||
goto free_newinfo;
|
||||
}
|
||||
|
||||
ret = translate_table(tmp.name, tmp.valid_hooks,
|
||||
ret = translate_table(net, tmp.name, tmp.valid_hooks,
|
||||
newinfo, loc_cpu_entry, tmp.size, tmp.num_entries,
|
||||
tmp.hook_entry, tmp.underflow);
|
||||
if (ret != 0)
|
||||
@ -1318,7 +1339,7 @@ do_replace(struct net *net, void __user *user, unsigned int len)
|
||||
return 0;
|
||||
|
||||
free_newinfo_untrans:
|
||||
IPT_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry, NULL);
|
||||
IPT_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry, net, NULL);
|
||||
free_newinfo:
|
||||
xt_free_table_info(newinfo);
|
||||
return ret;
|
||||
@ -1338,7 +1359,8 @@ add_counter_to_entry(struct ipt_entry *e,
|
||||
}
|
||||
|
||||
static int
|
||||
do_add_counters(struct net *net, void __user *user, unsigned int len, int compat)
|
||||
do_add_counters(struct net *net, const void __user *user,
|
||||
unsigned int len, int compat)
|
||||
{
|
||||
unsigned int i, curcpu;
|
||||
struct xt_counters_info tmp;
|
||||
@ -1534,10 +1556,10 @@ static int
|
||||
check_compat_entry_size_and_hooks(struct compat_ipt_entry *e,
|
||||
struct xt_table_info *newinfo,
|
||||
unsigned int *size,
|
||||
unsigned char *base,
|
||||
unsigned char *limit,
|
||||
unsigned int *hook_entries,
|
||||
unsigned int *underflows,
|
||||
const unsigned char *base,
|
||||
const unsigned char *limit,
|
||||
const unsigned int *hook_entries,
|
||||
const unsigned int *underflows,
|
||||
unsigned int *i,
|
||||
const char *name)
|
||||
{
|
||||
@ -1655,7 +1677,7 @@ compat_copy_entry_from_user(struct compat_ipt_entry *e, void **dstptr,
|
||||
}
|
||||
|
||||
static int
|
||||
compat_check_entry(struct ipt_entry *e, const char *name,
|
||||
compat_check_entry(struct ipt_entry *e, struct net *net, const char *name,
|
||||
unsigned int *i)
|
||||
{
|
||||
struct xt_mtchk_param mtpar;
|
||||
@ -1663,6 +1685,7 @@ compat_check_entry(struct ipt_entry *e, const char *name,
|
||||
int ret;
|
||||
|
||||
j = 0;
|
||||
mtpar.net = net;
|
||||
mtpar.table = name;
|
||||
mtpar.entryinfo = &e->ip;
|
||||
mtpar.hook_mask = e->comefrom;
|
||||
@ -1671,7 +1694,7 @@ compat_check_entry(struct ipt_entry *e, const char *name,
|
||||
if (ret)
|
||||
goto cleanup_matches;
|
||||
|
||||
ret = check_target(e, name);
|
||||
ret = check_target(e, net, name);
|
||||
if (ret)
|
||||
goto cleanup_matches;
|
||||
|
||||
@ -1679,12 +1702,13 @@ compat_check_entry(struct ipt_entry *e, const char *name,
|
||||
return 0;
|
||||
|
||||
cleanup_matches:
|
||||
IPT_MATCH_ITERATE(e, cleanup_match, &j);
|
||||
IPT_MATCH_ITERATE(e, cleanup_match, net, &j);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int
|
||||
translate_compat_table(const char *name,
|
||||
translate_compat_table(struct net *net,
|
||||
const char *name,
|
||||
unsigned int valid_hooks,
|
||||
struct xt_table_info **pinfo,
|
||||
void **pentry0,
|
||||
@ -1773,12 +1797,12 @@ translate_compat_table(const char *name,
|
||||
|
||||
i = 0;
|
||||
ret = IPT_ENTRY_ITERATE(entry1, newinfo->size, compat_check_entry,
|
||||
name, &i);
|
||||
net, name, &i);
|
||||
if (ret) {
|
||||
j -= i;
|
||||
COMPAT_IPT_ENTRY_ITERATE_CONTINUE(entry0, newinfo->size, i,
|
||||
compat_release_entry, &j);
|
||||
IPT_ENTRY_ITERATE(entry1, newinfo->size, cleanup_entry, &i);
|
||||
IPT_ENTRY_ITERATE(entry1, newinfo->size, cleanup_entry, net, &i);
|
||||
xt_free_table_info(newinfo);
|
||||
return ret;
|
||||
}
|
||||
@ -1833,7 +1857,7 @@ compat_do_replace(struct net *net, void __user *user, unsigned int len)
|
||||
goto free_newinfo;
|
||||
}
|
||||
|
||||
ret = translate_compat_table(tmp.name, tmp.valid_hooks,
|
||||
ret = translate_compat_table(net, tmp.name, tmp.valid_hooks,
|
||||
&newinfo, &loc_cpu_entry, tmp.size,
|
||||
tmp.num_entries, tmp.hook_entry,
|
||||
tmp.underflow);
|
||||
@ -1849,7 +1873,7 @@ compat_do_replace(struct net *net, void __user *user, unsigned int len)
|
||||
return 0;
|
||||
|
||||
free_newinfo_untrans:
|
||||
IPT_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry, NULL);
|
||||
IPT_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry, net, NULL);
|
||||
free_newinfo:
|
||||
xt_free_table_info(newinfo);
|
||||
return ret;
|
||||
@ -2086,7 +2110,7 @@ struct xt_table *ipt_register_table(struct net *net,
|
||||
loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
|
||||
memcpy(loc_cpu_entry, repl->entries, repl->size);
|
||||
|
||||
ret = translate_table(table->name, table->valid_hooks,
|
||||
ret = translate_table(net, table->name, table->valid_hooks,
|
||||
newinfo, loc_cpu_entry, repl->size,
|
||||
repl->num_entries,
|
||||
repl->hook_entry,
|
||||
@ -2108,7 +2132,7 @@ out:
|
||||
return ERR_PTR(ret);
|
||||
}
|
||||
|
||||
void ipt_unregister_table(struct xt_table *table)
|
||||
void ipt_unregister_table(struct net *net, struct xt_table *table)
|
||||
{
|
||||
struct xt_table_info *private;
|
||||
void *loc_cpu_entry;
|
||||
@ -2118,7 +2142,7 @@ void ipt_unregister_table(struct xt_table *table)
|
||||
|
||||
/* Decrease module usage counts and free resources */
|
||||
loc_cpu_entry = private->entries[raw_smp_processor_id()];
|
||||
IPT_ENTRY_ITERATE(loc_cpu_entry, private->size, cleanup_entry, NULL);
|
||||
IPT_ENTRY_ITERATE(loc_cpu_entry, private->size, cleanup_entry, net, NULL);
|
||||
if (private->number > private->initial_entries)
|
||||
module_put(table_owner);
|
||||
xt_free_table_info(private);
|
||||
|
@ -560,8 +560,7 @@ struct clusterip_seq_position {

static void *clusterip_seq_start(struct seq_file *s, loff_t *pos)
{
const struct proc_dir_entry *pde = s->private;
struct clusterip_config *c = pde->data;
struct clusterip_config *c = s->private;
unsigned int weight;
u_int32_t local_nodes;
struct clusterip_seq_position *idx;
@ -632,10 +631,9 @@ static int clusterip_proc_open(struct inode *inode, struct file *file)

if (!ret) {
struct seq_file *sf = file->private_data;
struct proc_dir_entry *pde = PDE(inode);
struct clusterip_config *c = pde->data;
struct clusterip_config *c = PDE(inode)->data;

sf->private = pde;
sf->private = c;

clusterip_config_get(c);
}
@ -645,8 +643,7 @@ static int clusterip_proc_open(struct inode *inode, struct file *file)

static int clusterip_proc_release(struct inode *inode, struct file *file)
{
struct proc_dir_entry *pde = PDE(inode);
struct clusterip_config *c = pde->data;
struct clusterip_config *c = PDE(inode)->data;
int ret;

ret = seq_release(inode, file);
@ -660,10 +657,9 @@ static int clusterip_proc_release(struct inode *inode, struct file *file)
static ssize_t clusterip_proc_write(struct file *file, const char __user *input,
size_t size, loff_t *ofs)
{
struct clusterip_config *c = PDE(file->f_path.dentry->d_inode)->data;
#define PROC_WRITELEN 10
char buffer[PROC_WRITELEN+1];
const struct proc_dir_entry *pde = PDE(file->f_path.dentry->d_inode);
struct clusterip_config *c = pde->data;
unsigned long nodenum;

if (copy_from_user(buffer, input, PROC_WRITELEN))

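The CLUSTERIP /proc change above boils down to stashing the clusterip_config pointer itself in seq_file->private at open time, so the seq iterator and the release path no longer have to reach back through the proc_dir_entry. A minimal sketch of that open path, assuming the existing clusterip_seq_ops (illustration only, not part of the patch):

static int example_clusterip_open(struct inode *inode, struct file *file)
{
	int ret = seq_open(file, &clusterip_seq_ops);

	if (!ret) {
		struct seq_file *sf = file->private_data;
		struct clusterip_config *c = PDE(inode)->data;

		sf->private = c;	/* later callbacks read s->private directly */
		clusterip_config_get(c);
	}
	return ret;
}
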
@ -338,7 +338,7 @@ struct compat_ipt_ulog_info {
char prefix[ULOG_PREFIX_LEN];
};

static void ulog_tg_compat_from_user(void *dst, void *src)
static void ulog_tg_compat_from_user(void *dst, const void *src)
{
const struct compat_ipt_ulog_info *cl = src;
struct ipt_ulog_info l = {
@ -351,7 +351,7 @@ static void ulog_tg_compat_from_user(void *dst, void *src)
memcpy(dst, &l, sizeof(l));
}

static int ulog_tg_compat_to_user(void __user *dst, void *src)
static int ulog_tg_compat_to_user(void __user *dst, const void *src)
{
const struct ipt_ulog_info *l = src;
struct compat_ipt_ulog_info cl = {

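The ULOG hunks follow the same rule as compat_standard_{from,to}_user earlier in the series: the source pointer of a compat conversion callback is now const. The general shape of such a pair, written here with hypothetical structures for illustration only:

struct compat_my_info { compat_int_t value; };
struct my_info { int value; };

static void my_compat_from_user(void *dst, const void *src)
{
	const struct compat_my_info *cm = src;
	struct my_info m = { .value = cm->value };	/* widen fields one by one */

	memcpy(dst, &m, sizeof(m));
}

static int my_compat_to_user(void __user *dst, const void *src)
{
	const struct my_info *m = src;
	struct compat_my_info cm = { .value = m->value };

	return copy_to_user(dst, &cm, sizeof(cm)) ? -EFAULT : 0;
}
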
@ -23,104 +23,32 @@ MODULE_DESCRIPTION("iptables filter table");
|
||||
(1 << NF_INET_FORWARD) | \
|
||||
(1 << NF_INET_LOCAL_OUT))
|
||||
|
||||
static struct
|
||||
{
|
||||
struct ipt_replace repl;
|
||||
struct ipt_standard entries[3];
|
||||
struct ipt_error term;
|
||||
} initial_table __net_initdata = {
|
||||
.repl = {
|
||||
.name = "filter",
|
||||
.valid_hooks = FILTER_VALID_HOOKS,
|
||||
.num_entries = 4,
|
||||
.size = sizeof(struct ipt_standard) * 3 + sizeof(struct ipt_error),
|
||||
.hook_entry = {
|
||||
[NF_INET_LOCAL_IN] = 0,
|
||||
[NF_INET_FORWARD] = sizeof(struct ipt_standard),
|
||||
[NF_INET_LOCAL_OUT] = sizeof(struct ipt_standard) * 2,
|
||||
},
|
||||
.underflow = {
|
||||
[NF_INET_LOCAL_IN] = 0,
|
||||
[NF_INET_FORWARD] = sizeof(struct ipt_standard),
|
||||
[NF_INET_LOCAL_OUT] = sizeof(struct ipt_standard) * 2,
|
||||
},
|
||||
},
|
||||
.entries = {
|
||||
IPT_STANDARD_INIT(NF_ACCEPT), /* LOCAL_IN */
|
||||
IPT_STANDARD_INIT(NF_ACCEPT), /* FORWARD */
|
||||
IPT_STANDARD_INIT(NF_ACCEPT), /* LOCAL_OUT */
|
||||
},
|
||||
.term = IPT_ERROR_INIT, /* ERROR */
|
||||
};
|
||||
|
||||
static const struct xt_table packet_filter = {
|
||||
.name = "filter",
|
||||
.valid_hooks = FILTER_VALID_HOOKS,
|
||||
.me = THIS_MODULE,
|
||||
.af = NFPROTO_IPV4,
|
||||
.priority = NF_IP_PRI_FILTER,
|
||||
};
|
||||
|
||||
/* The work comes in here from netfilter.c. */
|
||||
static unsigned int
|
||||
ipt_local_in_hook(unsigned int hook,
|
||||
struct sk_buff *skb,
|
||||
const struct net_device *in,
|
||||
const struct net_device *out,
|
||||
int (*okfn)(struct sk_buff *))
|
||||
iptable_filter_hook(unsigned int hook, struct sk_buff *skb,
|
||||
const struct net_device *in, const struct net_device *out,
|
||||
int (*okfn)(struct sk_buff *))
|
||||
{
|
||||
return ipt_do_table(skb, hook, in, out,
|
||||
dev_net(in)->ipv4.iptable_filter);
|
||||
}
|
||||
const struct net *net;
|
||||
|
||||
static unsigned int
|
||||
ipt_hook(unsigned int hook,
|
||||
struct sk_buff *skb,
|
||||
const struct net_device *in,
|
||||
const struct net_device *out,
|
||||
int (*okfn)(struct sk_buff *))
|
||||
{
|
||||
return ipt_do_table(skb, hook, in, out,
|
||||
dev_net(in)->ipv4.iptable_filter);
|
||||
}
|
||||
|
||||
static unsigned int
|
||||
ipt_local_out_hook(unsigned int hook,
|
||||
struct sk_buff *skb,
|
||||
const struct net_device *in,
|
||||
const struct net_device *out,
|
||||
int (*okfn)(struct sk_buff *))
|
||||
{
|
||||
/* root is playing with raw sockets. */
|
||||
if (skb->len < sizeof(struct iphdr) ||
|
||||
ip_hdrlen(skb) < sizeof(struct iphdr))
|
||||
if (hook == NF_INET_LOCAL_OUT &&
|
||||
(skb->len < sizeof(struct iphdr) ||
|
||||
ip_hdrlen(skb) < sizeof(struct iphdr)))
|
||||
/* root is playing with raw sockets. */
|
||||
return NF_ACCEPT;
|
||||
return ipt_do_table(skb, hook, in, out,
|
||||
dev_net(out)->ipv4.iptable_filter);
|
||||
|
||||
net = dev_net((in != NULL) ? in : out);
|
||||
return ipt_do_table(skb, hook, in, out, net->ipv4.iptable_filter);
|
||||
}
|
||||
|
||||
static struct nf_hook_ops ipt_ops[] __read_mostly = {
|
||||
{
|
||||
.hook = ipt_local_in_hook,
|
||||
.owner = THIS_MODULE,
|
||||
.pf = NFPROTO_IPV4,
|
||||
.hooknum = NF_INET_LOCAL_IN,
|
||||
.priority = NF_IP_PRI_FILTER,
|
||||
},
|
||||
{
|
||||
.hook = ipt_hook,
|
||||
.owner = THIS_MODULE,
|
||||
.pf = NFPROTO_IPV4,
|
||||
.hooknum = NF_INET_FORWARD,
|
||||
.priority = NF_IP_PRI_FILTER,
|
||||
},
|
||||
{
|
||||
.hook = ipt_local_out_hook,
|
||||
.owner = THIS_MODULE,
|
||||
.pf = NFPROTO_IPV4,
|
||||
.hooknum = NF_INET_LOCAL_OUT,
|
||||
.priority = NF_IP_PRI_FILTER,
|
||||
},
|
||||
};
|
||||
static struct nf_hook_ops *filter_ops __read_mostly;
|
||||
|
||||
/* Default to forward because I got too much mail already. */
|
||||
static int forward = NF_ACCEPT;
|
||||
@ -128,9 +56,18 @@ module_param(forward, bool, 0000);
|
||||
|
||||
static int __net_init iptable_filter_net_init(struct net *net)
|
||||
{
|
||||
/* Register table */
|
||||
struct ipt_replace *repl;
|
||||
|
||||
repl = ipt_alloc_initial_table(&packet_filter);
|
||||
if (repl == NULL)
|
||||
return -ENOMEM;
|
||||
/* Entry 1 is the FORWARD hook */
|
||||
((struct ipt_standard *)repl->entries)[1].target.verdict =
|
||||
-forward - 1;
|
||||
|
||||
net->ipv4.iptable_filter =
|
||||
ipt_register_table(net, &packet_filter, &initial_table.repl);
|
||||
ipt_register_table(net, &packet_filter, repl);
|
||||
kfree(repl);
|
||||
if (IS_ERR(net->ipv4.iptable_filter))
|
||||
return PTR_ERR(net->ipv4.iptable_filter);
|
||||
return 0;
|
||||
@ -138,7 +75,7 @@ static int __net_init iptable_filter_net_init(struct net *net)
|
||||
|
||||
static void __net_exit iptable_filter_net_exit(struct net *net)
|
||||
{
|
||||
ipt_unregister_table(net->ipv4.iptable_filter);
|
||||
ipt_unregister_table(net, net->ipv4.iptable_filter);
|
||||
}
|
||||
|
||||
static struct pernet_operations iptable_filter_net_ops = {
|
||||
@ -155,17 +92,16 @@ static int __init iptable_filter_init(void)
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/* Entry 1 is the FORWARD hook */
|
||||
initial_table.entries[1].target.verdict = -forward - 1;
|
||||
|
||||
ret = register_pernet_subsys(&iptable_filter_net_ops);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
/* Register hooks */
|
||||
ret = nf_register_hooks(ipt_ops, ARRAY_SIZE(ipt_ops));
|
||||
if (ret < 0)
|
||||
filter_ops = xt_hook_link(&packet_filter, iptable_filter_hook);
|
||||
if (IS_ERR(filter_ops)) {
|
||||
ret = PTR_ERR(filter_ops);
|
||||
goto cleanup_table;
|
||||
}
|
||||
|
||||
return ret;
|
||||
|
||||
@ -176,7 +112,7 @@ static int __init iptable_filter_init(void)
|
||||
|
||||
static void __exit iptable_filter_fini(void)
|
||||
{
|
||||
nf_unregister_hooks(ipt_ops, ARRAY_SIZE(ipt_ops));
|
||||
xt_hook_unlink(&packet_filter, filter_ops);
|
||||
unregister_pernet_subsys(&iptable_filter_net_ops);
|
||||
}
|
||||
|
||||
|
@ -27,101 +27,16 @@ MODULE_DESCRIPTION("iptables mangle table");
|
||||
(1 << NF_INET_LOCAL_OUT) | \
|
||||
(1 << NF_INET_POST_ROUTING))
|
||||
|
||||
/* Ouch - five different hooks? Maybe this should be a config option..... -- BC */
|
||||
static const struct
|
||||
{
|
||||
struct ipt_replace repl;
|
||||
struct ipt_standard entries[5];
|
||||
struct ipt_error term;
|
||||
} initial_table __net_initdata = {
|
||||
.repl = {
|
||||
.name = "mangle",
|
||||
.valid_hooks = MANGLE_VALID_HOOKS,
|
||||
.num_entries = 6,
|
||||
.size = sizeof(struct ipt_standard) * 5 + sizeof(struct ipt_error),
|
||||
.hook_entry = {
|
||||
[NF_INET_PRE_ROUTING] = 0,
|
||||
[NF_INET_LOCAL_IN] = sizeof(struct ipt_standard),
|
||||
[NF_INET_FORWARD] = sizeof(struct ipt_standard) * 2,
|
||||
[NF_INET_LOCAL_OUT] = sizeof(struct ipt_standard) * 3,
|
||||
[NF_INET_POST_ROUTING] = sizeof(struct ipt_standard) * 4,
|
||||
},
|
||||
.underflow = {
|
||||
[NF_INET_PRE_ROUTING] = 0,
|
||||
[NF_INET_LOCAL_IN] = sizeof(struct ipt_standard),
|
||||
[NF_INET_FORWARD] = sizeof(struct ipt_standard) * 2,
|
||||
[NF_INET_LOCAL_OUT] = sizeof(struct ipt_standard) * 3,
|
||||
[NF_INET_POST_ROUTING] = sizeof(struct ipt_standard) * 4,
|
||||
},
|
||||
},
|
||||
.entries = {
|
||||
IPT_STANDARD_INIT(NF_ACCEPT), /* PRE_ROUTING */
|
||||
IPT_STANDARD_INIT(NF_ACCEPT), /* LOCAL_IN */
|
||||
IPT_STANDARD_INIT(NF_ACCEPT), /* FORWARD */
|
||||
IPT_STANDARD_INIT(NF_ACCEPT), /* LOCAL_OUT */
|
||||
IPT_STANDARD_INIT(NF_ACCEPT), /* POST_ROUTING */
|
||||
},
|
||||
.term = IPT_ERROR_INIT, /* ERROR */
|
||||
};
|
||||
|
||||
static const struct xt_table packet_mangler = {
|
||||
.name = "mangle",
|
||||
.valid_hooks = MANGLE_VALID_HOOKS,
|
||||
.me = THIS_MODULE,
|
||||
.af = NFPROTO_IPV4,
|
||||
.priority = NF_IP_PRI_MANGLE,
|
||||
};
|
||||
|
||||
/* The work comes in here from netfilter.c. */
|
||||
static unsigned int
|
||||
ipt_pre_routing_hook(unsigned int hook,
|
||||
struct sk_buff *skb,
|
||||
const struct net_device *in,
|
||||
const struct net_device *out,
|
||||
int (*okfn)(struct sk_buff *))
|
||||
{
|
||||
return ipt_do_table(skb, hook, in, out,
|
||||
dev_net(in)->ipv4.iptable_mangle);
|
||||
}
|
||||
|
||||
static unsigned int
|
||||
ipt_post_routing_hook(unsigned int hook,
|
||||
struct sk_buff *skb,
|
||||
const struct net_device *in,
|
||||
const struct net_device *out,
|
||||
int (*okfn)(struct sk_buff *))
|
||||
{
|
||||
return ipt_do_table(skb, hook, in, out,
|
||||
dev_net(out)->ipv4.iptable_mangle);
|
||||
}
|
||||
|
||||
static unsigned int
|
||||
ipt_local_in_hook(unsigned int hook,
|
||||
struct sk_buff *skb,
|
||||
const struct net_device *in,
|
||||
const struct net_device *out,
|
||||
int (*okfn)(struct sk_buff *))
|
||||
{
|
||||
return ipt_do_table(skb, hook, in, out,
|
||||
dev_net(in)->ipv4.iptable_mangle);
|
||||
}
|
||||
|
||||
static unsigned int
|
||||
ipt_forward_hook(unsigned int hook,
|
||||
struct sk_buff *skb,
|
||||
const struct net_device *in,
|
||||
const struct net_device *out,
|
||||
int (*okfn)(struct sk_buff *))
|
||||
{
|
||||
return ipt_do_table(skb, hook, in, out,
|
||||
dev_net(in)->ipv4.iptable_mangle);
|
||||
}
|
||||
|
||||
static unsigned int
|
||||
ipt_local_hook(unsigned int hook,
|
||||
struct sk_buff *skb,
|
||||
const struct net_device *in,
|
||||
const struct net_device *out,
|
||||
int (*okfn)(struct sk_buff *))
|
||||
ipt_mangle_out(struct sk_buff *skb, const struct net_device *out)
|
||||
{
|
||||
unsigned int ret;
|
||||
const struct iphdr *iph;
|
||||
@ -141,7 +56,7 @@ ipt_local_hook(unsigned int hook,
|
||||
daddr = iph->daddr;
|
||||
tos = iph->tos;
|
||||
|
||||
ret = ipt_do_table(skb, hook, in, out,
|
||||
ret = ipt_do_table(skb, NF_INET_LOCAL_OUT, NULL, out,
|
||||
dev_net(out)->ipv4.iptable_mangle);
|
||||
/* Reroute for ANY change. */
|
||||
if (ret != NF_DROP && ret != NF_STOLEN && ret != NF_QUEUE) {
|
||||
@ -158,49 +73,36 @@ ipt_local_hook(unsigned int hook,
|
||||
return ret;
|
||||
}
|
||||
|
||||
static struct nf_hook_ops ipt_ops[] __read_mostly = {
|
||||
{
|
||||
.hook = ipt_pre_routing_hook,
|
||||
.owner = THIS_MODULE,
|
||||
.pf = NFPROTO_IPV4,
|
||||
.hooknum = NF_INET_PRE_ROUTING,
|
||||
.priority = NF_IP_PRI_MANGLE,
|
||||
},
|
||||
{
|
||||
.hook = ipt_local_in_hook,
|
||||
.owner = THIS_MODULE,
|
||||
.pf = NFPROTO_IPV4,
|
||||
.hooknum = NF_INET_LOCAL_IN,
|
||||
.priority = NF_IP_PRI_MANGLE,
|
||||
},
|
||||
{
|
||||
.hook = ipt_forward_hook,
|
||||
.owner = THIS_MODULE,
|
||||
.pf = NFPROTO_IPV4,
|
||||
.hooknum = NF_INET_FORWARD,
|
||||
.priority = NF_IP_PRI_MANGLE,
|
||||
},
|
||||
{
|
||||
.hook = ipt_local_hook,
|
||||
.owner = THIS_MODULE,
|
||||
.pf = NFPROTO_IPV4,
|
||||
.hooknum = NF_INET_LOCAL_OUT,
|
||||
.priority = NF_IP_PRI_MANGLE,
|
||||
},
|
||||
{
|
||||
.hook = ipt_post_routing_hook,
|
||||
.owner = THIS_MODULE,
|
||||
.pf = NFPROTO_IPV4,
|
||||
.hooknum = NF_INET_POST_ROUTING,
|
||||
.priority = NF_IP_PRI_MANGLE,
|
||||
},
|
||||
};
|
||||
/* The work comes in here from netfilter.c. */
|
||||
static unsigned int
|
||||
iptable_mangle_hook(unsigned int hook,
|
||||
struct sk_buff *skb,
|
||||
const struct net_device *in,
|
||||
const struct net_device *out,
|
||||
int (*okfn)(struct sk_buff *))
|
||||
{
|
||||
if (hook == NF_INET_LOCAL_OUT)
|
||||
return ipt_mangle_out(skb, out);
|
||||
if (hook == NF_INET_POST_ROUTING)
|
||||
return ipt_do_table(skb, hook, in, out,
|
||||
dev_net(out)->ipv4.iptable_mangle);
|
||||
/* PREROUTING/INPUT/FORWARD: */
|
||||
return ipt_do_table(skb, hook, in, out,
|
||||
dev_net(in)->ipv4.iptable_mangle);
|
||||
}
|
||||
|
||||
static struct nf_hook_ops *mangle_ops __read_mostly;
|
||||
|
||||
static int __net_init iptable_mangle_net_init(struct net *net)
|
||||
{
|
||||
/* Register table */
|
||||
struct ipt_replace *repl;
|
||||
|
||||
repl = ipt_alloc_initial_table(&packet_mangler);
|
||||
if (repl == NULL)
|
||||
return -ENOMEM;
|
||||
net->ipv4.iptable_mangle =
|
||||
ipt_register_table(net, &packet_mangler, &initial_table.repl);
|
||||
ipt_register_table(net, &packet_mangler, repl);
|
||||
kfree(repl);
|
||||
if (IS_ERR(net->ipv4.iptable_mangle))
|
||||
return PTR_ERR(net->ipv4.iptable_mangle);
|
||||
return 0;
|
||||
@ -208,7 +110,7 @@ static int __net_init iptable_mangle_net_init(struct net *net)
|
||||
|
||||
static void __net_exit iptable_mangle_net_exit(struct net *net)
|
||||
{
|
||||
ipt_unregister_table(net->ipv4.iptable_mangle);
|
||||
ipt_unregister_table(net, net->ipv4.iptable_mangle);
|
||||
}
|
||||
|
||||
static struct pernet_operations iptable_mangle_net_ops = {
|
||||
@ -225,9 +127,11 @@ static int __init iptable_mangle_init(void)
|
||||
return ret;
|
||||
|
||||
/* Register hooks */
|
||||
ret = nf_register_hooks(ipt_ops, ARRAY_SIZE(ipt_ops));
|
||||
if (ret < 0)
|
||||
mangle_ops = xt_hook_link(&packet_mangler, iptable_mangle_hook);
|
||||
if (IS_ERR(mangle_ops)) {
|
||||
ret = PTR_ERR(mangle_ops);
|
||||
goto cleanup_table;
|
||||
}
|
||||
|
||||
return ret;
|
||||
|
||||
@ -238,7 +142,7 @@ static int __init iptable_mangle_init(void)
|
||||
|
||||
static void __exit iptable_mangle_fini(void)
|
||||
{
|
||||
nf_unregister_hooks(ipt_ops, ARRAY_SIZE(ipt_ops));
|
||||
xt_hook_unlink(&packet_mangler, mangle_ops);
|
||||
unregister_pernet_subsys(&iptable_mangle_net_ops);
|
||||
}
|
||||
|
||||
|
@ -9,90 +9,44 @@
|
||||
|
||||
#define RAW_VALID_HOOKS ((1 << NF_INET_PRE_ROUTING) | (1 << NF_INET_LOCAL_OUT))
|
||||
|
||||
static const struct
|
||||
{
|
||||
struct ipt_replace repl;
|
||||
struct ipt_standard entries[2];
|
||||
struct ipt_error term;
|
||||
} initial_table __net_initdata = {
|
||||
.repl = {
|
||||
.name = "raw",
|
||||
.valid_hooks = RAW_VALID_HOOKS,
|
||||
.num_entries = 3,
|
||||
.size = sizeof(struct ipt_standard) * 2 + sizeof(struct ipt_error),
|
||||
.hook_entry = {
|
||||
[NF_INET_PRE_ROUTING] = 0,
|
||||
[NF_INET_LOCAL_OUT] = sizeof(struct ipt_standard)
|
||||
},
|
||||
.underflow = {
|
||||
[NF_INET_PRE_ROUTING] = 0,
|
||||
[NF_INET_LOCAL_OUT] = sizeof(struct ipt_standard)
|
||||
},
|
||||
},
|
||||
.entries = {
|
||||
IPT_STANDARD_INIT(NF_ACCEPT), /* PRE_ROUTING */
|
||||
IPT_STANDARD_INIT(NF_ACCEPT), /* LOCAL_OUT */
|
||||
},
|
||||
.term = IPT_ERROR_INIT, /* ERROR */
|
||||
};
|
||||
|
||||
static const struct xt_table packet_raw = {
|
||||
.name = "raw",
|
||||
.valid_hooks = RAW_VALID_HOOKS,
|
||||
.me = THIS_MODULE,
|
||||
.af = NFPROTO_IPV4,
|
||||
.priority = NF_IP_PRI_RAW,
|
||||
};
|
||||
|
||||
/* The work comes in here from netfilter.c. */
|
||||
static unsigned int
|
||||
ipt_hook(unsigned int hook,
|
||||
struct sk_buff *skb,
|
||||
const struct net_device *in,
|
||||
const struct net_device *out,
|
||||
int (*okfn)(struct sk_buff *))
|
||||
iptable_raw_hook(unsigned int hook, struct sk_buff *skb,
|
||||
const struct net_device *in, const struct net_device *out,
|
||||
int (*okfn)(struct sk_buff *))
|
||||
{
|
||||
return ipt_do_table(skb, hook, in, out,
|
||||
dev_net(in)->ipv4.iptable_raw);
|
||||
}
|
||||
const struct net *net;
|
||||
|
||||
static unsigned int
|
||||
ipt_local_hook(unsigned int hook,
|
||||
struct sk_buff *skb,
|
||||
const struct net_device *in,
|
||||
const struct net_device *out,
|
||||
int (*okfn)(struct sk_buff *))
|
||||
{
|
||||
/* root is playing with raw sockets. */
|
||||
if (skb->len < sizeof(struct iphdr) ||
|
||||
ip_hdrlen(skb) < sizeof(struct iphdr))
|
||||
if (hook == NF_INET_LOCAL_OUT &&
|
||||
(skb->len < sizeof(struct iphdr) ||
|
||||
ip_hdrlen(skb) < sizeof(struct iphdr)))
|
||||
/* root is playing with raw sockets. */
|
||||
return NF_ACCEPT;
|
||||
return ipt_do_table(skb, hook, in, out,
|
||||
dev_net(out)->ipv4.iptable_raw);
|
||||
|
||||
net = dev_net((in != NULL) ? in : out);
|
||||
return ipt_do_table(skb, hook, in, out, net->ipv4.iptable_raw);
|
||||
}
|
||||
|
||||
/* 'raw' is the very first table. */
|
||||
static struct nf_hook_ops ipt_ops[] __read_mostly = {
|
||||
{
|
||||
.hook = ipt_hook,
|
||||
.pf = NFPROTO_IPV4,
|
||||
.hooknum = NF_INET_PRE_ROUTING,
|
||||
.priority = NF_IP_PRI_RAW,
|
||||
.owner = THIS_MODULE,
|
||||
},
|
||||
{
|
||||
.hook = ipt_local_hook,
|
||||
.pf = NFPROTO_IPV4,
|
||||
.hooknum = NF_INET_LOCAL_OUT,
|
||||
.priority = NF_IP_PRI_RAW,
|
||||
.owner = THIS_MODULE,
|
||||
},
|
||||
};
|
||||
static struct nf_hook_ops *rawtable_ops __read_mostly;
|
||||
|
||||
static int __net_init iptable_raw_net_init(struct net *net)
|
||||
{
|
||||
/* Register table */
|
||||
struct ipt_replace *repl;
|
||||
|
||||
repl = ipt_alloc_initial_table(&packet_raw);
|
||||
if (repl == NULL)
|
||||
return -ENOMEM;
|
||||
net->ipv4.iptable_raw =
|
||||
ipt_register_table(net, &packet_raw, &initial_table.repl);
|
||||
ipt_register_table(net, &packet_raw, repl);
|
||||
kfree(repl);
|
||||
if (IS_ERR(net->ipv4.iptable_raw))
|
||||
return PTR_ERR(net->ipv4.iptable_raw);
|
||||
return 0;
|
||||
@ -100,7 +54,7 @@ static int __net_init iptable_raw_net_init(struct net *net)
|
||||
|
||||
static void __net_exit iptable_raw_net_exit(struct net *net)
|
||||
{
|
||||
ipt_unregister_table(net->ipv4.iptable_raw);
|
||||
ipt_unregister_table(net, net->ipv4.iptable_raw);
|
||||
}
|
||||
|
||||
static struct pernet_operations iptable_raw_net_ops = {
|
||||
@ -117,9 +71,11 @@ static int __init iptable_raw_init(void)
|
||||
return ret;
|
||||
|
||||
/* Register hooks */
|
||||
ret = nf_register_hooks(ipt_ops, ARRAY_SIZE(ipt_ops));
|
||||
if (ret < 0)
|
||||
rawtable_ops = xt_hook_link(&packet_raw, iptable_raw_hook);
|
||||
if (IS_ERR(rawtable_ops)) {
|
||||
ret = PTR_ERR(rawtable_ops);
|
||||
goto cleanup_table;
|
||||
}
|
||||
|
||||
return ret;
|
||||
|
||||
@ -130,7 +86,7 @@ static int __init iptable_raw_init(void)
|
||||
|
||||
static void __exit iptable_raw_fini(void)
|
||||
{
|
||||
nf_unregister_hooks(ipt_ops, ARRAY_SIZE(ipt_ops));
|
||||
xt_hook_unlink(&packet_raw, rawtable_ops);
|
||||
unregister_pernet_subsys(&iptable_raw_net_ops);
|
||||
}
|
||||
|
||||
|
@ -27,109 +27,44 @@ MODULE_DESCRIPTION("iptables security table, for MAC rules");
|
||||
(1 << NF_INET_FORWARD) | \
|
||||
(1 << NF_INET_LOCAL_OUT)
|
||||
|
||||
static const struct
|
||||
{
|
||||
struct ipt_replace repl;
|
||||
struct ipt_standard entries[3];
|
||||
struct ipt_error term;
|
||||
} initial_table __net_initdata = {
|
||||
.repl = {
|
||||
.name = "security",
|
||||
.valid_hooks = SECURITY_VALID_HOOKS,
|
||||
.num_entries = 4,
|
||||
.size = sizeof(struct ipt_standard) * 3 + sizeof(struct ipt_error),
|
||||
.hook_entry = {
|
||||
[NF_INET_LOCAL_IN] = 0,
|
||||
[NF_INET_FORWARD] = sizeof(struct ipt_standard),
|
||||
[NF_INET_LOCAL_OUT] = sizeof(struct ipt_standard) * 2,
|
||||
},
|
||||
.underflow = {
|
||||
[NF_INET_LOCAL_IN] = 0,
|
||||
[NF_INET_FORWARD] = sizeof(struct ipt_standard),
|
||||
[NF_INET_LOCAL_OUT] = sizeof(struct ipt_standard) * 2,
|
||||
},
|
||||
},
|
||||
.entries = {
|
||||
IPT_STANDARD_INIT(NF_ACCEPT), /* LOCAL_IN */
|
||||
IPT_STANDARD_INIT(NF_ACCEPT), /* FORWARD */
|
||||
IPT_STANDARD_INIT(NF_ACCEPT), /* LOCAL_OUT */
|
||||
},
|
||||
.term = IPT_ERROR_INIT, /* ERROR */
};

static const struct xt_table security_table = {
    .name = "security",
    .valid_hooks = SECURITY_VALID_HOOKS,
    .me = THIS_MODULE,
    .af = NFPROTO_IPV4,
    .priority = NF_IP_PRI_SECURITY,
};

static unsigned int
ipt_local_in_hook(unsigned int hook,
    struct sk_buff *skb,
    const struct net_device *in,
    const struct net_device *out,
    int (*okfn)(struct sk_buff *))
iptable_security_hook(unsigned int hook, struct sk_buff *skb,
    const struct net_device *in,
    const struct net_device *out,
    int (*okfn)(struct sk_buff *))
{
    return ipt_do_table(skb, hook, in, out,
        dev_net(in)->ipv4.iptable_security);
}
    const struct net *net;

static unsigned int
ipt_forward_hook(unsigned int hook,
    struct sk_buff *skb,
    const struct net_device *in,
    const struct net_device *out,
    int (*okfn)(struct sk_buff *))
{
    return ipt_do_table(skb, hook, in, out,
        dev_net(in)->ipv4.iptable_security);
}

static unsigned int
ipt_local_out_hook(unsigned int hook,
    struct sk_buff *skb,
    const struct net_device *in,
    const struct net_device *out,
    int (*okfn)(struct sk_buff *))
{
    /* Somebody is playing with raw sockets. */
    if (skb->len < sizeof(struct iphdr) ||
        ip_hdrlen(skb) < sizeof(struct iphdr))
    if (hook == NF_INET_LOCAL_OUT &&
        (skb->len < sizeof(struct iphdr) ||
         ip_hdrlen(skb) < sizeof(struct iphdr)))
        /* Somebody is playing with raw sockets. */
        return NF_ACCEPT;
    return ipt_do_table(skb, hook, in, out,
        dev_net(out)->ipv4.iptable_security);

    net = dev_net((in != NULL) ? in : out);
    return ipt_do_table(skb, hook, in, out, net->ipv4.iptable_security);
}

static struct nf_hook_ops ipt_ops[] __read_mostly = {
    {
        .hook = ipt_local_in_hook,
        .owner = THIS_MODULE,
        .pf = NFPROTO_IPV4,
        .hooknum = NF_INET_LOCAL_IN,
        .priority = NF_IP_PRI_SECURITY,
    },
    {
        .hook = ipt_forward_hook,
        .owner = THIS_MODULE,
        .pf = NFPROTO_IPV4,
        .hooknum = NF_INET_FORWARD,
        .priority = NF_IP_PRI_SECURITY,
    },
    {
        .hook = ipt_local_out_hook,
        .owner = THIS_MODULE,
        .pf = NFPROTO_IPV4,
        .hooknum = NF_INET_LOCAL_OUT,
        .priority = NF_IP_PRI_SECURITY,
    },
};
static struct nf_hook_ops *sectbl_ops __read_mostly;

static int __net_init iptable_security_net_init(struct net *net)
{
    net->ipv4.iptable_security =
        ipt_register_table(net, &security_table, &initial_table.repl);
    struct ipt_replace *repl;

    repl = ipt_alloc_initial_table(&security_table);
    if (repl == NULL)
        return -ENOMEM;
    net->ipv4.iptable_security =
        ipt_register_table(net, &security_table, repl);
    kfree(repl);
    if (IS_ERR(net->ipv4.iptable_security))
        return PTR_ERR(net->ipv4.iptable_security);

@ -138,7 +73,7 @@ static int __net_init iptable_security_net_init(struct net *net)

static void __net_exit iptable_security_net_exit(struct net *net)
{
    ipt_unregister_table(net->ipv4.iptable_security);
    ipt_unregister_table(net, net->ipv4.iptable_security);
}

static struct pernet_operations iptable_security_net_ops = {
@ -154,9 +89,11 @@ static int __init iptable_security_init(void)
    if (ret < 0)
        return ret;

    ret = nf_register_hooks(ipt_ops, ARRAY_SIZE(ipt_ops));
    if (ret < 0)
    sectbl_ops = xt_hook_link(&security_table, iptable_security_hook);
    if (IS_ERR(sectbl_ops)) {
        ret = PTR_ERR(sectbl_ops);
        goto cleanup_table;
    }

    return ret;

@ -167,7 +104,7 @@ cleanup_table:

static void __exit iptable_security_fini(void)
{
    nf_unregister_hooks(ipt_ops, ARRAY_SIZE(ipt_ops));
    xt_hook_unlink(&security_table, sectbl_ops);
    unregister_pernet_subsys(&iptable_security_net_ops);
}

@ -22,6 +22,7 @@
|
||||
#include <net/netfilter/nf_conntrack_helper.h>
|
||||
#include <net/netfilter/nf_conntrack_l4proto.h>
|
||||
#include <net/netfilter/nf_conntrack_l3proto.h>
|
||||
#include <net/netfilter/nf_conntrack_zones.h>
|
||||
#include <net/netfilter/nf_conntrack_core.h>
|
||||
#include <net/netfilter/ipv4/nf_conntrack_ipv4.h>
|
||||
#include <net/netfilter/nf_nat_helper.h>
|
||||
@ -266,7 +267,7 @@ getorigdst(struct sock *sk, int optval, void __user *user, int *len)
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
h = nf_conntrack_find_get(sock_net(sk), &tuple);
|
||||
h = nf_conntrack_find_get(sock_net(sk), NF_CT_DEFAULT_ZONE, &tuple);
|
||||
if (h) {
|
||||
struct sockaddr_in sin;
|
||||
struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);
|
||||
|
@ -18,6 +18,7 @@
|
||||
#include <net/netfilter/nf_conntrack_tuple.h>
|
||||
#include <net/netfilter/nf_conntrack_l4proto.h>
|
||||
#include <net/netfilter/nf_conntrack_core.h>
|
||||
#include <net/netfilter/nf_conntrack_zones.h>
|
||||
#include <net/netfilter/nf_log.h>
|
||||
|
||||
static unsigned int nf_ct_icmp_timeout __read_mostly = 30*HZ;
|
||||
@ -114,13 +115,14 @@ static bool icmp_new(struct nf_conn *ct, const struct sk_buff *skb,
|
||||
|
||||
/* Returns conntrack if it dealt with ICMP, and filled in skb fields */
|
||||
static int
|
||||
icmp_error_message(struct net *net, struct sk_buff *skb,
|
||||
icmp_error_message(struct net *net, struct nf_conn *tmpl, struct sk_buff *skb,
|
||||
enum ip_conntrack_info *ctinfo,
|
||||
unsigned int hooknum)
|
||||
{
|
||||
struct nf_conntrack_tuple innertuple, origtuple;
|
||||
const struct nf_conntrack_l4proto *innerproto;
|
||||
const struct nf_conntrack_tuple_hash *h;
|
||||
u16 zone = tmpl ? nf_ct_zone(tmpl) : NF_CT_DEFAULT_ZONE;
|
||||
|
||||
NF_CT_ASSERT(skb->nfct == NULL);
|
||||
|
||||
@ -146,7 +148,7 @@ icmp_error_message(struct net *net, struct sk_buff *skb,
|
||||
|
||||
*ctinfo = IP_CT_RELATED;
|
||||
|
||||
h = nf_conntrack_find_get(net, &innertuple);
|
||||
h = nf_conntrack_find_get(net, zone, &innertuple);
|
||||
if (!h) {
|
||||
pr_debug("icmp_error_message: no match\n");
|
||||
return -NF_ACCEPT;
|
||||
@ -163,7 +165,8 @@ icmp_error_message(struct net *net, struct sk_buff *skb,
|
||||
|
||||
/* Small and modified version of icmp_rcv */
|
||||
static int
|
||||
icmp_error(struct net *net, struct sk_buff *skb, unsigned int dataoff,
|
||||
icmp_error(struct net *net, struct nf_conn *tmpl,
|
||||
struct sk_buff *skb, unsigned int dataoff,
|
||||
enum ip_conntrack_info *ctinfo, u_int8_t pf, unsigned int hooknum)
|
||||
{
|
||||
const struct icmphdr *icmph;
|
||||
@ -208,7 +211,7 @@ icmp_error(struct net *net, struct sk_buff *skb, unsigned int dataoff,
|
||||
icmph->type != ICMP_REDIRECT)
|
||||
return NF_ACCEPT;
|
||||
|
||||
return icmp_error_message(net, skb, ctinfo, hooknum);
|
||||
return icmp_error_message(net, tmpl, skb, ctinfo, hooknum);
|
||||
}
|
||||
|
||||
#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
|
||||
|
@ -16,7 +16,9 @@
|
||||
|
||||
#include <linux/netfilter_bridge.h>
|
||||
#include <linux/netfilter_ipv4.h>
|
||||
#include <net/netfilter/nf_conntrack_zones.h>
|
||||
#include <net/netfilter/ipv4/nf_defrag_ipv4.h>
|
||||
#include <net/netfilter/nf_conntrack.h>
|
||||
|
||||
/* Returns new sk_buff, or NULL */
|
||||
static int nf_ct_ipv4_gather_frags(struct sk_buff *skb, u_int32_t user)
|
||||
@ -38,15 +40,20 @@ static int nf_ct_ipv4_gather_frags(struct sk_buff *skb, u_int32_t user)
|
||||
static enum ip_defrag_users nf_ct_defrag_user(unsigned int hooknum,
|
||||
struct sk_buff *skb)
|
||||
{
|
||||
u16 zone = NF_CT_DEFAULT_ZONE;
|
||||
|
||||
if (skb->nfct)
|
||||
zone = nf_ct_zone((struct nf_conn *)skb->nfct);
|
||||
|
||||
#ifdef CONFIG_BRIDGE_NETFILTER
|
||||
if (skb->nf_bridge &&
|
||||
skb->nf_bridge->mask & BRNF_NF_BRIDGE_PREROUTING)
|
||||
return IP_DEFRAG_CONNTRACK_BRIDGE_IN;
|
||||
return IP_DEFRAG_CONNTRACK_BRIDGE_IN + zone;
|
||||
#endif
|
||||
if (hooknum == NF_INET_PRE_ROUTING)
|
||||
return IP_DEFRAG_CONNTRACK_IN;
|
||||
return IP_DEFRAG_CONNTRACK_IN + zone;
|
||||
else
|
||||
return IP_DEFRAG_CONNTRACK_OUT;
|
||||
return IP_DEFRAG_CONNTRACK_OUT + zone;
|
||||
}
|
||||
|
||||
static unsigned int ipv4_conntrack_defrag(unsigned int hooknum,
|
||||
@ -59,7 +66,7 @@ static unsigned int ipv4_conntrack_defrag(unsigned int hooknum,
|
||||
#if !defined(CONFIG_NF_NAT) && !defined(CONFIG_NF_NAT_MODULE)
|
||||
/* Previously seen (loopback)? Ignore. Do this before
|
||||
fragment check. */
|
||||
if (skb->nfct)
|
||||
if (skb->nfct && !nf_ct_is_template((struct nf_conn *)skb->nfct))
|
||||
return NF_ACCEPT;
|
||||
#endif
|
||||
#endif
|
||||
|
@ -30,6 +30,7 @@
|
||||
#include <net/netfilter/nf_conntrack_helper.h>
|
||||
#include <net/netfilter/nf_conntrack_l3proto.h>
|
||||
#include <net/netfilter/nf_conntrack_l4proto.h>
|
||||
#include <net/netfilter/nf_conntrack_zones.h>
|
||||
|
||||
static DEFINE_SPINLOCK(nf_nat_lock);
|
||||
|
||||
@ -69,13 +70,14 @@ EXPORT_SYMBOL_GPL(nf_nat_proto_put);
|
||||
|
||||
/* We keep an extra hash for each conntrack, for fast searching. */
|
||||
static inline unsigned int
|
||||
hash_by_src(const struct net *net, const struct nf_conntrack_tuple *tuple)
|
||||
hash_by_src(const struct net *net, u16 zone,
|
||||
const struct nf_conntrack_tuple *tuple)
|
||||
{
|
||||
unsigned int hash;
|
||||
|
||||
/* Original src, to ensure we map it consistently if poss. */
|
||||
hash = jhash_3words((__force u32)tuple->src.u3.ip,
|
||||
(__force u32)tuple->src.u.all,
|
||||
(__force u32)tuple->src.u.all ^ zone,
|
||||
tuple->dst.protonum, 0);
|
||||
return ((u64)hash * net->ipv4.nat_htable_size) >> 32;
|
||||
}
|
||||
@ -139,12 +141,12 @@ same_src(const struct nf_conn *ct,
|
||||
|
||||
/* Only called for SRC manip */
|
||||
static int
|
||||
find_appropriate_src(struct net *net,
|
||||
find_appropriate_src(struct net *net, u16 zone,
|
||||
const struct nf_conntrack_tuple *tuple,
|
||||
struct nf_conntrack_tuple *result,
|
||||
const struct nf_nat_range *range)
|
||||
{
|
||||
unsigned int h = hash_by_src(net, tuple);
|
||||
unsigned int h = hash_by_src(net, zone, tuple);
|
||||
const struct nf_conn_nat *nat;
|
||||
const struct nf_conn *ct;
|
||||
const struct hlist_node *n;
|
||||
@ -152,7 +154,7 @@ find_appropriate_src(struct net *net,
|
||||
rcu_read_lock();
|
||||
hlist_for_each_entry_rcu(nat, n, &net->ipv4.nat_bysource[h], bysource) {
|
||||
ct = nat->ct;
|
||||
if (same_src(ct, tuple)) {
|
||||
if (same_src(ct, tuple) && nf_ct_zone(ct) == zone) {
|
||||
/* Copy source part from reply tuple. */
|
||||
nf_ct_invert_tuplepr(result,
|
||||
&ct->tuplehash[IP_CT_DIR_REPLY].tuple);
|
||||
@ -175,7 +177,7 @@ find_appropriate_src(struct net *net,
|
||||
the ip with the lowest src-ip/dst-ip/proto usage.
|
||||
*/
|
||||
static void
|
||||
find_best_ips_proto(struct nf_conntrack_tuple *tuple,
|
||||
find_best_ips_proto(u16 zone, struct nf_conntrack_tuple *tuple,
|
||||
const struct nf_nat_range *range,
|
||||
const struct nf_conn *ct,
|
||||
enum nf_nat_manip_type maniptype)
|
||||
@ -209,7 +211,7 @@ find_best_ips_proto(struct nf_conntrack_tuple *tuple,
|
||||
maxip = ntohl(range->max_ip);
|
||||
j = jhash_2words((__force u32)tuple->src.u3.ip,
|
||||
range->flags & IP_NAT_RANGE_PERSISTENT ?
|
||||
0 : (__force u32)tuple->dst.u3.ip, 0);
|
||||
0 : (__force u32)tuple->dst.u3.ip ^ zone, 0);
|
||||
j = ((u64)j * (maxip - minip + 1)) >> 32;
|
||||
*var_ipp = htonl(minip + j);
|
||||
}
|
||||
@ -229,6 +231,7 @@ get_unique_tuple(struct nf_conntrack_tuple *tuple,
|
||||
{
|
||||
struct net *net = nf_ct_net(ct);
|
||||
const struct nf_nat_protocol *proto;
|
||||
u16 zone = nf_ct_zone(ct);
|
||||
|
||||
/* 1) If this srcip/proto/src-proto-part is currently mapped,
|
||||
and that same mapping gives a unique tuple within the given
|
||||
@ -239,7 +242,7 @@ get_unique_tuple(struct nf_conntrack_tuple *tuple,
|
||||
manips not an issue. */
|
||||
if (maniptype == IP_NAT_MANIP_SRC &&
|
||||
!(range->flags & IP_NAT_RANGE_PROTO_RANDOM)) {
|
||||
if (find_appropriate_src(net, orig_tuple, tuple, range)) {
|
||||
if (find_appropriate_src(net, zone, orig_tuple, tuple, range)) {
|
||||
pr_debug("get_unique_tuple: Found current src map\n");
|
||||
if (!nf_nat_used_tuple(tuple, ct))
|
||||
return;
|
||||
@ -249,7 +252,7 @@ get_unique_tuple(struct nf_conntrack_tuple *tuple,
|
||||
/* 2) Select the least-used IP/proto combination in the given
|
||||
range. */
|
||||
*tuple = *orig_tuple;
|
||||
find_best_ips_proto(tuple, range, ct, maniptype);
|
||||
find_best_ips_proto(zone, tuple, range, ct, maniptype);
|
||||
|
||||
/* 3) The per-protocol part of the manip is made to map into
|
||||
the range to make a unique tuple. */
|
||||
@ -327,7 +330,8 @@ nf_nat_setup_info(struct nf_conn *ct,
|
||||
if (have_to_hash) {
|
||||
unsigned int srchash;
|
||||
|
||||
srchash = hash_by_src(net, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
|
||||
srchash = hash_by_src(net, nf_ct_zone(ct),
|
||||
&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
|
||||
spin_lock_bh(&nf_nat_lock);
|
||||
/* nf_conntrack_alter_reply might re-allocate exntension aera */
|
||||
nat = nfct_nat(ct);
|
||||
|
@ -27,76 +27,29 @@ MODULE_ALIAS("ip_nat_ftp");
|
||||
|
||||
/* FIXME: Time out? --RR */

static int
mangle_rfc959_packet(struct sk_buff *skb,
    __be32 newip,
    u_int16_t port,
    unsigned int matchoff,
    unsigned int matchlen,
    struct nf_conn *ct,
    enum ip_conntrack_info ctinfo)
static int nf_nat_ftp_fmt_cmd(enum nf_ct_ftp_type type,
    char *buffer, size_t buflen,
    __be32 addr, u16 port)
{
    char buffer[sizeof("nnn,nnn,nnn,nnn,nnn,nnn")];
    switch (type) {
    case NF_CT_FTP_PORT:
    case NF_CT_FTP_PASV:
        return snprintf(buffer, buflen, "%u,%u,%u,%u,%u,%u",
            ((unsigned char *)&addr)[0],
            ((unsigned char *)&addr)[1],
            ((unsigned char *)&addr)[2],
            ((unsigned char *)&addr)[3],
            port >> 8,
            port & 0xFF);
    case NF_CT_FTP_EPRT:
        return snprintf(buffer, buflen, "|1|%pI4|%u|", &addr, port);
    case NF_CT_FTP_EPSV:
        return snprintf(buffer, buflen, "|||%u|", port);
    }

    sprintf(buffer, "%u,%u,%u,%u,%u,%u",
        NIPQUAD(newip), port>>8, port&0xFF);

    pr_debug("calling nf_nat_mangle_tcp_packet\n");

    return nf_nat_mangle_tcp_packet(skb, ct, ctinfo, matchoff,
        matchlen, buffer, strlen(buffer));
    return 0;
}

/* |1|132.235.1.2|6275| */
|
||||
static int
|
||||
mangle_eprt_packet(struct sk_buff *skb,
|
||||
__be32 newip,
|
||||
u_int16_t port,
|
||||
unsigned int matchoff,
|
||||
unsigned int matchlen,
|
||||
struct nf_conn *ct,
|
||||
enum ip_conntrack_info ctinfo)
|
||||
{
|
||||
char buffer[sizeof("|1|255.255.255.255|65535|")];
|
||||
|
||||
sprintf(buffer, "|1|%u.%u.%u.%u|%u|", NIPQUAD(newip), port);
|
||||
|
||||
pr_debug("calling nf_nat_mangle_tcp_packet\n");
|
||||
|
||||
return nf_nat_mangle_tcp_packet(skb, ct, ctinfo, matchoff,
|
||||
matchlen, buffer, strlen(buffer));
|
||||
}
|
||||
|
||||
/* |1|132.235.1.2|6275| */
|
||||
static int
|
||||
mangle_epsv_packet(struct sk_buff *skb,
|
||||
__be32 newip,
|
||||
u_int16_t port,
|
||||
unsigned int matchoff,
|
||||
unsigned int matchlen,
|
||||
struct nf_conn *ct,
|
||||
enum ip_conntrack_info ctinfo)
|
||||
{
|
||||
char buffer[sizeof("|||65535|")];
|
||||
|
||||
sprintf(buffer, "|||%u|", port);
|
||||
|
||||
pr_debug("calling nf_nat_mangle_tcp_packet\n");
|
||||
|
||||
return nf_nat_mangle_tcp_packet(skb, ct, ctinfo, matchoff,
|
||||
matchlen, buffer, strlen(buffer));
|
||||
}
|
||||
|
||||
static int (*mangle[])(struct sk_buff *, __be32, u_int16_t,
|
||||
unsigned int, unsigned int, struct nf_conn *,
|
||||
enum ip_conntrack_info)
|
||||
= {
|
||||
[NF_CT_FTP_PORT] = mangle_rfc959_packet,
|
||||
[NF_CT_FTP_PASV] = mangle_rfc959_packet,
|
||||
[NF_CT_FTP_EPRT] = mangle_eprt_packet,
|
||||
[NF_CT_FTP_EPSV] = mangle_epsv_packet
|
||||
};
|
||||
|
||||
/* So, this packet has hit the connection tracking matching code.
|
||||
Mangle it, and change the expectation to match the new version. */
|
||||
static unsigned int nf_nat_ftp(struct sk_buff *skb,
|
||||
@ -110,6 +63,8 @@ static unsigned int nf_nat_ftp(struct sk_buff *skb,
|
||||
u_int16_t port;
|
||||
int dir = CTINFO2DIR(ctinfo);
|
||||
struct nf_conn *ct = exp->master;
|
||||
char buffer[sizeof("|1|255.255.255.255|65535|")];
|
||||
unsigned int buflen;
|
||||
|
||||
pr_debug("FTP_NAT: type %i, off %u len %u\n", type, matchoff, matchlen);
|
||||
|
||||
@ -132,11 +87,21 @@ static unsigned int nf_nat_ftp(struct sk_buff *skb,
|
||||
if (port == 0)
|
||||
return NF_DROP;
|
||||
|
||||
if (!mangle[type](skb, newip, port, matchoff, matchlen, ct, ctinfo)) {
|
||||
nf_ct_unexpect_related(exp);
|
||||
return NF_DROP;
|
||||
}
|
||||
buflen = nf_nat_ftp_fmt_cmd(type, buffer, sizeof(buffer), newip, port);
|
||||
if (!buflen)
|
||||
goto out;
|
||||
|
||||
pr_debug("calling nf_nat_mangle_tcp_packet\n");
|
||||
|
||||
if (!nf_nat_mangle_tcp_packet(skb, ct, ctinfo, matchoff,
|
||||
matchlen, buffer, buflen))
|
||||
goto out;
|
||||
|
||||
return NF_ACCEPT;
|
||||
|
||||
out:
|
||||
nf_ct_unexpect_related(exp);
|
||||
return NF_DROP;
|
||||
}
|
||||
|
||||
static void __exit nf_nat_ftp_fini(void)
@ -141,6 +141,17 @@ static int enlarge_skb(struct sk_buff *skb, unsigned int extra)
|
||||
return 1;
|
||||
}
|
||||
|
||||
void nf_nat_set_seq_adjust(struct nf_conn *ct, enum ip_conntrack_info ctinfo,
|
||||
__be32 seq, s16 off)
|
||||
{
|
||||
if (!off)
|
||||
return;
|
||||
set_bit(IPS_SEQ_ADJUST_BIT, &ct->status);
|
||||
adjust_tcp_sequence(ntohl(seq), off, ct, ctinfo);
|
||||
nf_conntrack_event_cache(IPCT_NATSEQADJ, ct);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(nf_nat_set_seq_adjust);
|
||||
|
||||
/* Generic function for mangling variable-length address changes inside
|
||||
* NATed TCP connections (like the PORT XXX,XXX,XXX,XXX,XXX,XXX
|
||||
* command in FTP).
|
||||
@ -149,14 +160,13 @@ static int enlarge_skb(struct sk_buff *skb, unsigned int extra)
|
||||
* skb enlargement, ...
|
||||
*
|
||||
* */
|
||||
int
|
||||
nf_nat_mangle_tcp_packet(struct sk_buff *skb,
|
||||
struct nf_conn *ct,
|
||||
enum ip_conntrack_info ctinfo,
|
||||
unsigned int match_offset,
|
||||
unsigned int match_len,
|
||||
const char *rep_buffer,
|
||||
unsigned int rep_len)
|
||||
int __nf_nat_mangle_tcp_packet(struct sk_buff *skb,
|
||||
struct nf_conn *ct,
|
||||
enum ip_conntrack_info ctinfo,
|
||||
unsigned int match_offset,
|
||||
unsigned int match_len,
|
||||
const char *rep_buffer,
|
||||
unsigned int rep_len, bool adjust)
|
||||
{
|
||||
struct rtable *rt = skb_rtable(skb);
|
||||
struct iphdr *iph;
|
||||
@ -202,16 +212,13 @@ nf_nat_mangle_tcp_packet(struct sk_buff *skb,
|
||||
inet_proto_csum_replace2(&tcph->check, skb,
|
||||
htons(oldlen), htons(datalen), 1);
|
||||
|
||||
if (rep_len != match_len) {
|
||||
set_bit(IPS_SEQ_ADJUST_BIT, &ct->status);
|
||||
adjust_tcp_sequence(ntohl(tcph->seq),
|
||||
(int)rep_len - (int)match_len,
|
||||
ct, ctinfo);
|
||||
nf_conntrack_event_cache(IPCT_NATSEQADJ, ct);
|
||||
}
|
||||
if (adjust && rep_len != match_len)
|
||||
nf_nat_set_seq_adjust(ct, ctinfo, tcph->seq,
|
||||
(int)rep_len - (int)match_len);
|
||||
|
||||
return 1;
|
||||
}
|
||||
EXPORT_SYMBOL(nf_nat_mangle_tcp_packet);
|
||||
EXPORT_SYMBOL(__nf_nat_mangle_tcp_packet);
|
||||
|
||||
/* Generic function for mangling variable-length address changes inside
|
||||
* NATed UDP connections (like the CONNECT DATA XXXXX MESG XXXXX INDEX XXXXX
|
||||
|
@ -25,6 +25,7 @@
|
||||
#include <net/netfilter/nf_nat_rule.h>
|
||||
#include <net/netfilter/nf_conntrack_helper.h>
|
||||
#include <net/netfilter/nf_conntrack_expect.h>
|
||||
#include <net/netfilter/nf_conntrack_zones.h>
|
||||
#include <linux/netfilter/nf_conntrack_proto_gre.h>
|
||||
#include <linux/netfilter/nf_conntrack_pptp.h>
|
||||
|
||||
@ -74,7 +75,7 @@ static void pptp_nat_expected(struct nf_conn *ct,
|
||||
|
||||
pr_debug("trying to unexpect other dir: ");
|
||||
nf_ct_dump_tuple_ip(&t);
|
||||
other_exp = nf_ct_expect_find_get(net, &t);
|
||||
other_exp = nf_ct_expect_find_get(net, nf_ct_zone(ct), &t);
|
||||
if (other_exp) {
|
||||
nf_ct_unexpect_related(other_exp);
|
||||
nf_ct_expect_put(other_exp);
|
||||
|
@ -28,36 +28,6 @@
|
||||
(1 << NF_INET_POST_ROUTING) | \
|
||||
(1 << NF_INET_LOCAL_OUT))
|
||||
|
||||
static const struct
|
||||
{
|
||||
struct ipt_replace repl;
|
||||
struct ipt_standard entries[3];
|
||||
struct ipt_error term;
|
||||
} nat_initial_table __net_initdata = {
|
||||
.repl = {
|
||||
.name = "nat",
|
||||
.valid_hooks = NAT_VALID_HOOKS,
|
||||
.num_entries = 4,
|
||||
.size = sizeof(struct ipt_standard) * 3 + sizeof(struct ipt_error),
|
||||
.hook_entry = {
|
||||
[NF_INET_PRE_ROUTING] = 0,
|
||||
[NF_INET_POST_ROUTING] = sizeof(struct ipt_standard),
|
||||
[NF_INET_LOCAL_OUT] = sizeof(struct ipt_standard) * 2
|
||||
},
|
||||
.underflow = {
|
||||
[NF_INET_PRE_ROUTING] = 0,
|
||||
[NF_INET_POST_ROUTING] = sizeof(struct ipt_standard),
|
||||
[NF_INET_LOCAL_OUT] = sizeof(struct ipt_standard) * 2
|
||||
},
|
||||
},
|
||||
.entries = {
|
||||
IPT_STANDARD_INIT(NF_ACCEPT), /* PRE_ROUTING */
|
||||
IPT_STANDARD_INIT(NF_ACCEPT), /* POST_ROUTING */
|
||||
IPT_STANDARD_INIT(NF_ACCEPT), /* LOCAL_OUT */
|
||||
},
|
||||
.term = IPT_ERROR_INIT, /* ERROR */
|
||||
};
|
||||
|
||||
static const struct xt_table nat_table = {
|
||||
.name = "nat",
|
||||
.valid_hooks = NAT_VALID_HOOKS,
|
||||
@ -186,8 +156,13 @@ static struct xt_target ipt_dnat_reg __read_mostly = {
|
||||
|
||||
static int __net_init nf_nat_rule_net_init(struct net *net)
|
||||
{
|
||||
net->ipv4.nat_table = ipt_register_table(net, &nat_table,
|
||||
&nat_initial_table.repl);
|
||||
struct ipt_replace *repl;
|
||||
|
||||
repl = ipt_alloc_initial_table(&nat_table);
|
||||
if (repl == NULL)
|
||||
return -ENOMEM;
|
||||
net->ipv4.nat_table = ipt_register_table(net, &nat_table, repl);
|
||||
kfree(repl);
|
||||
if (IS_ERR(net->ipv4.nat_table))
|
||||
return PTR_ERR(net->ipv4.nat_table);
|
||||
return 0;
|
||||
@ -195,7 +170,7 @@ static int __net_init nf_nat_rule_net_init(struct net *net)
|
||||
|
||||
static void __net_exit nf_nat_rule_net_exit(struct net *net)
|
||||
{
|
||||
ipt_unregister_table(net->ipv4.nat_table);
|
||||
ipt_unregister_table(net, net->ipv4.nat_table);
|
||||
}
|
||||
|
||||
static struct pernet_operations nf_nat_rule_net_ops = {
|
||||
|
@ -1,4 +1,4 @@
|
||||
/* SIP extension for UDP NAT alteration.
|
||||
/* SIP extension for NAT alteration.
|
||||
*
|
||||
* (C) 2005 by Christian Hentschel <chentschel@arnet.com.ar>
|
||||
* based on RR's ip_nat_ftp.c and other modules.
|
||||
@ -15,6 +15,7 @@
|
||||
#include <linux/ip.h>
|
||||
#include <net/ip.h>
|
||||
#include <linux/udp.h>
|
||||
#include <linux/tcp.h>
|
||||
|
||||
#include <net/netfilter/nf_nat.h>
|
||||
#include <net/netfilter/nf_nat_helper.h>
|
||||
@ -29,25 +30,42 @@ MODULE_DESCRIPTION("SIP NAT helper");
|
||||
MODULE_ALIAS("ip_nat_sip");
|
||||
|
||||
|
||||
static unsigned int mangle_packet(struct sk_buff *skb,
|
||||
static unsigned int mangle_packet(struct sk_buff *skb, unsigned int dataoff,
|
||||
const char **dptr, unsigned int *datalen,
|
||||
unsigned int matchoff, unsigned int matchlen,
|
||||
const char *buffer, unsigned int buflen)
|
||||
{
|
||||
enum ip_conntrack_info ctinfo;
|
||||
struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
|
||||
struct tcphdr *th;
|
||||
unsigned int baseoff;
|
||||
|
||||
if (!nf_nat_mangle_udp_packet(skb, ct, ctinfo, matchoff, matchlen,
|
||||
buffer, buflen))
|
||||
return 0;
|
||||
if (nf_ct_protonum(ct) == IPPROTO_TCP) {
|
||||
th = (struct tcphdr *)(skb->data + ip_hdrlen(skb));
|
||||
baseoff = ip_hdrlen(skb) + th->doff * 4;
|
||||
matchoff += dataoff - baseoff;
|
||||
|
||||
if (!__nf_nat_mangle_tcp_packet(skb, ct, ctinfo,
|
||||
matchoff, matchlen,
|
||||
buffer, buflen, false))
|
||||
return 0;
|
||||
} else {
|
||||
baseoff = ip_hdrlen(skb) + sizeof(struct udphdr);
|
||||
matchoff += dataoff - baseoff;
|
||||
|
||||
if (!nf_nat_mangle_udp_packet(skb, ct, ctinfo,
|
||||
matchoff, matchlen,
|
||||
buffer, buflen))
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Reload data pointer and adjust datalen value */
|
||||
*dptr = skb->data + ip_hdrlen(skb) + sizeof(struct udphdr);
|
||||
*dptr = skb->data + dataoff;
|
||||
*datalen += buflen - matchlen;
|
||||
return 1;
|
||||
}
|
||||
|
||||
static int map_addr(struct sk_buff *skb,
|
||||
static int map_addr(struct sk_buff *skb, unsigned int dataoff,
|
||||
const char **dptr, unsigned int *datalen,
|
||||
unsigned int matchoff, unsigned int matchlen,
|
||||
union nf_inet_addr *addr, __be16 port)
|
||||
@ -76,11 +94,11 @@ static int map_addr(struct sk_buff *skb,
|
||||
|
||||
buflen = sprintf(buffer, "%pI4:%u", &newaddr, ntohs(newport));
|
||||
|
||||
return mangle_packet(skb, dptr, datalen, matchoff, matchlen,
|
||||
return mangle_packet(skb, dataoff, dptr, datalen, matchoff, matchlen,
|
||||
buffer, buflen);
|
||||
}
|
||||
|
||||
static int map_sip_addr(struct sk_buff *skb,
|
||||
static int map_sip_addr(struct sk_buff *skb, unsigned int dataoff,
|
||||
const char **dptr, unsigned int *datalen,
|
||||
enum sip_header_types type)
|
||||
{
|
||||
@ -93,16 +111,18 @@ static int map_sip_addr(struct sk_buff *skb,
|
||||
if (ct_sip_parse_header_uri(ct, *dptr, NULL, *datalen, type, NULL,
|
||||
&matchoff, &matchlen, &addr, &port) <= 0)
|
||||
return 1;
|
||||
return map_addr(skb, dptr, datalen, matchoff, matchlen, &addr, port);
|
||||
return map_addr(skb, dataoff, dptr, datalen, matchoff, matchlen,
|
||||
&addr, port);
|
||||
}
|
||||
|
||||
static unsigned int ip_nat_sip(struct sk_buff *skb,
|
||||
static unsigned int ip_nat_sip(struct sk_buff *skb, unsigned int dataoff,
|
||||
const char **dptr, unsigned int *datalen)
|
||||
{
|
||||
enum ip_conntrack_info ctinfo;
|
||||
struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
|
||||
enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
|
||||
unsigned int dataoff, matchoff, matchlen;
|
||||
unsigned int coff, matchoff, matchlen;
|
||||
enum sip_header_types hdr;
|
||||
union nf_inet_addr addr;
|
||||
__be16 port;
|
||||
int request, in_header;
|
||||
@ -112,16 +132,21 @@ static unsigned int ip_nat_sip(struct sk_buff *skb,
|
||||
if (ct_sip_parse_request(ct, *dptr, *datalen,
|
||||
&matchoff, &matchlen,
|
||||
&addr, &port) > 0 &&
|
||||
!map_addr(skb, dptr, datalen, matchoff, matchlen,
|
||||
!map_addr(skb, dataoff, dptr, datalen, matchoff, matchlen,
|
||||
&addr, port))
|
||||
return NF_DROP;
|
||||
request = 1;
|
||||
} else
|
||||
request = 0;
|
||||
|
||||
if (nf_ct_protonum(ct) == IPPROTO_TCP)
|
||||
hdr = SIP_HDR_VIA_TCP;
|
||||
else
|
||||
hdr = SIP_HDR_VIA_UDP;
|
||||
|
||||
/* Translate topmost Via header and parameters */
|
||||
if (ct_sip_parse_header_uri(ct, *dptr, NULL, *datalen,
|
||||
SIP_HDR_VIA, NULL, &matchoff, &matchlen,
|
||||
hdr, NULL, &matchoff, &matchlen,
|
||||
&addr, &port) > 0) {
|
||||
unsigned int matchend, poff, plen, buflen, n;
|
||||
char buffer[sizeof("nnn.nnn.nnn.nnn:nnnnn")];
|
||||
@ -138,7 +163,7 @@ static unsigned int ip_nat_sip(struct sk_buff *skb,
|
||||
goto next;
|
||||
}
|
||||
|
||||
if (!map_addr(skb, dptr, datalen, matchoff, matchlen,
|
||||
if (!map_addr(skb, dataoff, dptr, datalen, matchoff, matchlen,
|
||||
&addr, port))
|
||||
return NF_DROP;
|
||||
|
||||
@ -153,8 +178,8 @@ static unsigned int ip_nat_sip(struct sk_buff *skb,
|
||||
addr.ip != ct->tuplehash[!dir].tuple.dst.u3.ip) {
|
||||
buflen = sprintf(buffer, "%pI4",
|
||||
&ct->tuplehash[!dir].tuple.dst.u3.ip);
|
||||
if (!mangle_packet(skb, dptr, datalen, poff, plen,
|
||||
buffer, buflen))
|
||||
if (!mangle_packet(skb, dataoff, dptr, datalen,
|
||||
poff, plen, buffer, buflen))
|
||||
return NF_DROP;
|
||||
}
|
||||
|
||||
@ -167,8 +192,8 @@ static unsigned int ip_nat_sip(struct sk_buff *skb,
|
||||
addr.ip != ct->tuplehash[!dir].tuple.src.u3.ip) {
|
||||
buflen = sprintf(buffer, "%pI4",
|
||||
&ct->tuplehash[!dir].tuple.src.u3.ip);
|
||||
if (!mangle_packet(skb, dptr, datalen, poff, plen,
|
||||
buffer, buflen))
|
||||
if (!mangle_packet(skb, dataoff, dptr, datalen,
|
||||
poff, plen, buffer, buflen))
|
||||
return NF_DROP;
|
||||
}
|
||||
|
||||
@ -181,31 +206,45 @@ static unsigned int ip_nat_sip(struct sk_buff *skb,
|
||||
htons(n) != ct->tuplehash[!dir].tuple.src.u.udp.port) {
|
||||
__be16 p = ct->tuplehash[!dir].tuple.src.u.udp.port;
|
||||
buflen = sprintf(buffer, "%u", ntohs(p));
|
||||
if (!mangle_packet(skb, dptr, datalen, poff, plen,
|
||||
buffer, buflen))
|
||||
if (!mangle_packet(skb, dataoff, dptr, datalen,
|
||||
poff, plen, buffer, buflen))
|
||||
return NF_DROP;
|
||||
}
|
||||
}
|
||||
|
||||
next:
|
||||
/* Translate Contact headers */
|
||||
dataoff = 0;
|
||||
coff = 0;
|
||||
in_header = 0;
|
||||
while (ct_sip_parse_header_uri(ct, *dptr, &dataoff, *datalen,
|
||||
while (ct_sip_parse_header_uri(ct, *dptr, &coff, *datalen,
|
||||
SIP_HDR_CONTACT, &in_header,
|
||||
&matchoff, &matchlen,
|
||||
&addr, &port) > 0) {
|
||||
if (!map_addr(skb, dptr, datalen, matchoff, matchlen,
|
||||
if (!map_addr(skb, dataoff, dptr, datalen, matchoff, matchlen,
|
||||
&addr, port))
|
||||
return NF_DROP;
|
||||
}
|
||||
|
||||
if (!map_sip_addr(skb, dptr, datalen, SIP_HDR_FROM) ||
|
||||
!map_sip_addr(skb, dptr, datalen, SIP_HDR_TO))
|
||||
if (!map_sip_addr(skb, dataoff, dptr, datalen, SIP_HDR_FROM) ||
|
||||
!map_sip_addr(skb, dataoff, dptr, datalen, SIP_HDR_TO))
|
||||
return NF_DROP;
|
||||
|
||||
return NF_ACCEPT;
|
||||
}
|
||||
|
||||
static void ip_nat_sip_seq_adjust(struct sk_buff *skb, s16 off)
|
||||
{
|
||||
enum ip_conntrack_info ctinfo;
|
||||
struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
|
||||
const struct tcphdr *th;
|
||||
|
||||
if (nf_ct_protonum(ct) != IPPROTO_TCP || off == 0)
|
||||
return;
|
||||
|
||||
th = (struct tcphdr *)(skb->data + ip_hdrlen(skb));
|
||||
nf_nat_set_seq_adjust(ct, ctinfo, th->seq, off);
|
||||
}
|
||||
|
||||
/* Handles expected signalling connections and media streams */
|
||||
static void ip_nat_sip_expected(struct nf_conn *ct,
|
||||
struct nf_conntrack_expect *exp)
|
||||
@ -232,7 +271,7 @@ static void ip_nat_sip_expected(struct nf_conn *ct,
|
||||
}
|
||||
}
|
||||
|
||||
static unsigned int ip_nat_sip_expect(struct sk_buff *skb,
|
||||
static unsigned int ip_nat_sip_expect(struct sk_buff *skb, unsigned int dataoff,
|
||||
const char **dptr, unsigned int *datalen,
|
||||
struct nf_conntrack_expect *exp,
|
||||
unsigned int matchoff,
|
||||
@ -279,8 +318,8 @@ static unsigned int ip_nat_sip_expect(struct sk_buff *skb,
|
||||
if (exp->tuple.dst.u3.ip != exp->saved_ip ||
|
||||
exp->tuple.dst.u.udp.port != exp->saved_proto.udp.port) {
|
||||
buflen = sprintf(buffer, "%pI4:%u", &newip, port);
|
||||
if (!mangle_packet(skb, dptr, datalen, matchoff, matchlen,
|
||||
buffer, buflen))
|
||||
if (!mangle_packet(skb, dataoff, dptr, datalen,
|
||||
matchoff, matchlen, buffer, buflen))
|
||||
goto err;
|
||||
}
|
||||
return NF_ACCEPT;
|
||||
@ -290,7 +329,7 @@ err:
|
||||
return NF_DROP;
|
||||
}
|
||||
|
||||
static int mangle_content_len(struct sk_buff *skb,
|
||||
static int mangle_content_len(struct sk_buff *skb, unsigned int dataoff,
|
||||
const char **dptr, unsigned int *datalen)
|
||||
{
|
||||
enum ip_conntrack_info ctinfo;
|
||||
@ -312,12 +351,13 @@ static int mangle_content_len(struct sk_buff *skb,
|
||||
return 0;
|
||||
|
||||
buflen = sprintf(buffer, "%u", c_len);
|
||||
return mangle_packet(skb, dptr, datalen, matchoff, matchlen,
|
||||
return mangle_packet(skb, dataoff, dptr, datalen, matchoff, matchlen,
|
||||
buffer, buflen);
|
||||
}
|
||||
|
||||
static int mangle_sdp_packet(struct sk_buff *skb, const char **dptr,
|
||||
unsigned int dataoff, unsigned int *datalen,
|
||||
static int mangle_sdp_packet(struct sk_buff *skb, unsigned int dataoff,
|
||||
const char **dptr, unsigned int *datalen,
|
||||
unsigned int sdpoff,
|
||||
enum sdp_header_types type,
|
||||
enum sdp_header_types term,
|
||||
char *buffer, int buflen)
|
||||
@ -326,16 +366,16 @@ static int mangle_sdp_packet(struct sk_buff *skb, const char **dptr,
|
||||
struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
|
||||
unsigned int matchlen, matchoff;
|
||||
|
||||
if (ct_sip_get_sdp_header(ct, *dptr, dataoff, *datalen, type, term,
|
||||
if (ct_sip_get_sdp_header(ct, *dptr, sdpoff, *datalen, type, term,
|
||||
&matchoff, &matchlen) <= 0)
|
||||
return -ENOENT;
|
||||
return mangle_packet(skb, dptr, datalen, matchoff, matchlen,
|
||||
return mangle_packet(skb, dataoff, dptr, datalen, matchoff, matchlen,
|
||||
buffer, buflen) ? 0 : -EINVAL;
|
||||
}
|
||||
|
||||
static unsigned int ip_nat_sdp_addr(struct sk_buff *skb, const char **dptr,
|
||||
unsigned int dataoff,
|
||||
unsigned int *datalen,
|
||||
static unsigned int ip_nat_sdp_addr(struct sk_buff *skb, unsigned int dataoff,
|
||||
const char **dptr, unsigned int *datalen,
|
||||
unsigned int sdpoff,
|
||||
enum sdp_header_types type,
|
||||
enum sdp_header_types term,
|
||||
const union nf_inet_addr *addr)
|
||||
@ -344,16 +384,15 @@ static unsigned int ip_nat_sdp_addr(struct sk_buff *skb, const char **dptr,
|
||||
unsigned int buflen;
|
||||
|
||||
buflen = sprintf(buffer, "%pI4", &addr->ip);
|
||||
if (mangle_sdp_packet(skb, dptr, dataoff, datalen, type, term,
|
||||
if (mangle_sdp_packet(skb, dataoff, dptr, datalen, sdpoff, type, term,
|
||||
buffer, buflen))
|
||||
return 0;
|
||||
|
||||
return mangle_content_len(skb, dptr, datalen);
|
||||
return mangle_content_len(skb, dataoff, dptr, datalen);
|
||||
}
|
||||
|
||||
static unsigned int ip_nat_sdp_port(struct sk_buff *skb,
|
||||
const char **dptr,
|
||||
unsigned int *datalen,
|
||||
static unsigned int ip_nat_sdp_port(struct sk_buff *skb, unsigned int dataoff,
|
||||
const char **dptr, unsigned int *datalen,
|
||||
unsigned int matchoff,
|
||||
unsigned int matchlen,
|
||||
u_int16_t port)
|
||||
@ -362,16 +401,16 @@ static unsigned int ip_nat_sdp_port(struct sk_buff *skb,
|
||||
unsigned int buflen;
|
||||
|
||||
buflen = sprintf(buffer, "%u", port);
|
||||
if (!mangle_packet(skb, dptr, datalen, matchoff, matchlen,
|
||||
if (!mangle_packet(skb, dataoff, dptr, datalen, matchoff, matchlen,
|
||||
buffer, buflen))
|
||||
return 0;
|
||||
|
||||
return mangle_content_len(skb, dptr, datalen);
|
||||
return mangle_content_len(skb, dataoff, dptr, datalen);
|
||||
}
|
||||
|
||||
static unsigned int ip_nat_sdp_session(struct sk_buff *skb, const char **dptr,
|
||||
unsigned int dataoff,
|
||||
unsigned int *datalen,
|
||||
static unsigned int ip_nat_sdp_session(struct sk_buff *skb, unsigned int dataoff,
|
||||
const char **dptr, unsigned int *datalen,
|
||||
unsigned int sdpoff,
|
||||
const union nf_inet_addr *addr)
|
||||
{
|
||||
char buffer[sizeof("nnn.nnn.nnn.nnn")];
|
||||
@ -379,12 +418,12 @@ static unsigned int ip_nat_sdp_session(struct sk_buff *skb, const char **dptr,
|
||||
|
||||
/* Mangle session description owner and contact addresses */
|
||||
buflen = sprintf(buffer, "%pI4", &addr->ip);
|
||||
if (mangle_sdp_packet(skb, dptr, dataoff, datalen,
|
||||
if (mangle_sdp_packet(skb, dataoff, dptr, datalen, sdpoff,
|
||||
SDP_HDR_OWNER_IP4, SDP_HDR_MEDIA,
|
||||
buffer, buflen))
|
||||
return 0;
|
||||
|
||||
switch (mangle_sdp_packet(skb, dptr, dataoff, datalen,
|
||||
switch (mangle_sdp_packet(skb, dataoff, dptr, datalen, sdpoff,
|
||||
SDP_HDR_CONNECTION_IP4, SDP_HDR_MEDIA,
|
||||
buffer, buflen)) {
|
||||
case 0:
|
||||
@ -401,14 +440,13 @@ static unsigned int ip_nat_sdp_session(struct sk_buff *skb, const char **dptr,
|
||||
return 0;
|
||||
}
|
||||
|
||||
return mangle_content_len(skb, dptr, datalen);
|
||||
return mangle_content_len(skb, dataoff, dptr, datalen);
|
||||
}
|
||||
|
||||
/* So, this packet has hit the connection tracking matching code.
|
||||
Mangle it, and change the expectation to match the new version. */
|
||||
static unsigned int ip_nat_sdp_media(struct sk_buff *skb,
|
||||
const char **dptr,
|
||||
unsigned int *datalen,
|
||||
static unsigned int ip_nat_sdp_media(struct sk_buff *skb, unsigned int dataoff,
|
||||
const char **dptr, unsigned int *datalen,
|
||||
struct nf_conntrack_expect *rtp_exp,
|
||||
struct nf_conntrack_expect *rtcp_exp,
|
||||
unsigned int mediaoff,
|
||||
@ -456,7 +494,8 @@ static unsigned int ip_nat_sdp_media(struct sk_buff *skb,
|
||||
|
||||
/* Update media port. */
|
||||
if (rtp_exp->tuple.dst.u.udp.port != rtp_exp->saved_proto.udp.port &&
|
||||
!ip_nat_sdp_port(skb, dptr, datalen, mediaoff, medialen, port))
|
||||
!ip_nat_sdp_port(skb, dataoff, dptr, datalen,
|
||||
mediaoff, medialen, port))
|
||||
goto err2;
|
||||
|
||||
return NF_ACCEPT;
|
||||
@ -471,6 +510,7 @@ err1:
|
||||
static void __exit nf_nat_sip_fini(void)
|
||||
{
|
||||
rcu_assign_pointer(nf_nat_sip_hook, NULL);
|
||||
rcu_assign_pointer(nf_nat_sip_seq_adjust_hook, NULL);
|
||||
rcu_assign_pointer(nf_nat_sip_expect_hook, NULL);
|
||||
rcu_assign_pointer(nf_nat_sdp_addr_hook, NULL);
|
||||
rcu_assign_pointer(nf_nat_sdp_port_hook, NULL);
|
||||
@ -482,12 +522,14 @@ static void __exit nf_nat_sip_fini(void)
|
||||
static int __init nf_nat_sip_init(void)
|
||||
{
|
||||
BUG_ON(nf_nat_sip_hook != NULL);
|
||||
BUG_ON(nf_nat_sip_seq_adjust_hook != NULL);
|
||||
BUG_ON(nf_nat_sip_expect_hook != NULL);
|
||||
BUG_ON(nf_nat_sdp_addr_hook != NULL);
|
||||
BUG_ON(nf_nat_sdp_port_hook != NULL);
|
||||
BUG_ON(nf_nat_sdp_session_hook != NULL);
|
||||
BUG_ON(nf_nat_sdp_media_hook != NULL);
|
||||
rcu_assign_pointer(nf_nat_sip_hook, ip_nat_sip);
|
||||
rcu_assign_pointer(nf_nat_sip_seq_adjust_hook, ip_nat_sip_seq_adjust);
|
||||
rcu_assign_pointer(nf_nat_sip_expect_hook, ip_nat_sip_expect);
|
||||
rcu_assign_pointer(nf_nat_sdp_addr_hook, ip_nat_sdp_addr);
|
||||
rcu_assign_pointer(nf_nat_sdp_port_hook, ip_nat_sdp_port);
|
||||
|
@ -1038,7 +1038,7 @@ static int snmp_parse_mangle(unsigned char *msg,
|
||||
unsigned int cls, con, tag, vers, pdutype;
|
||||
struct asn1_ctx ctx;
|
||||
struct asn1_octstr comm;
|
||||
struct snmp_object **obj;
|
||||
struct snmp_object *obj;
|
||||
|
||||
if (debug > 1)
|
||||
hex_dump(msg, len);
|
||||
@ -1148,43 +1148,34 @@ static int snmp_parse_mangle(unsigned char *msg,
|
||||
if (cls != ASN1_UNI || con != ASN1_CON || tag != ASN1_SEQ)
|
||||
return 0;
|
||||
|
||||
obj = kmalloc(sizeof(struct snmp_object), GFP_ATOMIC);
|
||||
if (obj == NULL) {
|
||||
if (net_ratelimit())
|
||||
printk(KERN_WARNING "OOM in bsalg(%d)\n", __LINE__);
|
||||
return 0;
|
||||
}
|
||||
|
||||
while (!asn1_eoc_decode(&ctx, eoc)) {
|
||||
unsigned int i;
|
||||
|
||||
if (!snmp_object_decode(&ctx, obj)) {
|
||||
if (*obj) {
|
||||
kfree((*obj)->id);
|
||||
kfree(*obj);
|
||||
if (!snmp_object_decode(&ctx, &obj)) {
|
||||
if (obj) {
|
||||
kfree(obj->id);
|
||||
kfree(obj);
|
||||
}
|
||||
kfree(obj);
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (debug > 1) {
|
||||
printk(KERN_DEBUG "bsalg: object: ");
|
||||
for (i = 0; i < (*obj)->id_len; i++) {
|
||||
for (i = 0; i < obj->id_len; i++) {
|
||||
if (i > 0)
|
||||
printk(".");
|
||||
printk("%lu", (*obj)->id[i]);
|
||||
printk("%lu", obj->id[i]);
|
||||
}
|
||||
printk(": type=%u\n", (*obj)->type);
|
||||
printk(": type=%u\n", obj->type);
|
||||
|
||||
}
|
||||
|
||||
if ((*obj)->type == SNMP_IPADDR)
|
||||
if (obj->type == SNMP_IPADDR)
|
||||
mangle_address(ctx.begin, ctx.pointer - 4 , map, check);
|
||||
|
||||
kfree((*obj)->id);
|
||||
kfree(*obj);
|
||||
kfree(obj->id);
|
||||
kfree(obj);
|
||||
}
|
||||
kfree(obj);
|
||||
|
||||
if (!asn1_eoc_decode(&ctx, eoc))
|
||||
return 0;
|
||||
|
@ -29,6 +29,7 @@
|
||||
#include <linux/netfilter_ipv6/ip6_tables.h>
|
||||
#include <linux/netfilter/x_tables.h>
|
||||
#include <net/netfilter/nf_log.h>
|
||||
#include "../../netfilter/xt_repldata.h"
|
||||
|
||||
MODULE_LICENSE("GPL");
|
||||
MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
|
||||
@ -67,6 +68,12 @@ do { \
|
||||
#define inline
|
||||
#endif
|
||||
|
||||
void *ip6t_alloc_initial_table(const struct xt_table *info)
|
||||
{
|
||||
return xt_alloc_initial_table(ip6t, IP6T);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(ip6t_alloc_initial_table);
|
||||
|
||||
/*
|
||||
We keep a set of rules for each CPU, so we can avoid write-locking
|
||||
them in the softirq when updating the counters and therefore
|
||||
@ -201,7 +208,7 @@ ip6t_error(struct sk_buff *skb, const struct xt_target_param *par)
|
||||
|
||||
/* Performance critical - called for every packet */
|
||||
static inline bool
|
||||
do_match(struct ip6t_entry_match *m, const struct sk_buff *skb,
|
||||
do_match(const struct ip6t_entry_match *m, const struct sk_buff *skb,
|
||||
struct xt_match_param *par)
|
||||
{
|
||||
par->match = m->u.kernel.match;
|
||||
@ -215,7 +222,7 @@ do_match(struct ip6t_entry_match *m, const struct sk_buff *skb,
|
||||
}
|
||||
|
||||
static inline struct ip6t_entry *
|
||||
get_entry(void *base, unsigned int offset)
|
||||
get_entry(const void *base, unsigned int offset)
|
||||
{
|
||||
return (struct ip6t_entry *)(base + offset);
|
||||
}
|
||||
@ -229,6 +236,12 @@ static inline bool unconditional(const struct ip6t_ip6 *ipv6)
|
||||
return memcmp(ipv6, &uncond, sizeof(uncond)) == 0;
|
||||
}
|
||||
|
||||
static inline const struct ip6t_entry_target *
|
||||
ip6t_get_target_c(const struct ip6t_entry *e)
|
||||
{
|
||||
return ip6t_get_target((struct ip6t_entry *)e);
|
||||
}
|
||||
|
||||
#if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
|
||||
defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
|
||||
/* This cries for unification! */
|
||||
@ -264,11 +277,11 @@ static struct nf_loginfo trace_loginfo = {
|
||||
|
||||
/* Mildly perf critical (only if packet tracing is on) */
|
||||
static inline int
|
||||
get_chainname_rulenum(struct ip6t_entry *s, struct ip6t_entry *e,
|
||||
get_chainname_rulenum(const struct ip6t_entry *s, const struct ip6t_entry *e,
|
||||
const char *hookname, const char **chainname,
|
||||
const char **comment, unsigned int *rulenum)
|
||||
{
|
||||
struct ip6t_standard_target *t = (void *)ip6t_get_target(s);
|
||||
const struct ip6t_standard_target *t = (void *)ip6t_get_target_c(s);
|
||||
|
||||
if (strcmp(t->target.u.kernel.target->name, IP6T_ERROR_TARGET) == 0) {
|
||||
/* Head of user chain: ERROR target with chainname */
|
||||
@ -294,15 +307,15 @@ get_chainname_rulenum(struct ip6t_entry *s, struct ip6t_entry *e,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void trace_packet(struct sk_buff *skb,
|
||||
static void trace_packet(const struct sk_buff *skb,
|
||||
unsigned int hook,
|
||||
const struct net_device *in,
|
||||
const struct net_device *out,
|
||||
const char *tablename,
|
||||
struct xt_table_info *private,
|
||||
struct ip6t_entry *e)
|
||||
const struct xt_table_info *private,
|
||||
const struct ip6t_entry *e)
|
||||
{
|
||||
void *table_base;
|
||||
const void *table_base;
|
||||
const struct ip6t_entry *root;
|
||||
const char *hookname, *chainname, *comment;
|
||||
unsigned int rulenum = 0;
|
||||
@ -345,9 +358,9 @@ ip6t_do_table(struct sk_buff *skb,
|
||||
/* Initializing verdict to NF_DROP keeps gcc happy. */
|
||||
unsigned int verdict = NF_DROP;
|
||||
const char *indev, *outdev;
|
||||
void *table_base;
|
||||
const void *table_base;
|
||||
struct ip6t_entry *e, *back;
|
||||
struct xt_table_info *private;
|
||||
const struct xt_table_info *private;
|
||||
struct xt_match_param mtpar;
|
||||
struct xt_target_param tgpar;
|
||||
|
||||
@ -378,7 +391,7 @@ ip6t_do_table(struct sk_buff *skb,
|
||||
back = get_entry(table_base, private->underflow[hook]);
|
||||
|
||||
do {
|
||||
struct ip6t_entry_target *t;
|
||||
const struct ip6t_entry_target *t;
|
||||
|
||||
IP_NF_ASSERT(e);
|
||||
IP_NF_ASSERT(back);
|
||||
@ -393,7 +406,7 @@ ip6t_do_table(struct sk_buff *skb,
|
||||
ntohs(ipv6_hdr(skb)->payload_len) +
|
||||
sizeof(struct ipv6hdr), 1);
|
||||
|
||||
t = ip6t_get_target(e);
|
||||
t = ip6t_get_target_c(e);
|
||||
IP_NF_ASSERT(t->u.kernel.target);
|
||||
|
||||
#if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
|
||||
@ -475,7 +488,7 @@ ip6t_do_table(struct sk_buff *skb,
|
||||
/* Figures out from what hook each rule can be called: returns 0 if
|
||||
there are loops. Puts hook bitmask in comefrom. */
|
||||
static int
|
||||
mark_source_chains(struct xt_table_info *newinfo,
|
||||
mark_source_chains(const struct xt_table_info *newinfo,
|
||||
unsigned int valid_hooks, void *entry0)
|
||||
{
|
||||
unsigned int hook;
|
||||
@ -493,8 +506,8 @@ mark_source_chains(struct xt_table_info *newinfo,
|
||||
e->counters.pcnt = pos;
|
||||
|
||||
for (;;) {
|
||||
struct ip6t_standard_target *t
|
||||
= (void *)ip6t_get_target(e);
|
||||
const struct ip6t_standard_target *t
|
||||
= (void *)ip6t_get_target_c(e);
|
||||
int visited = e->comefrom & (1 << hook);
|
||||
|
||||
if (e->comefrom & (1 << NF_INET_NUMHOOKS)) {
|
||||
@ -585,13 +598,14 @@ mark_source_chains(struct xt_table_info *newinfo,
|
||||
}
|
||||
|
||||
static int
|
||||
cleanup_match(struct ip6t_entry_match *m, unsigned int *i)
|
||||
cleanup_match(struct ip6t_entry_match *m, struct net *net, unsigned int *i)
|
||||
{
|
||||
struct xt_mtdtor_param par;
|
||||
|
||||
if (i && (*i)-- == 0)
|
||||
return 1;
|
||||
|
||||
par.net = net;
|
||||
par.match = m->u.kernel.match;
|
||||
par.matchinfo = m->data;
|
||||
par.family = NFPROTO_IPV6;
|
||||
@ -602,9 +616,9 @@ cleanup_match(struct ip6t_entry_match *m, unsigned int *i)
|
||||
}
|
||||
|
||||
static int
|
||||
check_entry(struct ip6t_entry *e, const char *name)
|
||||
check_entry(const struct ip6t_entry *e, const char *name)
|
||||
{
|
||||
struct ip6t_entry_target *t;
|
||||
const struct ip6t_entry_target *t;
|
||||
|
||||
if (!ip6_checkentry(&e->ipv6)) {
|
||||
duprintf("ip_tables: ip check failed %p %s.\n", e, name);
|
||||
@ -615,7 +629,7 @@ check_entry(struct ip6t_entry *e, const char *name)
|
||||
e->next_offset)
|
||||
return -EINVAL;
|
||||
|
||||
t = ip6t_get_target(e);
|
||||
t = ip6t_get_target_c(e);
|
||||
if (e->target_offset + t->u.target_size > e->next_offset)
|
||||
return -EINVAL;
|
||||
|
||||
@ -668,10 +682,11 @@ err:
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int check_target(struct ip6t_entry *e, const char *name)
|
||||
static int check_target(struct ip6t_entry *e, struct net *net, const char *name)
|
||||
{
|
||||
struct ip6t_entry_target *t = ip6t_get_target(e);
|
||||
struct xt_tgchk_param par = {
|
||||
.net = net,
|
||||
.table = name,
|
||||
.entryinfo = e,
|
||||
.target = t->u.kernel.target,
|
||||
@ -693,8 +708,8 @@ static int check_target(struct ip6t_entry *e, const char *name)
|
||||
}
|
||||
|
||||
static int
|
||||
find_check_entry(struct ip6t_entry *e, const char *name, unsigned int size,
|
||||
unsigned int *i)
|
||||
find_check_entry(struct ip6t_entry *e, struct net *net, const char *name,
|
||||
unsigned int size, unsigned int *i)
|
||||
{
|
||||
struct ip6t_entry_target *t;
|
||||
struct xt_target *target;
|
||||
@ -707,6 +722,7 @@ find_check_entry(struct ip6t_entry *e, const char *name, unsigned int size,
|
||||
return ret;
|
||||
|
||||
j = 0;
|
||||
mtpar.net = net;
|
||||
mtpar.table = name;
|
||||
mtpar.entryinfo = &e->ipv6;
|
||||
mtpar.hook_mask = e->comefrom;
|
||||
@ -727,7 +743,7 @@ find_check_entry(struct ip6t_entry *e, const char *name, unsigned int size,
|
||||
}
|
||||
t->u.kernel.target = target;
|
||||
|
||||
ret = check_target(e, name);
|
||||
ret = check_target(e, net, name);
|
||||
if (ret)
|
||||
goto err;
|
||||
|
||||
@ -736,18 +752,18 @@ find_check_entry(struct ip6t_entry *e, const char *name, unsigned int size,
|
||||
err:
|
||||
module_put(t->u.kernel.target->me);
|
||||
cleanup_matches:
|
||||
IP6T_MATCH_ITERATE(e, cleanup_match, &j);
|
||||
IP6T_MATCH_ITERATE(e, cleanup_match, net, &j);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static bool check_underflow(struct ip6t_entry *e)
|
||||
static bool check_underflow(const struct ip6t_entry *e)
|
||||
{
|
||||
const struct ip6t_entry_target *t;
|
||||
unsigned int verdict;
|
||||
|
||||
if (!unconditional(&e->ipv6))
|
||||
return false;
|
||||
t = ip6t_get_target(e);
|
||||
t = ip6t_get_target_c(e);
|
||||
if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0)
|
||||
return false;
|
||||
verdict = ((struct ip6t_standard_target *)t)->verdict;
|
||||
@ -758,8 +774,8 @@ static bool check_underflow(struct ip6t_entry *e)
|
||||
static int
|
||||
check_entry_size_and_hooks(struct ip6t_entry *e,
|
||||
struct xt_table_info *newinfo,
|
||||
unsigned char *base,
|
||||
unsigned char *limit,
|
||||
const unsigned char *base,
|
||||
const unsigned char *limit,
|
||||
const unsigned int *hook_entries,
|
||||
const unsigned int *underflows,
|
||||
unsigned int valid_hooks,
|
||||
@ -806,7 +822,7 @@ check_entry_size_and_hooks(struct ip6t_entry *e,
|
||||
}
|
||||
|
||||
static int
|
||||
cleanup_entry(struct ip6t_entry *e, unsigned int *i)
|
||||
cleanup_entry(struct ip6t_entry *e, struct net *net, unsigned int *i)
|
||||
{
|
||||
struct xt_tgdtor_param par;
|
||||
struct ip6t_entry_target *t;
|
||||
@ -815,9 +831,10 @@ cleanup_entry(struct ip6t_entry *e, unsigned int *i)
|
||||
return 1;
|
||||
|
||||
/* Cleanup all matches */
|
||||
IP6T_MATCH_ITERATE(e, cleanup_match, NULL);
|
||||
IP6T_MATCH_ITERATE(e, cleanup_match, net, NULL);
|
||||
t = ip6t_get_target(e);
|
||||
|
||||
par.net = net;
|
||||
par.target = t->u.kernel.target;
|
||||
par.targinfo = t->data;
|
||||
par.family = NFPROTO_IPV6;
|
||||
@ -830,7 +847,8 @@ cleanup_entry(struct ip6t_entry *e, unsigned int *i)
|
||||
/* Checks and translates the user-supplied table segment (held in
|
||||
newinfo) */
|
||||
static int
|
||||
translate_table(const char *name,
|
||||
translate_table(struct net *net,
|
||||
const char *name,
|
||||
unsigned int valid_hooks,
|
||||
struct xt_table_info *newinfo,
|
||||
void *entry0,
|
||||
@ -892,11 +910,11 @@ translate_table(const char *name,
|
||||
/* Finally, each sanity check must pass */
|
||||
i = 0;
|
||||
ret = IP6T_ENTRY_ITERATE(entry0, newinfo->size,
|
||||
find_check_entry, name, size, &i);
|
||||
find_check_entry, net, name, size, &i);
|
||||
|
||||
if (ret != 0) {
|
||||
IP6T_ENTRY_ITERATE(entry0, newinfo->size,
|
||||
cleanup_entry, &i);
|
||||
cleanup_entry, net, &i);
|
||||
return ret;
|
||||
}
|
||||
|
||||
@ -972,11 +990,11 @@ get_counters(const struct xt_table_info *t,
|
||||
local_bh_enable();
|
||||
}
|
||||
|
||||
static struct xt_counters *alloc_counters(struct xt_table *table)
|
||||
static struct xt_counters *alloc_counters(const struct xt_table *table)
|
||||
{
|
||||
unsigned int countersize;
|
||||
struct xt_counters *counters;
|
||||
struct xt_table_info *private = table->private;
|
||||
const struct xt_table_info *private = table->private;
|
||||
|
||||
/* We need atomic snapshot of counters: rest doesn't change
|
||||
(other than comefrom, which userspace doesn't care
|
||||
@ -994,11 +1012,11 @@ static struct xt_counters *alloc_counters(struct xt_table *table)
|
||||
|
||||
static int
|
||||
copy_entries_to_user(unsigned int total_size,
|
||||
struct xt_table *table,
|
||||
const struct xt_table *table,
|
||||
void __user *userptr)
|
||||
{
|
||||
unsigned int off, num;
|
||||
struct ip6t_entry *e;
|
||||
const struct ip6t_entry *e;
|
||||
struct xt_counters *counters;
|
||||
const struct xt_table_info *private = table->private;
|
||||
int ret = 0;
|
||||
@ -1050,7 +1068,7 @@ copy_entries_to_user(unsigned int total_size,
|
||||
}
|
||||
}
|
||||
|
||||
t = ip6t_get_target(e);
|
||||
t = ip6t_get_target_c(e);
|
||||
if (copy_to_user(userptr + off + e->target_offset
|
||||
+ offsetof(struct ip6t_entry_target,
|
||||
u.user.name),
|
||||
@ -1067,7 +1085,7 @@ copy_entries_to_user(unsigned int total_size,
|
||||
}
|
||||
|
||||
#ifdef CONFIG_COMPAT
|
||||
static void compat_standard_from_user(void *dst, void *src)
|
||||
static void compat_standard_from_user(void *dst, const void *src)
|
||||
{
|
||||
int v = *(compat_int_t *)src;
|
||||
|
||||
@ -1076,7 +1094,7 @@ static void compat_standard_from_user(void *dst, void *src)
|
||||
memcpy(dst, &v, sizeof(v));
|
||||
}
|
||||
|
||||
static int compat_standard_to_user(void __user *dst, void *src)
|
||||
static int compat_standard_to_user(void __user *dst, const void *src)
|
||||
{
|
||||
compat_int_t cv = *(int *)src;
|
||||
|
||||
@ -1086,24 +1104,24 @@ static int compat_standard_to_user(void __user *dst, void *src)
|
||||
}
|
||||
|
||||
static inline int
|
||||
compat_calc_match(struct ip6t_entry_match *m, int *size)
|
||||
compat_calc_match(const struct ip6t_entry_match *m, int *size)
|
||||
{
|
||||
*size += xt_compat_match_offset(m->u.kernel.match);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int compat_calc_entry(struct ip6t_entry *e,
|
||||
static int compat_calc_entry(const struct ip6t_entry *e,
|
||||
const struct xt_table_info *info,
|
||||
void *base, struct xt_table_info *newinfo)
|
||||
const void *base, struct xt_table_info *newinfo)
|
||||
{
|
||||
struct ip6t_entry_target *t;
|
||||
const struct ip6t_entry_target *t;
|
||||
unsigned int entry_offset;
|
||||
int off, i, ret;
|
||||
|
||||
off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
|
||||
entry_offset = (void *)e - base;
|
||||
IP6T_MATCH_ITERATE(e, compat_calc_match, &off);
|
||||
t = ip6t_get_target(e);
|
||||
t = ip6t_get_target_c(e);
|
||||
off += xt_compat_target_offset(t->u.kernel.target);
|
||||
newinfo->size -= off;
|
||||
ret = xt_compat_add_offset(AF_INET6, entry_offset, off);
|
||||
@ -1139,7 +1157,8 @@ static int compat_table_info(const struct xt_table_info *info,
|
||||
}
|
||||
#endif
|
||||
|
||||
static int get_info(struct net *net, void __user *user, int *len, int compat)
|
||||
static int get_info(struct net *net, void __user *user,
|
||||
const int *len, int compat)
|
||||
{
|
||||
char name[IP6T_TABLE_MAXNAMELEN];
|
||||
struct xt_table *t;
|
||||
@ -1199,7 +1218,8 @@ static int get_info(struct net *net, void __user *user, int *len, int compat)
|
||||
}
|
||||
|
||||
static int
|
||||
get_entries(struct net *net, struct ip6t_get_entries __user *uptr, int *len)
|
||||
get_entries(struct net *net, struct ip6t_get_entries __user *uptr,
|
||||
const int *len)
|
||||
{
|
||||
int ret;
|
||||
struct ip6t_get_entries get;
|
||||
@ -1291,7 +1311,7 @@ __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
|
||||
/* Decrease module usage counts and free resource */
|
||||
loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()];
|
||||
IP6T_ENTRY_ITERATE(loc_cpu_old_entry, oldinfo->size, cleanup_entry,
|
||||
NULL);
|
||||
net, NULL);
|
||||
xt_free_table_info(oldinfo);
|
||||
if (copy_to_user(counters_ptr, counters,
|
||||
sizeof(struct xt_counters) * num_counters) != 0)
|
||||
@ -1310,7 +1330,7 @@ __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
|
||||
}
|
||||
|
||||
static int
|
||||
do_replace(struct net *net, void __user *user, unsigned int len)
|
||||
do_replace(struct net *net, const void __user *user, unsigned int len)
|
||||
{
|
||||
int ret;
|
||||
struct ip6t_replace tmp;
|
||||
@ -1336,7 +1356,7 @@ do_replace(struct net *net, void __user *user, unsigned int len)
|
||||
goto free_newinfo;
|
||||
}
|
||||
|
||||
ret = translate_table(tmp.name, tmp.valid_hooks,
|
||||
ret = translate_table(net, tmp.name, tmp.valid_hooks,
|
||||
newinfo, loc_cpu_entry, tmp.size, tmp.num_entries,
|
||||
tmp.hook_entry, tmp.underflow);
|
||||
if (ret != 0)
|
||||
@ -1351,7 +1371,7 @@ do_replace(struct net *net, void __user *user, unsigned int len)
|
||||
return 0;
|
||||
|
||||
free_newinfo_untrans:
|
||||
IP6T_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry, NULL);
|
||||
IP6T_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry, net, NULL);
|
||||
free_newinfo:
|
||||
xt_free_table_info(newinfo);
|
||||
return ret;
|
||||
@ -1371,7 +1391,7 @@ add_counter_to_entry(struct ip6t_entry *e,
|
||||
}
|
||||
|
||||
static int
|
||||
do_add_counters(struct net *net, void __user *user, unsigned int len,
|
||||
do_add_counters(struct net *net, const void __user *user, unsigned int len,
|
||||
int compat)
|
||||
{
|
||||
unsigned int i, curcpu;
|
||||
@ -1570,10 +1590,10 @@ static int
|
||||
check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e,
|
||||
struct xt_table_info *newinfo,
|
||||
unsigned int *size,
|
||||
unsigned char *base,
|
||||
unsigned char *limit,
|
||||
unsigned int *hook_entries,
|
||||
unsigned int *underflows,
|
||||
const unsigned char *base,
|
||||
const unsigned char *limit,
|
const unsigned int *hook_entries,
const unsigned int *underflows,
unsigned int *i,
const char *name)
{
@ -1690,14 +1710,15 @@ compat_copy_entry_from_user(struct compat_ip6t_entry *e, void **dstptr,
return ret;
}

static int compat_check_entry(struct ip6t_entry *e, const char *name,
unsigned int *i)
static int compat_check_entry(struct ip6t_entry *e, struct net *net,
const char *name, unsigned int *i)
{
unsigned int j;
int ret;
struct xt_mtchk_param mtpar;

j = 0;
mtpar.net = net;
mtpar.table = name;
mtpar.entryinfo = &e->ipv6;
mtpar.hook_mask = e->comefrom;
@ -1706,7 +1727,7 @@ static int compat_check_entry(struct ip6t_entry *e, const char *name,
if (ret)
goto cleanup_matches;

ret = check_target(e, name);
ret = check_target(e, net, name);
if (ret)
goto cleanup_matches;

@ -1714,12 +1735,13 @@ static int compat_check_entry(struct ip6t_entry *e, const char *name,
return 0;

cleanup_matches:
IP6T_MATCH_ITERATE(e, cleanup_match, &j);
IP6T_MATCH_ITERATE(e, cleanup_match, net, &j);
return ret;
}

static int
translate_compat_table(const char *name,
translate_compat_table(struct net *net,
const char *name,
unsigned int valid_hooks,
struct xt_table_info **pinfo,
void **pentry0,
@ -1808,12 +1830,12 @@ translate_compat_table(const char *name,

i = 0;
ret = IP6T_ENTRY_ITERATE(entry1, newinfo->size, compat_check_entry,
name, &i);
net, name, &i);
if (ret) {
j -= i;
COMPAT_IP6T_ENTRY_ITERATE_CONTINUE(entry0, newinfo->size, i,
compat_release_entry, &j);
IP6T_ENTRY_ITERATE(entry1, newinfo->size, cleanup_entry, &i);
IP6T_ENTRY_ITERATE(entry1, newinfo->size, cleanup_entry, net, &i);
xt_free_table_info(newinfo);
return ret;
}
@ -1868,7 +1890,7 @@ compat_do_replace(struct net *net, void __user *user, unsigned int len)
goto free_newinfo;
}

ret = translate_compat_table(tmp.name, tmp.valid_hooks,
ret = translate_compat_table(net, tmp.name, tmp.valid_hooks,
&newinfo, &loc_cpu_entry, tmp.size,
tmp.num_entries, tmp.hook_entry,
tmp.underflow);
@ -1884,7 +1906,7 @@ compat_do_replace(struct net *net, void __user *user, unsigned int len)
return 0;

free_newinfo_untrans:
IP6T_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry, NULL);
IP6T_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry, net, NULL);
free_newinfo:
xt_free_table_info(newinfo);
return ret;
@ -2121,7 +2143,7 @@ struct xt_table *ip6t_register_table(struct net *net,
loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
memcpy(loc_cpu_entry, repl->entries, repl->size);

ret = translate_table(table->name, table->valid_hooks,
ret = translate_table(net, table->name, table->valid_hooks,
newinfo, loc_cpu_entry, repl->size,
repl->num_entries,
repl->hook_entry,
@ -2142,7 +2164,7 @@ out:
return ERR_PTR(ret);
}

void ip6t_unregister_table(struct xt_table *table)
void ip6t_unregister_table(struct net *net, struct xt_table *table)
{
struct xt_table_info *private;
void *loc_cpu_entry;
@ -2152,7 +2174,7 @@ void ip6t_unregister_table(struct xt_table *table)

/* Decrease module usage counts and free resources */
loc_cpu_entry = private->entries[raw_smp_processor_id()];
IP6T_ENTRY_ITERATE(loc_cpu_entry, private->size, cleanup_entry, NULL);
IP6T_ENTRY_ITERATE(loc_cpu_entry, private->size, cleanup_entry, net, NULL);
if (private->number > private->initial_entries)
module_put(table_owner);
xt_free_table_info(private);

@ -21,99 +21,26 @@ MODULE_DESCRIPTION("ip6tables filter table");
(1 << NF_INET_FORWARD) | \
(1 << NF_INET_LOCAL_OUT))

static struct
{
struct ip6t_replace repl;
struct ip6t_standard entries[3];
struct ip6t_error term;
} initial_table __net_initdata = {
.repl = {
.name = "filter",
.valid_hooks = FILTER_VALID_HOOKS,
.num_entries = 4,
.size = sizeof(struct ip6t_standard) * 3 + sizeof(struct ip6t_error),
.hook_entry = {
[NF_INET_LOCAL_IN] = 0,
[NF_INET_FORWARD] = sizeof(struct ip6t_standard),
[NF_INET_LOCAL_OUT] = sizeof(struct ip6t_standard) * 2
},
.underflow = {
[NF_INET_LOCAL_IN] = 0,
[NF_INET_FORWARD] = sizeof(struct ip6t_standard),
[NF_INET_LOCAL_OUT] = sizeof(struct ip6t_standard) * 2
},
},
.entries = {
IP6T_STANDARD_INIT(NF_ACCEPT), /* LOCAL_IN */
IP6T_STANDARD_INIT(NF_ACCEPT), /* FORWARD */
IP6T_STANDARD_INIT(NF_ACCEPT), /* LOCAL_OUT */
},
.term = IP6T_ERROR_INIT, /* ERROR */
};

static const struct xt_table packet_filter = {
.name = "filter",
.valid_hooks = FILTER_VALID_HOOKS,
.me = THIS_MODULE,
.af = NFPROTO_IPV6,
.priority = NF_IP6_PRI_FILTER,
};

/* The work comes in here from netfilter.c. */
static unsigned int
ip6t_in_hook(unsigned int hook,
struct sk_buff *skb,
const struct net_device *in,
const struct net_device *out,
int (*okfn)(struct sk_buff *))
ip6table_filter_hook(unsigned int hook, struct sk_buff *skb,
const struct net_device *in, const struct net_device *out,
int (*okfn)(struct sk_buff *))
{
return ip6t_do_table(skb, hook, in, out,
dev_net(in)->ipv6.ip6table_filter);
const struct net *net = dev_net((in != NULL) ? in : out);

return ip6t_do_table(skb, hook, in, out, net->ipv6.ip6table_filter);
}

static unsigned int
ip6t_local_out_hook(unsigned int hook,
struct sk_buff *skb,
const struct net_device *in,
const struct net_device *out,
int (*okfn)(struct sk_buff *))
{
#if 0
/* root is playing with raw sockets. */
if (skb->len < sizeof(struct iphdr) ||
ip_hdrlen(skb) < sizeof(struct iphdr)) {
if (net_ratelimit())
printk("ip6t_hook: happy cracking.\n");
return NF_ACCEPT;
}
#endif

return ip6t_do_table(skb, hook, in, out,
dev_net(out)->ipv6.ip6table_filter);
}

static struct nf_hook_ops ip6t_ops[] __read_mostly = {
{
.hook = ip6t_in_hook,
.owner = THIS_MODULE,
.pf = NFPROTO_IPV6,
.hooknum = NF_INET_LOCAL_IN,
.priority = NF_IP6_PRI_FILTER,
},
{
.hook = ip6t_in_hook,
.owner = THIS_MODULE,
.pf = NFPROTO_IPV6,
.hooknum = NF_INET_FORWARD,
.priority = NF_IP6_PRI_FILTER,
},
{
.hook = ip6t_local_out_hook,
.owner = THIS_MODULE,
.pf = NFPROTO_IPV6,
.hooknum = NF_INET_LOCAL_OUT,
.priority = NF_IP6_PRI_FILTER,
},
};
static struct nf_hook_ops *filter_ops __read_mostly;

/* Default to forward because I got too much mail already. */
static int forward = NF_ACCEPT;
@ -121,9 +48,18 @@ module_param(forward, bool, 0000);

static int __net_init ip6table_filter_net_init(struct net *net)
{
/* Register table */
struct ip6t_replace *repl;

repl = ip6t_alloc_initial_table(&packet_filter);
if (repl == NULL)
return -ENOMEM;
/* Entry 1 is the FORWARD hook */
((struct ip6t_standard *)repl->entries)[1].target.verdict =
-forward - 1;

net->ipv6.ip6table_filter =
ip6t_register_table(net, &packet_filter, &initial_table.repl);
ip6t_register_table(net, &packet_filter, repl);
kfree(repl);
if (IS_ERR(net->ipv6.ip6table_filter))
return PTR_ERR(net->ipv6.ip6table_filter);
return 0;
@ -131,7 +67,7 @@ static int __net_init ip6table_filter_net_init(struct net *net)

static void __net_exit ip6table_filter_net_exit(struct net *net)
{
ip6t_unregister_table(net->ipv6.ip6table_filter);
ip6t_unregister_table(net, net->ipv6.ip6table_filter);
}

static struct pernet_operations ip6table_filter_net_ops = {
@ -148,17 +84,16 @@ static int __init ip6table_filter_init(void)
return -EINVAL;
}

/* Entry 1 is the FORWARD hook */
initial_table.entries[1].target.verdict = -forward - 1;

ret = register_pernet_subsys(&ip6table_filter_net_ops);
if (ret < 0)
return ret;

/* Register hooks */
ret = nf_register_hooks(ip6t_ops, ARRAY_SIZE(ip6t_ops));
if (ret < 0)
filter_ops = xt_hook_link(&packet_filter, ip6table_filter_hook);
if (IS_ERR(filter_ops)) {
ret = PTR_ERR(filter_ops);
goto cleanup_table;
}

return ret;

@ -169,7 +104,7 @@ static int __init ip6table_filter_init(void)

static void __exit ip6table_filter_fini(void)
{
nf_unregister_hooks(ip6t_ops, ARRAY_SIZE(ip6t_ops));
xt_hook_unlink(&packet_filter, filter_ops);
unregister_pernet_subsys(&ip6table_filter_net_ops);
}

@ -21,80 +21,17 @@ MODULE_DESCRIPTION("ip6tables mangle table");
(1 << NF_INET_LOCAL_OUT) | \
(1 << NF_INET_POST_ROUTING))

static const struct
{
struct ip6t_replace repl;
struct ip6t_standard entries[5];
struct ip6t_error term;
} initial_table __net_initdata = {
.repl = {
.name = "mangle",
.valid_hooks = MANGLE_VALID_HOOKS,
.num_entries = 6,
.size = sizeof(struct ip6t_standard) * 5 + sizeof(struct ip6t_error),
.hook_entry = {
[NF_INET_PRE_ROUTING] = 0,
[NF_INET_LOCAL_IN] = sizeof(struct ip6t_standard),
[NF_INET_FORWARD] = sizeof(struct ip6t_standard) * 2,
[NF_INET_LOCAL_OUT] = sizeof(struct ip6t_standard) * 3,
[NF_INET_POST_ROUTING] = sizeof(struct ip6t_standard) * 4,
},
.underflow = {
[NF_INET_PRE_ROUTING] = 0,
[NF_INET_LOCAL_IN] = sizeof(struct ip6t_standard),
[NF_INET_FORWARD] = sizeof(struct ip6t_standard) * 2,
[NF_INET_LOCAL_OUT] = sizeof(struct ip6t_standard) * 3,
[NF_INET_POST_ROUTING] = sizeof(struct ip6t_standard) * 4,
},
},
.entries = {
IP6T_STANDARD_INIT(NF_ACCEPT), /* PRE_ROUTING */
IP6T_STANDARD_INIT(NF_ACCEPT), /* LOCAL_IN */
IP6T_STANDARD_INIT(NF_ACCEPT), /* FORWARD */
IP6T_STANDARD_INIT(NF_ACCEPT), /* LOCAL_OUT */
IP6T_STANDARD_INIT(NF_ACCEPT), /* POST_ROUTING */
},
.term = IP6T_ERROR_INIT, /* ERROR */
};

static const struct xt_table packet_mangler = {
.name = "mangle",
.valid_hooks = MANGLE_VALID_HOOKS,
.me = THIS_MODULE,
.af = NFPROTO_IPV6,
.priority = NF_IP6_PRI_MANGLE,
};

/* The work comes in here from netfilter.c. */
static unsigned int
ip6t_in_hook(unsigned int hook,
struct sk_buff *skb,
const struct net_device *in,
const struct net_device *out,
int (*okfn)(struct sk_buff *))
ip6t_mangle_out(struct sk_buff *skb, const struct net_device *out)
{
return ip6t_do_table(skb, hook, in, out,
dev_net(in)->ipv6.ip6table_mangle);
}

static unsigned int
ip6t_post_routing_hook(unsigned int hook,
struct sk_buff *skb,
const struct net_device *in,
const struct net_device *out,
int (*okfn)(struct sk_buff *))
{
return ip6t_do_table(skb, hook, in, out,
dev_net(out)->ipv6.ip6table_mangle);
}

static unsigned int
ip6t_local_out_hook(unsigned int hook,
struct sk_buff *skb,
const struct net_device *in,
const struct net_device *out,
int (*okfn)(struct sk_buff *))
{

unsigned int ret;
struct in6_addr saddr, daddr;
u_int8_t hop_limit;
@ -119,7 +56,7 @@ ip6t_local_out_hook(unsigned int hook,
/* flowlabel and prio (includes version, which shouldn't change either */
flowlabel = *((u_int32_t *)ipv6_hdr(skb));

ret = ip6t_do_table(skb, hook, in, out,
ret = ip6t_do_table(skb, NF_INET_LOCAL_OUT, NULL, out,
dev_net(out)->ipv6.ip6table_mangle);

if (ret != NF_DROP && ret != NF_STOLEN &&
@ -132,49 +69,33 @@ ip6t_local_out_hook(unsigned int hook,
return ret;
}

static struct nf_hook_ops ip6t_ops[] __read_mostly = {
{
.hook = ip6t_in_hook,
.owner = THIS_MODULE,
.pf = NFPROTO_IPV6,
.hooknum = NF_INET_PRE_ROUTING,
.priority = NF_IP6_PRI_MANGLE,
},
{
.hook = ip6t_in_hook,
.owner = THIS_MODULE,
.pf = NFPROTO_IPV6,
.hooknum = NF_INET_LOCAL_IN,
.priority = NF_IP6_PRI_MANGLE,
},
{
.hook = ip6t_in_hook,
.owner = THIS_MODULE,
.pf = NFPROTO_IPV6,
.hooknum = NF_INET_FORWARD,
.priority = NF_IP6_PRI_MANGLE,
},
{
.hook = ip6t_local_out_hook,
.owner = THIS_MODULE,
.pf = NFPROTO_IPV6,
.hooknum = NF_INET_LOCAL_OUT,
.priority = NF_IP6_PRI_MANGLE,
},
{
.hook = ip6t_post_routing_hook,
.owner = THIS_MODULE,
.pf = NFPROTO_IPV6,
.hooknum = NF_INET_POST_ROUTING,
.priority = NF_IP6_PRI_MANGLE,
},
};
/* The work comes in here from netfilter.c. */
static unsigned int
ip6table_mangle_hook(unsigned int hook, struct sk_buff *skb,
const struct net_device *in, const struct net_device *out,
int (*okfn)(struct sk_buff *))
{
if (hook == NF_INET_LOCAL_OUT)
return ip6t_mangle_out(skb, out);
if (hook == NF_INET_POST_ROUTING)
return ip6t_do_table(skb, hook, in, out,
dev_net(out)->ipv6.ip6table_mangle);
/* INPUT/FORWARD */
return ip6t_do_table(skb, hook, in, out,
dev_net(in)->ipv6.ip6table_mangle);
}

static struct nf_hook_ops *mangle_ops __read_mostly;
static int __net_init ip6table_mangle_net_init(struct net *net)
{
/* Register table */
struct ip6t_replace *repl;

repl = ip6t_alloc_initial_table(&packet_mangler);
if (repl == NULL)
return -ENOMEM;
net->ipv6.ip6table_mangle =
ip6t_register_table(net, &packet_mangler, &initial_table.repl);
ip6t_register_table(net, &packet_mangler, repl);
kfree(repl);
if (IS_ERR(net->ipv6.ip6table_mangle))
return PTR_ERR(net->ipv6.ip6table_mangle);
return 0;
@ -182,7 +103,7 @@ static int __net_init ip6table_mangle_net_init(struct net *net)

static void __net_exit ip6table_mangle_net_exit(struct net *net)
{
ip6t_unregister_table(net->ipv6.ip6table_mangle);
ip6t_unregister_table(net, net->ipv6.ip6table_mangle);
}

static struct pernet_operations ip6table_mangle_net_ops = {
@ -199,9 +120,11 @@ static int __init ip6table_mangle_init(void)
return ret;

/* Register hooks */
ret = nf_register_hooks(ip6t_ops, ARRAY_SIZE(ip6t_ops));
if (ret < 0)
mangle_ops = xt_hook_link(&packet_mangler, ip6table_mangle_hook);
if (IS_ERR(mangle_ops)) {
ret = PTR_ERR(mangle_ops);
goto cleanup_table;
}

return ret;

@ -212,7 +135,7 @@ static int __init ip6table_mangle_init(void)

static void __exit ip6table_mangle_fini(void)
{
nf_unregister_hooks(ip6t_ops, ARRAY_SIZE(ip6t_ops));
xt_hook_unlink(&packet_mangler, mangle_ops);
unregister_pernet_subsys(&ip6table_mangle_net_ops);
}

@ -8,85 +8,37 @@

#define RAW_VALID_HOOKS ((1 << NF_INET_PRE_ROUTING) | (1 << NF_INET_LOCAL_OUT))

static const struct
{
struct ip6t_replace repl;
struct ip6t_standard entries[2];
struct ip6t_error term;
} initial_table __net_initdata = {
.repl = {
.name = "raw",
.valid_hooks = RAW_VALID_HOOKS,
.num_entries = 3,
.size = sizeof(struct ip6t_standard) * 2 + sizeof(struct ip6t_error),
.hook_entry = {
[NF_INET_PRE_ROUTING] = 0,
[NF_INET_LOCAL_OUT] = sizeof(struct ip6t_standard)
},
.underflow = {
[NF_INET_PRE_ROUTING] = 0,
[NF_INET_LOCAL_OUT] = sizeof(struct ip6t_standard)
},
},
.entries = {
IP6T_STANDARD_INIT(NF_ACCEPT), /* PRE_ROUTING */
IP6T_STANDARD_INIT(NF_ACCEPT), /* LOCAL_OUT */
},
.term = IP6T_ERROR_INIT, /* ERROR */
};

static const struct xt_table packet_raw = {
.name = "raw",
.valid_hooks = RAW_VALID_HOOKS,
.me = THIS_MODULE,
.af = NFPROTO_IPV6,
.priority = NF_IP6_PRI_FIRST,
};

/* The work comes in here from netfilter.c. */
static unsigned int
ip6t_pre_routing_hook(unsigned int hook,
struct sk_buff *skb,
const struct net_device *in,
const struct net_device *out,
int (*okfn)(struct sk_buff *))
ip6table_raw_hook(unsigned int hook, struct sk_buff *skb,
const struct net_device *in, const struct net_device *out,
int (*okfn)(struct sk_buff *))
{
return ip6t_do_table(skb, hook, in, out,
dev_net(in)->ipv6.ip6table_raw);
const struct net *net = dev_net((in != NULL) ? in : out);

return ip6t_do_table(skb, hook, in, out, net->ipv6.ip6table_raw);
}

static unsigned int
ip6t_local_out_hook(unsigned int hook,
struct sk_buff *skb,
const struct net_device *in,
const struct net_device *out,
int (*okfn)(struct sk_buff *))
{
return ip6t_do_table(skb, hook, in, out,
dev_net(out)->ipv6.ip6table_raw);
}

static struct nf_hook_ops ip6t_ops[] __read_mostly = {
{
.hook = ip6t_pre_routing_hook,
.pf = NFPROTO_IPV6,
.hooknum = NF_INET_PRE_ROUTING,
.priority = NF_IP6_PRI_FIRST,
.owner = THIS_MODULE,
},
{
.hook = ip6t_local_out_hook,
.pf = NFPROTO_IPV6,
.hooknum = NF_INET_LOCAL_OUT,
.priority = NF_IP6_PRI_FIRST,
.owner = THIS_MODULE,
},
};
static struct nf_hook_ops *rawtable_ops __read_mostly;

static int __net_init ip6table_raw_net_init(struct net *net)
{
/* Register table */
struct ip6t_replace *repl;

repl = ip6t_alloc_initial_table(&packet_raw);
if (repl == NULL)
return -ENOMEM;
net->ipv6.ip6table_raw =
ip6t_register_table(net, &packet_raw, &initial_table.repl);
ip6t_register_table(net, &packet_raw, repl);
kfree(repl);
if (IS_ERR(net->ipv6.ip6table_raw))
return PTR_ERR(net->ipv6.ip6table_raw);
return 0;
@ -94,7 +46,7 @@ static int __net_init ip6table_raw_net_init(struct net *net)

static void __net_exit ip6table_raw_net_exit(struct net *net)
{
ip6t_unregister_table(net->ipv6.ip6table_raw);
ip6t_unregister_table(net, net->ipv6.ip6table_raw);
}

static struct pernet_operations ip6table_raw_net_ops = {
@ -111,9 +63,11 @@ static int __init ip6table_raw_init(void)
return ret;

/* Register hooks */
ret = nf_register_hooks(ip6t_ops, ARRAY_SIZE(ip6t_ops));
if (ret < 0)
rawtable_ops = xt_hook_link(&packet_raw, ip6table_raw_hook);
if (IS_ERR(rawtable_ops)) {
ret = PTR_ERR(rawtable_ops);
goto cleanup_table;
}

return ret;

@ -124,7 +78,7 @@ static int __init ip6table_raw_init(void)

static void __exit ip6table_raw_fini(void)
{
nf_unregister_hooks(ip6t_ops, ARRAY_SIZE(ip6t_ops));
xt_hook_unlink(&packet_raw, rawtable_ops);
unregister_pernet_subsys(&ip6table_raw_net_ops);
}

@ -26,106 +26,37 @@ MODULE_DESCRIPTION("ip6tables security table, for MAC rules");
(1 << NF_INET_FORWARD) | \
(1 << NF_INET_LOCAL_OUT)

static const struct
{
struct ip6t_replace repl;
struct ip6t_standard entries[3];
struct ip6t_error term;
} initial_table __net_initdata = {
.repl = {
.name = "security",
.valid_hooks = SECURITY_VALID_HOOKS,
.num_entries = 4,
.size = sizeof(struct ip6t_standard) * 3 + sizeof(struct ip6t_error),
.hook_entry = {
[NF_INET_LOCAL_IN] = 0,
[NF_INET_FORWARD] = sizeof(struct ip6t_standard),
[NF_INET_LOCAL_OUT] = sizeof(struct ip6t_standard) * 2,
},
.underflow = {
[NF_INET_LOCAL_IN] = 0,
[NF_INET_FORWARD] = sizeof(struct ip6t_standard),
[NF_INET_LOCAL_OUT] = sizeof(struct ip6t_standard) * 2,
},
},
.entries = {
IP6T_STANDARD_INIT(NF_ACCEPT), /* LOCAL_IN */
IP6T_STANDARD_INIT(NF_ACCEPT), /* FORWARD */
IP6T_STANDARD_INIT(NF_ACCEPT), /* LOCAL_OUT */
},
.term = IP6T_ERROR_INIT, /* ERROR */
};

static const struct xt_table security_table = {
.name = "security",
.valid_hooks = SECURITY_VALID_HOOKS,
.me = THIS_MODULE,
.af = NFPROTO_IPV6,
.priority = NF_IP6_PRI_SECURITY,
};

static unsigned int
ip6t_local_in_hook(unsigned int hook,
struct sk_buff *skb,
const struct net_device *in,
const struct net_device *out,
int (*okfn)(struct sk_buff *))
ip6table_security_hook(unsigned int hook, struct sk_buff *skb,
const struct net_device *in,
const struct net_device *out,
int (*okfn)(struct sk_buff *))
{
return ip6t_do_table(skb, hook, in, out,
dev_net(in)->ipv6.ip6table_security);
const struct net *net = dev_net((in != NULL) ? in : out);

return ip6t_do_table(skb, hook, in, out, net->ipv6.ip6table_security);
}

static unsigned int
ip6t_forward_hook(unsigned int hook,
struct sk_buff *skb,
const struct net_device *in,
const struct net_device *out,
int (*okfn)(struct sk_buff *))
{
return ip6t_do_table(skb, hook, in, out,
dev_net(in)->ipv6.ip6table_security);
}

static unsigned int
ip6t_local_out_hook(unsigned int hook,
struct sk_buff *skb,
const struct net_device *in,
const struct net_device *out,
int (*okfn)(struct sk_buff *))
{
/* TBD: handle short packets via raw socket */
return ip6t_do_table(skb, hook, in, out,
dev_net(out)->ipv6.ip6table_security);
}

static struct nf_hook_ops ip6t_ops[] __read_mostly = {
{
.hook = ip6t_local_in_hook,
.owner = THIS_MODULE,
.pf = NFPROTO_IPV6,
.hooknum = NF_INET_LOCAL_IN,
.priority = NF_IP6_PRI_SECURITY,
},
{
.hook = ip6t_forward_hook,
.owner = THIS_MODULE,
.pf = NFPROTO_IPV6,
.hooknum = NF_INET_FORWARD,
.priority = NF_IP6_PRI_SECURITY,
},
{
.hook = ip6t_local_out_hook,
.owner = THIS_MODULE,
.pf = NFPROTO_IPV6,
.hooknum = NF_INET_LOCAL_OUT,
.priority = NF_IP6_PRI_SECURITY,
},
};
static struct nf_hook_ops *sectbl_ops __read_mostly;

static int __net_init ip6table_security_net_init(struct net *net)
{
net->ipv6.ip6table_security =
ip6t_register_table(net, &security_table, &initial_table.repl);
struct ip6t_replace *repl;

repl = ip6t_alloc_initial_table(&security_table);
if (repl == NULL)
return -ENOMEM;
net->ipv6.ip6table_security =
ip6t_register_table(net, &security_table, repl);
kfree(repl);
if (IS_ERR(net->ipv6.ip6table_security))
return PTR_ERR(net->ipv6.ip6table_security);

@ -134,7 +65,7 @@ static int __net_init ip6table_security_net_init(struct net *net)

static void __net_exit ip6table_security_net_exit(struct net *net)
{
ip6t_unregister_table(net->ipv6.ip6table_security);
ip6t_unregister_table(net, net->ipv6.ip6table_security);
}

static struct pernet_operations ip6table_security_net_ops = {
@ -150,9 +81,11 @@ static int __init ip6table_security_init(void)
if (ret < 0)
return ret;

ret = nf_register_hooks(ip6t_ops, ARRAY_SIZE(ip6t_ops));
if (ret < 0)
sectbl_ops = xt_hook_link(&security_table, ip6table_security_hook);
if (IS_ERR(sectbl_ops)) {
ret = PTR_ERR(sectbl_ops);
goto cleanup_table;
}

return ret;

@ -163,7 +96,7 @@ cleanup_table:

static void __exit ip6table_security_fini(void)
{
nf_unregister_hooks(ip6t_ops, ARRAY_SIZE(ip6t_ops));
xt_hook_unlink(&security_table, sectbl_ops);
unregister_pernet_subsys(&ip6table_security_net_ops);
}

@ -27,6 +27,7 @@
#include <net/netfilter/nf_conntrack_l4proto.h>
#include <net/netfilter/nf_conntrack_l3proto.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_zones.h>
#include <net/netfilter/ipv6/nf_conntrack_ipv6.h>
#include <net/netfilter/nf_log.h>

@ -191,15 +192,20 @@ out:
static enum ip6_defrag_users nf_ct6_defrag_user(unsigned int hooknum,
struct sk_buff *skb)
{
u16 zone = NF_CT_DEFAULT_ZONE;

if (skb->nfct)
zone = nf_ct_zone((struct nf_conn *)skb->nfct);

#ifdef CONFIG_BRIDGE_NETFILTER
if (skb->nf_bridge &&
skb->nf_bridge->mask & BRNF_NF_BRIDGE_PREROUTING)
return IP6_DEFRAG_CONNTRACK_BRIDGE_IN;
return IP6_DEFRAG_CONNTRACK_BRIDGE_IN + zone;
#endif
if (hooknum == NF_INET_PRE_ROUTING)
return IP6_DEFRAG_CONNTRACK_IN;
return IP6_DEFRAG_CONNTRACK_IN + zone;
else
return IP6_DEFRAG_CONNTRACK_OUT;
return IP6_DEFRAG_CONNTRACK_OUT + zone;

}

@ -212,7 +218,7 @@ static unsigned int ipv6_defrag(unsigned int hooknum,
struct sk_buff *reasm;

/* Previously seen (loopback)? */
if (skb->nfct)
if (skb->nfct && !nf_ct_is_template((struct nf_conn *)skb->nfct))
return NF_ACCEPT;

reasm = nf_ct_frag6_gather(skb, nf_ct6_defrag_user(hooknum, skb));

@ -23,6 +23,7 @@
#include <net/netfilter/nf_conntrack_tuple.h>
#include <net/netfilter/nf_conntrack_l4proto.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_zones.h>
#include <net/netfilter/ipv6/nf_conntrack_icmpv6.h>
#include <net/netfilter/nf_log.h>

@ -128,7 +129,7 @@ static bool icmpv6_new(struct nf_conn *ct, const struct sk_buff *skb,
}

static int
icmpv6_error_message(struct net *net,
icmpv6_error_message(struct net *net, struct nf_conn *tmpl,
struct sk_buff *skb,
unsigned int icmp6off,
enum ip_conntrack_info *ctinfo,
@ -137,6 +138,7 @@ icmpv6_error_message(struct net *net,
struct nf_conntrack_tuple intuple, origtuple;
const struct nf_conntrack_tuple_hash *h;
const struct nf_conntrack_l4proto *inproto;
u16 zone = tmpl ? nf_ct_zone(tmpl) : NF_CT_DEFAULT_ZONE;

NF_CT_ASSERT(skb->nfct == NULL);

@ -163,7 +165,7 @@ icmpv6_error_message(struct net *net,

*ctinfo = IP_CT_RELATED;

h = nf_conntrack_find_get(net, &intuple);
h = nf_conntrack_find_get(net, zone, &intuple);
if (!h) {
pr_debug("icmpv6_error: no match\n");
return -NF_ACCEPT;
@ -179,7 +181,8 @@ icmpv6_error_message(struct net *net,
}

static int
icmpv6_error(struct net *net, struct sk_buff *skb, unsigned int dataoff,
icmpv6_error(struct net *net, struct nf_conn *tmpl,
struct sk_buff *skb, unsigned int dataoff,
enum ip_conntrack_info *ctinfo, u_int8_t pf, unsigned int hooknum)
{
const struct icmp6hdr *icmp6h;
@ -215,7 +218,7 @@ icmpv6_error(struct net *net, struct sk_buff *skb, unsigned int dataoff,
if (icmp6h->icmp6_type >= 128)
return NF_ACCEPT;

return icmpv6_error_message(net, skb, dataoff, ctinfo, hooknum);
return icmpv6_error_message(net, tmpl, skb, dataoff, ctinfo, hooknum);
}

#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)

@ -45,9 +45,6 @@
#include <linux/kernel.h>
#include <linux/module.h>

#define NF_CT_FRAG6_HIGH_THRESH 262144 /* == 256*1024 */
#define NF_CT_FRAG6_LOW_THRESH 196608 /* == 192*1024 */
#define NF_CT_FRAG6_TIMEOUT IPV6_FRAG_TIMEOUT

struct nf_ct_frag6_skb_cb
{
@ -670,8 +667,8 @@ int nf_ct_frag6_init(void)
nf_frags.frag_expire = nf_ct_frag6_expire;
nf_frags.secret_interval = 10 * 60 * HZ;
nf_init_frags.timeout = IPV6_FRAG_TIMEOUT;
nf_init_frags.high_thresh = 256 * 1024;
nf_init_frags.low_thresh = 192 * 1024;
nf_init_frags.high_thresh = IPV6_FRAG_HIGH_THRESH;
nf_init_frags.low_thresh = IPV6_FRAG_LOW_THRESH;
inet_frags_init_net(&nf_init_frags);
inet_frags_init(&nf_frags);

@ -742,8 +742,8 @@ static inline void ip6_frags_sysctl_unregister(void)

static int __net_init ipv6_frags_init_net(struct net *net)
{
net->ipv6.frags.high_thresh = 256 * 1024;
net->ipv6.frags.low_thresh = 192 * 1024;
net->ipv6.frags.high_thresh = IPV6_FRAG_HIGH_THRESH;
net->ipv6.frags.low_thresh = IPV6_FRAG_LOW_THRESH;
net->ipv6.frags.timeout = IPV6_FRAG_TIMEOUT;

inet_frags_init_net(&net->ipv6.frags);

@ -83,6 +83,19 @@ config NF_CONNTRACK_SECMARK

If unsure, say 'N'.

config NF_CONNTRACK_ZONES
bool 'Connection tracking zones'
depends on NETFILTER_ADVANCED
depends on NETFILTER_XT_TARGET_CT
help
This option enables support for connection tracking zones.
Normally, each connection needs to have a unique system wide
identity. Connection tracking zones allow to have multiple
connections using the same identity, as long as they are
contained in different zones.

If unsure, say `N'.

config NF_CONNTRACK_EVENTS
bool "Connection tracking events"
depends on NETFILTER_ADVANCED
@ -341,6 +354,18 @@ config NETFILTER_XT_TARGET_CONNSECMARK

To compile it as a module, choose M here. If unsure, say N.

config NETFILTER_XT_TARGET_CT
tristate '"CT" target support'
depends on NF_CONNTRACK
depends on IP_NF_RAW || IP6_NF_RAW
depends on NETFILTER_ADVANCED
help
This options adds a `CT' target, which allows to specify initial
connection tracking parameters like events to be delivered and
the helper to be used.

To compile it as a module, choose M here. If unsure, say N.

config NETFILTER_XT_TARGET_DSCP
tristate '"DSCP" and "TOS" target support'
depends on IP_NF_MANGLE || IP6_NF_MANGLE

@ -44,6 +44,7 @@ obj-$(CONFIG_NETFILTER_XTABLES) += x_tables.o xt_tcpudp.o
obj-$(CONFIG_NETFILTER_XT_TARGET_CLASSIFY) += xt_CLASSIFY.o
obj-$(CONFIG_NETFILTER_XT_TARGET_CONNMARK) += xt_CONNMARK.o
obj-$(CONFIG_NETFILTER_XT_TARGET_CONNSECMARK) += xt_CONNSECMARK.o
obj-$(CONFIG_NETFILTER_XT_TARGET_CT) += xt_CT.o
obj-$(CONFIG_NETFILTER_XT_TARGET_DSCP) += xt_DSCP.o
obj-$(CONFIG_NETFILTER_XT_TARGET_HL) += xt_HL.o
obj-$(CONFIG_NETFILTER_XT_TARGET_LED) += xt_LED.o

@ -68,6 +68,10 @@ config IP_VS_TAB_BITS
each hash entry uses 8 bytes, so you can estimate how much memory is
needed for your box.

You can overwrite this number setting conn_tab_bits module parameter
or by appending ip_vs.conn_tab_bits=? to the kernel command line
if IP VS was compiled built-in.

comment "IPVS transport protocol load balancing support"

config IP_VS_PROTO_TCP

@ -40,6 +40,21 @@
#include <net/ip_vs.h>


#ifndef CONFIG_IP_VS_TAB_BITS
#define CONFIG_IP_VS_TAB_BITS 12
#endif

/*
* Connection hash size. Default is what was selected at compile time.
*/
int ip_vs_conn_tab_bits = CONFIG_IP_VS_TAB_BITS;
module_param_named(conn_tab_bits, ip_vs_conn_tab_bits, int, 0444);
MODULE_PARM_DESC(conn_tab_bits, "Set connections' hash size");

/* size and mask values */
int ip_vs_conn_tab_size;
int ip_vs_conn_tab_mask;

/*
* Connection hash table: for input and output packets lookups of IPVS
*/
@ -125,11 +140,11 @@ static unsigned int ip_vs_conn_hashkey(int af, unsigned proto,
if (af == AF_INET6)
return jhash_3words(jhash(addr, 16, ip_vs_conn_rnd),
(__force u32)port, proto, ip_vs_conn_rnd)
& IP_VS_CONN_TAB_MASK;
& ip_vs_conn_tab_mask;
#endif
return jhash_3words((__force u32)addr->ip, (__force u32)port, proto,
ip_vs_conn_rnd)
& IP_VS_CONN_TAB_MASK;
& ip_vs_conn_tab_mask;
}

@ -760,7 +775,7 @@ static void *ip_vs_conn_array(struct seq_file *seq, loff_t pos)
int idx;
struct ip_vs_conn *cp;

for(idx = 0; idx < IP_VS_CONN_TAB_SIZE; idx++) {
for (idx = 0; idx < ip_vs_conn_tab_size; idx++) {
ct_read_lock_bh(idx);
list_for_each_entry(cp, &ip_vs_conn_tab[idx], c_list) {
if (pos-- == 0) {
@ -797,7 +812,7 @@ static void *ip_vs_conn_seq_next(struct seq_file *seq, void *v, loff_t *pos)
idx = l - ip_vs_conn_tab;
ct_read_unlock_bh(idx);

while (++idx < IP_VS_CONN_TAB_SIZE) {
while (++idx < ip_vs_conn_tab_size) {
ct_read_lock_bh(idx);
list_for_each_entry(cp, &ip_vs_conn_tab[idx], c_list) {
seq->private = &ip_vs_conn_tab[idx];
@ -976,8 +991,8 @@ void ip_vs_random_dropentry(void)
/*
* Randomly scan 1/32 of the whole table every second
*/
for (idx = 0; idx < (IP_VS_CONN_TAB_SIZE>>5); idx++) {
unsigned hash = net_random() & IP_VS_CONN_TAB_MASK;
for (idx = 0; idx < (ip_vs_conn_tab_size>>5); idx++) {
unsigned hash = net_random() & ip_vs_conn_tab_mask;

/*
* Lock is actually needed in this loop.
@ -1029,7 +1044,7 @@ static void ip_vs_conn_flush(void)
struct ip_vs_conn *cp;

flush_again:
for (idx=0; idx<IP_VS_CONN_TAB_SIZE; idx++) {
for (idx = 0; idx < ip_vs_conn_tab_size; idx++) {
/*
* Lock is actually needed in this loop.
*/
@ -1060,10 +1075,15 @@ int __init ip_vs_conn_init(void)
{
int idx;

/* Compute size and mask */
ip_vs_conn_tab_size = 1 << ip_vs_conn_tab_bits;
ip_vs_conn_tab_mask = ip_vs_conn_tab_size - 1;

/*
* Allocate the connection hash table and initialize its list heads
*/
ip_vs_conn_tab = vmalloc(IP_VS_CONN_TAB_SIZE*sizeof(struct list_head));
ip_vs_conn_tab = vmalloc(ip_vs_conn_tab_size *
sizeof(struct list_head));
if (!ip_vs_conn_tab)
return -ENOMEM;

@ -1078,12 +1098,12 @@ int __init ip_vs_conn_init(void)

pr_info("Connection hash table configured "
"(size=%d, memory=%ldKbytes)\n",
IP_VS_CONN_TAB_SIZE,
(long)(IP_VS_CONN_TAB_SIZE*sizeof(struct list_head))/1024);
ip_vs_conn_tab_size,
(long)(ip_vs_conn_tab_size*sizeof(struct list_head))/1024);
IP_VS_DBG(0, "Each connection entry needs %Zd bytes at least\n",
sizeof(struct ip_vs_conn));

for (idx = 0; idx < IP_VS_CONN_TAB_SIZE; idx++) {
for (idx = 0; idx < ip_vs_conn_tab_size; idx++) {
INIT_LIST_HEAD(&ip_vs_conn_tab[idx]);
}

@ -1843,7 +1843,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
if (v == SEQ_START_TOKEN) {
seq_printf(seq,
"IP Virtual Server version %d.%d.%d (size=%d)\n",
NVERSION(IP_VS_VERSION_CODE), IP_VS_CONN_TAB_SIZE);
NVERSION(IP_VS_VERSION_CODE), ip_vs_conn_tab_size);
seq_puts(seq,
"Prot LocalAddress:Port Scheduler Flags\n");
seq_puts(seq,
@ -2386,7 +2386,7 @@ do_ip_vs_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
char buf[64];

sprintf(buf, "IP Virtual Server version %d.%d.%d (size=%d)",
NVERSION(IP_VS_VERSION_CODE), IP_VS_CONN_TAB_SIZE);
NVERSION(IP_VS_VERSION_CODE), ip_vs_conn_tab_size);
if (copy_to_user(user, buf, strlen(buf)+1) != 0) {
ret = -EFAULT;
goto out;
@ -2399,7 +2399,7 @@ do_ip_vs_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
{
struct ip_vs_getinfo info;
info.version = IP_VS_VERSION_CODE;
info.size = IP_VS_CONN_TAB_SIZE;
info.size = ip_vs_conn_tab_size;
info.num_services = ip_vs_num_services;
if (copy_to_user(user, &info, sizeof(info)) != 0)
ret = -EFAULT;
@ -3243,7 +3243,7 @@ static int ip_vs_genl_get_cmd(struct sk_buff *skb, struct genl_info *info)
case IPVS_CMD_GET_INFO:
NLA_PUT_U32(msg, IPVS_INFO_ATTR_VERSION, IP_VS_VERSION_CODE);
NLA_PUT_U32(msg, IPVS_INFO_ATTR_CONN_TAB_SIZE,
IP_VS_CONN_TAB_SIZE);
ip_vs_conn_tab_size);
break;
}

@ -208,7 +208,7 @@ static int ip_vs_ftp_out(struct ip_vs_app *app, struct ip_vs_conn *cp,
*/
from.ip = n_cp->vaddr.ip;
port = n_cp->vport;
sprintf(buf, "%d,%d,%d,%d,%d,%d", NIPQUAD(from.ip),
sprintf(buf, "%u,%u,%u,%u,%u,%u", NIPQUAD(from.ip),
(ntohs(port)>>8)&255, ntohs(port)&255);
buf_len = strlen(buf);

@ -42,6 +42,7 @@
#include <net/netfilter/nf_conntrack_extend.h>
#include <net/netfilter/nf_conntrack_acct.h>
#include <net/netfilter/nf_conntrack_ecache.h>
#include <net/netfilter/nf_conntrack_zones.h>
#include <net/netfilter/nf_nat.h>
#include <net/netfilter/nf_nat_core.h>

@ -68,7 +69,7 @@ static int nf_conntrack_hash_rnd_initted;
static unsigned int nf_conntrack_hash_rnd;

static u_int32_t __hash_conntrack(const struct nf_conntrack_tuple *tuple,
unsigned int size, unsigned int rnd)
u16 zone, unsigned int size, unsigned int rnd)
{
unsigned int n;
u_int32_t h;
@ -79,16 +80,16 @@ static u_int32_t __hash_conntrack(const struct nf_conntrack_tuple *tuple,
*/
n = (sizeof(tuple->src) + sizeof(tuple->dst.u3)) / sizeof(u32);
h = jhash2((u32 *)tuple, n,
rnd ^ (((__force __u16)tuple->dst.u.all << 16) |
tuple->dst.protonum));
zone ^ rnd ^ (((__force __u16)tuple->dst.u.all << 16) |
tuple->dst.protonum));

return ((u64)h * size) >> 32;
}

static inline u_int32_t hash_conntrack(const struct net *net,
static inline u_int32_t hash_conntrack(const struct net *net, u16 zone,
const struct nf_conntrack_tuple *tuple)
{
return __hash_conntrack(tuple, net->ct.htable_size,
return __hash_conntrack(tuple, zone, net->ct.htable_size,
nf_conntrack_hash_rnd);
}

@ -292,11 +293,12 @@ static void death_by_timeout(unsigned long ul_conntrack)
* - Caller must lock nf_conntrack_lock before calling this function
*/
struct nf_conntrack_tuple_hash *
__nf_conntrack_find(struct net *net, const struct nf_conntrack_tuple *tuple)
__nf_conntrack_find(struct net *net, u16 zone,
const struct nf_conntrack_tuple *tuple)
{
struct nf_conntrack_tuple_hash *h;
struct hlist_nulls_node *n;
unsigned int hash = hash_conntrack(net, tuple);
unsigned int hash = hash_conntrack(net, zone, tuple);

/* Disable BHs the entire time since we normally need to disable them
* at least once for the stats anyway.
@ -304,7 +306,8 @@ __nf_conntrack_find(struct net *net, const struct nf_conntrack_tuple *tuple)
local_bh_disable();
begin:
hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[hash], hnnode) {
if (nf_ct_tuple_equal(tuple, &h->tuple)) {
if (nf_ct_tuple_equal(tuple, &h->tuple) &&
nf_ct_zone(nf_ct_tuplehash_to_ctrack(h)) == zone) {
NF_CT_STAT_INC(net, found);
local_bh_enable();
return h;
@ -326,21 +329,23 @@ EXPORT_SYMBOL_GPL(__nf_conntrack_find);

/* Find a connection corresponding to a tuple. */
struct nf_conntrack_tuple_hash *
nf_conntrack_find_get(struct net *net, const struct nf_conntrack_tuple *tuple)
nf_conntrack_find_get(struct net *net, u16 zone,
const struct nf_conntrack_tuple *tuple)
{
struct nf_conntrack_tuple_hash *h;
struct nf_conn *ct;

rcu_read_lock();
begin:
h = __nf_conntrack_find(net, tuple);
h = __nf_conntrack_find(net, zone, tuple);
if (h) {
ct = nf_ct_tuplehash_to_ctrack(h);
if (unlikely(nf_ct_is_dying(ct) ||
!atomic_inc_not_zero(&ct->ct_general.use)))
h = NULL;
else {
if (unlikely(!nf_ct_tuple_equal(tuple, &h->tuple))) {
if (unlikely(!nf_ct_tuple_equal(tuple, &h->tuple) ||
nf_ct_zone(ct) != zone)) {
nf_ct_put(ct);
goto begin;
}
@ -368,9 +373,11 @@ void nf_conntrack_hash_insert(struct nf_conn *ct)
{
struct net *net = nf_ct_net(ct);
unsigned int hash, repl_hash;
u16 zone;

hash = hash_conntrack(net, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
repl_hash = hash_conntrack(net, &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
zone = nf_ct_zone(ct);
hash = hash_conntrack(net, zone, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
repl_hash = hash_conntrack(net, zone, &ct->tuplehash[IP_CT_DIR_REPLY].tuple);

__nf_conntrack_hash_insert(ct, hash, repl_hash);
}
@ -387,6 +394,7 @@ __nf_conntrack_confirm(struct sk_buff *skb)
struct hlist_nulls_node *n;
enum ip_conntrack_info ctinfo;
struct net *net;
u16 zone;

ct = nf_ct_get(skb, &ctinfo);
net = nf_ct_net(ct);
@ -398,8 +406,9 @@ __nf_conntrack_confirm(struct sk_buff *skb)
if (CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL)
return NF_ACCEPT;

hash = hash_conntrack(net, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
repl_hash = hash_conntrack(net, &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
zone = nf_ct_zone(ct);
hash = hash_conntrack(net, zone, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
repl_hash = hash_conntrack(net, zone, &ct->tuplehash[IP_CT_DIR_REPLY].tuple);

/* We're not in hash table, and we refuse to set up related
connections for unconfirmed conns. But packet copies and
@ -418,11 +427,13 @@ __nf_conntrack_confirm(struct sk_buff *skb)
not in the hash. If there is, we lost race. */
hlist_nulls_for_each_entry(h, n, &net->ct.hash[hash], hnnode)
if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
&h->tuple))
&h->tuple) &&
zone == nf_ct_zone(nf_ct_tuplehash_to_ctrack(h)))
goto out;
hlist_nulls_for_each_entry(h, n, &net->ct.hash[repl_hash], hnnode)
if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_REPLY].tuple,
&h->tuple))
&h->tuple) &&
zone == nf_ct_zone(nf_ct_tuplehash_to_ctrack(h)))
goto out;

/* Remove from unconfirmed list */
@ -469,15 +480,19 @@ nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple,
struct net *net = nf_ct_net(ignored_conntrack);
struct nf_conntrack_tuple_hash *h;
struct hlist_nulls_node *n;
unsigned int hash = hash_conntrack(net, tuple);
struct nf_conn *ct;
u16 zone = nf_ct_zone(ignored_conntrack);
unsigned int hash = hash_conntrack(net, zone, tuple);

/* Disable BHs the entire time since we need to disable them at
* least once for the stats anyway.
*/
rcu_read_lock_bh();
hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[hash], hnnode) {
if (nf_ct_tuplehash_to_ctrack(h) != ignored_conntrack &&
nf_ct_tuple_equal(tuple, &h->tuple)) {
ct = nf_ct_tuplehash_to_ctrack(h);
if (ct != ignored_conntrack &&
nf_ct_tuple_equal(tuple, &h->tuple) &&
nf_ct_zone(ct) == zone) {
NF_CT_STAT_INC(net, found);
rcu_read_unlock_bh();
return 1;
@ -540,7 +555,7 @@ static noinline int early_drop(struct net *net, unsigned int hash)
return dropped;
}

struct nf_conn *nf_conntrack_alloc(struct net *net,
struct nf_conn *nf_conntrack_alloc(struct net *net, u16 zone,
const struct nf_conntrack_tuple *orig,
const struct nf_conntrack_tuple *repl,
gfp_t gfp)
@ -558,7 +573,7 @@ struct nf_conn *nf_conntrack_alloc(struct net *net,

if (nf_conntrack_max &&
unlikely(atomic_read(&net->ct.count) > nf_conntrack_max)) {
unsigned int hash = hash_conntrack(net, orig);
unsigned int hash = hash_conntrack(net, zone, orig);
if (!early_drop(net, hash)) {
atomic_dec(&net->ct.count);
if (net_ratelimit())
@ -595,13 +610,28 @@ struct nf_conn *nf_conntrack_alloc(struct net *net,
#ifdef CONFIG_NET_NS
ct->ct_net = net;
#endif
#ifdef CONFIG_NF_CONNTRACK_ZONES
if (zone) {
struct nf_conntrack_zone *nf_ct_zone;

nf_ct_zone = nf_ct_ext_add(ct, NF_CT_EXT_ZONE, GFP_ATOMIC);
if (!nf_ct_zone)
goto out_free;
nf_ct_zone->id = zone;
}
#endif
/*
* changes to lookup keys must be done before setting refcnt to 1
*/
smp_wmb();
atomic_set(&ct->ct_general.use, 1);
return ct;

#ifdef CONFIG_NF_CONNTRACK_ZONES
out_free:
kmem_cache_free(net->ct.nf_conntrack_cachep, ct);
return ERR_PTR(-ENOMEM);
#endif
}
EXPORT_SYMBOL_GPL(nf_conntrack_alloc);

@ -619,7 +649,7 @@ EXPORT_SYMBOL_GPL(nf_conntrack_free);
/* Allocate a new conntrack: we return -ENOMEM if classification
failed due to stress. Otherwise it really is unclassifiable. */
static struct nf_conntrack_tuple_hash *
init_conntrack(struct net *net,
init_conntrack(struct net *net, struct nf_conn *tmpl,
const struct nf_conntrack_tuple *tuple,
struct nf_conntrack_l3proto *l3proto,
struct nf_conntrack_l4proto *l4proto,
@ -629,14 +659,16 @@ init_conntrack(struct net *net,
struct nf_conn *ct;
struct nf_conn_help *help;
struct nf_conntrack_tuple repl_tuple;
struct nf_conntrack_ecache *ecache;
struct nf_conntrack_expect *exp;
u16 zone = tmpl ? nf_ct_zone(tmpl) : NF_CT_DEFAULT_ZONE;

if (!nf_ct_invert_tuple(&repl_tuple, tuple, l3proto, l4proto)) {
pr_debug("Can't invert tuple.\n");
return NULL;
}

ct = nf_conntrack_alloc(net, tuple, &repl_tuple, GFP_ATOMIC);
ct = nf_conntrack_alloc(net, zone, tuple, &repl_tuple, GFP_ATOMIC);
if (IS_ERR(ct)) {
pr_debug("Can't allocate conntrack.\n");
return (struct nf_conntrack_tuple_hash *)ct;
@ -649,10 +681,14 @@ init_conntrack(struct net *net,
}

nf_ct_acct_ext_add(ct, GFP_ATOMIC);
nf_ct_ecache_ext_add(ct, GFP_ATOMIC);

ecache = tmpl ? nf_ct_ecache_find(tmpl) : NULL;
nf_ct_ecache_ext_add(ct, ecache ? ecache->ctmask : 0,
ecache ? ecache->expmask : 0,
GFP_ATOMIC);

spin_lock_bh(&nf_conntrack_lock);
exp = nf_ct_find_expectation(net, tuple);
exp = nf_ct_find_expectation(net, zone, tuple);
if (exp) {
pr_debug("conntrack: expectation arrives ct=%p exp=%p\n",
ct, exp);
@ -674,7 +710,7 @@ init_conntrack(struct net *net,
nf_conntrack_get(&ct->master->ct_general);
NF_CT_STAT_INC(net, expect_new);
} else {
__nf_ct_try_assign_helper(ct, GFP_ATOMIC);
__nf_ct_try_assign_helper(ct, tmpl, GFP_ATOMIC);
NF_CT_STAT_INC(net, new);
}

@ -695,7 +731,7 @@ init_conntrack(struct net *net,

/* On success, returns conntrack ptr, sets skb->nfct and ctinfo */
static inline struct nf_conn *
resolve_normal_ct(struct net *net,
resolve_normal_ct(struct net *net, struct nf_conn *tmpl,
struct sk_buff *skb,
unsigned int dataoff,
u_int16_t l3num,
@ -708,6 +744,7 @@ resolve_normal_ct(struct net *net,
struct nf_conntrack_tuple tuple;
struct nf_conntrack_tuple_hash *h;
struct nf_conn *ct;
u16 zone = tmpl ? nf_ct_zone(tmpl) : NF_CT_DEFAULT_ZONE;

if (!nf_ct_get_tuple(skb, skb_network_offset(skb),
dataoff, l3num, protonum, &tuple, l3proto,
@ -717,9 +754,10 @@ resolve_normal_ct(struct net *net,
}

/* look for tuple match */
h = nf_conntrack_find_get(net, &tuple);
h = nf_conntrack_find_get(net, zone, &tuple);
if (!h) {
h = init_conntrack(net, &tuple, l3proto, l4proto, skb, dataoff);
h = init_conntrack(net, tmpl, &tuple, l3proto, l4proto,
skb, dataoff);
if (!h)
return NULL;
if (IS_ERR(h))
@ -756,7 +794,7 @@ unsigned int
nf_conntrack_in(struct net *net, u_int8_t pf, unsigned int hooknum,
struct sk_buff *skb)
{
struct nf_conn *ct;
struct nf_conn *ct, *tmpl = NULL;
enum ip_conntrack_info ctinfo;
struct nf_conntrack_l3proto *l3proto;
struct nf_conntrack_l4proto *l4proto;
@ -765,10 +803,14 @@ nf_conntrack_in(struct net *net, u_int8_t pf, unsigned int hooknum,
int set_reply = 0;
int ret;

/* Previously seen (loopback or untracked)? Ignore. */
if (skb->nfct) {
NF_CT_STAT_INC_ATOMIC(net, ignore);
return NF_ACCEPT;
/* Previously seen (loopback or untracked)? Ignore. */
tmpl = (struct nf_conn *)skb->nfct;
if (!nf_ct_is_template(tmpl)) {
NF_CT_STAT_INC_ATOMIC(net, ignore);
return NF_ACCEPT;
}
skb->nfct = NULL;
}

/* rcu_read_lock()ed by nf_hook_slow */
@ -779,7 +821,8 @@ nf_conntrack_in(struct net *net, u_int8_t pf, unsigned int hooknum,
pr_debug("not prepared to track yet or error occured\n");
NF_CT_STAT_INC_ATOMIC(net, error);
NF_CT_STAT_INC_ATOMIC(net, invalid);
return -ret;
ret = -ret;
goto out;
}

l4proto = __nf_ct_l4proto_find(pf, protonum);
@ -788,26 +831,30 @@ nf_conntrack_in(struct net *net, u_int8_t pf, unsigned int hooknum,
* inverse of the return code tells to the netfilter
* core what to do with the packet. */
if (l4proto->error != NULL) {
ret = l4proto->error(net, skb, dataoff, &ctinfo, pf, hooknum);
ret = l4proto->error(net, tmpl, skb, dataoff, &ctinfo,
pf, hooknum);
if (ret <= 0) {
NF_CT_STAT_INC_ATOMIC(net, error);
NF_CT_STAT_INC_ATOMIC(net, invalid);
return -ret;
ret = -ret;
goto out;
}
}

ct = resolve_normal_ct(net, skb, dataoff, pf, protonum,
ct = resolve_normal_ct(net, tmpl, skb, dataoff, pf, protonum,
l3proto, l4proto, &set_reply, &ctinfo);
if (!ct) {
/* Not valid part of a connection */
NF_CT_STAT_INC_ATOMIC(net, invalid);
return NF_ACCEPT;
ret = NF_ACCEPT;
goto out;
}

if (IS_ERR(ct)) {
/* Too stressed to deal. */
NF_CT_STAT_INC_ATOMIC(net, drop);
return NF_DROP;
ret = NF_DROP;
goto out;
}

NF_CT_ASSERT(skb->nfct);
@ -822,11 +869,15 @@ nf_conntrack_in(struct net *net, u_int8_t pf, unsigned int hooknum,
NF_CT_STAT_INC_ATOMIC(net, invalid);
if (ret == -NF_DROP)
NF_CT_STAT_INC_ATOMIC(net, drop);
return -ret;
ret = -ret;
goto out;
}

if (set_reply && !test_and_set_bit(IPS_SEEN_REPLY_BIT, &ct->status))
nf_conntrack_event_cache(IPCT_STATUS, ct);
nf_conntrack_event_cache(IPCT_REPLY, ct);
out:
if (tmpl)
nf_ct_put(tmpl);

return ret;
}
@ -865,7 +916,7 @@ void nf_conntrack_alter_reply(struct nf_conn *ct,
return;

rcu_read_lock();
__nf_ct_try_assign_helper(ct, GFP_ATOMIC);
__nf_ct_try_assign_helper(ct, NULL, GFP_ATOMIC);
rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(nf_conntrack_alter_reply);
@ -939,6 +990,14 @@ bool __nf_ct_kill_acct(struct nf_conn *ct,
}
EXPORT_SYMBOL_GPL(__nf_ct_kill_acct);

#ifdef CONFIG_NF_CONNTRACK_ZONES
static struct nf_ct_ext_type nf_ct_zone_extend __read_mostly = {
.len = sizeof(struct nf_conntrack_zone),
.align = __alignof__(struct nf_conntrack_zone),
.id = NF_CT_EXT_ZONE,
};
#endif

#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)

#include <linux/netfilter/nfnetlink.h>
@ -1120,6 +1179,9 @@ static void nf_conntrack_cleanup_init_net(void)

nf_conntrack_helper_fini();
nf_conntrack_proto_fini();
#ifdef CONFIG_NF_CONNTRACK_ZONES
nf_ct_extend_unregister(&nf_ct_zone_extend);
#endif
}

static void nf_conntrack_cleanup_net(struct net *net)
@ -1195,6 +1257,7 @@ int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp)
unsigned int hashsize, old_size;
struct hlist_nulls_head *hash, *old_hash;
struct nf_conntrack_tuple_hash *h;
struct nf_conn *ct;

if (current->nsproxy->net_ns != &init_net)
return -EOPNOTSUPP;
@ -1221,8 +1284,10 @@ int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp)
while (!hlist_nulls_empty(&init_net.ct.hash[i])) {
h = hlist_nulls_entry(init_net.ct.hash[i].first,
struct nf_conntrack_tuple_hash, hnnode);
ct = nf_ct_tuplehash_to_ctrack(h);
hlist_nulls_del_rcu(&h->hnnode);
bucket = __hash_conntrack(&h->tuple, hashsize,
bucket = __hash_conntrack(&h->tuple, nf_ct_zone(ct),
hashsize,
nf_conntrack_hash_rnd);
hlist_nulls_add_head_rcu(&h->hnnode, &hash[bucket]);
}
@ -1280,6 +1345,11 @@ static int nf_conntrack_init_init_net(void)
if (ret < 0)
goto err_helper;

#ifdef CONFIG_NF_CONNTRACK_ZONES
ret = nf_ct_extend_register(&nf_ct_zone_extend);
if (ret < 0)
goto err_extend;
#endif
/* Set up fake conntrack: to never be deleted, not in any hashes */
#ifdef CONFIG_NET_NS
nf_conntrack_untracked.ct_net = &init_net;
@ -1290,6 +1360,10 @@ static int nf_conntrack_init_init_net(void)

return 0;

#ifdef CONFIG_NF_CONNTRACK_ZONES
err_extend:
nf_conntrack_helper_fini();
#endif
err_helper:
nf_conntrack_proto_fini();
err_proto:

@ -27,6 +27,7 @@
#include <net/netfilter/nf_conntrack_expect.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_tuple.h>
#include <net/netfilter/nf_conntrack_zones.h>

unsigned int nf_ct_expect_hsize __read_mostly;
EXPORT_SYMBOL_GPL(nf_ct_expect_hsize);
@ -84,7 +85,8 @@ static unsigned int nf_ct_expect_dst_hash(const struct nf_conntrack_tuple *tuple
}

struct nf_conntrack_expect *
__nf_ct_expect_find(struct net *net, const struct nf_conntrack_tuple *tuple)
__nf_ct_expect_find(struct net *net, u16 zone,
const struct nf_conntrack_tuple *tuple)
{
struct nf_conntrack_expect *i;
struct hlist_node *n;
@ -95,7 +97,8 @@ __nf_ct_expect_find(struct net *net, const struct nf_conntrack_tuple *tuple)

h = nf_ct_expect_dst_hash(tuple);
hlist_for_each_entry_rcu(i, n, &net->ct.expect_hash[h], hnode) {
if (nf_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask))
if (nf_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask) &&
nf_ct_zone(i->master) == zone)
return i;
}
return NULL;
@ -104,12 +107,13 @@ EXPORT_SYMBOL_GPL(__nf_ct_expect_find);

/* Just find a expectation corresponding to a tuple. */
struct nf_conntrack_expect *
nf_ct_expect_find_get(struct net *net, const struct nf_conntrack_tuple *tuple)
nf_ct_expect_find_get(struct net *net, u16 zone,
const struct nf_conntrack_tuple *tuple)
{
struct nf_conntrack_expect *i;

rcu_read_lock();
i = __nf_ct_expect_find(net, tuple);
i = __nf_ct_expect_find(net, zone, tuple);
if (i && !atomic_inc_not_zero(&i->use))
i = NULL;
rcu_read_unlock();
@ -121,7 +125,8 @@ EXPORT_SYMBOL_GPL(nf_ct_expect_find_get);
/* If an expectation for this connection is found, it gets delete from
* global list then returned. */
struct nf_conntrack_expect *
nf_ct_find_expectation(struct net *net, const struct nf_conntrack_tuple *tuple)
nf_ct_find_expectation(struct net *net, u16 zone,
const struct nf_conntrack_tuple *tuple)
{
struct nf_conntrack_expect *i, *exp = NULL;
struct hlist_node *n;
@ -133,7 +138,8 @@ nf_ct_find_expectation(struct net *net, const struct nf_conntrack_tuple *tuple)
h = nf_ct_expect_dst_hash(tuple);
hlist_for_each_entry(i, n, &net->ct.expect_hash[h], hnode) {
if (!(i->flags & NF_CT_EXPECT_INACTIVE) &&
nf_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask)) {
nf_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask) &&
nf_ct_zone(i->master) == zone) {
exp = i;
break;
}
@ -204,7 +210,8 @@ static inline int expect_matches(const struct nf_conntrack_expect *a,
{
return a->master == b->master && a->class == b->class &&
nf_ct_tuple_equal(&a->tuple, &b->tuple) &&
nf_ct_tuple_mask_equal(&a->mask, &b->mask);
nf_ct_tuple_mask_equal(&a->mask, &b->mask) &&
nf_ct_zone(a->master) == nf_ct_zone(b->master);
}

/* Generally a bad idea to call this: could have matched already. */
@ -232,7 +239,6 @@ struct nf_conntrack_expect *nf_ct_expect_alloc(struct nf_conn *me)

new->master = me;
atomic_set(&new->use, 1);
INIT_RCU_HEAD(&new->rcu);
return new;
}
EXPORT_SYMBOL_GPL(nf_ct_expect_alloc);
@ -500,6 +506,7 @@ static void exp_seq_stop(struct seq_file *seq, void *v)
static int exp_seq_show(struct seq_file *s, void *v)
{
struct nf_conntrack_expect *expect;
struct nf_conntrack_helper *helper;
struct hlist_node *n = v;
char *delim = "";

@ -525,6 +532,14 @@ static int exp_seq_show(struct seq_file *s, void *v)
if (expect->flags & NF_CT_EXPECT_INACTIVE)
seq_printf(s, "%sINACTIVE", delim);

helper = rcu_dereference(nfct_help(expect->master)->helper);
if (helper) {
seq_printf(s, "%s%s", expect->flags ? " " : "", helper->name);
if (helper->expect_policy[expect->class].name)
seq_printf(s, "/%s",
helper->expect_policy[expect->class].name);
}

return seq_putc(s, '\n');
}

@ -59,7 +59,6 @@ nf_ct_ext_create(struct nf_ct_ext **ext, enum nf_ct_ext_id id, gfp_t gfp)
	if (!*ext)
		return NULL;

	INIT_RCU_HEAD(&(*ext)->rcu);
	(*ext)->offset[id] = off;
	(*ext)->len = len;

@ -29,6 +29,7 @@
#include <net/netfilter/nf_conntrack_expect.h>
#include <net/netfilter/nf_conntrack_ecache.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_zones.h>
#include <linux/netfilter/nf_conntrack_h323.h>

/* Parameters */
@ -1216,7 +1217,7 @@ static struct nf_conntrack_expect *find_expect(struct nf_conn *ct,
	tuple.dst.u.tcp.port = port;
	tuple.dst.protonum = IPPROTO_TCP;

	exp = __nf_ct_expect_find(net, &tuple);
	exp = __nf_ct_expect_find(net, nf_ct_zone(ct), &tuple);
	if (exp && exp->master == ct)
		return exp;
	return NULL;

@ -65,7 +65,7 @@ __nf_ct_helper_find(const struct nf_conntrack_tuple *tuple)
}

struct nf_conntrack_helper *
__nf_conntrack_helper_find_byname(const char *name)
__nf_conntrack_helper_find(const char *name, u16 l3num, u8 protonum)
{
	struct nf_conntrack_helper *h;
	struct hlist_node *n;
@ -73,13 +73,34 @@ __nf_conntrack_helper_find_byname(const char *name)

	for (i = 0; i < nf_ct_helper_hsize; i++) {
		hlist_for_each_entry_rcu(h, n, &nf_ct_helper_hash[i], hnode) {
			if (!strcmp(h->name, name))
			if (!strcmp(h->name, name) &&
			    h->tuple.src.l3num == l3num &&
			    h->tuple.dst.protonum == protonum)
				return h;
		}
	}
	return NULL;
}
EXPORT_SYMBOL_GPL(__nf_conntrack_helper_find_byname);
EXPORT_SYMBOL_GPL(__nf_conntrack_helper_find);

struct nf_conntrack_helper *
nf_conntrack_helper_try_module_get(const char *name, u16 l3num, u8 protonum)
{
	struct nf_conntrack_helper *h;

	h = __nf_conntrack_helper_find(name, l3num, protonum);
#ifdef CONFIG_MODULES
	if (h == NULL) {
		if (request_module("nfct-helper-%s", name) == 0)
			h = __nf_conntrack_helper_find(name, l3num, protonum);
	}
#endif
	if (h != NULL && !try_module_get(h->me))
		h = NULL;

	return h;
}
EXPORT_SYMBOL_GPL(nf_conntrack_helper_try_module_get);

struct nf_conn_help *nf_ct_helper_ext_add(struct nf_conn *ct, gfp_t gfp)
{
@ -94,13 +115,22 @@ struct nf_conn_help *nf_ct_helper_ext_add(struct nf_conn *ct, gfp_t gfp)
}
EXPORT_SYMBOL_GPL(nf_ct_helper_ext_add);

int __nf_ct_try_assign_helper(struct nf_conn *ct, gfp_t flags)
int __nf_ct_try_assign_helper(struct nf_conn *ct, struct nf_conn *tmpl,
			      gfp_t flags)
{
	struct nf_conntrack_helper *helper = NULL;
	struct nf_conn_help *help;
	int ret = 0;
	struct nf_conntrack_helper *helper;
	struct nf_conn_help *help = nfct_help(ct);

	helper = __nf_ct_helper_find(&ct->tuplehash[IP_CT_DIR_REPLY].tuple);
	if (tmpl != NULL) {
		help = nfct_help(tmpl);
		if (help != NULL)
			helper = help->helper;
	}

	help = nfct_help(ct);
	if (helper == NULL)
		helper = __nf_ct_helper_find(&ct->tuplehash[IP_CT_DIR_REPLY].tuple);
	if (helper == NULL) {
		if (help)
			rcu_assign_pointer(help->helper, NULL);

@ -30,6 +30,7 @@

#include <linux/netfilter.h>
#include <net/netlink.h>
#include <net/sock.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_expect.h>
@ -38,6 +39,7 @@
#include <net/netfilter/nf_conntrack_l4proto.h>
#include <net/netfilter/nf_conntrack_tuple.h>
#include <net/netfilter/nf_conntrack_acct.h>
#include <net/netfilter/nf_conntrack_zones.h>
#ifdef CONFIG_NF_NAT_NEEDED
#include <net/netfilter/nf_nat_core.h>
#include <net/netfilter/nf_nat_protocol.h>
@ -378,6 +380,9 @@ ctnetlink_fill_info(struct sk_buff *skb, u32 pid, u32 seq,
		goto nla_put_failure;
	nla_nest_end(skb, nest_parms);

	if (nf_ct_zone(ct))
		NLA_PUT_BE16(skb, CTA_ZONE, htons(nf_ct_zone(ct)));

	if (ctnetlink_dump_status(skb, ct) < 0 ||
	    ctnetlink_dump_timeout(skb, ct) < 0 ||
	    ctnetlink_dump_counters(skb, ct, IP_CT_DIR_ORIGINAL) < 0 ||
@ -456,6 +461,7 @@ ctnetlink_nlmsg_size(const struct nf_conn *ct)
static int
ctnetlink_conntrack_event(unsigned int events, struct nf_ct_event *item)
{
	struct net *net;
	struct nlmsghdr *nlh;
	struct nfgenmsg *nfmsg;
	struct nlattr *nest_parms;
@ -482,7 +488,8 @@ ctnetlink_conntrack_event(unsigned int events, struct nf_ct_event *item)
	} else
		return 0;

	if (!item->report && !nfnetlink_has_listeners(group))
	net = nf_ct_net(ct);
	if (!item->report && !nfnetlink_has_listeners(net, group))
		return 0;

	skb = nlmsg_new(ctnetlink_nlmsg_size(ct), GFP_ATOMIC);
@ -514,6 +521,9 @@ ctnetlink_conntrack_event(unsigned int events, struct nf_ct_event *item)
		goto nla_put_failure;
	nla_nest_end(skb, nest_parms);

	if (nf_ct_zone(ct))
		NLA_PUT_BE16(skb, CTA_ZONE, htons(nf_ct_zone(ct)));

	if (ctnetlink_dump_id(skb, ct) < 0)
		goto nla_put_failure;

@ -559,7 +569,8 @@ ctnetlink_conntrack_event(unsigned int events, struct nf_ct_event *item)
	rcu_read_unlock();

	nlmsg_end(skb, nlh);
	err = nfnetlink_send(skb, item->pid, group, item->report, GFP_ATOMIC);
	err = nfnetlink_send(skb, net, item->pid, group, item->report,
			     GFP_ATOMIC);
	if (err == -ENOBUFS || err == -EAGAIN)
		return -ENOBUFS;

@ -571,7 +582,7 @@ nla_put_failure:
nlmsg_failure:
	kfree_skb(skb);
errout:
	nfnetlink_set_err(0, group, -ENOBUFS);
	nfnetlink_set_err(net, 0, group, -ENOBUFS);
	return 0;
}
#endif /* CONFIG_NF_CONNTRACK_EVENTS */
@ -586,6 +597,7 @@ static int ctnetlink_done(struct netlink_callback *cb)
static int
ctnetlink_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct nf_conn *ct, *last;
	struct nf_conntrack_tuple_hash *h;
	struct hlist_nulls_node *n;
@ -594,9 +606,9 @@ ctnetlink_dump_table(struct sk_buff *skb, struct netlink_callback *cb)

	rcu_read_lock();
	last = (struct nf_conn *)cb->args[1];
	for (; cb->args[0] < init_net.ct.htable_size; cb->args[0]++) {
	for (; cb->args[0] < net->ct.htable_size; cb->args[0]++) {
restart:
		hlist_nulls_for_each_entry_rcu(h, n, &init_net.ct.hash[cb->args[0]],
		hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[cb->args[0]],
					       hnnode) {
			if (NF_CT_DIRECTION(h) != IP_CT_DIR_ORIGINAL)
				continue;
@ -703,6 +715,11 @@ ctnetlink_parse_tuple_proto(struct nlattr *attr,
	return ret;
}

static const struct nla_policy tuple_nla_policy[CTA_TUPLE_MAX+1] = {
	[CTA_TUPLE_IP]		= { .type = NLA_NESTED },
	[CTA_TUPLE_PROTO]	= { .type = NLA_NESTED },
};

static int
ctnetlink_parse_tuple(const struct nlattr * const cda[],
		      struct nf_conntrack_tuple *tuple,
@ -713,7 +730,7 @@ ctnetlink_parse_tuple(const struct nlattr * const cda[],

	memset(tuple, 0, sizeof(*tuple));

	nla_parse_nested(tb, CTA_TUPLE_MAX, cda[type], NULL);
	nla_parse_nested(tb, CTA_TUPLE_MAX, cda[type], tuple_nla_policy);

	if (!tb[CTA_TUPLE_IP])
		return -EINVAL;
@ -740,12 +757,31 @@ ctnetlink_parse_tuple(const struct nlattr * const cda[],
	return 0;
}

static int
ctnetlink_parse_zone(const struct nlattr *attr, u16 *zone)
{
	if (attr)
#ifdef CONFIG_NF_CONNTRACK_ZONES
		*zone = ntohs(nla_get_be16(attr));
#else
		return -EOPNOTSUPP;
#endif
	else
		*zone = 0;

	return 0;
}

static const struct nla_policy help_nla_policy[CTA_HELP_MAX+1] = {
	[CTA_HELP_NAME]		= { .type = NLA_NUL_STRING },
};

static inline int
ctnetlink_parse_help(const struct nlattr *attr, char **helper_name)
{
	struct nlattr *tb[CTA_HELP_MAX+1];

	nla_parse_nested(tb, CTA_HELP_MAX, attr, NULL);
	nla_parse_nested(tb, CTA_HELP_MAX, attr, help_nla_policy);

	if (!tb[CTA_HELP_NAME])
		return -EINVAL;
@ -756,11 +792,18 @@ ctnetlink_parse_help(const struct nlattr *attr, char **helper_name)
}

static const struct nla_policy ct_nla_policy[CTA_MAX+1] = {
	[CTA_TUPLE_ORIG]	= { .type = NLA_NESTED },
	[CTA_TUPLE_REPLY]	= { .type = NLA_NESTED },
	[CTA_STATUS]		= { .type = NLA_U32 },
	[CTA_PROTOINFO]		= { .type = NLA_NESTED },
	[CTA_HELP]		= { .type = NLA_NESTED },
	[CTA_NAT_SRC]		= { .type = NLA_NESTED },
	[CTA_TIMEOUT]		= { .type = NLA_U32 },
	[CTA_MARK]		= { .type = NLA_U32 },
	[CTA_USE]		= { .type = NLA_U32 },
	[CTA_ID]		= { .type = NLA_U32 },
	[CTA_NAT_DST]		= { .type = NLA_NESTED },
	[CTA_TUPLE_MASTER]	= { .type = NLA_NESTED },
	[CTA_ZONE]		= { .type = NLA_U16 },
};

static int
@ -768,12 +811,18 @@ ctnetlink_del_conntrack(struct sock *ctnl, struct sk_buff *skb,
			const struct nlmsghdr *nlh,
			const struct nlattr * const cda[])
{
	struct net *net = sock_net(ctnl);
	struct nf_conntrack_tuple_hash *h;
	struct nf_conntrack_tuple tuple;
	struct nf_conn *ct;
	struct nfgenmsg *nfmsg = nlmsg_data(nlh);
	u_int8_t u3 = nfmsg->nfgen_family;
	int err = 0;
	u16 zone;
	int err;

	err = ctnetlink_parse_zone(cda[CTA_ZONE], &zone);
	if (err < 0)
		return err;

	if (cda[CTA_TUPLE_ORIG])
		err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_ORIG, u3);
@ -781,7 +830,7 @@ ctnetlink_del_conntrack(struct sock *ctnl, struct sk_buff *skb,
		err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_REPLY, u3);
	else {
		/* Flush the whole table */
		nf_conntrack_flush_report(&init_net,
		nf_conntrack_flush_report(net,
					  NETLINK_CB(skb).pid,
					  nlmsg_report(nlh));
		return 0;
@ -790,7 +839,7 @@ ctnetlink_del_conntrack(struct sock *ctnl, struct sk_buff *skb,
	if (err < 0)
		return err;

	h = nf_conntrack_find_get(&init_net, &tuple);
	h = nf_conntrack_find_get(net, zone, &tuple);
	if (!h)
		return -ENOENT;

@ -828,18 +877,24 @@ ctnetlink_get_conntrack(struct sock *ctnl, struct sk_buff *skb,
			const struct nlmsghdr *nlh,
			const struct nlattr * const cda[])
{
	struct net *net = sock_net(ctnl);
	struct nf_conntrack_tuple_hash *h;
	struct nf_conntrack_tuple tuple;
	struct nf_conn *ct;
	struct sk_buff *skb2 = NULL;
	struct nfgenmsg *nfmsg = nlmsg_data(nlh);
	u_int8_t u3 = nfmsg->nfgen_family;
	int err = 0;
	u16 zone;
	int err;

	if (nlh->nlmsg_flags & NLM_F_DUMP)
		return netlink_dump_start(ctnl, skb, nlh, ctnetlink_dump_table,
					  ctnetlink_done);

	err = ctnetlink_parse_zone(cda[CTA_ZONE], &zone);
	if (err < 0)
		return err;

	if (cda[CTA_TUPLE_ORIG])
		err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_ORIG, u3);
	else if (cda[CTA_TUPLE_REPLY])
@ -850,7 +905,7 @@ ctnetlink_get_conntrack(struct sock *ctnl, struct sk_buff *skb,
	if (err < 0)
		return err;

	h = nf_conntrack_find_get(&init_net, &tuple);
	h = nf_conntrack_find_get(net, zone, &tuple);
	if (!h)
		return -ENOENT;

@ -994,7 +1049,8 @@ ctnetlink_change_helper(struct nf_conn *ct, const struct nlattr * const cda[])
		return 0;
	}

	helper = __nf_conntrack_helper_find_byname(helpname);
	helper = __nf_conntrack_helper_find(helpname, nf_ct_l3num(ct),
					    nf_ct_protonum(ct));
	if (helper == NULL) {
#ifdef CONFIG_MODULES
		spin_unlock_bh(&nf_conntrack_lock);
@ -1005,7 +1061,8 @@ ctnetlink_change_helper(struct nf_conn *ct, const struct nlattr * const cda[])
		}

		spin_lock_bh(&nf_conntrack_lock);
		helper = __nf_conntrack_helper_find_byname(helpname);
		helper = __nf_conntrack_helper_find(helpname, nf_ct_l3num(ct),
						    nf_ct_protonum(ct));
		if (helper)
			return -EAGAIN;
#endif
@ -1044,6 +1101,12 @@ ctnetlink_change_timeout(struct nf_conn *ct, const struct nlattr * const cda[])
	return 0;
}

static const struct nla_policy protoinfo_policy[CTA_PROTOINFO_MAX+1] = {
	[CTA_PROTOINFO_TCP]	= { .type = NLA_NESTED },
	[CTA_PROTOINFO_DCCP]	= { .type = NLA_NESTED },
	[CTA_PROTOINFO_SCTP]	= { .type = NLA_NESTED },
};

static inline int
ctnetlink_change_protoinfo(struct nf_conn *ct, const struct nlattr * const cda[])
{
@ -1052,7 +1115,7 @@ ctnetlink_change_protoinfo(struct nf_conn *ct, const struct nlattr * const cda[]
	struct nf_conntrack_l4proto *l4proto;
	int err = 0;

	nla_parse_nested(tb, CTA_PROTOINFO_MAX, attr, NULL);
	nla_parse_nested(tb, CTA_PROTOINFO_MAX, attr, protoinfo_policy);

	rcu_read_lock();
	l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
@ -1064,12 +1127,18 @@ ctnetlink_change_protoinfo(struct nf_conn *ct, const struct nlattr * const cda[]
}

#ifdef CONFIG_NF_NAT_NEEDED
static const struct nla_policy nat_seq_policy[CTA_NAT_SEQ_MAX+1] = {
	[CTA_NAT_SEQ_CORRECTION_POS]	= { .type = NLA_U32 },
	[CTA_NAT_SEQ_OFFSET_BEFORE]	= { .type = NLA_U32 },
	[CTA_NAT_SEQ_OFFSET_AFTER]	= { .type = NLA_U32 },
};

static inline int
change_nat_seq_adj(struct nf_nat_seq *natseq, const struct nlattr * const attr)
{
	struct nlattr *cda[CTA_NAT_SEQ_MAX+1];

	nla_parse_nested(cda, CTA_NAT_SEQ_MAX, attr, NULL);
	nla_parse_nested(cda, CTA_NAT_SEQ_MAX, attr, nat_seq_policy);

	if (!cda[CTA_NAT_SEQ_CORRECTION_POS])
		return -EINVAL;
@ -1175,7 +1244,8 @@ ctnetlink_change_conntrack(struct nf_conn *ct,
}

static struct nf_conn *
ctnetlink_create_conntrack(const struct nlattr * const cda[],
ctnetlink_create_conntrack(struct net *net, u16 zone,
			   const struct nlattr * const cda[],
			   struct nf_conntrack_tuple *otuple,
			   struct nf_conntrack_tuple *rtuple,
			   u8 u3)
@ -1184,7 +1254,7 @@ ctnetlink_create_conntrack(const struct nlattr * const cda[],
	int err = -EINVAL;
	struct nf_conntrack_helper *helper;

	ct = nf_conntrack_alloc(&init_net, otuple, rtuple, GFP_ATOMIC);
	ct = nf_conntrack_alloc(net, zone, otuple, rtuple, GFP_ATOMIC);
	if (IS_ERR(ct))
		return ERR_PTR(-ENOMEM);

@ -1203,7 +1273,8 @@ ctnetlink_create_conntrack(const struct nlattr * const cda[],
		if (err < 0)
			goto err2;

		helper = __nf_conntrack_helper_find_byname(helpname);
		helper = __nf_conntrack_helper_find(helpname, nf_ct_l3num(ct),
						    nf_ct_protonum(ct));
		if (helper == NULL) {
			rcu_read_unlock();
#ifdef CONFIG_MODULES
@ -1213,7 +1284,9 @@ ctnetlink_create_conntrack(const struct nlattr * const cda[],
			}

			rcu_read_lock();
			helper = __nf_conntrack_helper_find_byname(helpname);
			helper = __nf_conntrack_helper_find(helpname,
							    nf_ct_l3num(ct),
							    nf_ct_protonum(ct));
			if (helper) {
				err = -EAGAIN;
				goto err2;
@ -1236,7 +1309,7 @@ ctnetlink_create_conntrack(const struct nlattr * const cda[],
		}
	} else {
		/* try an implicit helper assignation */
		err = __nf_ct_try_assign_helper(ct, GFP_ATOMIC);
		err = __nf_ct_try_assign_helper(ct, NULL, GFP_ATOMIC);
		if (err < 0)
			goto err2;
	}
@ -1268,7 +1341,7 @@ ctnetlink_create_conntrack(const struct nlattr * const cda[],
	}

	nf_ct_acct_ext_add(ct, GFP_ATOMIC);
	nf_ct_ecache_ext_add(ct, GFP_ATOMIC);
	nf_ct_ecache_ext_add(ct, 0, 0, GFP_ATOMIC);

#if defined(CONFIG_NF_CONNTRACK_MARK)
	if (cda[CTA_MARK])
@ -1285,7 +1358,7 @@ ctnetlink_create_conntrack(const struct nlattr * const cda[],
		if (err < 0)
			goto err2;

		master_h = nf_conntrack_find_get(&init_net, &master);
		master_h = nf_conntrack_find_get(net, zone, &master);
		if (master_h == NULL) {
			err = -ENOENT;
			goto err2;
@ -1313,11 +1386,17 @@ ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb,
			const struct nlmsghdr *nlh,
			const struct nlattr * const cda[])
{
	struct net *net = sock_net(ctnl);
	struct nf_conntrack_tuple otuple, rtuple;
	struct nf_conntrack_tuple_hash *h = NULL;
	struct nfgenmsg *nfmsg = nlmsg_data(nlh);
	u_int8_t u3 = nfmsg->nfgen_family;
	int err = 0;
	u16 zone;
	int err;

	err = ctnetlink_parse_zone(cda[CTA_ZONE], &zone);
	if (err < 0)
		return err;

	if (cda[CTA_TUPLE_ORIG]) {
		err = ctnetlink_parse_tuple(cda, &otuple, CTA_TUPLE_ORIG, u3);
@ -1333,9 +1412,9 @@ ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb,

	spin_lock_bh(&nf_conntrack_lock);
	if (cda[CTA_TUPLE_ORIG])
		h = __nf_conntrack_find(&init_net, &otuple);
		h = __nf_conntrack_find(net, zone, &otuple);
	else if (cda[CTA_TUPLE_REPLY])
		h = __nf_conntrack_find(&init_net, &rtuple);
		h = __nf_conntrack_find(net, zone, &rtuple);

	if (h == NULL) {
		err = -ENOENT;
@ -1343,7 +1422,7 @@ ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb,
		struct nf_conn *ct;
		enum ip_conntrack_events events;

		ct = ctnetlink_create_conntrack(cda, &otuple,
		ct = ctnetlink_create_conntrack(net, zone, cda, &otuple,
						&rtuple, u3);
		if (IS_ERR(ct)) {
			err = PTR_ERR(ct);
@ -1357,7 +1436,8 @@ ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb,
		else
			events = IPCT_NEW;

		nf_conntrack_eventmask_report((1 << IPCT_STATUS) |
		nf_conntrack_eventmask_report((1 << IPCT_REPLY) |
					      (1 << IPCT_ASSURED) |
					      (1 << IPCT_HELPER) |
					      (1 << IPCT_PROTOINFO) |
					      (1 << IPCT_NATSEQADJ) |
@ -1382,7 +1462,8 @@ ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb,
		if (err == 0) {
			nf_conntrack_get(&ct->ct_general);
			spin_unlock_bh(&nf_conntrack_lock);
			nf_conntrack_eventmask_report((1 << IPCT_STATUS) |
			nf_conntrack_eventmask_report((1 << IPCT_REPLY) |
						      (1 << IPCT_ASSURED) |
						      (1 << IPCT_HELPER) |
						      (1 << IPCT_PROTOINFO) |
						      (1 << IPCT_NATSEQADJ) |
@ -1469,6 +1550,7 @@ ctnetlink_exp_dump_expect(struct sk_buff *skb,
			  const struct nf_conntrack_expect *exp)
{
	struct nf_conn *master = exp->master;
	struct nf_conntrack_helper *helper;
	long timeout = (exp->timeout.expires - jiffies) / HZ;

	if (timeout < 0)
@ -1485,6 +1567,9 @@ ctnetlink_exp_dump_expect(struct sk_buff *skb,

	NLA_PUT_BE32(skb, CTA_EXPECT_TIMEOUT, htonl(timeout));
	NLA_PUT_BE32(skb, CTA_EXPECT_ID, htonl((unsigned long)exp));
	helper = rcu_dereference(nfct_help(master)->helper);
	if (helper)
		NLA_PUT_STRING(skb, CTA_EXPECT_HELP_NAME, helper->name);

	return 0;

@ -1526,9 +1611,10 @@ nla_put_failure:
static int
ctnetlink_expect_event(unsigned int events, struct nf_exp_event *item)
{
	struct nf_conntrack_expect *exp = item->exp;
	struct net *net = nf_ct_exp_net(exp);
	struct nlmsghdr *nlh;
	struct nfgenmsg *nfmsg;
	struct nf_conntrack_expect *exp = item->exp;
	struct sk_buff *skb;
	unsigned int type;
	int flags = 0;
@ -1540,7 +1626,7 @@ ctnetlink_expect_event(unsigned int events, struct nf_exp_event *item)
		return 0;

	if (!item->report &&
	    !nfnetlink_has_listeners(NFNLGRP_CONNTRACK_EXP_NEW))
	    !nfnetlink_has_listeners(net, NFNLGRP_CONNTRACK_EXP_NEW))
		return 0;

	skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
@ -1563,7 +1649,7 @@ ctnetlink_expect_event(unsigned int events, struct nf_exp_event *item)
	rcu_read_unlock();

	nlmsg_end(skb, nlh);
	nfnetlink_send(skb, item->pid, NFNLGRP_CONNTRACK_EXP_NEW,
	nfnetlink_send(skb, net, item->pid, NFNLGRP_CONNTRACK_EXP_NEW,
		       item->report, GFP_ATOMIC);
	return 0;

@ -1573,7 +1659,7 @@ nla_put_failure:
nlmsg_failure:
	kfree_skb(skb);
errout:
	nfnetlink_set_err(0, 0, -ENOBUFS);
	nfnetlink_set_err(net, 0, 0, -ENOBUFS);
	return 0;
}
#endif
@ -1587,7 +1673,7 @@ static int ctnetlink_exp_done(struct netlink_callback *cb)
static int
ctnetlink_exp_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = &init_net;
	struct net *net = sock_net(skb->sk);
	struct nf_conntrack_expect *exp, *last;
	struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
	struct hlist_node *n;
@ -1631,8 +1717,12 @@ out:
}

static const struct nla_policy exp_nla_policy[CTA_EXPECT_MAX+1] = {
	[CTA_EXPECT_MASTER]	= { .type = NLA_NESTED },
	[CTA_EXPECT_TUPLE]	= { .type = NLA_NESTED },
	[CTA_EXPECT_MASK]	= { .type = NLA_NESTED },
	[CTA_EXPECT_TIMEOUT]	= { .type = NLA_U32 },
	[CTA_EXPECT_ID]		= { .type = NLA_U32 },
	[CTA_EXPECT_HELP_NAME]	= { .type = NLA_NUL_STRING },
};

static int
@ -1640,12 +1730,14 @@ ctnetlink_get_expect(struct sock *ctnl, struct sk_buff *skb,
		     const struct nlmsghdr *nlh,
		     const struct nlattr * const cda[])
{
	struct net *net = sock_net(ctnl);
	struct nf_conntrack_tuple tuple;
	struct nf_conntrack_expect *exp;
	struct sk_buff *skb2;
	struct nfgenmsg *nfmsg = nlmsg_data(nlh);
	u_int8_t u3 = nfmsg->nfgen_family;
	int err = 0;
	u16 zone;
	int err;

	if (nlh->nlmsg_flags & NLM_F_DUMP) {
		return netlink_dump_start(ctnl, skb, nlh,
@ -1653,6 +1745,10 @@ ctnetlink_get_expect(struct sock *ctnl, struct sk_buff *skb,
					  ctnetlink_exp_done);
	}

	err = ctnetlink_parse_zone(cda[CTA_EXPECT_ZONE], &zone);
	if (err < 0)
		return err;

	if (cda[CTA_EXPECT_MASTER])
		err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_MASTER, u3);
	else
@ -1661,7 +1757,7 @@ ctnetlink_get_expect(struct sock *ctnl, struct sk_buff *skb,
	if (err < 0)
		return err;

	exp = nf_ct_expect_find_get(&init_net, &tuple);
	exp = nf_ct_expect_find_get(net, zone, &tuple);
	if (!exp)
		return -ENOENT;

@ -1701,23 +1797,28 @@ ctnetlink_del_expect(struct sock *ctnl, struct sk_buff *skb,
		     const struct nlmsghdr *nlh,
		     const struct nlattr * const cda[])
{
	struct net *net = sock_net(ctnl);
	struct nf_conntrack_expect *exp;
	struct nf_conntrack_tuple tuple;
	struct nf_conntrack_helper *h;
	struct nfgenmsg *nfmsg = nlmsg_data(nlh);
	struct hlist_node *n, *next;
	u_int8_t u3 = nfmsg->nfgen_family;
	unsigned int i;
	u16 zone;
	int err;

	if (cda[CTA_EXPECT_TUPLE]) {
		/* delete a single expect by tuple */
		err = ctnetlink_parse_zone(cda[CTA_EXPECT_ZONE], &zone);
		if (err < 0)
			return err;

		err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE, u3);
		if (err < 0)
			return err;

		/* bump usage count to 2 */
		exp = nf_ct_expect_find_get(&init_net, &tuple);
		exp = nf_ct_expect_find_get(net, zone, &tuple);
		if (!exp)
			return -ENOENT;

@ -1740,18 +1841,13 @@ ctnetlink_del_expect(struct sock *ctnl, struct sk_buff *skb,

		/* delete all expectations for this helper */
		spin_lock_bh(&nf_conntrack_lock);
		h = __nf_conntrack_helper_find_byname(name);
		if (!h) {
			spin_unlock_bh(&nf_conntrack_lock);
			return -EOPNOTSUPP;
		}
		for (i = 0; i < nf_ct_expect_hsize; i++) {
			hlist_for_each_entry_safe(exp, n, next,
						  &init_net.ct.expect_hash[i],
						  &net->ct.expect_hash[i],
						  hnode) {
				m_help = nfct_help(exp->master);
				if (m_help->helper == h
				    && del_timer(&exp->timeout)) {
				if (!strcmp(m_help->helper->name, name) &&
				    del_timer(&exp->timeout)) {
					nf_ct_unlink_expect(exp);
					nf_ct_expect_put(exp);
				}
@ -1763,7 +1859,7 @@ ctnetlink_del_expect(struct sock *ctnl, struct sk_buff *skb,
		spin_lock_bh(&nf_conntrack_lock);
		for (i = 0; i < nf_ct_expect_hsize; i++) {
			hlist_for_each_entry_safe(exp, n, next,
						  &init_net.ct.expect_hash[i],
						  &net->ct.expect_hash[i],
						  hnode) {
				if (del_timer(&exp->timeout)) {
					nf_ct_unlink_expect(exp);
@ -1784,7 +1880,9 @@ ctnetlink_change_expect(struct nf_conntrack_expect *x,
}

static int
ctnetlink_create_expect(const struct nlattr * const cda[], u_int8_t u3,
ctnetlink_create_expect(struct net *net, u16 zone,
			const struct nlattr * const cda[],
			u_int8_t u3,
			u32 pid, int report)
{
	struct nf_conntrack_tuple tuple, mask, master_tuple;
@ -1806,7 +1904,7 @@ ctnetlink_create_expect(const struct nlattr * const cda[], u_int8_t u3,
		return err;

	/* Look for master conntrack of this expectation */
	h = nf_conntrack_find_get(&init_net, &master_tuple);
	h = nf_conntrack_find_get(net, zone, &master_tuple);
	if (!h)
		return -ENOENT;
	ct = nf_ct_tuplehash_to_ctrack(h);
@ -1846,29 +1944,35 @@ ctnetlink_new_expect(struct sock *ctnl, struct sk_buff *skb,
		     const struct nlmsghdr *nlh,
		     const struct nlattr * const cda[])
{
	struct net *net = sock_net(ctnl);
	struct nf_conntrack_tuple tuple;
	struct nf_conntrack_expect *exp;
	struct nfgenmsg *nfmsg = nlmsg_data(nlh);
	u_int8_t u3 = nfmsg->nfgen_family;
	int err = 0;
	u16 zone;
	int err;

	if (!cda[CTA_EXPECT_TUPLE]
	    || !cda[CTA_EXPECT_MASK]
	    || !cda[CTA_EXPECT_MASTER])
		return -EINVAL;

	err = ctnetlink_parse_zone(cda[CTA_EXPECT_ZONE], &zone);
	if (err < 0)
		return err;

	err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE, u3);
	if (err < 0)
		return err;

	spin_lock_bh(&nf_conntrack_lock);
	exp = __nf_ct_expect_find(&init_net, &tuple);
	exp = __nf_ct_expect_find(net, zone, &tuple);

	if (!exp) {
		spin_unlock_bh(&nf_conntrack_lock);
		err = -ENOENT;
		if (nlh->nlmsg_flags & NLM_F_CREATE) {
			err = ctnetlink_create_expect(cda,
			err = ctnetlink_create_expect(net, zone, cda,
						      u3,
						      NETLINK_CB(skb).pid,
						      nlmsg_report(nlh));

@ -28,6 +28,7 @@
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_zones.h>
#include <linux/netfilter/nf_conntrack_proto_gre.h>
#include <linux/netfilter/nf_conntrack_pptp.h>

@ -123,7 +124,7 @@ static void pptp_expectfn(struct nf_conn *ct,
	pr_debug("trying to unexpect other dir: ");
	nf_ct_dump_tuple(&inv_t);

	exp_other = nf_ct_expect_find_get(net, &inv_t);
	exp_other = nf_ct_expect_find_get(net, nf_ct_zone(ct), &inv_t);
	if (exp_other) {
		/* delete other expectation. */
		pr_debug("found\n");
@ -136,17 +137,18 @@ static void pptp_expectfn(struct nf_conn *ct,
	rcu_read_unlock();
}

static int destroy_sibling_or_exp(struct net *net,
static int destroy_sibling_or_exp(struct net *net, struct nf_conn *ct,
				  const struct nf_conntrack_tuple *t)
{
	const struct nf_conntrack_tuple_hash *h;
	struct nf_conntrack_expect *exp;
	struct nf_conn *sibling;
	u16 zone = nf_ct_zone(ct);

	pr_debug("trying to timeout ct or exp for tuple ");
	nf_ct_dump_tuple(t);

	h = nf_conntrack_find_get(net, t);
	h = nf_conntrack_find_get(net, zone, t);
	if (h) {
		sibling = nf_ct_tuplehash_to_ctrack(h);
		pr_debug("setting timeout of conntrack %p to 0\n", sibling);
@ -157,7 +159,7 @@ static int destroy_sibling_or_exp(struct net *net,
		nf_ct_put(sibling);
		return 1;
	} else {
		exp = nf_ct_expect_find_get(net, t);
		exp = nf_ct_expect_find_get(net, zone, t);
		if (exp) {
			pr_debug("unexpect_related of expect %p\n", exp);
			nf_ct_unexpect_related(exp);
@ -182,7 +184,7 @@ static void pptp_destroy_siblings(struct nf_conn *ct)
	t.dst.protonum = IPPROTO_GRE;
	t.src.u.gre.key = help->help.ct_pptp_info.pns_call_id;
	t.dst.u.gre.key = help->help.ct_pptp_info.pac_call_id;
	if (!destroy_sibling_or_exp(net, &t))
	if (!destroy_sibling_or_exp(net, ct, &t))
		pr_debug("failed to timeout original pns->pac ct/exp\n");

	/* try reply (pac->pns) tuple */
@ -190,7 +192,7 @@ static void pptp_destroy_siblings(struct nf_conn *ct)
	t.dst.protonum = IPPROTO_GRE;
	t.src.u.gre.key = help->help.ct_pptp_info.pac_call_id;
	t.dst.u.gre.key = help->help.ct_pptp_info.pns_call_id;
	if (!destroy_sibling_or_exp(net, &t))
	if (!destroy_sibling_or_exp(net, ct, &t))
		pr_debug("failed to timeout reply pac->pns ct/exp\n");
}

@ -561,8 +561,9 @@ static int dccp_packet(struct nf_conn *ct, const struct sk_buff *skb,
	return NF_ACCEPT;
}

static int dccp_error(struct net *net, struct sk_buff *skb,
		      unsigned int dataoff, enum ip_conntrack_info *ctinfo,
static int dccp_error(struct net *net, struct nf_conn *tmpl,
		      struct sk_buff *skb, unsigned int dataoff,
		      enum ip_conntrack_info *ctinfo,
		      u_int8_t pf, unsigned int hooknum)
{
	struct dccp_hdr _dh, *dh;

@ -241,7 +241,7 @@ static int gre_packet(struct nf_conn *ct,
				  ct->proto.gre.stream_timeout);
		/* Also, more likely to be important, and not a probe. */
		set_bit(IPS_ASSURED_BIT, &ct->status);
		nf_conntrack_event_cache(IPCT_STATUS, ct);
		nf_conntrack_event_cache(IPCT_ASSURED, ct);
	} else
		nf_ct_refresh_acct(ct, ctinfo, skb,
				   ct->proto.gre.timeout);

@ -377,7 +377,7 @@ static int sctp_packet(struct nf_conn *ct,
		    new_state == SCTP_CONNTRACK_ESTABLISHED) {
			pr_debug("Setting assured bit\n");
			set_bit(IPS_ASSURED_BIT, &ct->status);
			nf_conntrack_event_cache(IPCT_STATUS, ct);
			nf_conntrack_event_cache(IPCT_ASSURED, ct);
		}

	return NF_ACCEPT;

@ -760,7 +760,7 @@ static const u8 tcp_valid_flags[(TH_FIN|TH_SYN|TH_RST|TH_ACK|TH_URG) + 1] =
};

/* Protect conntrack agaist broken packets. Code taken from ipt_unclean.c. */
static int tcp_error(struct net *net,
static int tcp_error(struct net *net, struct nf_conn *tmpl,
		     struct sk_buff *skb,
		     unsigned int dataoff,
		     enum ip_conntrack_info *ctinfo,
@ -1045,7 +1045,7 @@ static int tcp_packet(struct nf_conn *ct,
			   after SYN_RECV or a valid answer for a picked up
			   connection. */
			set_bit(IPS_ASSURED_BIT, &ct->status);
			nf_conntrack_event_cache(IPCT_STATUS, ct);
			nf_conntrack_event_cache(IPCT_ASSURED, ct);
		}
	nf_ct_refresh_acct(ct, ctinfo, skb, timeout);

@ -77,7 +77,7 @@ static int udp_packet(struct nf_conn *ct,
		nf_ct_refresh_acct(ct, ctinfo, skb, nf_ct_udp_timeout_stream);
		/* Also, more likely to be important, and not a probe */
		if (!test_and_set_bit(IPS_ASSURED_BIT, &ct->status))
			nf_conntrack_event_cache(IPCT_STATUS, ct);
			nf_conntrack_event_cache(IPCT_ASSURED, ct);
	} else
		nf_ct_refresh_acct(ct, ctinfo, skb, nf_ct_udp_timeout);

@ -91,8 +91,8 @@ static bool udp_new(struct nf_conn *ct, const struct sk_buff *skb,
	return true;
}

static int udp_error(struct net *net, struct sk_buff *skb, unsigned int dataoff,
		     enum ip_conntrack_info *ctinfo,
static int udp_error(struct net *net, struct nf_conn *tmpl, struct sk_buff *skb,
		     unsigned int dataoff, enum ip_conntrack_info *ctinfo,
		     u_int8_t pf,
		     unsigned int hooknum)
{

@ -75,7 +75,7 @@ static int udplite_packet(struct nf_conn *ct,
				   nf_ct_udplite_timeout_stream);
		/* Also, more likely to be important, and not a probe */
		if (!test_and_set_bit(IPS_ASSURED_BIT, &ct->status))
			nf_conntrack_event_cache(IPCT_STATUS, ct);
			nf_conntrack_event_cache(IPCT_ASSURED, ct);
	} else
		nf_ct_refresh_acct(ct, ctinfo, skb, nf_ct_udplite_timeout);

@ -89,7 +89,7 @@ static bool udplite_new(struct nf_conn *ct, const struct sk_buff *skb,
	return true;
}

static int udplite_error(struct net *net,
static int udplite_error(struct net *net, struct nf_conn *tmpl,
			 struct sk_buff *skb,
			 unsigned int dataoff,
			 enum ip_conntrack_info *ctinfo,

@ -16,12 +16,14 @@
#include <linux/inet.h>
#include <linux/in.h>
#include <linux/udp.h>
#include <linux/tcp.h>
#include <linux/netfilter.h>

#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_expect.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_zones.h>
#include <linux/netfilter/nf_conntrack_sip.h>

MODULE_LICENSE("GPL");
@ -50,12 +52,16 @@ module_param(sip_direct_media, int, 0600);
MODULE_PARM_DESC(sip_direct_media, "Expect Media streams between signalling "
				   "endpoints only (default 1)");

unsigned int (*nf_nat_sip_hook)(struct sk_buff *skb,
unsigned int (*nf_nat_sip_hook)(struct sk_buff *skb, unsigned int dataoff,
				const char **dptr,
				unsigned int *datalen) __read_mostly;
EXPORT_SYMBOL_GPL(nf_nat_sip_hook);

void (*nf_nat_sip_seq_adjust_hook)(struct sk_buff *skb, s16 off) __read_mostly;
EXPORT_SYMBOL_GPL(nf_nat_sip_seq_adjust_hook);

unsigned int (*nf_nat_sip_expect_hook)(struct sk_buff *skb,
				       unsigned int dataoff,
				       const char **dptr,
				       unsigned int *datalen,
				       struct nf_conntrack_expect *exp,
@ -63,17 +69,17 @@ unsigned int (*nf_nat_sip_expect_hook)(struct sk_buff *skb,
				       unsigned int matchlen) __read_mostly;
EXPORT_SYMBOL_GPL(nf_nat_sip_expect_hook);

unsigned int (*nf_nat_sdp_addr_hook)(struct sk_buff *skb,
unsigned int (*nf_nat_sdp_addr_hook)(struct sk_buff *skb, unsigned int dataoff,
				     const char **dptr,
				     unsigned int dataoff,
				     unsigned int *datalen,
				     unsigned int sdpoff,
				     enum sdp_header_types type,
				     enum sdp_header_types term,
				     const union nf_inet_addr *addr)
				     __read_mostly;
EXPORT_SYMBOL_GPL(nf_nat_sdp_addr_hook);

unsigned int (*nf_nat_sdp_port_hook)(struct sk_buff *skb,
unsigned int (*nf_nat_sdp_port_hook)(struct sk_buff *skb, unsigned int dataoff,
				     const char **dptr,
				     unsigned int *datalen,
				     unsigned int matchoff,
@ -82,14 +88,15 @@ unsigned int (*nf_nat_sdp_port_hook)(struct sk_buff *skb,
EXPORT_SYMBOL_GPL(nf_nat_sdp_port_hook);

unsigned int (*nf_nat_sdp_session_hook)(struct sk_buff *skb,
					const char **dptr,
					unsigned int dataoff,
					const char **dptr,
					unsigned int *datalen,
					unsigned int sdpoff,
					const union nf_inet_addr *addr)
					__read_mostly;
EXPORT_SYMBOL_GPL(nf_nat_sdp_session_hook);

unsigned int (*nf_nat_sdp_media_hook)(struct sk_buff *skb,
unsigned int (*nf_nat_sdp_media_hook)(struct sk_buff *skb, unsigned int dataoff,
				      const char **dptr,
				      unsigned int *datalen,
				      struct nf_conntrack_expect *rtp_exp,
@ -236,12 +243,13 @@ int ct_sip_parse_request(const struct nf_conn *ct,
		return 0;

	/* Find SIP URI */
	limit -= strlen("sip:");
	for (; dptr < limit; dptr++) {
	for (; dptr < limit - strlen("sip:"); dptr++) {
		if (*dptr == '\r' || *dptr == '\n')
			return -1;
		if (strnicmp(dptr, "sip:", strlen("sip:")) == 0)
		if (strnicmp(dptr, "sip:", strlen("sip:")) == 0) {
			dptr += strlen("sip:");
			break;
		}
	}
	if (!skp_epaddr_len(ct, dptr, limit, &shift))
		return 0;
@ -284,7 +292,8 @@ static const struct sip_header ct_sip_hdrs[] = {
	[SIP_HDR_FROM]		= SIP_HDR("From", "f", "sip:", skp_epaddr_len),
	[SIP_HDR_TO]		= SIP_HDR("To", "t", "sip:", skp_epaddr_len),
	[SIP_HDR_CONTACT]	= SIP_HDR("Contact", "m", "sip:", skp_epaddr_len),
	[SIP_HDR_VIA]		= SIP_HDR("Via", "v", "UDP ", epaddr_len),
	[SIP_HDR_VIA_UDP]	= SIP_HDR("Via", "v", "UDP ", epaddr_len),
	[SIP_HDR_VIA_TCP]	= SIP_HDR("Via", "v", "TCP ", epaddr_len),
	[SIP_HDR_EXPIRES]	= SIP_HDR("Expires", NULL, NULL, digits_len),
	[SIP_HDR_CONTENT_LENGTH] = SIP_HDR("Content-Length", "l", NULL, digits_len),
};
@ -516,6 +525,33 @@ int ct_sip_parse_header_uri(const struct nf_conn *ct, const char *dptr,
}
EXPORT_SYMBOL_GPL(ct_sip_parse_header_uri);

static int ct_sip_parse_param(const struct nf_conn *ct, const char *dptr,
			      unsigned int dataoff, unsigned int datalen,
			      const char *name,
			      unsigned int *matchoff, unsigned int *matchlen)
{
	const char *limit = dptr + datalen;
	const char *start;
	const char *end;

	limit = ct_sip_header_search(dptr + dataoff, limit, ",", strlen(","));
	if (!limit)
		limit = dptr + datalen;

	start = ct_sip_header_search(dptr + dataoff, limit, name, strlen(name));
	if (!start)
		return 0;
	start += strlen(name);

	end = ct_sip_header_search(start, limit, ";", strlen(";"));
	if (!end)
		end = limit;

	*matchoff = start - dptr;
	*matchlen = end - start;
	return 1;
}

/* Parse address from header parameter and return address, offset and length */
int ct_sip_parse_address_param(const struct nf_conn *ct, const char *dptr,
			       unsigned int dataoff, unsigned int datalen,
@ -574,6 +610,29 @@ int ct_sip_parse_numerical_param(const struct nf_conn *ct, const char *dptr,
}
EXPORT_SYMBOL_GPL(ct_sip_parse_numerical_param);

static int ct_sip_parse_transport(struct nf_conn *ct, const char *dptr,
				  unsigned int dataoff, unsigned int datalen,
				  u8 *proto)
{
	unsigned int matchoff, matchlen;

	if (ct_sip_parse_param(ct, dptr, dataoff, datalen, "transport=",
			       &matchoff, &matchlen)) {
		if (!strnicmp(dptr + matchoff, "TCP", strlen("TCP")))
			*proto = IPPROTO_TCP;
		else if (!strnicmp(dptr + matchoff, "UDP", strlen("UDP")))
			*proto = IPPROTO_UDP;
		else
			return 0;

		if (*proto != nf_ct_protonum(ct))
			return 0;
	} else
		*proto = nf_ct_protonum(ct);

	return 1;
}

/* SDP header parsing: a SDP session description contains an ordered set of
 * headers, starting with a section containing general session parameters,
 * optionally followed by multiple media descriptions.
@ -682,7 +741,7 @@ static int ct_sip_parse_sdp_addr(const struct nf_conn *ct, const char *dptr,

static int refresh_signalling_expectation(struct nf_conn *ct,
					  union nf_inet_addr *addr,
					  __be16 port,
					  u8 proto, __be16 port,
					  unsigned int expires)
{
	struct nf_conn_help *help = nfct_help(ct);
@ -694,6 +753,7 @@ static int refresh_signalling_expectation(struct nf_conn *ct,
	hlist_for_each_entry_safe(exp, n, next, &help->expectations, lnode) {
		if (exp->class != SIP_EXPECT_SIGNALLING ||
		    !nf_inet_addr_cmp(&exp->tuple.dst.u3, addr) ||
		    exp->tuple.dst.protonum != proto ||
		    exp->tuple.dst.u.udp.port != port)
			continue;
		if (!del_timer(&exp->timeout))
@ -728,7 +788,7 @@ static void flush_expectations(struct nf_conn *ct, bool media)
	spin_unlock_bh(&nf_conntrack_lock);
}

static int set_expected_rtp_rtcp(struct sk_buff *skb,
static int set_expected_rtp_rtcp(struct sk_buff *skb, unsigned int dataoff,
				 const char **dptr, unsigned int *datalen,
				 union nf_inet_addr *daddr, __be16 port,
				 enum sip_expectation_classes class,
@ -777,7 +837,7 @@ static int set_expected_rtp_rtcp(struct sk_buff *skb,

	rcu_read_lock();
	do {
		exp = __nf_ct_expect_find(net, &tuple);
		exp = __nf_ct_expect_find(net, nf_ct_zone(ct), &tuple);

		if (!exp || exp->master == ct ||
		    nfct_help(exp->master)->helper != nfct_help(ct)->helper ||
@ -805,7 +865,7 @@ static int set_expected_rtp_rtcp(struct sk_buff *skb,
	if (direct_rtp) {
		nf_nat_sdp_port = rcu_dereference(nf_nat_sdp_port_hook);
		if (nf_nat_sdp_port &&
		    !nf_nat_sdp_port(skb, dptr, datalen,
		    !nf_nat_sdp_port(skb, dataoff, dptr, datalen,
				     mediaoff, medialen, ntohs(rtp_port)))
			goto err1;
	}
@ -827,7 +887,8 @@ static int set_expected_rtp_rtcp(struct sk_buff *skb,

	nf_nat_sdp_media = rcu_dereference(nf_nat_sdp_media_hook);
	if (nf_nat_sdp_media && ct->status & IPS_NAT_MASK && !direct_rtp)
		ret = nf_nat_sdp_media(skb, dptr, datalen, rtp_exp, rtcp_exp,
		ret = nf_nat_sdp_media(skb, dataoff, dptr, datalen,
				       rtp_exp, rtcp_exp,
				       mediaoff, medialen, daddr);
	else {
		if (nf_ct_expect_related(rtp_exp) == 0) {
@ -847,6 +908,7 @@ err1:
static const struct sdp_media_type sdp_media_types[] = {
	SDP_MEDIA_TYPE("audio ", SIP_EXPECT_AUDIO),
	SDP_MEDIA_TYPE("video ", SIP_EXPECT_VIDEO),
	SDP_MEDIA_TYPE("image ", SIP_EXPECT_IMAGE),
};

static const struct sdp_media_type *sdp_media_type(const char *dptr,
@ -866,13 +928,12 @@ static const struct sdp_media_type *sdp_media_type(const char *dptr,
	return NULL;
}

static int process_sdp(struct sk_buff *skb,
static int process_sdp(struct sk_buff *skb, unsigned int dataoff,
		       const char **dptr, unsigned int *datalen,
		       unsigned int cseq)
{
	enum ip_conntrack_info ctinfo;
	struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
	struct nf_conn_help *help = nfct_help(ct);
	unsigned int matchoff, matchlen;
	unsigned int mediaoff, medialen;
	unsigned int sdpoff;
@ -941,7 +1002,7 @@ static int process_sdp(struct sk_buff *skb,
		else
			return NF_DROP;

		ret = set_expected_rtp_rtcp(skb, dptr, datalen,
		ret = set_expected_rtp_rtcp(skb, dataoff, dptr, datalen,
					    &rtp_addr, htons(port), t->class,
					    mediaoff, medialen);
		if (ret != NF_ACCEPT)
@ -949,8 +1010,9 @@ static int process_sdp(struct sk_buff *skb,

		/* Update media connection address if present */
		if (maddr_len && nf_nat_sdp_addr && ct->status & IPS_NAT_MASK) {
			ret = nf_nat_sdp_addr(skb, dptr, mediaoff, datalen,
					      c_hdr, SDP_HDR_MEDIA, &rtp_addr);
			ret = nf_nat_sdp_addr(skb, dataoff, dptr, datalen,
					      mediaoff, c_hdr, SDP_HDR_MEDIA,
					      &rtp_addr);
			if (ret != NF_ACCEPT)
				return ret;
		}
@ -960,14 +1022,12 @@ static int process_sdp(struct sk_buff *skb,
	/* Update session connection and owner addresses */
	nf_nat_sdp_session = rcu_dereference(nf_nat_sdp_session_hook);
	if (nf_nat_sdp_session && ct->status & IPS_NAT_MASK)
		ret = nf_nat_sdp_session(skb, dptr, sdpoff, datalen, &rtp_addr);

	if (ret == NF_ACCEPT && i > 0)
		help->help.ct_sip_info.invite_cseq = cseq;
		ret = nf_nat_sdp_session(skb, dataoff, dptr, datalen, sdpoff,
					 &rtp_addr);

	return ret;
}
static int process_invite_response(struct sk_buff *skb,
static int process_invite_response(struct sk_buff *skb, unsigned int dataoff,
				   const char **dptr, unsigned int *datalen,
				   unsigned int cseq, unsigned int code)
{
@ -977,13 +1037,13 @@ static int process_invite_response(struct sk_buff *skb,

	if ((code >= 100 && code <= 199) ||
	    (code >= 200 && code <= 299))
		return process_sdp(skb, dptr, datalen, cseq);
		return process_sdp(skb, dataoff, dptr, datalen, cseq);
	else if (help->help.ct_sip_info.invite_cseq == cseq)
		flush_expectations(ct, true);
	return NF_ACCEPT;
}

static int process_update_response(struct sk_buff *skb,
static int process_update_response(struct sk_buff *skb, unsigned int dataoff,
				   const char **dptr, unsigned int *datalen,
				   unsigned int cseq, unsigned int code)
{
@ -993,13 +1053,13 @@ static int process_update_response(struct sk_buff *skb,

	if ((code >= 100 && code <= 199) ||
	    (code >= 200 && code <= 299))
		return process_sdp(skb, dptr, datalen, cseq);
		return process_sdp(skb, dataoff, dptr, datalen, cseq);
	else if (help->help.ct_sip_info.invite_cseq == cseq)
		flush_expectations(ct, true);
	return NF_ACCEPT;
}

static int process_prack_response(struct sk_buff *skb,
static int process_prack_response(struct sk_buff *skb, unsigned int dataoff,
				  const char **dptr, unsigned int *datalen,
				  unsigned int cseq, unsigned int code)
{
@ -1009,13 +1069,29 @@ static int process_prack_response(struct sk_buff *skb,

	if ((code >= 100 && code <= 199) ||
	    (code >= 200 && code <= 299))
		return process_sdp(skb, dptr, datalen, cseq);
		return process_sdp(skb, dataoff, dptr, datalen, cseq);
	else if (help->help.ct_sip_info.invite_cseq == cseq)
		flush_expectations(ct, true);
	return NF_ACCEPT;
}

static int process_bye_request(struct sk_buff *skb,
static int process_invite_request(struct sk_buff *skb, unsigned int dataoff,
				  const char **dptr, unsigned int *datalen,
				  unsigned int cseq)
{
	enum ip_conntrack_info ctinfo;
	struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
	struct nf_conn_help *help = nfct_help(ct);
	unsigned int ret;

	flush_expectations(ct, true);
	ret = process_sdp(skb, dataoff, dptr, datalen, cseq);
	if (ret == NF_ACCEPT)
		help->help.ct_sip_info.invite_cseq = cseq;
	return ret;
}

static int process_bye_request(struct sk_buff *skb, unsigned int dataoff,
			       const char **dptr, unsigned int *datalen,
			       unsigned int cseq)
{
@ -1030,7 +1106,7 @@ static int process_bye_request(struct sk_buff *skb,
 * signalling connections. The expectation is marked inactive and is activated
 * when receiving a response indicating success from the registrar.
 */
static int process_register_request(struct sk_buff *skb,
static int process_register_request(struct sk_buff *skb, unsigned int dataoff,
				    const char **dptr, unsigned int *datalen,
				    unsigned int cseq)
{
@ -1042,6 +1118,7 @@ static int process_register_request(struct sk_buff *skb,
	struct nf_conntrack_expect *exp;
	union nf_inet_addr *saddr, daddr;
	__be16 port;
	u8 proto;
	unsigned int expires = 0;
	int ret;
	typeof(nf_nat_sip_expect_hook) nf_nat_sip_expect;
@ -1074,6 +1151,10 @@ static int process_register_request(struct sk_buff *skb,
	if (!nf_inet_addr_cmp(&ct->tuplehash[dir].tuple.src.u3, &daddr))
		return NF_ACCEPT;

	if (ct_sip_parse_transport(ct, *dptr, matchoff + matchlen, *datalen,
				   &proto) == 0)
		return NF_ACCEPT;

	if (ct_sip_parse_numerical_param(ct, *dptr,
					 matchoff + matchlen, *datalen,
					 "expires=", NULL, NULL, &expires) < 0)
@ -1093,14 +1174,14 @@ static int process_register_request(struct sk_buff *skb,
		saddr = &ct->tuplehash[!dir].tuple.src.u3;

	nf_ct_expect_init(exp, SIP_EXPECT_SIGNALLING, nf_ct_l3num(ct),
			  saddr, &daddr, IPPROTO_UDP, NULL, &port);
			  saddr, &daddr, proto, NULL, &port);
	exp->timeout.expires = sip_timeout * HZ;
	exp->helper = nfct_help(ct)->helper;
	exp->flags = NF_CT_EXPECT_PERMANENT | NF_CT_EXPECT_INACTIVE;

	nf_nat_sip_expect = rcu_dereference(nf_nat_sip_expect_hook);
	if (nf_nat_sip_expect && ct->status & IPS_NAT_MASK)
		ret = nf_nat_sip_expect(skb, dptr, datalen, exp,
		ret = nf_nat_sip_expect(skb, dataoff, dptr, datalen, exp,
					matchoff, matchlen);
	else {
		if (nf_ct_expect_related(exp) != 0)
@ -1116,7 +1197,7 @@ store_cseq:
	return ret;
}

static int process_register_response(struct sk_buff *skb,
static int process_register_response(struct sk_buff *skb, unsigned int dataoff,
				     const char **dptr, unsigned int *datalen,
				     unsigned int cseq, unsigned int code)
{
@ -1126,7 +1207,8 @@ static int process_register_response(struct sk_buff *skb,
	enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
	union nf_inet_addr addr;
	__be16 port;
	unsigned int matchoff, matchlen, dataoff = 0;
	u8 proto;
	unsigned int matchoff, matchlen, coff = 0;
	unsigned int expires = 0;
	int in_contact = 0, ret;

@ -1153,7 +1235,7 @@ static int process_register_response(struct sk_buff *skb,
	while (1) {
		unsigned int c_expires = expires;

		ret = ct_sip_parse_header_uri(ct, *dptr, &dataoff, *datalen,
		ret = ct_sip_parse_header_uri(ct, *dptr, &coff, *datalen,
					      SIP_HDR_CONTACT, &in_contact,
					      &matchoff, &matchlen,
					      &addr, &port);
@ -1166,6 +1248,10 @@ static int process_register_response(struct sk_buff *skb,
		if (!nf_inet_addr_cmp(&ct->tuplehash[dir].tuple.dst.u3, &addr))
			continue;

		if (ct_sip_parse_transport(ct, *dptr, matchoff + matchlen,
					   *datalen, &proto) == 0)
			continue;

		ret = ct_sip_parse_numerical_param(ct, *dptr,
						   matchoff + matchlen,
						   *datalen, "expires=",
@ -1174,7 +1260,8 @@ static int process_register_response(struct sk_buff *skb,
			return NF_DROP;
		if (c_expires == 0)
			break;
		if (refresh_signalling_expectation(ct, &addr, port, c_expires))
		if (refresh_signalling_expectation(ct, &addr, proto, port,
						   c_expires))
			return NF_ACCEPT;
	}

@ -1184,7 +1271,7 @@ flush:
}

static const struct sip_handler sip_handlers[] = {
	SIP_HANDLER("INVITE", process_sdp, process_invite_response),
	SIP_HANDLER("INVITE", process_invite_request, process_invite_response),
	SIP_HANDLER("UPDATE", process_sdp, process_update_response),
	SIP_HANDLER("ACK", process_sdp, NULL),
	SIP_HANDLER("PRACK", process_sdp, process_prack_response),
@ -1192,13 +1279,13 @@ static const struct sip_handler sip_handlers[] = {
	SIP_HANDLER("REGISTER", process_register_request, process_register_response),
};

static int process_sip_response(struct sk_buff *skb,
static int process_sip_response(struct sk_buff *skb, unsigned int dataoff,
				const char **dptr, unsigned int *datalen)
{
	enum ip_conntrack_info ctinfo;
	struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
	unsigned int matchoff, matchlen;
	unsigned int code, cseq, dataoff, i;
	unsigned int matchoff, matchlen, matchend;
	unsigned int code, cseq, i;

	if (*datalen < strlen("SIP/2.0 200"))
		return NF_ACCEPT;
@ -1212,7 +1299,7 @@ static int process_sip_response(struct sk_buff *skb,
	cseq = simple_strtoul(*dptr + matchoff, NULL, 10);
	if (!cseq)
		return NF_DROP;
	dataoff = matchoff + matchlen + 1;
	matchend = matchoff + matchlen + 1;

	for (i = 0; i < ARRAY_SIZE(sip_handlers); i++) {
		const struct sip_handler *handler;

		handler = &sip_handlers[i];
		if (handler->response == NULL)
			continue;
		if (*datalen < dataoff + handler->len ||
		    strnicmp(*dptr + dataoff, handler->method, handler->len))
		if (*datalen < matchend + handler->len ||
		    strnicmp(*dptr + matchend, handler->method, handler->len))
			continue;
		return handler->response(skb, dptr, datalen, cseq, code);
		return handler->response(skb, dataoff, dptr, datalen,
					 cseq, code);
	}
	return NF_ACCEPT;
}

static int process_sip_request(struct sk_buff *skb,
static int process_sip_request(struct sk_buff *skb, unsigned int dataoff,
			       const char **dptr, unsigned int *datalen)
{
	enum ip_conntrack_info ctinfo;
||||
@ -1253,20 +1341,112 @@ static int process_sip_request(struct sk_buff *skb,
|
||||
if (!cseq)
|
||||
return NF_DROP;
|
||||
|
||||
return handler->request(skb, dptr, datalen, cseq);
|
||||
return handler->request(skb, dataoff, dptr, datalen, cseq);
|
||||
}
|
||||
return NF_ACCEPT;
|
||||
}
|
||||
|
||||
static int sip_help(struct sk_buff *skb,
|
||||
unsigned int protoff,
|
||||
struct nf_conn *ct,
|
||||
enum ip_conntrack_info ctinfo)
|
||||
static int process_sip_msg(struct sk_buff *skb, struct nf_conn *ct,
|
||||
unsigned int dataoff, const char **dptr,
|
||||
unsigned int *datalen)
|
||||
{
|
||||
typeof(nf_nat_sip_hook) nf_nat_sip;
|
||||
int ret;
|
||||
|
||||
if (strnicmp(*dptr, "SIP/2.0 ", strlen("SIP/2.0 ")) != 0)
|
||||
ret = process_sip_request(skb, dataoff, dptr, datalen);
|
||||
else
|
||||
ret = process_sip_response(skb, dataoff, dptr, datalen);
|
||||
|
||||
if (ret == NF_ACCEPT && ct->status & IPS_NAT_MASK) {
|
||||
nf_nat_sip = rcu_dereference(nf_nat_sip_hook);
|
||||
if (nf_nat_sip && !nf_nat_sip(skb, dataoff, dptr, datalen))
|
||||
ret = NF_DROP;
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int sip_help_tcp(struct sk_buff *skb, unsigned int protoff,
struct nf_conn *ct, enum ip_conntrack_info ctinfo)
{
struct tcphdr *th, _tcph;
unsigned int dataoff, datalen;
unsigned int matchoff, matchlen, clen;
unsigned int msglen, origlen;
const char *dptr, *end;
s16 diff, tdiff = 0;
int ret;
typeof(nf_nat_sip_seq_adjust_hook) nf_nat_sip_seq_adjust;

if (ctinfo != IP_CT_ESTABLISHED &&
ctinfo != IP_CT_ESTABLISHED + IP_CT_IS_REPLY)
return NF_ACCEPT;

/* No Data ? */
th = skb_header_pointer(skb, protoff, sizeof(_tcph), &_tcph);
if (th == NULL)
return NF_ACCEPT;
dataoff = protoff + th->doff * 4;
if (dataoff >= skb->len)
return NF_ACCEPT;

nf_ct_refresh(ct, skb, sip_timeout * HZ);

if (skb_is_nonlinear(skb)) {
pr_debug("Copy of skbuff not supported yet.\n");
return NF_ACCEPT;
}

dptr = skb->data + dataoff;
datalen = skb->len - dataoff;
if (datalen < strlen("SIP/2.0 200"))
return NF_ACCEPT;

while (1) {
if (ct_sip_get_header(ct, dptr, 0, datalen,
SIP_HDR_CONTENT_LENGTH,
&matchoff, &matchlen) <= 0)
break;

clen = simple_strtoul(dptr + matchoff, (char **)&end, 10);
if (dptr + matchoff == end)
break;

if (end + strlen("\r\n\r\n") > dptr + datalen)
break;
if (end[0] != '\r' || end[1] != '\n' ||
end[2] != '\r' || end[3] != '\n')
break;
end += strlen("\r\n\r\n") + clen;

msglen = origlen = end - dptr;

ret = process_sip_msg(skb, ct, dataoff, &dptr, &msglen);
if (ret != NF_ACCEPT)
break;
diff = msglen - origlen;
tdiff += diff;

dataoff += msglen;
dptr += msglen;
datalen = datalen + diff - msglen;
}

if (ret == NF_ACCEPT && ct->status & IPS_NAT_MASK) {
nf_nat_sip_seq_adjust = rcu_dereference(nf_nat_sip_seq_adjust_hook);
if (nf_nat_sip_seq_adjust)
nf_nat_sip_seq_adjust(skb, tdiff);
}

return ret;
}
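Unlike the UDP path, a single TCP segment may carry several SIP messages back to back, so sip_help_tcp() above walks the linear buffer and uses the Content-Length header plus the "\r\n\r\n" that closes the header block to find each message boundary before handing it to process_sip_msg(). The standalone userspace sketch below illustrates the same framing idea; it is not the kernel code (the helper uses ct_sip_get_header() and adjusts offsets for NAT), and the naive substring search for the header is an assumption made to keep the example short:

/* Illustrative only: split back-to-back SIP messages in one buffer by
 * Content-Length, roughly mirroring the framing loop in sip_help_tcp(). */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <strings.h>

/* Return a pointer just past one complete SIP message, or NULL. */
static const char *sip_msg_end(const char *dptr, size_t datalen)
{
	const char *limit = dptr + datalen;
	const char *p, *cl = NULL, *end;
	unsigned long clen;

	for (p = dptr; p + strlen("Content-Length:") <= limit; p++) {
		if (strncasecmp(p, "Content-Length:",
				strlen("Content-Length:")) == 0) {
			cl = p + strlen("Content-Length:");
			break;
		}
	}
	if (cl == NULL)
		return NULL;

	clen = strtoul(cl, (char **)&end, 10);
	if (end == cl)
		return NULL;
	/* The header block must end right here in "\r\n\r\n", then the body. */
	if (end + 4 > limit || memcmp(end, "\r\n\r\n", 4) != 0)
		return NULL;
	end += 4 + clen;
	return end <= limit ? end : NULL;
}

int main(void)
{
	const char buf[] =
		"SIP/2.0 200 OK\r\nCSeq: 1 INVITE\r\nContent-Length: 4\r\n\r\nabcd"
		"REGISTER sip:example.org SIP/2.0\r\nCSeq: 2 REGISTER\r\n"
		"Content-Length: 0\r\n\r\n";
	const char *dptr = buf;
	size_t datalen = sizeof(buf) - 1;
	const char *end;

	while ((end = sip_msg_end(dptr, datalen)) != NULL) {
		printf("message of %zu bytes\n", (size_t)(end - dptr));
		datalen -= (size_t)(end - dptr);
		dptr = end;
	}
	return 0;
}

Run against the concatenated buffer it reports two messages, mirroring how the kernel loop advances dptr and datalen after each processed message.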
static int sip_help_udp(struct sk_buff *skb, unsigned int protoff,
struct nf_conn *ct, enum ip_conntrack_info ctinfo)
{
unsigned int dataoff, datalen;
const char *dptr;
int ret;
typeof(nf_nat_sip_hook) nf_nat_sip;

/* No Data ? */
dataoff = protoff + sizeof(struct udphdr);
@ -1275,47 +1455,43 @@ static int sip_help(struct sk_buff *skb,

nf_ct_refresh(ct, skb, sip_timeout * HZ);

if (!skb_is_nonlinear(skb))
dptr = skb->data + dataoff;
else {
if (skb_is_nonlinear(skb)) {
pr_debug("Copy of skbuff not supported yet.\n");
return NF_ACCEPT;
}

dptr = skb->data + dataoff;
datalen = skb->len - dataoff;
if (datalen < strlen("SIP/2.0 200"))
return NF_ACCEPT;

if (strnicmp(dptr, "SIP/2.0 ", strlen("SIP/2.0 ")) != 0)
ret = process_sip_request(skb, &dptr, &datalen);
else
ret = process_sip_response(skb, &dptr, &datalen);

if (ret == NF_ACCEPT && ct->status & IPS_NAT_MASK) {
nf_nat_sip = rcu_dereference(nf_nat_sip_hook);
if (nf_nat_sip && !nf_nat_sip(skb, &dptr, &datalen))
ret = NF_DROP;
}

return ret;
return process_sip_msg(skb, ct, dataoff, &dptr, &datalen);
}

static struct nf_conntrack_helper sip[MAX_PORTS][2] __read_mostly;
static char sip_names[MAX_PORTS][2][sizeof("sip-65535")] __read_mostly;
static struct nf_conntrack_helper sip[MAX_PORTS][4] __read_mostly;
static char sip_names[MAX_PORTS][4][sizeof("sip-65535")] __read_mostly;

static const struct nf_conntrack_expect_policy sip_exp_policy[SIP_EXPECT_MAX + 1] = {
[SIP_EXPECT_SIGNALLING] = {
.name = "signalling",
.max_expected = 1,
.timeout = 3 * 60,
},
[SIP_EXPECT_AUDIO] = {
.name = "audio",
.max_expected = 2 * IP_CT_DIR_MAX,
.timeout = 3 * 60,
},
[SIP_EXPECT_VIDEO] = {
.name = "video",
.max_expected = 2 * IP_CT_DIR_MAX,
.timeout = 3 * 60,
},
[SIP_EXPECT_IMAGE] = {
.name = "image",
.max_expected = IP_CT_DIR_MAX,
.timeout = 3 * 60,
},
};

static void nf_conntrack_sip_fini(void)
@ -1323,7 +1499,7 @@ static void nf_conntrack_sip_fini(void)
int i, j;

for (i = 0; i < ports_c; i++) {
for (j = 0; j < 2; j++) {
for (j = 0; j < ARRAY_SIZE(sip[i]); j++) {
if (sip[i][j].me == NULL)
continue;
nf_conntrack_helper_unregister(&sip[i][j]);
@ -1343,14 +1519,24 @@ static int __init nf_conntrack_sip_init(void)
memset(&sip[i], 0, sizeof(sip[i]));

sip[i][0].tuple.src.l3num = AF_INET;
sip[i][1].tuple.src.l3num = AF_INET6;
for (j = 0; j < 2; j++) {
sip[i][j].tuple.dst.protonum = IPPROTO_UDP;
sip[i][0].tuple.dst.protonum = IPPROTO_UDP;
sip[i][0].help = sip_help_udp;
sip[i][1].tuple.src.l3num = AF_INET;
sip[i][1].tuple.dst.protonum = IPPROTO_TCP;
sip[i][1].help = sip_help_tcp;

sip[i][2].tuple.src.l3num = AF_INET6;
sip[i][2].tuple.dst.protonum = IPPROTO_UDP;
sip[i][2].help = sip_help_udp;
sip[i][3].tuple.src.l3num = AF_INET6;
sip[i][3].tuple.dst.protonum = IPPROTO_TCP;
sip[i][3].help = sip_help_tcp;

for (j = 0; j < ARRAY_SIZE(sip[i]); j++) {
sip[i][j].tuple.src.u.udp.port = htons(ports[i]);
sip[i][j].expect_policy = sip_exp_policy;
sip[i][j].expect_class_max = SIP_EXPECT_MAX;
sip[i][j].me = THIS_MODULE;
sip[i][j].help = sip_help;

tmpname = &sip_names[i][j][0];
if (ports[i] == SIP_PORT)
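With the TCP helper added, each configured port now gets four conntrack helper slots instead of two, covering UDP and TCP for both IPv4 and IPv6. A small standalone sketch of that layout (illustrative plain userspace C, not the kernel structures):

/* Illustrative only: the (address family, transport, handler) layout of
 * the four SIP helper slots registered per port by the hunk above. */
#include <stdio.h>

int main(void)
{
	static const struct {
		const char *l3num;
		const char *protonum;
		const char *help;
	} slot[4] = {
		{ "AF_INET",  "IPPROTO_UDP", "sip_help_udp" },
		{ "AF_INET",  "IPPROTO_TCP", "sip_help_tcp" },
		{ "AF_INET6", "IPPROTO_UDP", "sip_help_udp" },
		{ "AF_INET6", "IPPROTO_TCP", "sip_help_tcp" },
	};
	int j;

	for (j = 0; j < 4; j++)
		printf("sip[i][%d]: %-8s %-11s -> %s\n",
		       j, slot[j].l3num, slot[j].protonum, slot[j].help);
	return 0;
}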
@ -26,6 +26,7 @@
#include <net/netfilter/nf_conntrack_expect.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_acct.h>
#include <net/netfilter/nf_conntrack_zones.h>

MODULE_LICENSE("GPL");

@ -171,6 +172,11 @@ static int ct_seq_show(struct seq_file *s, void *v)
goto release;
#endif

#ifdef CONFIG_NF_CONNTRACK_ZONES
if (seq_printf(s, "zone=%u ", nf_ct_zone(ct)))
goto release;
#endif

if (seq_printf(s, "use=%u\n", atomic_read(&ct->ct_general.use)))
goto release;
@ -40,7 +40,6 @@ MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_NETFILTER);

static char __initdata nfversion[] = "0.30";

static struct sock *nfnl = NULL;
static const struct nfnetlink_subsystem *subsys_table[NFNL_SUBSYS_COUNT];
static DEFINE_MUTEX(nfnl_mutex);

@ -101,34 +100,35 @@ nfnetlink_find_client(u_int16_t type, const struct nfnetlink_subsystem *ss)
return &ss->cb[cb_id];
}

int nfnetlink_has_listeners(unsigned int group)
int nfnetlink_has_listeners(struct net *net, unsigned int group)
{
return netlink_has_listeners(nfnl, group);
return netlink_has_listeners(net->nfnl, group);
}
EXPORT_SYMBOL_GPL(nfnetlink_has_listeners);

int nfnetlink_send(struct sk_buff *skb, u32 pid,
int nfnetlink_send(struct sk_buff *skb, struct net *net, u32 pid,
unsigned group, int echo, gfp_t flags)
{
return nlmsg_notify(nfnl, skb, pid, group, echo, flags);
return nlmsg_notify(net->nfnl, skb, pid, group, echo, flags);
}
EXPORT_SYMBOL_GPL(nfnetlink_send);

void nfnetlink_set_err(u32 pid, u32 group, int error)
void nfnetlink_set_err(struct net *net, u32 pid, u32 group, int error)
{
netlink_set_err(nfnl, pid, group, error);
netlink_set_err(net->nfnl, pid, group, error);
}
EXPORT_SYMBOL_GPL(nfnetlink_set_err);

int nfnetlink_unicast(struct sk_buff *skb, u_int32_t pid, int flags)
int nfnetlink_unicast(struct sk_buff *skb, struct net *net, u_int32_t pid, int flags)
{
return netlink_unicast(nfnl, skb, pid, flags);
return netlink_unicast(net->nfnl, skb, pid, flags);
}
EXPORT_SYMBOL_GPL(nfnetlink_unicast);

/* Process one complete nfnetlink message. */
static int nfnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
{
struct net *net = sock_net(skb->sk);
const struct nfnl_callback *nc;
const struct nfnetlink_subsystem *ss;
int type, err;
@ -170,7 +170,7 @@ replay:
if (err < 0)
return err;

err = nc->call(nfnl, skb, nlh, (const struct nlattr **)cda);
err = nc->call(net->nfnl, skb, nlh, (const struct nlattr **)cda);
if (err == -EAGAIN)
goto replay;
return err;
@ -184,26 +184,45 @@ static void nfnetlink_rcv(struct sk_buff *skb)
nfnl_unlock();
}

static void __exit nfnetlink_exit(void)
static int __net_init nfnetlink_net_init(struct net *net)
{
printk("Removing netfilter NETLINK layer.\n");
netlink_kernel_release(nfnl);
return;
struct sock *nfnl;

nfnl = netlink_kernel_create(net, NETLINK_NETFILTER, NFNLGRP_MAX,
nfnetlink_rcv, NULL, THIS_MODULE);
if (!nfnl)
return -ENOMEM;
net->nfnl_stash = nfnl;
rcu_assign_pointer(net->nfnl, nfnl);
return 0;
}

static void __net_exit nfnetlink_net_exit_batch(struct list_head *net_exit_list)
{
struct net *net;

list_for_each_entry(net, net_exit_list, exit_list)
rcu_assign_pointer(net->nfnl, NULL);
synchronize_net();
list_for_each_entry(net, net_exit_list, exit_list)
netlink_kernel_release(net->nfnl_stash);
}

static struct pernet_operations nfnetlink_net_ops = {
.init = nfnetlink_net_init,
.exit_batch = nfnetlink_net_exit_batch,
};

static int __init nfnetlink_init(void)
{
printk("Netfilter messages via NETLINK v%s.\n", nfversion);

nfnl = netlink_kernel_create(&init_net, NETLINK_NETFILTER, NFNLGRP_MAX,
nfnetlink_rcv, NULL, THIS_MODULE);
if (!nfnl) {
printk(KERN_ERR "cannot initialize nfnetlink!\n");
return -ENOMEM;
}

return 0;
return register_pernet_subsys(&nfnetlink_net_ops);
}

static void __exit nfnetlink_exit(void)
{
printk("Removing netfilter NETLINK layer.\n");
unregister_pernet_subsys(&nfnetlink_net_ops);
}
module_init(nfnetlink_init);
module_exit(nfnetlink_exit);
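The hunks above convert nfnetlink to per-network-namespace sockets: the socket moves into struct net, the exported helpers take a struct net argument, and socket creation and teardown are driven by pernet_operations instead of module init/exit. As a rough, self-contained illustration of that registration pattern (module and symbol names below are made up for the example, not taken from this commit):

/* Hypothetical minimal per-netns subsystem, illustrating the
 * register_pernet_subsys() pattern adopted by nfnetlink above. */
#include <linux/module.h>
#include <linux/kernel.h>
#include <net/net_namespace.h>

static int __net_init example_net_init(struct net *net)
{
	/* Allocate or set up per-namespace state here. */
	pr_info("example: init for netns %p\n", net);
	return 0;
}

static void __net_exit example_net_exit(struct net *net)
{
	/* Tear down per-namespace state here. */
	pr_info("example: exit for netns %p\n", net);
}

static struct pernet_operations example_net_ops = {
	.init = example_net_init,
	.exit = example_net_exit,
};

static int __init example_init(void)
{
	return register_pernet_subsys(&example_net_ops);
}

static void __exit example_exit(void)
{
	unregister_pernet_subsys(&example_net_ops);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");

The commit itself uses .exit_batch rather than a plain .exit callback so that the single synchronize_net() covers every namespace being torn down in one RCU grace period.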

@ -323,7 +323,8 @@ __nfulnl_send(struct nfulnl_instance *inst)
NLMSG_DONE,
sizeof(struct nfgenmsg));

status = nfnetlink_unicast(inst->skb, inst->peer_pid, MSG_DONTWAIT);
status = nfnetlink_unicast(inst->skb, &init_net, inst->peer_pid,
MSG_DONTWAIT);

inst->qlen = 0;
inst->skb = NULL;

@ -112,7 +112,6 @@ instance_create(u_int16_t queue_num, int pid)
inst->copy_mode = NFQNL_COPY_NONE;
spin_lock_init(&inst->lock);
INIT_LIST_HEAD(&inst->queue_list);
INIT_RCU_HEAD(&inst->rcu);

if (!try_module_get(THIS_MODULE)) {
err = -EAGAIN;
@ -414,13 +413,13 @@ nfqnl_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum)
queue->queue_dropped++;
if (net_ratelimit())
printk(KERN_WARNING "nf_queue: full at %d entries, "
"dropping packets(s). Dropped: %d\n",
queue->queue_total, queue->queue_dropped);
"dropping packets(s).\n",
queue->queue_total);
goto err_out_free_nskb;
}

/* nfnetlink_unicast will either free the nskb or add it to a socket */
err = nfnetlink_unicast(nskb, queue->peer_pid, MSG_DONTWAIT);
err = nfnetlink_unicast(nskb, &init_net, queue->peer_pid, MSG_DONTWAIT);
if (err < 0) {
queue->queue_user_dropped++;
goto err_out_unlock;
Some files were not shown because too many files have changed in this diff.