Merge master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6

* master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6: (23 commits)
  [SCTP]: Fix compiler warning.
  [IP] TUNNEL: Fix to be built with user application.
  [IPV6]: Fixed the size of the netlink message notified by inet6_rt_notify().
  [TCP]: rare bad TCP checksum with 2.6.19
  [NET]: Process include/linux/if_{addr,link}.h with unifdef
  [NETFILTER]: Fix iptables ABI breakage on (at least) CRIS
  [IRDA] vlsi_ir.{h,c}: remove kernel 2.4 code
  [TCP]: skb is unexpectedly freed.
  [IPSEC]: Policy list disorder
  [IrDA]: Removed incorrect IRDA_ASSERT()
  [IrDA]: irda-usb TX path optimization (was Re: IrDA spams logfiles - since 2.6.19)
  [X.25]: Add missing sock_put in x25_receive_data
  [SCTP]: Fix SACK sequence during shutdown
  [SCTP]: Correctly handle unexpected INIT-ACK chunk.
  [SCTP]: Verify some mandatory parameters.
  [SCTP]: Set correct error cause value for missing parameters
  [NETFILTER]: fix xt_state compile failure
  [NETFILTER]: ctnetlink: fix leak in ctnetlink_create_conntrack error path
  [SELINUX]: increment flow cache genid
  [IPV6] MCAST: Fix joining all-node multicast group on device initialization.
  ...
Linus Torvalds 2007-01-24 07:45:35 -08:00
commit 6f3776c9cd
25 changed files with 104 additions and 133 deletions

@ -441,25 +441,13 @@ static int irda_usb_hard_xmit(struct sk_buff *skb, struct net_device *netdev)
goto drop;
}
/* Make sure there is room for IrDA-USB header. The actual
* allocation will be done lower in skb_push().
* Also, we don't use directly skb_cow(), because it require
* headroom >= 16, which force unnecessary copies - Jean II */
if (skb_headroom(skb) < self->header_length) {
IRDA_DEBUG(0, "%s(), Insuficient skb headroom.\n", __FUNCTION__);
if (skb_cow(skb, self->header_length)) {
IRDA_WARNING("%s(), failed skb_cow() !!!\n", __FUNCTION__);
goto drop;
}
}
memcpy(self->tx_buff + self->header_length, skb->data, skb->len);
/* Change setting for next frame */
if (self->capability & IUC_STIR421X) {
__u8 turnaround_time;
__u8* frame;
__u8* frame = self->tx_buff;
turnaround_time = get_turnaround_time( skb );
frame= skb_push(skb, self->header_length);
irda_usb_build_header(self, frame, 0);
frame[2] = turnaround_time;
if ((skb->len != 0) &&
@ -472,17 +460,17 @@ static int irda_usb_hard_xmit(struct sk_buff *skb, struct net_device *netdev)
frame[1] = 0;
}
} else {
irda_usb_build_header(self, skb_push(skb, self->header_length), 0);
irda_usb_build_header(self, self->tx_buff, 0);
}
/* FIXME: Make macro out of this one */
((struct irda_skb_cb *)skb->cb)->context = self;
usb_fill_bulk_urb(urb, self->usbdev,
usb_fill_bulk_urb(urb, self->usbdev,
usb_sndbulkpipe(self->usbdev, self->bulk_out_ep),
skb->data, IRDA_SKB_MAX_MTU,
self->tx_buff, skb->len + self->header_length,
write_bulk_callback, skb);
urb->transfer_buffer_length = skb->len;
/* This flag (URB_ZERO_PACKET) indicates that what we send is not
* a continuous stream of data but separate packets.
* In this case, the USB layer will insert an empty USB frame (TD)
@ -1455,6 +1443,9 @@ static inline void irda_usb_close(struct irda_usb_cb *self)
/* Remove the speed buffer */
kfree(self->speed_buff);
self->speed_buff = NULL;
kfree(self->tx_buff);
self->tx_buff = NULL;
}
/********************** USB CONFIG SUBROUTINES **********************/
@ -1524,8 +1515,6 @@ static inline int irda_usb_parse_endpoints(struct irda_usb_cb *self, struct usb_
IRDA_DEBUG(0, "%s(), And our endpoints are : in=%02X, out=%02X (%d), int=%02X\n",
__FUNCTION__, self->bulk_in_ep, self->bulk_out_ep, self->bulk_out_mtu, self->bulk_int_ep);
/* Should be 8, 16, 32 or 64 bytes */
IRDA_ASSERT(self->bulk_out_mtu == 64, ;);
return((self->bulk_in_ep != 0) && (self->bulk_out_ep != 0));
}
@ -1753,9 +1742,14 @@ static int irda_usb_probe(struct usb_interface *intf,
memset(self->speed_buff, 0, IRDA_USB_SPEED_MTU);
self->tx_buff = kzalloc(IRDA_SKB_MAX_MTU + self->header_length,
GFP_KERNEL);
if (self->tx_buff == NULL)
goto err_out_4;
ret = irda_usb_open(self);
if (ret)
goto err_out_4;
goto err_out_5;
IRDA_MESSAGE("IrDA: Registered device %s\n", net->name);
usb_set_intfdata(intf, self);
@ -1766,14 +1760,14 @@ static int irda_usb_probe(struct usb_interface *intf,
self->needspatch = (ret < 0);
if (self->needspatch) {
IRDA_ERROR("STIR421X: Couldn't upload patch\n");
goto err_out_5;
goto err_out_6;
}
/* replace IrDA class descriptor with what patched device is now reporting */
irda_desc = irda_usb_find_class_desc (self->usbintf);
if (irda_desc == NULL) {
ret = -ENODEV;
goto err_out_5;
goto err_out_6;
}
if (self->irda_desc)
kfree (self->irda_desc);
@ -1782,9 +1776,10 @@ static int irda_usb_probe(struct usb_interface *intf,
}
return 0;
err_out_5:
err_out_6:
unregister_netdev(self->netdev);
err_out_5:
kfree(self->tx_buff);
err_out_4:
kfree(self->speed_buff);
err_out_3:

@ -156,6 +156,7 @@ struct irda_usb_cb {
struct irlap_cb *irlap; /* The link layer we are binded to */
struct qos_info qos;
char *speed_buff; /* Buffer for speed changes */
char *tx_buff;
struct timeval stamp;
struct timeval now;
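The tx_buff field added here backs the TX-path change in the irda-usb hunk above: instead of checking skb headroom and calling skb_cow() before skb_push(), the driver copies the payload into a preallocated buffer that already reserves room for the IrDA-USB header and submits that buffer with the URB. A minimal sketch of the same copy-into-preallocated-buffer pattern, using hypothetical names (tx_ctx, HDR_LEN, MAX_PAYLOAD, build_header) rather than the driver's actual code:

/*
 * Sketch: keep one transmit buffer with header space reserved up front,
 * so the payload buffer never needs extra headroom of its own.
 */
#include <stdlib.h>
#include <string.h>

#define HDR_LEN     4		/* assumed hardware header size */
#define MAX_PAYLOAD 2051	/* assumed maximum frame payload */

struct tx_ctx {
	unsigned char *tx_buf;	/* HDR_LEN + MAX_PAYLOAD bytes, allocated once */
};

static int tx_ctx_init(struct tx_ctx *ctx)
{
	ctx->tx_buf = calloc(1, HDR_LEN + MAX_PAYLOAD);
	return ctx->tx_buf ? 0 : -1;
}

/* Build a frame in tx_buf: header first, payload copied right behind it.
 * Returns the total length to hand to the transfer, or 0 on overflow. */
static size_t tx_build_frame(struct tx_ctx *ctx,
			     void (*build_header)(unsigned char *hdr),
			     const unsigned char *payload, size_t len)
{
	if (len > MAX_PAYLOAD)
		return 0;
	build_header(ctx->tx_buf);
	memcpy(ctx->tx_buf + HDR_LEN, payload, len);
	return HDR_LEN + len;
}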

@ -166,7 +166,7 @@ static void vlsi_proc_pdev(struct seq_file *seq, struct pci_dev *pdev)
unsigned i;
seq_printf(seq, "\n%s (vid/did: %04x/%04x)\n",
PCIDEV_NAME(pdev), (int)pdev->vendor, (int)pdev->device);
pci_name(pdev), (int)pdev->vendor, (int)pdev->device);
seq_printf(seq, "pci-power-state: %u\n", (unsigned) pdev->current_state);
seq_printf(seq, "resources: irq=%u / io=0x%04x / dma_mask=0x%016Lx\n",
pdev->irq, (unsigned)pci_resource_start(pdev, 0), (unsigned long long)pdev->dma_mask);
@ -1401,7 +1401,7 @@ static void vlsi_tx_timeout(struct net_device *ndev)
if (vlsi_start_hw(idev))
IRDA_ERROR("%s: failed to restart hw - %s(%s) unusable!\n",
__FUNCTION__, PCIDEV_NAME(idev->pdev), ndev->name);
__FUNCTION__, pci_name(idev->pdev), ndev->name);
else
netif_start_queue(ndev);
}
@ -1643,7 +1643,7 @@ vlsi_irda_probe(struct pci_dev *pdev, const struct pci_device_id *id)
pdev->current_state = 0; /* hw must be running now */
IRDA_MESSAGE("%s: IrDA PCI controller %s detected\n",
drivername, PCIDEV_NAME(pdev));
drivername, pci_name(pdev));
if ( !pci_resource_start(pdev,0)
|| !(pci_resource_flags(pdev,0) & IORESOURCE_IO) ) {
@ -1728,7 +1728,7 @@ static void __devexit vlsi_irda_remove(struct pci_dev *pdev)
pci_set_drvdata(pdev, NULL);
IRDA_MESSAGE("%s: %s removed\n", drivername, PCIDEV_NAME(pdev));
IRDA_MESSAGE("%s: %s removed\n", drivername, pci_name(pdev));
}
#ifdef CONFIG_PM
@ -1748,7 +1748,7 @@ static int vlsi_irda_suspend(struct pci_dev *pdev, pm_message_t state)
if (!ndev) {
IRDA_ERROR("%s - %s: no netdevice \n",
__FUNCTION__, PCIDEV_NAME(pdev));
__FUNCTION__, pci_name(pdev));
return 0;
}
idev = ndev->priv;
@ -1759,7 +1759,7 @@ static int vlsi_irda_suspend(struct pci_dev *pdev, pm_message_t state)
pdev->current_state = state.event;
}
else
IRDA_ERROR("%s - %s: invalid suspend request %u -> %u\n", __FUNCTION__, PCIDEV_NAME(pdev), pdev->current_state, state.event);
IRDA_ERROR("%s - %s: invalid suspend request %u -> %u\n", __FUNCTION__, pci_name(pdev), pdev->current_state, state.event);
up(&idev->sem);
return 0;
}
@ -1787,7 +1787,7 @@ static int vlsi_irda_resume(struct pci_dev *pdev)
if (!ndev) {
IRDA_ERROR("%s - %s: no netdevice \n",
__FUNCTION__, PCIDEV_NAME(pdev));
__FUNCTION__, pci_name(pdev));
return 0;
}
idev = ndev->priv;
@ -1795,7 +1795,7 @@ static int vlsi_irda_resume(struct pci_dev *pdev)
if (pdev->current_state == 0) {
up(&idev->sem);
IRDA_WARNING("%s - %s: already resumed\n",
__FUNCTION__, PCIDEV_NAME(pdev));
__FUNCTION__, pci_name(pdev));
return 0;
}

@ -41,39 +41,6 @@
#define PCI_CLASS_SUBCLASS_MASK 0xffff
#endif
/* in recent 2.5 interrupt handlers have non-void return value */
#ifndef IRQ_RETVAL
typedef void irqreturn_t;
#define IRQ_NONE
#define IRQ_HANDLED
#define IRQ_RETVAL(x)
#endif
/* some stuff need to check kernelversion. Not all 2.5 stuff was present
* in early 2.5.x - the test is merely to separate 2.4 from 2.5
*/
#include <linux/version.h>
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
/* PDE() introduced in 2.5.4 */
#ifdef CONFIG_PROC_FS
#define PDE(inode) ((inode)->i_private)
#endif
/* irda crc16 calculation exported in 2.5.42 */
#define irda_calc_crc16(fcs,buf,len) (GOOD_FCS)
/* we use this for unified pci device name access */
#define PCIDEV_NAME(pdev) ((pdev)->name)
#else /* 2.5 or later */
/* whatever we get from the associated struct device - bus:slot:dev.fn id */
#define PCIDEV_NAME(pdev) (pci_name(pdev))
#endif
/* ================================================================ */
/* non-standard PCI registers */

@ -69,7 +69,6 @@ header-y += hysdn_if.h
header-y += i2c-dev.h
header-y += i8k.h
header-y += icmp.h
header-y += if_addr.h
header-y += if_arcnet.h
header-y += if_arp.h
header-y += if_bonding.h
@ -79,7 +78,6 @@ header-y += if_fddi.h
header-y += if.h
header-y += if_hippi.h
header-y += if_infiniband.h
header-y += if_link.h
header-y += if_packet.h
header-y += if_plip.h
header-y += if_ppp.h
@ -213,6 +211,7 @@ unifdef-y += hpet.h
unifdef-y += i2c.h
unifdef-y += i2o-dev.h
unifdef-y += icmpv6.h
unifdef-y += if_addr.h
unifdef-y += if_bridge.h
unifdef-y += if_ec.h
unifdef-y += if_eql.h
@ -220,6 +219,7 @@ unifdef-y += if_ether.h
unifdef-y += if_fddi.h
unifdef-y += if_frad.h
unifdef-y += if_ltalk.h
unifdef-y += if_link.h
unifdef-y += if_pppox.h
unifdef-y += if_shaper.h
unifdef-y += if_tr.h
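Moving if_addr.h and if_link.h from header-y to unifdef-y means the copies exported to userspace are processed with unifdef, which strips kernel-only preprocessor regions instead of installing the header verbatim. A hypothetical fragment showing the kind of block that processing removes:

/* Hypothetical exported header: under unifdef-y the __KERNEL__ region is
 * dropped from the userspace copy, while the shared ABI parts are kept. */
#ifndef _LINUX_EXAMPLE_H
#define _LINUX_EXAMPLE_H

struct example_req {		/* shared kernel/userspace ABI */
	unsigned int index;
	unsigned int flags;
};

#ifdef __KERNEL__
/* kernel-only helper: stripped from the installed header by unifdef */
static inline int example_req_valid(const struct example_req *r)
{
	return r->index != 0;
}
#endif /* __KERNEL__ */

#endif /* _LINUX_EXAMPLE_H */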

@ -1,6 +1,8 @@
#ifndef _IF_TUNNEL_H_
#define _IF_TUNNEL_H_
#include <linux/types.h>
#define SIOCGETTUNNEL (SIOCDEVPRIVATE + 0)
#define SIOCADDTUNNEL (SIOCDEVPRIVATE + 1)
#define SIOCDELTUNNEL (SIOCDEVPRIVATE + 2)

@ -28,7 +28,7 @@
#include <linux/netfilter/x_tables.h>
#define IPT_FUNCTION_MAXNAMELEN XT_FUNCTION_MAXNAMELEN
#define IPT_TABLE_MAXNAMELEN XT_FUNCTION_MAXNAMELEN
#define IPT_TABLE_MAXNAMELEN XT_TABLE_MAXNAMELEN
#define ipt_match xt_match
#define ipt_target xt_target
#define ipt_table xt_table

@ -6,6 +6,7 @@
#if defined(CONFIG_IP_NF_CONNTRACK) || defined(CONFIG_IP_NF_CONNTRACK_MODULE)
#include <linux/netfilter_ipv4/ip_conntrack.h>
#include <linux/socket.h>
#ifdef CONFIG_IP_NF_CONNTRACK_MARK
static inline u_int32_t *nf_ct_get_mark(const struct sk_buff *skb,

@ -134,6 +134,7 @@ sctp_state_fn_t sctp_sf_violation;
sctp_state_fn_t sctp_sf_discard_chunk;
sctp_state_fn_t sctp_sf_do_5_2_1_siminit;
sctp_state_fn_t sctp_sf_do_5_2_2_dupinit;
sctp_state_fn_t sctp_sf_do_5_2_3_initack;
sctp_state_fn_t sctp_sf_do_5_2_4_dupcook;
sctp_state_fn_t sctp_sf_unk_chunk;
sctp_state_fn_t sctp_sf_do_8_5_1_E_sa;

@ -585,6 +585,12 @@ static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_
goto done;
}
if (la->l2_psm > 0 && btohs(la->l2_psm) < 0x1001 &&
!capable(CAP_NET_BIND_SERVICE)) {
err = -EACCES;
goto done;
}
write_lock_bh(&l2cap_sk_list.lock);
if (la->l2_psm && __l2cap_get_sock_by_addr(la->l2_psm, &la->l2_bdaddr)) {
@ -2150,8 +2156,8 @@ static ssize_t l2cap_sysfs_show(struct class *dev, char *buf)
str += sprintf(str, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d 0x%x\n",
batostr(&bt_sk(sk)->src), batostr(&bt_sk(sk)->dst),
sk->sk_state, pi->psm, pi->scid, pi->dcid, pi->imtu,
pi->omtu, pi->link_mode);
sk->sk_state, btohs(pi->psm), pi->scid, pi->dcid,
pi->imtu, pi->omtu, pi->link_mode);
}
read_unlock_bh(&l2cap_sk_list.lock);

@ -231,22 +231,16 @@ nocache:
err = resolver(key, family, dir, &obj, &obj_ref);
if (fle) {
if (err) {
/* Force security policy check on next lookup */
*head = fle->next;
flow_entry_kill(cpu, fle);
} else {
fle->genid = atomic_read(&flow_cache_genid);
if (fle && !err) {
fle->genid = atomic_read(&flow_cache_genid);
if (fle->object)
atomic_dec(fle->object_ref);
if (fle->object)
atomic_dec(fle->object_ref);
fle->object = obj;
fle->object_ref = obj_ref;
if (obj)
atomic_inc(fle->object_ref);
}
fle->object = obj;
fle->object_ref = obj_ref;
if (obj)
atomic_inc(fle->object_ref);
}
local_bh_enable();
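The reworked branch above no longer keeps a cache entry alive when the resolver fails: the entry is unlinked from its bucket and killed, so the next lookup for that key goes back through the resolver (and its security policy check) rather than reusing a stale object. A small sketch of that do-not-cache-failures pattern on a singly linked bucket, with hypothetical types rather than the kernel's flow cache structures:

#include <stdlib.h>

/* Hypothetical cache entry in a singly linked hash bucket. */
struct cache_entry {
	struct cache_entry *next;
	unsigned long key;
	void *object;		/* resolved result, NULL until filled */
};

/* After running the resolver for the entry at the bucket head, either
 * store the result or, on error, unlink and free the entry so the next
 * lookup re-resolves instead of returning an empty cached value. */
static void cache_store_or_kill(struct cache_entry **head,
				struct cache_entry *fle,
				void *obj, int err)
{
	if (err) {
		*head = fle->next;	/* assumes fle sits at the bucket head */
		free(fle);
		return;
	}
	fle->object = obj;
}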

@ -959,7 +959,7 @@ ctnetlink_create_conntrack(struct nfattr *cda[],
if (cda[CTA_PROTOINFO-1]) {
err = ctnetlink_change_protoinfo(ct, cda);
if (err < 0)
return err;
goto err;
}
#if defined(CONFIG_IP_NF_CONNTRACK_MARK)

@ -4420,9 +4420,11 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
* But, this leaves one open to an easy denial of
* service attack, and SYN cookies can't defend
* against this problem. So, we drop the data
* in the interest of security over speed.
* in the interest of security over speed unless
* it's still in use.
*/
goto discard;
kfree_skb(skb);
return 0;
}
goto discard;
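The switch from the shared discard path to kfree_skb() matters because kfree_skb() only drops one reference and frees the buffer when the last reference goes away, while an unconditional free can destroy an skb that another code path still holds. A generic sketch of that reference-counted release pattern, with hypothetical names rather than the skb API:

#include <stdatomic.h>
#include <stdlib.h>

/* Hypothetical reference-counted buffer; users starts at 1 for the creator. */
struct buf {
	atomic_int users;
	unsigned char *data;
};

/* Take an extra reference so the buffer survives releases by other holders. */
static struct buf *buf_get(struct buf *b)
{
	atomic_fetch_add(&b->users, 1);
	return b;
}

/* Safe release: free only when the last user drops its reference. */
static void buf_put(struct buf *b)
{
	if (atomic_fetch_sub(&b->users, 1) == 1) {
		free(b->data);
		free(b);
	}
}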

@ -1650,7 +1650,8 @@ static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *skb, int m
memcpy(skb_put(skb, next_skb_size), next_skb->data, next_skb_size);
skb->ip_summed = next_skb->ip_summed;
if (next_skb->ip_summed == CHECKSUM_PARTIAL)
skb->ip_summed = CHECKSUM_PARTIAL;
if (skb->ip_summed != CHECKSUM_PARTIAL)
skb->csum = csum_block_add(skb->csum, next_skb->csum, skb_size);

@ -341,6 +341,7 @@ void in6_dev_finish_destroy(struct inet6_dev *idev)
static struct inet6_dev * ipv6_add_dev(struct net_device *dev)
{
struct inet6_dev *ndev;
struct in6_addr maddr;
ASSERT_RTNL();
@ -425,6 +426,11 @@ static struct inet6_dev * ipv6_add_dev(struct net_device *dev)
#endif
/* protected by rtnl_lock */
rcu_assign_pointer(dev->ip6_ptr, ndev);
/* Join all-node multicast group */
ipv6_addr_all_nodes(&maddr);
ipv6_dev_mc_inc(dev, &maddr);
return ndev;
}

@ -2258,8 +2258,6 @@ void ipv6_mc_up(struct inet6_dev *idev)
void ipv6_mc_init_dev(struct inet6_dev *idev)
{
struct in6_addr maddr;
write_lock_bh(&idev->lock);
rwlock_init(&idev->mc_lock);
idev->mc_gq_running = 0;
@ -2275,10 +2273,6 @@ void ipv6_mc_init_dev(struct inet6_dev *idev)
idev->mc_maxdelay = IGMP6_UNSOLICITED_IVAL;
idev->mc_v1_seen = 0;
write_unlock_bh(&idev->lock);
/* Add all-nodes address. */
ipv6_addr_all_nodes(&maddr);
ipv6_dev_mc_inc(idev->dev, &maddr);
}
/*

@ -2017,6 +2017,7 @@ static inline size_t rt6_nlmsg_size(void)
+ nla_total_size(4) /* RTA_IIF */
+ nla_total_size(4) /* RTA_OIF */
+ nla_total_size(4) /* RTA_PRIORITY */
+ RTAX_MAX * nla_total_size(4) /* RTA_METRICS */
+ nla_total_size(sizeof(struct rta_cacheinfo));
}

@ -981,7 +981,7 @@ ctnetlink_create_conntrack(struct nfattr *cda[],
if (cda[CTA_PROTOINFO-1]) {
err = ctnetlink_change_protoinfo(ct, cda);
if (err < 0)
return err;
goto err;
}
#if defined(CONFIG_NF_CONNTRACK_MARK)

@ -1562,7 +1562,7 @@ static int sctp_process_missing_param(const struct sctp_association *asoc,
if (*errp) {
report.num_missing = htonl(1);
report.type = paramtype;
sctp_init_cause(*errp, SCTP_ERROR_INV_PARAM,
sctp_init_cause(*errp, SCTP_ERROR_MISS_PARAM,
&report, sizeof(report));
}
@ -1775,7 +1775,9 @@ int sctp_verify_init(const struct sctp_association *asoc,
/* Verify stream values are non-zero. */
if ((0 == peer_init->init_hdr.num_outbound_streams) ||
(0 == peer_init->init_hdr.num_inbound_streams)) {
(0 == peer_init->init_hdr.num_inbound_streams) ||
(0 == peer_init->init_hdr.init_tag) ||
(SCTP_DEFAULT_MINWINDOW > ntohl(peer_init->init_hdr.a_rwnd))) {
sctp_process_inv_mandatory(asoc, chunk, errp);
return 0;

@ -217,7 +217,7 @@ static int sctp_gen_sack(struct sctp_association *asoc, int force,
asoc->peer.sack_needed = 0;
error = sctp_outq_tail(&asoc->outqueue, sack);
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(sack));
/* Stop the SACK timer. */
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,

@ -440,7 +440,6 @@ sctp_disposition_t sctp_sf_do_5_1C_ack(const struct sctp_endpoint *ep,
{
struct sctp_chunk *chunk = arg;
sctp_init_chunk_t *initchunk;
__u32 init_tag;
struct sctp_chunk *err_chunk;
struct sctp_packet *packet;
sctp_error_t error;
@ -462,24 +461,6 @@ sctp_disposition_t sctp_sf_do_5_1C_ack(const struct sctp_endpoint *ep,
/* Grab the INIT header. */
chunk->subh.init_hdr = (sctp_inithdr_t *) chunk->skb->data;
init_tag = ntohl(chunk->subh.init_hdr->init_tag);
/* Verification Tag: 3.3.3
* If the value of the Initiate Tag in a received INIT ACK
* chunk is found to be 0, the receiver MUST treat it as an
* error and close the association by transmitting an ABORT.
*/
if (!init_tag) {
struct sctp_chunk *reply = sctp_make_abort(asoc, chunk, 0);
if (!reply)
goto nomem;
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(reply));
return sctp_stop_t1_and_abort(commands, SCTP_ERROR_INV_PARAM,
ECONNREFUSED, asoc,
chunk->transport);
}
/* Verify the INIT chunk before processing it. */
err_chunk = NULL;
if (!sctp_verify_init(asoc, chunk->chunk_hdr->type,
@ -550,9 +531,6 @@ sctp_disposition_t sctp_sf_do_5_1C_ack(const struct sctp_endpoint *ep,
SCTP_CHUNK(err_chunk));
return SCTP_DISPOSITION_CONSUME;
nomem:
return SCTP_DISPOSITION_NOMEM;
}
/*
@ -1553,6 +1531,28 @@ sctp_disposition_t sctp_sf_do_5_2_2_dupinit(const struct sctp_endpoint *ep,
}
/*
* Unexpected INIT-ACK handler.
*
* Section 5.2.3
* If an INIT ACK received by an endpoint in any state other than the
* COOKIE-WAIT state, the endpoint should discard the INIT ACK chunk.
* An unexpected INIT ACK usually indicates the processing of an old or
* duplicated INIT chunk.
*/
sctp_disposition_t sctp_sf_do_5_2_3_initack(const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg, sctp_cmd_seq_t *commands)
{
/* Per the above section, we'll discard the chunk if we have an
* endpoint. If this is an OOTB INIT-ACK, treat it as such.
*/
if (ep == sctp_sk((sctp_get_ctl_sock()))->ep)
return sctp_sf_ootb(ep, asoc, type, arg, commands);
else
return sctp_sf_discard_chunk(ep, asoc, type, arg, commands);
}
/* Unexpected COOKIE-ECHO handler for peer restart (Table 2, action 'A')
*

@ -152,7 +152,7 @@ const sctp_sm_table_entry_t *sctp_sm_lookup_event(sctp_event_t event_type,
/* SCTP_STATE_EMPTY */ \
TYPE_SCTP_FUNC(sctp_sf_ootb), \
/* SCTP_STATE_CLOSED */ \
TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
TYPE_SCTP_FUNC(sctp_sf_do_5_2_3_initack), \
/* SCTP_STATE_COOKIE_WAIT */ \
TYPE_SCTP_FUNC(sctp_sf_do_5_1C_ack), \
/* SCTP_STATE_COOKIE_ECHOED */ \

@ -56,6 +56,7 @@ static int x25_receive_data(struct sk_buff *skb, struct x25_neigh *nb)
sk_add_backlog(sk, skb);
}
bh_unlock_sock(sk);
sock_put(sk);
return queued;
}

@ -650,19 +650,18 @@ int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
struct xfrm_policy *pol;
struct xfrm_policy *delpol;
struct hlist_head *chain;
struct hlist_node *entry, *newpos, *last;
struct hlist_node *entry, *newpos;
struct dst_entry *gc_list;
write_lock_bh(&xfrm_policy_lock);
chain = policy_hash_bysel(&policy->selector, policy->family, dir);
delpol = NULL;
newpos = NULL;
last = NULL;
hlist_for_each_entry(pol, entry, chain, bydst) {
if (!delpol &&
pol->type == policy->type &&
if (pol->type == policy->type &&
!selector_cmp(&pol->selector, &policy->selector) &&
xfrm_sec_ctx_match(pol->security, policy->security)) {
xfrm_sec_ctx_match(pol->security, policy->security) &&
!WARN_ON(delpol)) {
if (excl) {
write_unlock_bh(&xfrm_policy_lock);
return -EEXIST;
@ -671,17 +670,12 @@ int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
if (policy->priority > pol->priority)
continue;
} else if (policy->priority >= pol->priority) {
last = &pol->bydst;
newpos = &pol->bydst;
continue;
}
if (!newpos)
newpos = &pol->bydst;
if (delpol)
break;
last = &pol->bydst;
}
if (!newpos)
newpos = last;
if (newpos)
hlist_add_after(newpos, &policy->bydst);
else

@ -1299,6 +1299,7 @@ int security_load_policy(void *data, size_t len)
avc_ss_reset(seqno);
selnl_notify_policyload(seqno);
selinux_netlbl_cache_invalidate();
atomic_inc(&flow_cache_genid);
return 0;
}
@ -1354,6 +1355,7 @@ int security_load_policy(void *data, size_t len)
avc_ss_reset(seqno);
selnl_notify_policyload(seqno);
selinux_netlbl_cache_invalidate();
atomic_inc(&flow_cache_genid);
return 0;
@ -1853,6 +1855,7 @@ out:
if (!rc) {
avc_ss_reset(seqno);
selnl_notify_policyload(seqno);
atomic_inc(&flow_cache_genid);
}
return rc;
}
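The atomic_inc(&flow_cache_genid) calls added after each policy load or change invalidate every cached flow decision at once: lookups compare the generation recorded in an entry against the global counter and treat a mismatch as stale. A self-contained sketch of that generation-counter idea, using hypothetical names rather than the kernel's flow cache API:

#include <stdatomic.h>
#include <stdbool.h>

/* Global generation counter, bumped whenever the security policy changes. */
static atomic_uint cache_genid;

struct flow_entry {
	unsigned int genid;	/* generation the cached decision was made under */
	void *decision;		/* cached policy result, NULL if unresolved */
};

/* An entry is usable only if it was filled under the current policy. */
static bool flow_entry_fresh(const struct flow_entry *fe)
{
	return fe->decision && fe->genid == atomic_load(&cache_genid);
}

/* Called after loading a new policy: every existing entry becomes stale. */
static void flow_cache_bump_generation(void)
{
	atomic_fetch_add(&cache_genid, 1);
}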