Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6

Conflicts:
	drivers/net/qlcnic/qlcnic_init.c
	net/ipv4/ip_output.c

commit e40051d134
@@ -1142,7 +1142,7 @@ ATLX ETHERNET DRIVERS
M: Jay Cliburn <jcliburn@gmail.com>
M: Chris Snook <chris.snook@gmail.com>
M: Jie Yang <jie.yang@atheros.com>
L: atl1-devel@lists.sourceforge.net
L: netdev@vger.kernel.org
W: http://sourceforge.net/projects/atl1
W: http://atl1.sourceforge.net
S: Maintained
@@ -39,6 +39,10 @@ static DEFINE_SPINLOCK(dca_lock);

static LIST_HEAD(dca_domains);

static BLOCKING_NOTIFIER_HEAD(dca_provider_chain);

static int dca_providers_blocked;

static struct pci_bus *dca_pci_rc_from_dev(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
@@ -70,6 +74,60 @@ static void dca_free_domain(struct dca_domain *domain)
	kfree(domain);
}

static int dca_provider_ioat_ver_3_0(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);

	return ((pdev->vendor == PCI_VENDOR_ID_INTEL) &&
		((pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG0) ||
		 (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG1) ||
		 (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG2) ||
		 (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG3) ||
		 (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG4) ||
		 (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG5) ||
		 (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG6) ||
		 (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG7)));
}

static void unregister_dca_providers(void)
{
	struct dca_provider *dca, *_dca;
	struct list_head unregistered_providers;
	struct dca_domain *domain;
	unsigned long flags;

	blocking_notifier_call_chain(&dca_provider_chain,
				     DCA_PROVIDER_REMOVE, NULL);

	INIT_LIST_HEAD(&unregistered_providers);

	spin_lock_irqsave(&dca_lock, flags);

	if (list_empty(&dca_domains)) {
		spin_unlock_irqrestore(&dca_lock, flags);
		return;
	}

	/* at this point only one domain in the list is expected */
	domain = list_first_entry(&dca_domains, struct dca_domain, node);
	if (!domain)
		return;

	list_for_each_entry_safe(dca, _dca, &domain->dca_providers, node) {
		list_del(&dca->node);
		list_add(&dca->node, &unregistered_providers);
	}

	dca_free_domain(domain);

	spin_unlock_irqrestore(&dca_lock, flags);

	list_for_each_entry_safe(dca, _dca, &unregistered_providers, node) {
		dca_sysfs_remove_provider(dca);
		list_del(&dca->node);
	}
}

static struct dca_domain *dca_find_domain(struct pci_bus *rc)
{
	struct dca_domain *domain;
@@ -90,9 +148,13 @@ static struct dca_domain *dca_get_domain(struct device *dev)
	domain = dca_find_domain(rc);

	if (!domain) {
		domain = dca_allocate_domain(rc);
		if (domain)
			list_add(&domain->node, &dca_domains);
		if (dca_provider_ioat_ver_3_0(dev) && !list_empty(&dca_domains)) {
			dca_providers_blocked = 1;
		} else {
			domain = dca_allocate_domain(rc);
			if (domain)
				list_add(&domain->node, &dca_domains);
		}
	}

	return domain;
@@ -293,8 +355,6 @@ void free_dca_provider(struct dca_provider *dca)
}
EXPORT_SYMBOL_GPL(free_dca_provider);

static BLOCKING_NOTIFIER_HEAD(dca_provider_chain);

/**
 * register_dca_provider - register a dca provider
 * @dca - struct created by alloc_dca_provider()
@@ -306,6 +366,13 @@ int register_dca_provider(struct dca_provider *dca, struct device *dev)
	unsigned long flags;
	struct dca_domain *domain;

	spin_lock_irqsave(&dca_lock, flags);
	if (dca_providers_blocked) {
		spin_unlock_irqrestore(&dca_lock, flags);
		return -ENODEV;
	}
	spin_unlock_irqrestore(&dca_lock, flags);

	err = dca_sysfs_add_provider(dca, dev);
	if (err)
		return err;
@@ -313,7 +380,13 @@ int register_dca_provider(struct dca_provider *dca, struct device *dev)
	spin_lock_irqsave(&dca_lock, flags);
	domain = dca_get_domain(dev);
	if (!domain) {
		spin_unlock_irqrestore(&dca_lock, flags);
		if (dca_providers_blocked) {
			spin_unlock_irqrestore(&dca_lock, flags);
			dca_sysfs_remove_provider(dca);
			unregister_dca_providers();
		} else {
			spin_unlock_irqrestore(&dca_lock, flags);
		}
		return -ENODEV;
	}
	list_add(&dca->node, &domain->dca_providers);
@@ -635,6 +635,9 @@ struct vortex_private {
		must_free_region:1,	/* Flag: if zero, Cardbus owns the I/O region */
		large_frames:1,		/* accept large frames */
		handling_irq:1;		/* private in_irq indicator */
	/* {get|set}_wol operations are already serialized by rtnl.
	 * no additional locking is required for the enable_wol and acpi_set_WOL()
	 */
	int drv_flags;
	u16 status_enable;
	u16 intr_enable;
@@ -2939,13 +2942,11 @@ static void vortex_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct vortex_private *vp = netdev_priv(dev);

	spin_lock_irq(&vp->lock);
	wol->supported = WAKE_MAGIC;

	wol->wolopts = 0;
	if (vp->enable_wol)
		wol->wolopts |= WAKE_MAGIC;
	spin_unlock_irq(&vp->lock);
}

static int vortex_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
@@ -2954,13 +2955,11 @@ static int vortex_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
	if (wol->wolopts & ~WAKE_MAGIC)
		return -EINVAL;

	spin_lock_irq(&vp->lock);
	if (wol->wolopts & WAKE_MAGIC)
		vp->enable_wol = 1;
	else
		vp->enable_wol = 0;
	acpi_set_WOL(dev);
	spin_unlock_irq(&vp->lock);

	return 0;
}
@@ -1251,6 +1251,12 @@ static void atl1_free_ring_resources(struct atl1_adapter *adapter)

	rrd_ring->desc = NULL;
	rrd_ring->dma = 0;

	adapter->cmb.dma = 0;
	adapter->cmb.cmb = NULL;

	adapter->smb.dma = 0;
	adapter->smb.smb = NULL;
}

static void atl1_setup_mac_ctrl(struct atl1_adapter *adapter)
@@ -2847,10 +2853,11 @@ static int atl1_resume(struct pci_dev *pdev)
	pci_enable_wake(pdev, PCI_D3cold, 0);

	atl1_reset_hw(&adapter->hw);
	adapter->cmb.cmb->int_stats = 0;

	if (netif_running(netdev))
	if (netif_running(netdev)) {
		adapter->cmb.cmb->int_stats = 0;
		atl1_up(adapter);
	}
	netif_device_attach(netdev);

	return 0;
@@ -2466,6 +2466,9 @@ int bond_3ad_lacpdu_recv(struct sk_buff *skb, struct net_device *dev, struct pac
	if (!(dev->flags & IFF_MASTER))
		goto out;

	if (!pskb_may_pull(skb, sizeof(struct lacpdu)))
		goto out;

	read_lock(&bond->lock);
	slave = bond_get_slave_by_dev((struct bonding *)netdev_priv(dev),
				      orig_dev);
@@ -362,6 +362,9 @@ static int rlb_arp_recv(struct sk_buff *skb, struct net_device *bond_dev, struct
		goto out;
	}

	if (!pskb_may_pull(skb, arp_hdr_len(bond_dev)))
		goto out;

	if (skb->len < sizeof(struct arp_pkt)) {
		pr_debug("Packet is too small to be an ARP\n");
		goto out;
@@ -2302,6 +2302,8 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
	case CHELSIO_GET_QSET_NUM:{
		struct ch_reg edata;

		memset(&edata, 0, sizeof(struct ch_reg));

		edata.cmd = CHELSIO_GET_QSET_NUM;
		edata.val = pi->nqsets;
		if (copy_to_user(useraddr, &edata, sizeof(edata)))
@@ -57,6 +57,7 @@ enum e1e_registers {
	E1000_SCTL = 0x00024, /* SerDes Control - RW */
	E1000_FCAL = 0x00028, /* Flow Control Address Low - RW */
	E1000_FCAH = 0x0002C, /* Flow Control Address High -RW */
	E1000_FEXTNVM4 = 0x00024, /* Future Extended NVM 4 - RW */
	E1000_FEXTNVM = 0x00028, /* Future Extended NVM - RW */
	E1000_FCT = 0x00030, /* Flow Control Type - RW */
	E1000_VET = 0x00038, /* VLAN Ether Type - RW */
@@ -105,6 +105,10 @@
#define E1000_FEXTNVM_SW_CONFIG 1
#define E1000_FEXTNVM_SW_CONFIG_ICH8M (1 << 27) /* Bit redefined for ICH8M :/ */

#define E1000_FEXTNVM4_BEACON_DURATION_MASK 0x7
#define E1000_FEXTNVM4_BEACON_DURATION_8USEC 0x7
#define E1000_FEXTNVM4_BEACON_DURATION_16USEC 0x3

#define PCIE_ICH8_SNOOP_ALL PCIE_NO_SNOOP_ALL

#define E1000_ICH_RAR_ENTRIES 7
@@ -125,6 +129,7 @@

/* SMBus Address Phy Register */
#define HV_SMB_ADDR PHY_REG(768, 26)
#define HV_SMB_ADDR_MASK 0x007F
#define HV_SMB_ADDR_PEC_EN 0x0200
#define HV_SMB_ADDR_VALID 0x0080
@ -237,6 +242,8 @@ static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link);
|
||||
static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw);
|
||||
static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw);
|
||||
static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw);
|
||||
static s32 e1000_k1_workaround_lv(struct e1000_hw *hw);
|
||||
static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate);
|
||||
|
||||
static inline u16 __er16flash(struct e1000_hw *hw, unsigned long reg)
|
||||
{
|
||||
@ -272,7 +279,7 @@ static inline void __ew32flash(struct e1000_hw *hw, unsigned long reg, u32 val)
|
||||
static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
|
||||
{
|
||||
struct e1000_phy_info *phy = &hw->phy;
|
||||
u32 ctrl;
|
||||
u32 ctrl, fwsm;
|
||||
s32 ret_val = 0;
|
||||
|
||||
phy->addr = 1;
|
||||
@ -294,7 +301,8 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
|
||||
* disabled, then toggle the LANPHYPC Value bit to force
|
||||
* the interconnect to PCIe mode.
|
||||
*/
|
||||
if (!(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) {
|
||||
fwsm = er32(FWSM);
|
||||
if (!(fwsm & E1000_ICH_FWSM_FW_VALID)) {
|
||||
ctrl = er32(CTRL);
|
||||
ctrl |= E1000_CTRL_LANPHYPC_OVERRIDE;
|
||||
ctrl &= ~E1000_CTRL_LANPHYPC_VALUE;
|
||||
@ -303,6 +311,13 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
|
||||
ctrl &= ~E1000_CTRL_LANPHYPC_OVERRIDE;
|
||||
ew32(CTRL, ctrl);
|
||||
msleep(50);
|
||||
|
||||
/*
|
||||
* Gate automatic PHY configuration by hardware on
|
||||
* non-managed 82579
|
||||
*/
|
||||
if (hw->mac.type == e1000_pch2lan)
|
||||
e1000_gate_hw_phy_config_ich8lan(hw, true);
|
||||
}
|
||||
|
||||
/*
|
||||
@ -315,6 +330,13 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
|
||||
if (ret_val)
|
||||
goto out;
|
||||
|
||||
/* Ungate automatic PHY configuration on non-managed 82579 */
|
||||
if ((hw->mac.type == e1000_pch2lan) &&
|
||||
!(fwsm & E1000_ICH_FWSM_FW_VALID)) {
|
||||
msleep(10);
|
||||
e1000_gate_hw_phy_config_ich8lan(hw, false);
|
||||
}
|
||||
|
||||
phy->id = e1000_phy_unknown;
|
||||
ret_val = e1000e_get_phy_id(hw);
|
||||
if (ret_val)
|
||||
@ -561,13 +583,10 @@ static s32 e1000_init_mac_params_ich8lan(struct e1000_adapter *adapter)
|
||||
if (mac->type == e1000_ich8lan)
|
||||
e1000e_set_kmrn_lock_loss_workaround_ich8lan(hw, true);
|
||||
|
||||
/* Disable PHY configuration by hardware, config by software */
|
||||
if (mac->type == e1000_pch2lan) {
|
||||
u32 extcnf_ctrl = er32(EXTCNF_CTRL);
|
||||
|
||||
extcnf_ctrl |= E1000_EXTCNF_CTRL_GATE_PHY_CFG;
|
||||
ew32(EXTCNF_CTRL, extcnf_ctrl);
|
||||
}
|
||||
/* Gate automatic PHY configuration by hardware on managed 82579 */
|
||||
if ((mac->type == e1000_pch2lan) &&
|
||||
(er32(FWSM) & E1000_ICH_FWSM_FW_VALID))
|
||||
e1000_gate_hw_phy_config_ich8lan(hw, true);
|
||||
|
||||
return 0;
|
||||
}
|
||||
@ -652,6 +671,12 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (hw->mac.type == e1000_pch2lan) {
|
||||
ret_val = e1000_k1_workaround_lv(hw);
|
||||
if (ret_val)
|
||||
goto out;
|
||||
}
|
||||
|
||||
/*
|
||||
* Check if there was DownShift, must be checked
|
||||
* immediately after link-up
|
||||
@ -894,6 +919,34 @@ static s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw)
|
||||
return (fwsm & E1000_ICH_FWSM_RSPCIPHY) ? 0 : E1000_BLK_PHY_RESET;
|
||||
}
|
||||
|
||||
/**
|
||||
* e1000_write_smbus_addr - Write SMBus address to PHY needed during Sx states
|
||||
* @hw: pointer to the HW structure
|
||||
*
|
||||
* Assumes semaphore already acquired.
|
||||
*
|
||||
**/
|
||||
static s32 e1000_write_smbus_addr(struct e1000_hw *hw)
|
||||
{
|
||||
u16 phy_data;
|
||||
u32 strap = er32(STRAP);
|
||||
s32 ret_val = 0;
|
||||
|
||||
strap &= E1000_STRAP_SMBUS_ADDRESS_MASK;
|
||||
|
||||
ret_val = e1000_read_phy_reg_hv_locked(hw, HV_SMB_ADDR, &phy_data);
|
||||
if (ret_val)
|
||||
goto out;
|
||||
|
||||
phy_data &= ~HV_SMB_ADDR_MASK;
|
||||
phy_data |= (strap >> E1000_STRAP_SMBUS_ADDRESS_SHIFT);
|
||||
phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
|
||||
ret_val = e1000_write_phy_reg_hv_locked(hw, HV_SMB_ADDR, phy_data);
|
||||
|
||||
out:
|
||||
return ret_val;
|
||||
}
|
||||
|
||||
/**
|
||||
* e1000_sw_lcd_config_ich8lan - SW-based LCD Configuration
|
||||
* @hw: pointer to the HW structure
|
||||
@ -903,7 +956,6 @@ static s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw)
|
||||
**/
|
||||
static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
|
||||
{
|
||||
struct e1000_adapter *adapter = hw->adapter;
|
||||
struct e1000_phy_info *phy = &hw->phy;
|
||||
u32 i, data, cnf_size, cnf_base_addr, sw_cfg_mask;
|
||||
s32 ret_val = 0;
|
||||
@ -921,7 +973,8 @@ static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
|
||||
if (phy->type != e1000_phy_igp_3)
|
||||
return ret_val;
|
||||
|
||||
if (adapter->pdev->device == E1000_DEV_ID_ICH8_IGP_AMT) {
|
||||
if ((hw->adapter->pdev->device == E1000_DEV_ID_ICH8_IGP_AMT) ||
|
||||
(hw->adapter->pdev->device == E1000_DEV_ID_ICH8_IGP_C)) {
|
||||
sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG;
|
||||
break;
|
||||
}
|
||||
@ -961,21 +1014,16 @@ static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
|
||||
cnf_base_addr = data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK;
|
||||
cnf_base_addr >>= E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT;
|
||||
|
||||
if (!(data & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE) &&
|
||||
((hw->mac.type == e1000_pchlan) ||
|
||||
(hw->mac.type == e1000_pch2lan))) {
|
||||
if ((!(data & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE) &&
|
||||
(hw->mac.type == e1000_pchlan)) ||
|
||||
(hw->mac.type == e1000_pch2lan)) {
|
||||
/*
|
||||
* HW configures the SMBus address and LEDs when the
|
||||
* OEM and LCD Write Enable bits are set in the NVM.
|
||||
* When both NVM bits are cleared, SW will configure
|
||||
* them instead.
|
||||
*/
|
||||
data = er32(STRAP);
|
||||
data &= E1000_STRAP_SMBUS_ADDRESS_MASK;
|
||||
reg_data = data >> E1000_STRAP_SMBUS_ADDRESS_SHIFT;
|
||||
reg_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
|
||||
ret_val = e1000_write_phy_reg_hv_locked(hw, HV_SMB_ADDR,
|
||||
reg_data);
|
||||
ret_val = e1000_write_smbus_addr(hw);
|
||||
if (ret_val)
|
||||
goto out;
|
||||
|
||||
@ -1440,10 +1488,6 @@ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
|
||||
goto out;
|
||||
|
||||
/* Enable jumbo frame workaround in the PHY */
|
||||
e1e_rphy(hw, PHY_REG(769, 20), &data);
|
||||
ret_val = e1e_wphy(hw, PHY_REG(769, 20), data & ~(1 << 14));
|
||||
if (ret_val)
|
||||
goto out;
|
||||
e1e_rphy(hw, PHY_REG(769, 23), &data);
|
||||
data &= ~(0x7F << 5);
|
||||
data |= (0x37 << 5);
|
||||
@ -1452,7 +1496,6 @@ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
|
||||
goto out;
|
||||
e1e_rphy(hw, PHY_REG(769, 16), &data);
|
||||
data &= ~(1 << 13);
|
||||
data |= (1 << 12);
|
||||
ret_val = e1e_wphy(hw, PHY_REG(769, 16), data);
|
||||
if (ret_val)
|
||||
goto out;
|
||||
@ -1477,7 +1520,7 @@ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
|
||||
|
||||
mac_reg = er32(RCTL);
|
||||
mac_reg &= ~E1000_RCTL_SECRC;
|
||||
ew32(FFLT_DBG, mac_reg);
|
||||
ew32(RCTL, mac_reg);
|
||||
|
||||
ret_val = e1000e_read_kmrn_reg(hw,
|
||||
E1000_KMRNCTRLSTA_CTRL_OFFSET,
|
||||
@ -1503,17 +1546,12 @@ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
|
||||
goto out;
|
||||
|
||||
/* Write PHY register values back to h/w defaults */
|
||||
e1e_rphy(hw, PHY_REG(769, 20), &data);
|
||||
ret_val = e1e_wphy(hw, PHY_REG(769, 20), data & ~(1 << 14));
|
||||
if (ret_val)
|
||||
goto out;
|
||||
e1e_rphy(hw, PHY_REG(769, 23), &data);
|
||||
data &= ~(0x7F << 5);
|
||||
ret_val = e1e_wphy(hw, PHY_REG(769, 23), data);
|
||||
if (ret_val)
|
||||
goto out;
|
||||
e1e_rphy(hw, PHY_REG(769, 16), &data);
|
||||
data &= ~(1 << 12);
|
||||
data |= (1 << 13);
|
||||
ret_val = e1e_wphy(hw, PHY_REG(769, 16), data);
|
||||
if (ret_val)
|
||||
@ -1558,6 +1596,69 @@ out:
|
||||
return ret_val;
|
||||
}
|
||||
|
||||
/**
|
||||
* e1000_k1_gig_workaround_lv - K1 Si workaround
|
||||
* @hw: pointer to the HW structure
|
||||
*
|
||||
* Workaround to set the K1 beacon duration for 82579 parts
|
||||
**/
|
||||
static s32 e1000_k1_workaround_lv(struct e1000_hw *hw)
|
||||
{
|
||||
s32 ret_val = 0;
|
||||
u16 status_reg = 0;
|
||||
u32 mac_reg;
|
||||
|
||||
if (hw->mac.type != e1000_pch2lan)
|
||||
goto out;
|
||||
|
||||
/* Set K1 beacon duration based on 1Gbps speed or otherwise */
|
||||
ret_val = e1e_rphy(hw, HV_M_STATUS, &status_reg);
|
||||
if (ret_val)
|
||||
goto out;
|
||||
|
||||
if ((status_reg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE))
|
||||
== (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) {
|
||||
mac_reg = er32(FEXTNVM4);
|
||||
mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK;
|
||||
|
||||
if (status_reg & HV_M_STATUS_SPEED_1000)
|
||||
mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_8USEC;
|
||||
else
|
||||
mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_16USEC;
|
||||
|
||||
ew32(FEXTNVM4, mac_reg);
|
||||
}
|
||||
|
||||
out:
|
||||
return ret_val;
|
||||
}
|
||||
|
||||
/**
|
||||
* e1000_gate_hw_phy_config_ich8lan - disable PHY config via hardware
|
||||
* @hw: pointer to the HW structure
|
||||
* @gate: boolean set to true to gate, false to ungate
|
||||
*
|
||||
* Gate/ungate the automatic PHY configuration via hardware; perform
|
||||
* the configuration via software instead.
|
||||
**/
|
||||
static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate)
|
||||
{
|
||||
u32 extcnf_ctrl;
|
||||
|
||||
if (hw->mac.type != e1000_pch2lan)
|
||||
return;
|
||||
|
||||
extcnf_ctrl = er32(EXTCNF_CTRL);
|
||||
|
||||
if (gate)
|
||||
extcnf_ctrl |= E1000_EXTCNF_CTRL_GATE_PHY_CFG;
|
||||
else
|
||||
extcnf_ctrl &= ~E1000_EXTCNF_CTRL_GATE_PHY_CFG;
|
||||
|
||||
ew32(EXTCNF_CTRL, extcnf_ctrl);
|
||||
return;
|
||||
}
|
||||
|
||||
/**
|
||||
* e1000_lan_init_done_ich8lan - Check for PHY config completion
|
||||
* @hw: pointer to the HW structure
|
||||
@ -1602,6 +1703,9 @@ static s32 e1000_post_phy_reset_ich8lan(struct e1000_hw *hw)
|
||||
if (e1000_check_reset_block(hw))
|
||||
goto out;
|
||||
|
||||
/* Allow time for h/w to get to quiescent state after reset */
|
||||
msleep(10);
|
||||
|
||||
/* Perform any necessary post-reset workarounds */
|
||||
switch (hw->mac.type) {
|
||||
case e1000_pchlan:
|
||||
@ -1630,6 +1734,13 @@ static s32 e1000_post_phy_reset_ich8lan(struct e1000_hw *hw)
|
||||
/* Configure the LCD with the OEM bits in NVM */
|
||||
ret_val = e1000_oem_bits_config_ich8lan(hw, true);
|
||||
|
||||
/* Ungate automatic PHY configuration on non-managed 82579 */
|
||||
if ((hw->mac.type == e1000_pch2lan) &&
|
||||
!(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) {
|
||||
msleep(10);
|
||||
e1000_gate_hw_phy_config_ich8lan(hw, false);
|
||||
}
|
||||
|
||||
out:
|
||||
return ret_val;
|
||||
}
|
||||
@ -1646,6 +1757,11 @@ static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw)
|
||||
{
|
||||
s32 ret_val = 0;
|
||||
|
||||
/* Gate automatic PHY configuration by hardware on non-managed 82579 */
|
||||
if ((hw->mac.type == e1000_pch2lan) &&
|
||||
!(er32(FWSM) & E1000_ICH_FWSM_FW_VALID))
|
||||
e1000_gate_hw_phy_config_ich8lan(hw, true);
|
||||
|
||||
ret_val = e1000e_phy_hw_reset_generic(hw);
|
||||
if (ret_val)
|
||||
goto out;
|
||||
@ -2910,6 +3026,14 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
|
||||
* external PHY is reset.
|
||||
*/
|
||||
ctrl |= E1000_CTRL_PHY_RST;
|
||||
|
||||
/*
|
||||
* Gate automatic PHY configuration by hardware on
|
||||
* non-managed 82579
|
||||
*/
|
||||
if ((hw->mac.type == e1000_pch2lan) &&
|
||||
!(er32(FWSM) & E1000_ICH_FWSM_FW_VALID))
|
||||
e1000_gate_hw_phy_config_ich8lan(hw, true);
|
||||
}
|
||||
ret_val = e1000_acquire_swflag_ich8lan(hw);
|
||||
e_dbg("Issuing a global reset to ich8lan\n");
|
||||
@ -3460,13 +3584,20 @@ void e1000e_gig_downshift_workaround_ich8lan(struct e1000_hw *hw)
|
||||
void e1000e_disable_gig_wol_ich8lan(struct e1000_hw *hw)
|
||||
{
|
||||
u32 phy_ctrl;
|
||||
s32 ret_val;
|
||||
|
||||
phy_ctrl = er32(PHY_CTRL);
|
||||
phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU | E1000_PHY_CTRL_GBE_DISABLE;
|
||||
ew32(PHY_CTRL, phy_ctrl);
|
||||
|
||||
if (hw->mac.type >= e1000_pchlan)
|
||||
e1000_phy_hw_reset_ich8lan(hw);
|
||||
if (hw->mac.type >= e1000_pchlan) {
|
||||
e1000_oem_bits_config_ich8lan(hw, true);
|
||||
ret_val = hw->phy.ops.acquire(hw);
|
||||
if (ret_val)
|
||||
return;
|
||||
e1000_write_smbus_addr(hw);
|
||||
hw->phy.ops.release(hw);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -2705,6 +2705,16 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
|
||||
u32 psrctl = 0;
|
||||
u32 pages = 0;
|
||||
|
||||
/* Workaround Si errata on 82579 - configure jumbo frame flow */
|
||||
if (hw->mac.type == e1000_pch2lan) {
|
||||
s32 ret_val;
|
||||
|
||||
if (adapter->netdev->mtu > ETH_DATA_LEN)
|
||||
ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, true);
|
||||
else
|
||||
ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, false);
|
||||
}
|
||||
|
||||
/* Program MC offset vector base */
|
||||
rctl = er32(RCTL);
|
||||
rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
|
||||
@ -2745,16 +2755,6 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
|
||||
e1e_wphy(hw, 22, phy_data);
|
||||
}
|
||||
|
||||
/* Workaround Si errata on 82579 - configure jumbo frame flow */
|
||||
if (hw->mac.type == e1000_pch2lan) {
|
||||
s32 ret_val;
|
||||
|
||||
if (rctl & E1000_RCTL_LPE)
|
||||
ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, true);
|
||||
else
|
||||
ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, false);
|
||||
}
|
||||
|
||||
/* Setup buffer sizes */
|
||||
rctl &= ~E1000_RCTL_SZ_4096;
|
||||
rctl |= E1000_RCTL_BSEX;
|
||||
@ -4813,6 +4813,15 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/* Jumbo frame workaround on 82579 requires CRC be stripped */
|
||||
if ((adapter->hw.mac.type == e1000_pch2lan) &&
|
||||
!(adapter->flags2 & FLAG2_CRC_STRIPPING) &&
|
||||
(new_mtu > ETH_DATA_LEN)) {
|
||||
e_err("Jumbo Frames not supported on 82579 when CRC "
|
||||
"stripping is disabled.\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/* 82573 Errata 17 */
|
||||
if (((adapter->hw.mac.type == e1000_82573) ||
|
||||
(adapter->hw.mac.type == e1000_82574)) &&
|
||||
|
@ -555,6 +555,8 @@ static int eql_g_master_cfg(struct net_device *dev, master_config_t __user *mcp)
|
||||
equalizer_t *eql;
|
||||
master_config_t mc;
|
||||
|
||||
memset(&mc, 0, sizeof(master_config_t));
|
||||
|
||||
if (eql_is_master(dev)) {
|
||||
eql = netdev_priv(dev);
|
||||
mc.max_slaves = eql->max_slaves;
|
||||
|
@ -2928,7 +2928,7 @@ static int __devinit emac_probe(struct platform_device *ofdev,
|
||||
if (dev->emac_irq != NO_IRQ)
|
||||
irq_dispose_mapping(dev->emac_irq);
|
||||
err_free:
|
||||
kfree(ndev);
|
||||
free_netdev(ndev);
|
||||
err_gone:
|
||||
/* if we were on the bootlist, remove us as we won't show up and
|
||||
* wake up all waiters to notify them in case they were waiting
|
||||
@ -2971,7 +2971,7 @@ static int __devexit emac_remove(struct platform_device *ofdev)
|
||||
if (dev->emac_irq != NO_IRQ)
|
||||
irq_dispose_mapping(dev->emac_irq);
|
||||
|
||||
kfree(dev->ndev);
|
||||
free_netdev(dev->ndev);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -1540,7 +1540,6 @@ netxen_process_rcv(struct netxen_adapter *adapter,
|
||||
if (pkt_offset)
|
||||
skb_pull(skb, pkt_offset);
|
||||
|
||||
skb->truesize = skb->len + sizeof(struct sk_buff);
|
||||
skb->protocol = eth_type_trans(skb, netdev);
|
||||
|
||||
napi_gro_receive(&sds_ring->napi, skb);
|
||||
@ -1602,8 +1601,6 @@ netxen_process_lro(struct netxen_adapter *adapter,
|
||||
|
||||
skb_put(skb, lro_length + data_offset);
|
||||
|
||||
skb->truesize = skb->len + sizeof(struct sk_buff) + skb_headroom(skb);
|
||||
|
||||
skb_pull(skb, l2_hdr_offset);
|
||||
skb->protocol = eth_type_trans(skb, netdev);
|
||||
|
||||
|
@ -308,7 +308,7 @@ static int mdio_bus_suspend(struct device *dev)
|
||||
* may call phy routines that try to grab the same lock, and that may
|
||||
* lead to a deadlock.
|
||||
*/
|
||||
if (phydev->attached_dev)
|
||||
if (phydev->attached_dev && phydev->adjust_link)
|
||||
phy_stop_machine(phydev);
|
||||
|
||||
if (!mdio_bus_phy_may_suspend(phydev))
|
||||
@ -331,7 +331,7 @@ static int mdio_bus_resume(struct device *dev)
|
||||
return ret;
|
||||
|
||||
no_resume:
|
||||
if (phydev->attached_dev)
|
||||
if (phydev->attached_dev && phydev->adjust_link)
|
||||
phy_start_machine(phydev, NULL);
|
||||
|
||||
return 0;
|
||||
|
@ -1314,8 +1314,13 @@ static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb)
|
||||
hdrlen = (ppp->flags & SC_MP_XSHORTSEQ)? MPHDRLEN_SSN: MPHDRLEN;
|
||||
i = 0;
|
||||
list_for_each_entry(pch, &ppp->channels, clist) {
|
||||
navail += pch->avail = (pch->chan != NULL);
|
||||
pch->speed = pch->chan->speed;
|
||||
if (pch->chan) {
|
||||
pch->avail = 1;
|
||||
navail++;
|
||||
pch->speed = pch->chan->speed;
|
||||
} else {
|
||||
pch->avail = 0;
|
||||
}
|
||||
if (pch->avail) {
|
||||
if (skb_queue_empty(&pch->file.xq) ||
|
||||
!pch->had_frag) {
|
||||
|
@ -1303,7 +1303,7 @@ qlcnic_alloc_rx_skb(struct qlcnic_adapter *adapter,
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
skb_reserve(skb, 2);
|
||||
skb_reserve(skb, NET_IP_ALIGN);
|
||||
|
||||
dma = pci_map_single(pdev, skb->data,
|
||||
rds_ring->dma_size, PCI_DMA_FROMDEVICE);
|
||||
|
@ -2939,7 +2939,7 @@ static const struct rtl_cfg_info {
|
||||
.hw_start = rtl_hw_start_8168,
|
||||
.region = 2,
|
||||
.align = 8,
|
||||
.intr_event = SYSErr | LinkChg | RxOverflow |
|
||||
.intr_event = SYSErr | RxFIFOOver | LinkChg | RxOverflow |
|
||||
TxErr | TxOK | RxOK | RxErr,
|
||||
.napi_event = TxErr | TxOK | RxOK | RxOverflow,
|
||||
.features = RTL_FEATURE_GMII | RTL_FEATURE_MSI,
|
||||
@ -4629,8 +4629,7 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
|
||||
}
|
||||
|
||||
/* Work around for rx fifo overflow */
|
||||
if (unlikely(status & RxFIFOOver) &&
|
||||
(tp->mac_version == RTL_GIGA_MAC_VER_11)) {
|
||||
if (unlikely(status & RxFIFOOver)) {
|
||||
netif_stop_queue(dev);
|
||||
rtl8169_tx_timeout(dev);
|
||||
break;
|
||||
|
@ -384,7 +384,7 @@ static void rionet_remove(struct rio_dev *rdev)
|
||||
free_pages((unsigned long)rionet_active, rdev->net->hport->sys_size ?
|
||||
__ilog2(sizeof(void *)) + 4 : 0);
|
||||
unregister_netdev(ndev);
|
||||
kfree(ndev);
|
||||
free_netdev(ndev);
|
||||
|
||||
list_for_each_entry_safe(peer, tmp, &rionet_peers, node) {
|
||||
list_del(&peer->node);
|
||||
|
@ -804,7 +804,7 @@ static int __devinit sgiseeq_probe(struct platform_device *pdev)
|
||||
err_out_free_page:
|
||||
free_page((unsigned long) sp->srings);
|
||||
err_out_free_dev:
|
||||
kfree(dev);
|
||||
free_netdev(dev);
|
||||
|
||||
err_out:
|
||||
return err;
|
||||
|
@ -58,6 +58,7 @@
|
||||
|
||||
MODULE_LICENSE("GPL");
|
||||
MODULE_VERSION(SMSC_DRV_VERSION);
|
||||
MODULE_ALIAS("platform:smsc911x");
|
||||
|
||||
#if USE_DEBUG > 0
|
||||
static int debug = 16;
|
||||
|
@ -243,6 +243,7 @@ enum {
|
||||
NWayState = (1 << 14) | (1 << 13) | (1 << 12),
|
||||
NWayRestart = (1 << 12),
|
||||
NonselPortActive = (1 << 9),
|
||||
SelPortActive = (1 << 8),
|
||||
LinkFailStatus = (1 << 2),
|
||||
NetCxnErr = (1 << 1),
|
||||
};
|
||||
@ -364,6 +365,8 @@ static u16 t21040_csr15[] = { 0, 0, 0x0006, 0x0000, 0x0000, };
|
||||
/* 21041 transceiver register settings: TP AUTO, BNC, AUI, TP, TP FD*/
|
||||
static u16 t21041_csr13[] = { 0xEF01, 0xEF09, 0xEF09, 0xEF01, 0xEF09, };
|
||||
static u16 t21041_csr14[] = { 0xFFFF, 0xF7FD, 0xF7FD, 0x6F3F, 0x6F3D, };
|
||||
/* If on-chip autonegotiation is broken, use half-duplex (FF3F) instead */
|
||||
static u16 t21041_csr14_brk[] = { 0xFF3F, 0xF7FD, 0xF7FD, 0x6F3F, 0x6F3D, };
|
||||
static u16 t21041_csr15[] = { 0x0008, 0x0006, 0x000E, 0x0008, 0x0008, };
|
||||
|
||||
|
||||
@ -1064,6 +1067,9 @@ static void de21041_media_timer (unsigned long data)
|
||||
unsigned int carrier;
|
||||
unsigned long flags;
|
||||
|
||||
/* clear port active bits */
|
||||
dw32(SIAStatus, NonselPortActive | SelPortActive);
|
||||
|
||||
carrier = (status & NetCxnErr) ? 0 : 1;
|
||||
|
||||
if (carrier) {
|
||||
@ -1158,14 +1164,29 @@ no_link_yet:
|
||||
static void de_media_interrupt (struct de_private *de, u32 status)
|
||||
{
|
||||
if (status & LinkPass) {
|
||||
/* Ignore if current media is AUI or BNC and we can't use TP */
|
||||
if ((de->media_type == DE_MEDIA_AUI ||
|
||||
de->media_type == DE_MEDIA_BNC) &&
|
||||
(de->media_lock ||
|
||||
!de_ok_to_advertise(de, DE_MEDIA_TP_AUTO)))
|
||||
return;
|
||||
/* If current media is not TP, change it to TP */
|
||||
if ((de->media_type == DE_MEDIA_AUI ||
|
||||
de->media_type == DE_MEDIA_BNC)) {
|
||||
de->media_type = DE_MEDIA_TP_AUTO;
|
||||
de_stop_rxtx(de);
|
||||
de_set_media(de);
|
||||
de_start_rxtx(de);
|
||||
}
|
||||
de_link_up(de);
|
||||
mod_timer(&de->media_timer, jiffies + DE_TIMER_LINK);
|
||||
return;
|
||||
}
|
||||
|
||||
BUG_ON(!(status & LinkFail));
|
||||
|
||||
if (netif_carrier_ok(de->dev)) {
|
||||
/* Mark the link as down only if current media is TP */
|
||||
if (netif_carrier_ok(de->dev) && de->media_type != DE_MEDIA_AUI &&
|
||||
de->media_type != DE_MEDIA_BNC) {
|
||||
de_link_down(de);
|
||||
mod_timer(&de->media_timer, jiffies + DE_TIMER_NO_LINK);
|
||||
}
|
||||
@ -1229,6 +1250,7 @@ static void de_adapter_sleep (struct de_private *de)
|
||||
if (de->de21040)
|
||||
return;
|
||||
|
||||
dw32(CSR13, 0); /* Reset phy */
|
||||
pci_read_config_dword(de->pdev, PCIPM, &pmctl);
|
||||
pmctl |= PM_Sleep;
|
||||
pci_write_config_dword(de->pdev, PCIPM, pmctl);
|
||||
@ -1911,8 +1933,14 @@ fill_defaults:
|
||||
for (i = 0; i < DE_MAX_MEDIA; i++) {
|
||||
if (de->media[i].csr13 == 0xffff)
|
||||
de->media[i].csr13 = t21041_csr13[i];
|
||||
if (de->media[i].csr14 == 0xffff)
|
||||
de->media[i].csr14 = t21041_csr14[i];
|
||||
if (de->media[i].csr14 == 0xffff) {
|
||||
/* autonegotiation is broken at least on some chip
|
||||
revisions - rev. 0x21 works, 0x11 does not */
|
||||
if (de->pdev->revision < 0x20)
|
||||
de->media[i].csr14 = t21041_csr14_brk[i];
|
||||
else
|
||||
de->media[i].csr14 = t21041_csr14[i];
|
||||
}
|
||||
if (de->media[i].csr15 == 0xffff)
|
||||
de->media[i].csr15 = t21041_csr15[i];
|
||||
}
|
||||
@ -2158,6 +2186,8 @@ static int de_resume (struct pci_dev *pdev)
|
||||
dev_err(&dev->dev, "pci_enable_device failed in resume\n");
|
||||
goto out;
|
||||
}
|
||||
pci_set_master(pdev);
|
||||
de_init_rings(de);
|
||||
de_init_hw(de);
|
||||
out_attach:
|
||||
netif_device_attach(dev);
|
||||
|
@ -1643,6 +1643,8 @@ static int hso_get_count(struct hso_serial *serial,
|
||||
struct uart_icount cnow;
|
||||
struct hso_tiocmget *tiocmget = serial->tiocmget;
|
||||
|
||||
memset(&icount, 0, sizeof(struct serial_icounter_struct));
|
||||
|
||||
if (!tiocmget)
|
||||
return -ENOENT;
|
||||
spin_lock_irq(&serial->serial_lock);
|
||||
|
@ -2736,6 +2736,11 @@ int iwl_force_reset(struct iwl_priv *priv, int mode, bool external)
|
||||
if (test_bit(STATUS_EXIT_PENDING, &priv->status))
|
||||
return -EINVAL;
|
||||
|
||||
if (test_bit(STATUS_SCANNING, &priv->status)) {
|
||||
IWL_DEBUG_INFO(priv, "scan in progress.\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (mode >= IWL_MAX_FORCE_RESET) {
|
||||
IWL_DEBUG_INFO(priv, "invalid reset request.\n");
|
||||
return -EINVAL;
|
||||
|
@ -1154,7 +1154,7 @@ static struct net_device *ctcm_init_netdevice(struct ctcm_priv *priv)
|
||||
dev_fsm, dev_fsm_len, GFP_KERNEL);
|
||||
if (priv->fsm == NULL) {
|
||||
CTCMY_DBF_DEV(SETUP, dev, "init_fsm error");
|
||||
kfree(dev);
|
||||
free_netdev(dev);
|
||||
return NULL;
|
||||
}
|
||||
fsm_newstate(priv->fsm, DEV_STATE_STOPPED);
|
||||
@ -1165,7 +1165,7 @@ static struct net_device *ctcm_init_netdevice(struct ctcm_priv *priv)
|
||||
grp = ctcmpc_init_mpc_group(priv);
|
||||
if (grp == NULL) {
|
||||
MPC_DBF_DEV(SETUP, dev, "init_mpc_group error");
|
||||
kfree(dev);
|
||||
free_netdev(dev);
|
||||
return NULL;
|
||||
}
|
||||
tasklet_init(&grp->mpc_tasklet2,
|
||||
|
@ -243,7 +243,7 @@ static int get_rx_bufs(struct vhost_virtqueue *vq,
|
||||
int r, nlogs = 0;
|
||||
|
||||
while (datalen > 0) {
|
||||
if (unlikely(headcount >= VHOST_NET_MAX_SG)) {
|
||||
if (unlikely(seg >= VHOST_NET_MAX_SG)) {
|
||||
r = -ENOBUFS;
|
||||
goto err;
|
||||
}
|
||||
|
@ -27,8 +27,6 @@
|
||||
|
||||
#define MAX_LINKS 32
|
||||
|
||||
struct net;
|
||||
|
||||
struct sockaddr_nl {
|
||||
sa_family_t nl_family; /* AF_NETLINK */
|
||||
unsigned short nl_pad; /* zero */
|
||||
@ -151,6 +149,8 @@ struct nlattr {
|
||||
#include <linux/capability.h>
|
||||
#include <linux/skbuff.h>
|
||||
|
||||
struct net;
|
||||
|
||||
static inline struct nlmsghdr *nlmsg_hdr(const struct sk_buff *skb)
|
||||
{
|
||||
return (struct nlmsghdr *)skb->data;
|
||||
|
@ -63,20 +63,20 @@ static inline bool netpoll_rx(struct sk_buff *skb)
|
||||
unsigned long flags;
|
||||
bool ret = false;
|
||||
|
||||
rcu_read_lock_bh();
|
||||
local_irq_save(flags);
|
||||
npinfo = rcu_dereference_bh(skb->dev->npinfo);
|
||||
|
||||
if (!npinfo || (list_empty(&npinfo->rx_np) && !npinfo->rx_flags))
|
||||
goto out;
|
||||
|
||||
spin_lock_irqsave(&npinfo->rx_lock, flags);
|
||||
spin_lock(&npinfo->rx_lock);
|
||||
/* check rx_flags again with the lock held */
|
||||
if (npinfo->rx_flags && __netpoll_rx(skb))
|
||||
ret = true;
|
||||
spin_unlock_irqrestore(&npinfo->rx_lock, flags);
|
||||
spin_unlock(&npinfo->rx_lock);
|
||||
|
||||
out:
|
||||
rcu_read_unlock_bh();
|
||||
local_irq_restore(flags);
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
@ -121,6 +121,7 @@ static inline int addrconf_finite_timeout(unsigned long timeout)
|
||||
* IPv6 Address Label subsystem (addrlabel.c)
|
||||
*/
|
||||
extern int ipv6_addr_label_init(void);
|
||||
extern void ipv6_addr_label_cleanup(void);
|
||||
extern void ipv6_addr_label_rtnl_register(void);
|
||||
extern u32 ipv6_addr_label(struct net *net,
|
||||
const struct in6_addr *addr,
|
||||
|
@ -242,6 +242,7 @@ static inline void skb_tunnel_rx(struct sk_buff *skb, struct net_device *dev)
|
||||
dev->stats.rx_packets++;
|
||||
dev->stats.rx_bytes += skb->len;
|
||||
skb->rxhash = 0;
|
||||
skb_set_queue_mapping(skb, 0);
|
||||
skb_dst_drop(skb);
|
||||
nf_reset(skb);
|
||||
}
|
||||
|
@ -475,8 +475,22 @@ extern unsigned int tcp_current_mss(struct sock *sk);
|
||||
/* Bound MSS / TSO packet size with the half of the window */
|
||||
static inline int tcp_bound_to_half_wnd(struct tcp_sock *tp, int pktsize)
|
||||
{
|
||||
if (tp->max_window && pktsize > (tp->max_window >> 1))
|
||||
return max(tp->max_window >> 1, 68U - tp->tcp_header_len);
|
||||
int cutoff;
|
||||
|
||||
/* When peer uses tiny windows, there is no use in packetizing
|
||||
* to sub-MSS pieces for the sake of SWS or making sure there
|
||||
* are enough packets in the pipe for fast recovery.
|
||||
*
|
||||
* On the other hand, for extremely large MSS devices, handling
|
||||
* smaller than MSS windows in this way does make sense.
|
||||
*/
|
||||
if (tp->max_window >= 512)
|
||||
cutoff = (tp->max_window >> 1);
|
||||
else
|
||||
cutoff = tp->max_window;
|
||||
|
||||
if (cutoff && pktsize > cutoff)
|
||||
return max_t(int, cutoff, 68U - tp->tcp_header_len);
|
||||
else
|
||||
return pktsize;
|
||||
}
|
||||
|
@ -298,8 +298,8 @@ struct xfrm_state_afinfo {
|
||||
const struct xfrm_type *type_map[IPPROTO_MAX];
|
||||
struct xfrm_mode *mode_map[XFRM_MODE_MAX];
|
||||
int (*init_flags)(struct xfrm_state *x);
|
||||
void (*init_tempsel)(struct xfrm_state *x, struct flowi *fl,
|
||||
struct xfrm_tmpl *tmpl,
|
||||
void (*init_tempsel)(struct xfrm_selector *sel, struct flowi *fl);
|
||||
void (*init_temprop)(struct xfrm_state *x, struct xfrm_tmpl *tmpl,
|
||||
xfrm_address_t *daddr, xfrm_address_t *saddr);
|
||||
int (*tmpl_sort)(struct xfrm_tmpl **dst, struct xfrm_tmpl **src, int n);
|
||||
int (*state_sort)(struct xfrm_state **dst, struct xfrm_state **src, int n);
|
||||
|
@ -217,7 +217,7 @@ source "net/dns_resolver/Kconfig"
|
||||
|
||||
config RPS
|
||||
boolean
|
||||
depends on SMP && SYSFS
|
||||
depends on SMP && SYSFS && USE_GENERIC_SMP_HELPERS
|
||||
default y
|
||||
|
||||
menu "Network testing"
|
||||
|
@ -399,12 +399,6 @@ static void br2684_push(struct atm_vcc *atmvcc, struct sk_buff *skb)
|
||||
unregister_netdev(net_dev);
|
||||
free_netdev(net_dev);
|
||||
}
|
||||
read_lock_irq(&devs_lock);
|
||||
if (list_empty(&br2684_devs)) {
|
||||
/* last br2684 device */
|
||||
unregister_atmdevice_notifier(&atm_dev_notifier);
|
||||
}
|
||||
read_unlock_irq(&devs_lock);
|
||||
return;
|
||||
}
|
||||
|
||||
@ -675,7 +669,6 @@ static int br2684_create(void __user *arg)
|
||||
|
||||
if (list_empty(&br2684_devs)) {
|
||||
/* 1st br2684 device */
|
||||
register_atmdevice_notifier(&atm_dev_notifier);
|
||||
brdev->number = 1;
|
||||
} else
|
||||
brdev->number = BRPRIV(list_entry_brdev(br2684_devs.prev))->number + 1;
|
||||
@ -815,6 +808,7 @@ static int __init br2684_init(void)
|
||||
return -ENOMEM;
|
||||
#endif
|
||||
register_atm_ioctl(&br2684_ioctl_ops);
|
||||
register_atmdevice_notifier(&atm_dev_notifier);
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -830,9 +824,7 @@ static void __exit br2684_exit(void)
|
||||
#endif
|
||||
|
||||
|
||||
/* if not already empty */
|
||||
if (!list_empty(&br2684_devs))
|
||||
unregister_atmdevice_notifier(&atm_dev_notifier);
|
||||
unregister_atmdevice_notifier(&atm_dev_notifier);
|
||||
|
||||
while (!list_empty(&br2684_devs)) {
|
||||
net_dev = list_entry_brdev(br2684_devs.next);
|
||||
|
@ -4868,7 +4868,7 @@ static void rollback_registered_many(struct list_head *head)
|
||||
dev = list_first_entry(head, struct net_device, unreg_list);
|
||||
call_netdevice_notifiers(NETDEV_UNREGISTER_BATCH, dev);
|
||||
|
||||
synchronize_net();
|
||||
rcu_barrier();
|
||||
|
||||
list_for_each_entry(dev, head, unreg_list)
|
||||
dev_put(dev);
|
||||
|
@ -1351,9 +1351,9 @@ int sock_i_uid(struct sock *sk)
|
||||
{
|
||||
int uid;
|
||||
|
||||
read_lock(&sk->sk_callback_lock);
|
||||
read_lock_bh(&sk->sk_callback_lock);
|
||||
uid = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_uid : 0;
|
||||
read_unlock(&sk->sk_callback_lock);
|
||||
read_unlock_bh(&sk->sk_callback_lock);
|
||||
return uid;
|
||||
}
|
||||
EXPORT_SYMBOL(sock_i_uid);
|
||||
@ -1362,9 +1362,9 @@ unsigned long sock_i_ino(struct sock *sk)
|
||||
{
|
||||
unsigned long ino;
|
||||
|
||||
read_lock(&sk->sk_callback_lock);
|
||||
read_lock_bh(&sk->sk_callback_lock);
|
||||
ino = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_ino : 0;
|
||||
read_unlock(&sk->sk_callback_lock);
|
||||
read_unlock_bh(&sk->sk_callback_lock);
|
||||
return ino;
|
||||
}
|
||||
EXPORT_SYMBOL(sock_i_ino);
|
||||
|
@ -834,7 +834,7 @@ static void igmp_heard_query(struct in_device *in_dev, struct sk_buff *skb,
|
||||
int mark = 0;
|
||||
|
||||
|
||||
if (len == 8) {
|
||||
if (len == 8 || IGMP_V2_SEEN(in_dev)) {
|
||||
if (ih->code == 0) {
|
||||
/* Alas, old v1 router presents here. */
|
||||
|
||||
|
@ -46,7 +46,7 @@
|
||||
#include <net/rtnetlink.h>
|
||||
#include <net/gre.h>
|
||||
|
||||
#ifdef CONFIG_IPV6
|
||||
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
|
||||
#include <net/ipv6.h>
|
||||
#include <net/ip6_fib.h>
|
||||
#include <net/ip6_route.h>
|
||||
@ -703,7 +703,7 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
|
||||
if ((dst = rt->rt_gateway) == 0)
|
||||
goto tx_error_icmp;
|
||||
}
|
||||
#ifdef CONFIG_IPV6
|
||||
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
|
||||
else if (skb->protocol == htons(ETH_P_IPV6)) {
|
||||
struct in6_addr *addr6;
|
||||
int addr_type;
|
||||
@ -778,7 +778,7 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
|
||||
goto tx_error;
|
||||
}
|
||||
}
|
||||
#ifdef CONFIG_IPV6
|
||||
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
|
||||
else if (skb->protocol == htons(ETH_P_IPV6)) {
|
||||
struct rt6_info *rt6 = (struct rt6_info *)skb_dst(skb);
|
||||
|
||||
@ -854,7 +854,7 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
|
||||
if ((iph->ttl = tiph->ttl) == 0) {
|
||||
if (skb->protocol == htons(ETH_P_IP))
|
||||
iph->ttl = old_iph->ttl;
|
||||
#ifdef CONFIG_IPV6
|
||||
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
|
||||
else if (skb->protocol == htons(ETH_P_IPV6))
|
||||
iph->ttl = ((struct ipv6hdr *)old_iph)->hop_limit;
|
||||
#endif
|
||||
|
@ -488,9 +488,8 @@ int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
|
||||
* we can switch to copy when see the first bad fragment.
|
||||
*/
|
||||
if (skb_has_frag_list(skb)) {
|
||||
struct sk_buff *frag;
|
||||
struct sk_buff *frag, *frag2;
|
||||
int first_len = skb_pagelen(skb);
|
||||
int truesizes = 0;
|
||||
|
||||
if (first_len - hlen > mtu ||
|
||||
((first_len - hlen) & 7) ||
|
||||
@ -503,18 +502,18 @@ int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
|
||||
if (frag->len > mtu ||
|
||||
((frag->len & 7) && frag->next) ||
|
||||
skb_headroom(frag) < hlen)
|
||||
goto slow_path;
|
||||
goto slow_path_clean;
|
||||
|
||||
/* Partially cloned skb? */
|
||||
if (skb_shared(frag))
|
||||
goto slow_path;
|
||||
goto slow_path_clean;
|
||||
|
||||
BUG_ON(frag->sk);
|
||||
if (skb->sk) {
|
||||
frag->sk = skb->sk;
|
||||
frag->destructor = sock_wfree;
|
||||
}
|
||||
truesizes += frag->truesize;
|
||||
skb->truesize -= frag->truesize;
|
||||
}
|
||||
|
||||
/* Everything is OK. Generate! */
|
||||
@ -524,7 +523,6 @@ int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
|
||||
frag = skb_shinfo(skb)->frag_list;
|
||||
skb_frag_list_init(skb);
|
||||
skb->data_len = first_len - skb_headlen(skb);
|
||||
skb->truesize -= truesizes;
|
||||
skb->len = first_len;
|
||||
iph->tot_len = htons(first_len);
|
||||
iph->frag_off = htons(IP_MF);
|
||||
@ -576,6 +574,15 @@ int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
|
||||
}
|
||||
IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGFAILS);
|
||||
return err;
|
||||
|
||||
slow_path_clean:
|
||||
skb_walk_frags(skb, frag2) {
|
||||
if (frag2 == frag)
|
||||
break;
|
||||
frag2->sk = NULL;
|
||||
frag2->destructor = NULL;
|
||||
skb->truesize += frag2->truesize;
|
||||
}
|
||||
}
|
||||
|
||||
slow_path:
|
||||
|
@ -1129,6 +1129,9 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
|
||||
case IP_HDRINCL:
|
||||
val = inet->hdrincl;
|
||||
break;
|
||||
case IP_NODEFRAG:
|
||||
val = inet->nodefrag;
|
||||
break;
|
||||
case IP_MTU_DISCOVER:
|
||||
val = inet->pmtudisc;
|
||||
break;
|
||||
|
@ -112,6 +112,7 @@ static void send_reset(struct sk_buff *oldskb, int hook)
|
||||
/* ip_route_me_harder expects skb->dst to be set */
|
||||
skb_dst_set_noref(nskb, skb_dst(oldskb));
|
||||
|
||||
nskb->protocol = htons(ETH_P_IP);
|
||||
if (ip_route_me_harder(nskb, addr_type))
|
||||
goto free_nskb;
|
||||
|
||||
|
@ -66,9 +66,11 @@ static unsigned int ipv4_conntrack_defrag(unsigned int hooknum,
|
||||
const struct net_device *out,
|
||||
int (*okfn)(struct sk_buff *))
|
||||
{
|
||||
struct sock *sk = skb->sk;
|
||||
struct inet_sock *inet = inet_sk(skb->sk);
|
||||
|
||||
if (inet && inet->nodefrag)
|
||||
if (sk && (sk->sk_family == PF_INET) &&
|
||||
inet->nodefrag)
|
||||
return NF_ACCEPT;
|
||||
|
||||
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
|
||||
|
@ -893,13 +893,15 @@ static void fast_csum(__sum16 *csum,
|
||||
unsigned char s[4];
|
||||
|
||||
if (offset & 1) {
|
||||
s[0] = s[2] = 0;
|
||||
s[0] = ~0;
|
||||
s[1] = ~*optr;
|
||||
s[2] = 0;
|
||||
s[3] = *nptr;
|
||||
} else {
|
||||
s[1] = s[3] = 0;
|
||||
s[0] = ~*optr;
|
||||
s[1] = ~0;
|
||||
s[2] = *nptr;
|
||||
s[3] = 0;
|
||||
}
|
||||
|
||||
*csum = csum_fold(csum_partial(s, 4, ~csum_unfold(*csum)));
|
||||
|
@ -386,8 +386,6 @@ unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
|
||||
*/
|
||||
|
||||
mask = 0;
|
||||
if (sk->sk_err)
|
||||
mask = POLLERR;
|
||||
|
||||
/*
|
||||
* POLLHUP is certainly not done right. But poll() doesn't
|
||||
@ -457,6 +455,11 @@ unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
|
||||
if (tp->urg_data & TCP_URG_VALID)
|
||||
mask |= POLLPRI;
|
||||
}
|
||||
/* This barrier is coupled with smp_wmb() in tcp_reset() */
|
||||
smp_rmb();
|
||||
if (sk->sk_err)
|
||||
mask |= POLLERR;
|
||||
|
||||
return mask;
|
||||
}
|
||||
EXPORT_SYMBOL(tcp_poll);
|
||||
|
@ -4035,6 +4035,8 @@ static void tcp_reset(struct sock *sk)
|
||||
default:
|
||||
sk->sk_err = ECONNRESET;
|
||||
}
|
||||
/* This barrier is coupled with smp_rmb() in tcp_poll() */
|
||||
smp_wmb();
|
||||
|
||||
if (!sock_flag(sk, SOCK_DEAD))
|
||||
sk->sk_error_report(sk);
|
||||
|
@ -61,7 +61,7 @@ static int xfrm4_get_saddr(struct net *net,
|
||||
|
||||
static int xfrm4_get_tos(struct flowi *fl)
|
||||
{
|
||||
return fl->fl4_tos;
|
||||
return IPTOS_RT_MASK & fl->fl4_tos; /* Strip ECN bits */
|
||||
}
|
||||
|
||||
static int xfrm4_init_path(struct xfrm_dst *path, struct dst_entry *dst,
|
||||
|
@ -21,21 +21,25 @@ static int xfrm4_init_flags(struct xfrm_state *x)
|
||||
}
|
||||
|
||||
static void
|
||||
__xfrm4_init_tempsel(struct xfrm_state *x, struct flowi *fl,
|
||||
struct xfrm_tmpl *tmpl,
|
||||
xfrm_address_t *daddr, xfrm_address_t *saddr)
|
||||
__xfrm4_init_tempsel(struct xfrm_selector *sel, struct flowi *fl)
|
||||
{
|
||||
sel->daddr.a4 = fl->fl4_dst;
|
||||
sel->saddr.a4 = fl->fl4_src;
|
||||
sel->dport = xfrm_flowi_dport(fl);
|
||||
sel->dport_mask = htons(0xffff);
|
||||
sel->sport = xfrm_flowi_sport(fl);
|
||||
sel->sport_mask = htons(0xffff);
|
||||
sel->family = AF_INET;
|
||||
sel->prefixlen_d = 32;
|
||||
sel->prefixlen_s = 32;
|
||||
sel->proto = fl->proto;
|
||||
sel->ifindex = fl->oif;
|
||||
}
|
||||
|
||||
static void
|
||||
xfrm4_init_temprop(struct xfrm_state *x, struct xfrm_tmpl *tmpl,
|
||||
xfrm_address_t *daddr, xfrm_address_t *saddr)
|
||||
{
|
||||
x->sel.daddr.a4 = fl->fl4_dst;
|
||||
x->sel.saddr.a4 = fl->fl4_src;
|
||||
x->sel.dport = xfrm_flowi_dport(fl);
|
||||
x->sel.dport_mask = htons(0xffff);
|
||||
x->sel.sport = xfrm_flowi_sport(fl);
|
||||
x->sel.sport_mask = htons(0xffff);
|
||||
x->sel.family = AF_INET;
|
||||
x->sel.prefixlen_d = 32;
|
||||
x->sel.prefixlen_s = 32;
|
||||
x->sel.proto = fl->proto;
|
||||
x->sel.ifindex = fl->oif;
|
||||
x->id = tmpl->id;
|
||||
if (x->id.daddr.a4 == 0)
|
||||
x->id.daddr.a4 = daddr->a4;
|
||||
@ -70,6 +74,7 @@ static struct xfrm_state_afinfo xfrm4_state_afinfo = {
|
||||
.owner = THIS_MODULE,
|
||||
.init_flags = xfrm4_init_flags,
|
||||
.init_tempsel = __xfrm4_init_tempsel,
|
||||
.init_temprop = xfrm4_init_temprop,
|
||||
.output = xfrm4_output,
|
||||
.extract_input = xfrm4_extract_input,
|
||||
.extract_output = xfrm4_extract_output,
|
||||
|
@ -4638,10 +4638,12 @@ int __init addrconf_init(void)
|
||||
if (err < 0) {
|
||||
printk(KERN_CRIT "IPv6 Addrconf:"
|
||||
" cannot initialize default policy table: %d.\n", err);
|
||||
return err;
|
||||
goto out;
|
||||
}
|
||||
|
||||
register_pernet_subsys(&addrconf_ops);
|
||||
err = register_pernet_subsys(&addrconf_ops);
|
||||
if (err < 0)
|
||||
goto out_addrlabel;
|
||||
|
||||
/* The addrconf netdev notifier requires that loopback_dev
|
||||
* has it's ipv6 private information allocated and setup
|
||||
@ -4693,7 +4695,9 @@ errout:
|
||||
unregister_netdevice_notifier(&ipv6_dev_notf);
|
||||
errlo:
|
||||
unregister_pernet_subsys(&addrconf_ops);
|
||||
|
||||
out_addrlabel:
|
||||
ipv6_addr_label_cleanup();
|
||||
out:
|
||||
return err;
|
||||
}
|
||||
|
||||
@ -4704,6 +4708,7 @@ void addrconf_cleanup(void)
|
||||
|
||||
unregister_netdevice_notifier(&ipv6_dev_notf);
|
||||
unregister_pernet_subsys(&addrconf_ops);
|
||||
ipv6_addr_label_cleanup();
|
||||
|
||||
rtnl_lock();
|
||||
|
||||
|
@ -393,6 +393,11 @@ int __init ipv6_addr_label_init(void)
|
||||
return register_pernet_subsys(&ipv6_addr_label_ops);
|
||||
}
|
||||
|
||||
void ipv6_addr_label_cleanup(void)
|
||||
{
|
||||
unregister_pernet_subsys(&ipv6_addr_label_ops);
|
||||
}
|
||||
|
||||
static const struct nla_policy ifal_policy[IFAL_MAX+1] = {
|
||||
[IFAL_ADDRESS] = { .len = sizeof(struct in6_addr), },
|
||||
[IFAL_LABEL] = { .len = sizeof(u32), },
|
||||
|
@ -639,7 +639,7 @@ static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
|
||||
|
||||
if (skb_has_frag_list(skb)) {
|
||||
int first_len = skb_pagelen(skb);
|
||||
int truesizes = 0;
|
||||
struct sk_buff *frag2;
|
||||
|
||||
if (first_len - hlen > mtu ||
|
||||
((first_len - hlen) & 7) ||
|
||||
@ -651,18 +651,18 @@ static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
|
||||
if (frag->len > mtu ||
|
||||
((frag->len & 7) && frag->next) ||
|
||||
skb_headroom(frag) < hlen)
|
||||
goto slow_path;
|
||||
goto slow_path_clean;
|
||||
|
||||
/* Partially cloned skb? */
|
||||
if (skb_shared(frag))
|
||||
goto slow_path;
|
||||
goto slow_path_clean;
|
||||
|
||||
BUG_ON(frag->sk);
|
||||
if (skb->sk) {
|
||||
frag->sk = skb->sk;
|
||||
frag->destructor = sock_wfree;
|
||||
truesizes += frag->truesize;
|
||||
}
|
||||
skb->truesize -= frag->truesize;
|
||||
}
|
||||
|
||||
err = 0;
|
||||
@ -693,7 +693,6 @@ static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
|
||||
|
||||
first_len = skb_pagelen(skb);
|
||||
skb->data_len = first_len - skb_headlen(skb);
|
||||
skb->truesize -= truesizes;
|
||||
skb->len = first_len;
|
||||
ipv6_hdr(skb)->payload_len = htons(first_len -
|
||||
sizeof(struct ipv6hdr));
|
||||
@ -756,6 +755,15 @@ static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
|
||||
IPSTATS_MIB_FRAGFAILS);
|
||||
dst_release(&rt->dst);
|
||||
return err;
|
||||
|
||||
slow_path_clean:
|
||||
skb_walk_frags(skb, frag2) {
|
||||
if (frag2 == frag)
|
||||
break;
|
||||
frag2->sk = NULL;
|
||||
frag2->destructor = NULL;
|
||||
skb->truesize += frag2->truesize;
|
||||
}
|
||||
}
|
||||
|
||||
slow_path:
|
||||
|
@ -20,23 +20,27 @@
|
||||
#include <net/addrconf.h>
|
||||
|
||||
static void
|
||||
__xfrm6_init_tempsel(struct xfrm_state *x, struct flowi *fl,
|
||||
struct xfrm_tmpl *tmpl,
|
||||
xfrm_address_t *daddr, xfrm_address_t *saddr)
|
||||
__xfrm6_init_tempsel(struct xfrm_selector *sel, struct flowi *fl)
|
||||
{
|
||||
/* Initialize temporary selector matching only
|
||||
* to current session. */
|
||||
ipv6_addr_copy((struct in6_addr *)&x->sel.daddr, &fl->fl6_dst);
|
||||
ipv6_addr_copy((struct in6_addr *)&x->sel.saddr, &fl->fl6_src);
|
||||
x->sel.dport = xfrm_flowi_dport(fl);
|
||||
x->sel.dport_mask = htons(0xffff);
|
||||
x->sel.sport = xfrm_flowi_sport(fl);
|
||||
x->sel.sport_mask = htons(0xffff);
|
||||
x->sel.family = AF_INET6;
|
||||
x->sel.prefixlen_d = 128;
|
||||
x->sel.prefixlen_s = 128;
|
||||
x->sel.proto = fl->proto;
|
||||
x->sel.ifindex = fl->oif;
|
||||
ipv6_addr_copy((struct in6_addr *)&sel->daddr, &fl->fl6_dst);
|
||||
ipv6_addr_copy((struct in6_addr *)&sel->saddr, &fl->fl6_src);
|
||||
sel->dport = xfrm_flowi_dport(fl);
|
||||
sel->dport_mask = htons(0xffff);
|
||||
sel->sport = xfrm_flowi_sport(fl);
|
||||
sel->sport_mask = htons(0xffff);
|
||||
sel->family = AF_INET6;
|
||||
sel->prefixlen_d = 128;
|
||||
sel->prefixlen_s = 128;
|
||||
sel->proto = fl->proto;
|
||||
sel->ifindex = fl->oif;
|
||||
}
|
||||
|
||||
static void
|
||||
xfrm6_init_temprop(struct xfrm_state *x, struct xfrm_tmpl *tmpl,
|
||||
xfrm_address_t *daddr, xfrm_address_t *saddr)
|
||||
{
|
||||
x->id = tmpl->id;
|
||||
if (ipv6_addr_any((struct in6_addr*)&x->id.daddr))
|
||||
memcpy(&x->id.daddr, daddr, sizeof(x->sel.daddr));
|
||||
@ -168,6 +172,7 @@ static struct xfrm_state_afinfo xfrm6_state_afinfo = {
|
||||
.eth_proto = htons(ETH_P_IPV6),
|
||||
.owner = THIS_MODULE,
|
||||
.init_tempsel = __xfrm6_init_tempsel,
|
||||
.init_temprop = xfrm6_init_temprop,
|
||||
.tmpl_sort = __xfrm6_tmpl_sort,
|
||||
.state_sort = __xfrm6_state_sort,
|
||||
.output = xfrm6_output,
|
||||
|
@ -1024,7 +1024,8 @@ static int llc_ui_setsockopt(struct socket *sock, int level, int optname,
|
||||
{
|
||||
struct sock *sk = sock->sk;
|
||||
struct llc_sock *llc = llc_sk(sk);
|
||||
int rc = -EINVAL, opt;
|
||||
unsigned int opt;
|
||||
int rc = -EINVAL;
|
||||
|
||||
lock_sock(sk);
|
||||
if (unlikely(level != SOL_LLC || optlen != sizeof(int)))
|
||||
|
@ -689,7 +689,7 @@ static void llc_station_rcv(struct sk_buff *skb)
|
||||
|
||||
int __init llc_station_init(void)
|
||||
{
|
||||
u16 rc = -ENOBUFS;
|
||||
int rc = -ENOBUFS;
|
||||
struct sk_buff *skb;
|
||||
struct llc_station_state_ev *ev;
|
||||
|
||||
|
@ -48,15 +48,17 @@ nf_ct_ext_create(struct nf_ct_ext **ext, enum nf_ct_ext_id id, gfp_t gfp)
|
||||
{
|
||||
unsigned int off, len;
|
||||
struct nf_ct_ext_type *t;
|
||||
size_t alloc_size;
|
||||
|
||||
rcu_read_lock();
|
||||
t = rcu_dereference(nf_ct_ext_types[id]);
|
||||
BUG_ON(t == NULL);
|
||||
off = ALIGN(sizeof(struct nf_ct_ext), t->align);
|
||||
len = off + t->len;
|
||||
alloc_size = t->alloc_size;
|
||||
rcu_read_unlock();
|
||||
|
||||
*ext = kzalloc(t->alloc_size, gfp);
|
||||
*ext = kzalloc(alloc_size, gfp);
|
||||
if (!*ext)
|
||||
return NULL;
|
||||
|
||||
|
@ -1376,7 +1376,7 @@ static int sip_help_tcp(struct sk_buff *skb, unsigned int protoff,
|
||||
unsigned int msglen, origlen;
|
||||
const char *dptr, *end;
|
||||
s16 diff, tdiff = 0;
|
||||
int ret;
|
||||
int ret = NF_ACCEPT;
|
||||
typeof(nf_nat_sip_seq_adjust_hook) nf_nat_sip_seq_adjust;
|
||||
|
||||
if (ctinfo != IP_CT_ESTABLISHED &&
|
||||
|
@ -70,7 +70,11 @@ nf_tproxy_destructor(struct sk_buff *skb)
|
||||
int
|
||||
nf_tproxy_assign_sock(struct sk_buff *skb, struct sock *sk)
|
||||
{
|
||||
if (inet_sk(sk)->transparent) {
|
||||
bool transparent = (sk->sk_state == TCP_TIME_WAIT) ?
|
||||
inet_twsk(sk)->tw_transparent :
|
||||
inet_sk(sk)->transparent;
|
||||
|
||||
if (transparent) {
|
||||
skb_orphan(skb);
|
||||
skb->sk = sk;
|
||||
skb->destructor = nf_tproxy_destructor;
|
||||
|
@ -43,7 +43,7 @@ void rds_tcp_state_change(struct sock *sk)
|
||||
struct rds_connection *conn;
|
||||
struct rds_tcp_connection *tc;
|
||||
|
||||
read_lock(&sk->sk_callback_lock);
|
||||
read_lock_bh(&sk->sk_callback_lock);
|
||||
conn = sk->sk_user_data;
|
||||
if (!conn) {
|
||||
state_change = sk->sk_state_change;
|
||||
@ -68,7 +68,7 @@ void rds_tcp_state_change(struct sock *sk)
|
||||
break;
|
||||
}
|
||||
out:
|
||||
read_unlock(&sk->sk_callback_lock);
|
||||
read_unlock_bh(&sk->sk_callback_lock);
|
||||
state_change(sk);
|
||||
}
|
||||
|
||||
|
@ -114,7 +114,7 @@ void rds_tcp_listen_data_ready(struct sock *sk, int bytes)
|
||||
|
||||
rdsdebug("listen data ready sk %p\n", sk);
|
||||
|
||||
read_lock(&sk->sk_callback_lock);
|
||||
read_lock_bh(&sk->sk_callback_lock);
|
||||
ready = sk->sk_user_data;
|
||||
if (!ready) { /* check for teardown race */
|
||||
ready = sk->sk_data_ready;
|
||||
@ -131,7 +131,7 @@ void rds_tcp_listen_data_ready(struct sock *sk, int bytes)
|
||||
queue_work(rds_wq, &rds_tcp_listen_work);
|
||||
|
||||
out:
|
||||
read_unlock(&sk->sk_callback_lock);
|
||||
read_unlock_bh(&sk->sk_callback_lock);
|
||||
ready(sk, bytes);
|
||||
}
|
||||
|
||||
|
@@ -324,7 +324,7 @@ void rds_tcp_data_ready(struct sock *sk, int bytes)
 
 	rdsdebug("data ready sk %p bytes %d\n", sk, bytes);
 
-	read_lock(&sk->sk_callback_lock);
+	read_lock_bh(&sk->sk_callback_lock);
 	conn = sk->sk_user_data;
 	if (!conn) { /* check for teardown race */
 		ready = sk->sk_data_ready;
@@ -338,7 +338,7 @@ void rds_tcp_data_ready(struct sock *sk, int bytes)
 	if (rds_tcp_read_sock(conn, GFP_ATOMIC, KM_SOFTIRQ0) == -ENOMEM)
 		queue_delayed_work(rds_wq, &conn->c_recv_w, 0);
 out:
-	read_unlock(&sk->sk_callback_lock);
+	read_unlock_bh(&sk->sk_callback_lock);
 	ready(sk, bytes);
 }
 
@@ -174,7 +174,7 @@ void rds_tcp_write_space(struct sock *sk)
 	struct rds_connection *conn;
 	struct rds_tcp_connection *tc;
 
-	read_lock(&sk->sk_callback_lock);
+	read_lock_bh(&sk->sk_callback_lock);
 	conn = sk->sk_user_data;
 	if (!conn) {
 		write_space = sk->sk_write_space;
@@ -194,7 +194,7 @@ void rds_tcp_write_space(struct sock *sk)
 		queue_delayed_work(rds_wq, &conn->c_send_w, 0);
 
 out:
-	read_unlock(&sk->sk_callback_lock);
+	read_unlock_bh(&sk->sk_callback_lock);
 
 	/*
 	 * write_space is only called when data leaves tcp's send queue if
@@ -679,7 +679,7 @@ static int rose_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
 	if (addr_len == sizeof(struct sockaddr_rose) && addr->srose_ndigis > 1)
 		return -EINVAL;
 
-	if (addr->srose_ndigis > ROSE_MAX_DIGIS)
+	if ((unsigned int) addr->srose_ndigis > ROSE_MAX_DIGIS)
 		return -EINVAL;
 
 	if ((dev = rose_dev_get(&addr->srose_addr)) == NULL) {
@@ -739,7 +739,7 @@ static int rose_connect(struct socket *sock, struct sockaddr *uaddr, int addr_le
 	if (addr_len == sizeof(struct sockaddr_rose) && addr->srose_ndigis > 1)
 		return -EINVAL;
 
-	if (addr->srose_ndigis > ROSE_MAX_DIGIS)
+	if ((unsigned int) addr->srose_ndigis > ROSE_MAX_DIGIS)
 		return -EINVAL;
 
 	/* Source + Destination digis should not exceed ROSE_MAX_DIGIS */
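The casts above harden a range check: assuming srose_ndigis reaches this code as a signed integer supplied by userspace, a negative value compares as less than ROSE_MAX_DIGIS and slips through, while the same bit pattern treated as unsigned is rejected. Standalone demonstration:

#include <stdio.h>

#define ROSE_MAX_DIGIS 6

int main(void)
{
	int ndigis = -1;	/* bogus value coming in from userspace */

	printf("signed check rejects it:   %s\n",
	       ndigis > ROSE_MAX_DIGIS ? "yes" : "no");
	printf("unsigned check rejects it: %s\n",
	       (unsigned int)ndigis > ROSE_MAX_DIGIS ? "yes" : "no");
	return 0;
}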
@@ -255,10 +255,6 @@ static int atm_tc_change(struct Qdisc *sch, u32 classid, u32 parent,
 			error = -EINVAL;
 			goto err_out;
 		}
-		if (!list_empty(&flow->list)) {
-			error = -EEXIST;
-			goto err_out;
-		}
 	} else {
 		int i;
 		unsigned long cl;
@@ -94,7 +94,6 @@ struct sctp_packet *sctp_packet_config(struct sctp_packet *packet,
 	SCTP_DEBUG_PRINTK("%s: packet:%p vtag:0x%x\n", __func__,
 			  packet, vtag);
 
-	sctp_packet_reset(packet);
 	packet->vtag = vtag;
 
 	if (ecn_capable && sctp_packet_empty(packet)) {
@@ -800,7 +800,7 @@ static void xs_udp_data_ready(struct sock *sk, int len)
 	u32 _xid;
 	__be32 *xp;
 
-	read_lock(&sk->sk_callback_lock);
+	read_lock_bh(&sk->sk_callback_lock);
 	dprintk("RPC: xs_udp_data_ready...\n");
 	if (!(xprt = xprt_from_sock(sk)))
 		goto out;
@@ -852,7 +852,7 @@ static void xs_udp_data_ready(struct sock *sk, int len)
 dropit:
 	skb_free_datagram(sk, skb);
 out:
-	read_unlock(&sk->sk_callback_lock);
+	read_unlock_bh(&sk->sk_callback_lock);
 }
 
 static inline void xs_tcp_read_fraghdr(struct rpc_xprt *xprt, struct xdr_skb_reader *desc)
@@ -1229,7 +1229,7 @@ static void xs_tcp_data_ready(struct sock *sk, int bytes)
 
 	dprintk("RPC: xs_tcp_data_ready...\n");
 
-	read_lock(&sk->sk_callback_lock);
+	read_lock_bh(&sk->sk_callback_lock);
 	if (!(xprt = xprt_from_sock(sk)))
 		goto out;
 	if (xprt->shutdown)
@@ -1248,7 +1248,7 @@ static void xs_tcp_data_ready(struct sock *sk, int bytes)
 		read = tcp_read_sock(sk, &rd_desc, xs_tcp_data_recv);
 	} while (read > 0);
 out:
-	read_unlock(&sk->sk_callback_lock);
+	read_unlock_bh(&sk->sk_callback_lock);
 }
 
 /*
@@ -1301,7 +1301,7 @@ static void xs_tcp_state_change(struct sock *sk)
 {
 	struct rpc_xprt *xprt;
 
-	read_lock(&sk->sk_callback_lock);
+	read_lock_bh(&sk->sk_callback_lock);
 	if (!(xprt = xprt_from_sock(sk)))
 		goto out;
 	dprintk("RPC: xs_tcp_state_change client %p...\n", xprt);
@@ -1313,7 +1313,7 @@ static void xs_tcp_state_change(struct sock *sk)
 
 	switch (sk->sk_state) {
 	case TCP_ESTABLISHED:
-		spin_lock_bh(&xprt->transport_lock);
+		spin_lock(&xprt->transport_lock);
 		if (!xprt_test_and_set_connected(xprt)) {
 			struct sock_xprt *transport = container_of(xprt,
 					struct sock_xprt, xprt);
@@ -1327,7 +1327,7 @@ static void xs_tcp_state_change(struct sock *sk)
 
 			xprt_wake_pending_tasks(xprt, -EAGAIN);
 		}
-		spin_unlock_bh(&xprt->transport_lock);
+		spin_unlock(&xprt->transport_lock);
 		break;
 	case TCP_FIN_WAIT1:
 		/* The client initiated a shutdown of the socket */
@@ -1365,7 +1365,7 @@ static void xs_tcp_state_change(struct sock *sk)
 		xs_sock_mark_closed(xprt);
 	}
 out:
-	read_unlock(&sk->sk_callback_lock);
+	read_unlock_bh(&sk->sk_callback_lock);
 }
 
 /**
@@ -1376,7 +1376,7 @@ static void xs_error_report(struct sock *sk)
 {
 	struct rpc_xprt *xprt;
 
-	read_lock(&sk->sk_callback_lock);
+	read_lock_bh(&sk->sk_callback_lock);
 	if (!(xprt = xprt_from_sock(sk)))
 		goto out;
 	dprintk("RPC: %s client %p...\n"
@@ -1384,7 +1384,7 @@ static void xs_error_report(struct sock *sk)
 			__func__, xprt, sk->sk_err);
 	xprt_wake_pending_tasks(xprt, -EAGAIN);
 out:
-	read_unlock(&sk->sk_callback_lock);
+	read_unlock_bh(&sk->sk_callback_lock);
 }
 
 static void xs_write_space(struct sock *sk)
@@ -1416,13 +1416,13 @@ static void xs_write_space(struct sock *sk)
  */
 static void xs_udp_write_space(struct sock *sk)
 {
-	read_lock(&sk->sk_callback_lock);
+	read_lock_bh(&sk->sk_callback_lock);
 
 	/* from net/core/sock.c:sock_def_write_space */
 	if (sock_writeable(sk))
 		xs_write_space(sk);
 
-	read_unlock(&sk->sk_callback_lock);
+	read_unlock_bh(&sk->sk_callback_lock);
 }
 
 /**
@@ -1437,13 +1437,13 @@ static void xs_udp_write_space(struct sock *sk)
  */
 static void xs_tcp_write_space(struct sock *sk)
 {
-	read_lock(&sk->sk_callback_lock);
+	read_lock_bh(&sk->sk_callback_lock);
 
 	/* from net/core/stream.c:sk_stream_write_space */
 	if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk))
 		xs_write_space(sk);
 
-	read_unlock(&sk->sk_callback_lock);
+	read_unlock_bh(&sk->sk_callback_lock);
 }
 
 static void xs_udp_do_set_buffer_size(struct rpc_xprt *xprt)
@@ -152,7 +152,7 @@ static int ioctl_private_iw_point(struct iw_point *iwp, unsigned int cmd,
 	} else if (!iwp->pointer)
 		return -EFAULT;
 
-	extra = kmalloc(extra_size, GFP_KERNEL);
+	extra = kzalloc(extra_size, GFP_KERNEL);
 	if (!extra)
 		return -ENOMEM;
 
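kzalloc() above guarantees that whatever part of the buffer the handler does not overwrite is zero before it is copied back to userspace, closing a potential leak of stale kernel heap contents. A userspace analogue with calloc() standing in for kzalloc():

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	size_t extra_size = 32, used = 5;
	unsigned char *extra = calloc(1, extra_size);	/* kzalloc() analogue */
	size_t i;

	if (!extra)
		return 1;
	memcpy(extra, "reply", used);	/* the handler fills only part of it */

	/* everything past 'used' is guaranteed zero, not stale heap data */
	for (i = used; i < extra_size; i++)
		if (extra[i])
			puts("stale byte would have leaked");
	puts("unwritten tail is clean");
	free(extra);
	return 0;
}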
@@ -101,7 +101,7 @@ resume:
 			err = -EHOSTUNREACH;
 			goto error_nolock;
 		}
-		skb_dst_set_noref(skb, dst);
+		skb_dst_set(skb, dst_clone(dst));
 		x = dst->xfrm;
 	} while (x && !(x->outer_mode->flags & XFRM_MODE_FLAG_TUNNEL));
 
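skb_dst_set_noref() stores a borrowed pointer and is only safe while the caller's own dst reference is guaranteed to outlive the skb; since the packet can be queued past that point here, the hunk switches to taking a real reference with dst_clone() before skb_dst_set(). A generic refcounting sketch of the same rule (all names invented):

#include <stdio.h>

struct route {
	int refcnt;
};

static struct route *route_hold(struct route *rt)
{
	rt->refcnt++;
	return rt;
}

static void route_put(struct route *rt)
{
	if (--rt->refcnt == 0)
		printf("route freed\n");
}

struct packet {
	struct route *rt;
};

/* store a counted reference, not a borrowed pointer */
static void packet_set_route(struct packet *pkt, struct route *rt)
{
	pkt->rt = route_hold(rt);
}

int main(void)
{
	struct route rt = { .refcnt = 1 };
	struct packet pkt;

	packet_set_route(&pkt, &rt);
	route_put(&rt);		/* the caller drops its reference... */
	route_put(pkt.rt);	/* ...but the packet still holds a live one */
	return 0;
}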
@@ -1175,9 +1175,8 @@ xfrm_tmpl_resolve_one(struct xfrm_policy *policy, struct flowi *fl,
 		    tmpl->mode == XFRM_MODE_BEET) {
 			remote = &tmpl->id.daddr;
 			local = &tmpl->saddr;
-			family = tmpl->encap_family;
-			if (xfrm_addr_any(local, family)) {
-				error = xfrm_get_saddr(net, &tmp, remote, family);
+			if (xfrm_addr_any(local, tmpl->encap_family)) {
+				error = xfrm_get_saddr(net, &tmp, remote, tmpl->encap_family);
 				if (error)
 					goto fail;
 				local = &tmp;
@@ -656,15 +656,23 @@ void xfrm_sad_getinfo(struct net *net, struct xfrmk_sadinfo *si)
 EXPORT_SYMBOL(xfrm_sad_getinfo);
 
 static int
-xfrm_init_tempsel(struct xfrm_state *x, struct flowi *fl,
-		  struct xfrm_tmpl *tmpl,
-		  xfrm_address_t *daddr, xfrm_address_t *saddr,
-		  unsigned short family)
+xfrm_init_tempstate(struct xfrm_state *x, struct flowi *fl,
+		    struct xfrm_tmpl *tmpl,
+		    xfrm_address_t *daddr, xfrm_address_t *saddr,
+		    unsigned short family)
 {
 	struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
 	if (!afinfo)
 		return -1;
-	afinfo->init_tempsel(x, fl, tmpl, daddr, saddr);
+	afinfo->init_tempsel(&x->sel, fl);
+
+	if (family != tmpl->encap_family) {
+		xfrm_state_put_afinfo(afinfo);
+		afinfo = xfrm_state_get_afinfo(tmpl->encap_family);
+		if (!afinfo)
+			return -1;
+	}
+	afinfo->init_temprop(x, tmpl, daddr, saddr);
 	xfrm_state_put_afinfo(afinfo);
 	return 0;
 }
@@ -790,37 +798,38 @@ xfrm_state_find(xfrm_address_t *daddr, xfrm_address_t *saddr,
 	int error = 0;
 	struct xfrm_state *best = NULL;
 	u32 mark = pol->mark.v & pol->mark.m;
+	unsigned short encap_family = tmpl->encap_family;
 
 	to_put = NULL;
 
 	spin_lock_bh(&xfrm_state_lock);
-	h = xfrm_dst_hash(net, daddr, saddr, tmpl->reqid, family);
+	h = xfrm_dst_hash(net, daddr, saddr, tmpl->reqid, encap_family);
 	hlist_for_each_entry(x, entry, net->xfrm.state_bydst+h, bydst) {
-		if (x->props.family == family &&
+		if (x->props.family == encap_family &&
 		    x->props.reqid == tmpl->reqid &&
 		    (mark & x->mark.m) == x->mark.v &&
 		    !(x->props.flags & XFRM_STATE_WILDRECV) &&
-		    xfrm_state_addr_check(x, daddr, saddr, family) &&
+		    xfrm_state_addr_check(x, daddr, saddr, encap_family) &&
 		    tmpl->mode == x->props.mode &&
 		    tmpl->id.proto == x->id.proto &&
 		    (tmpl->id.spi == x->id.spi || !tmpl->id.spi))
-			xfrm_state_look_at(pol, x, fl, family, daddr, saddr,
+			xfrm_state_look_at(pol, x, fl, encap_family, daddr, saddr,
 					   &best, &acquire_in_progress, &error);
 	}
 	if (best)
 		goto found;
 
-	h_wildcard = xfrm_dst_hash(net, daddr, &saddr_wildcard, tmpl->reqid, family);
+	h_wildcard = xfrm_dst_hash(net, daddr, &saddr_wildcard, tmpl->reqid, encap_family);
 	hlist_for_each_entry(x, entry, net->xfrm.state_bydst+h_wildcard, bydst) {
-		if (x->props.family == family &&
+		if (x->props.family == encap_family &&
 		    x->props.reqid == tmpl->reqid &&
 		    (mark & x->mark.m) == x->mark.v &&
 		    !(x->props.flags & XFRM_STATE_WILDRECV) &&
-		    xfrm_state_addr_check(x, daddr, saddr, family) &&
+		    xfrm_state_addr_check(x, daddr, saddr, encap_family) &&
 		    tmpl->mode == x->props.mode &&
 		    tmpl->id.proto == x->id.proto &&
 		    (tmpl->id.spi == x->id.spi || !tmpl->id.spi))
-			xfrm_state_look_at(pol, x, fl, family, daddr, saddr,
+			xfrm_state_look_at(pol, x, fl, encap_family, daddr, saddr,
 					   &best, &acquire_in_progress, &error);
 	}
 
@@ -829,7 +838,7 @@ found:
 	if (!x && !error && !acquire_in_progress) {
 		if (tmpl->id.spi &&
 		    (x0 = __xfrm_state_lookup(net, mark, daddr, tmpl->id.spi,
-					      tmpl->id.proto, family)) != NULL) {
+					      tmpl->id.proto, encap_family)) != NULL) {
 			to_put = x0;
 			error = -EEXIST;
 			goto out;
@@ -839,9 +848,9 @@ found:
 			error = -ENOMEM;
 			goto out;
 		}
-		/* Initialize temporary selector matching only
+		/* Initialize temporary state matching only
 		 * to current session. */
-		xfrm_init_tempsel(x, fl, tmpl, daddr, saddr, family);
+		xfrm_init_tempstate(x, fl, tmpl, daddr, saddr, family);
 		memcpy(&x->mark, &pol->mark, sizeof(x->mark));
 
 		error = security_xfrm_state_alloc_acquire(x, pol->security, fl->secid);
@@ -856,10 +865,10 @@ found:
 		x->km.state = XFRM_STATE_ACQ;
 		list_add(&x->km.all, &net->xfrm.state_all);
 		hlist_add_head(&x->bydst, net->xfrm.state_bydst+h);
-		h = xfrm_src_hash(net, daddr, saddr, family);
+		h = xfrm_src_hash(net, daddr, saddr, encap_family);
 		hlist_add_head(&x->bysrc, net->xfrm.state_bysrc+h);
 		if (x->id.spi) {
-			h = xfrm_spi_hash(net, &x->id.daddr, x->id.spi, x->id.proto, family);
+			h = xfrm_spi_hash(net, &x->id.daddr, x->id.spi, x->id.proto, encap_family);
 			hlist_add_head(&x->byspi, net->xfrm.state_byspi+h);
 		}
 		x->lft.hard_add_expires_seconds = net->xfrm.sysctl_acq_expires;