Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6
* 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6:
  [NET]: rt_check_expire() can take a long time, add a cond_resched()
  [ISDN] sc: Really, really fix warning
  [ISDN] sc: Fix sndpkt to have the correct number of arguments
  [TCP] FRTO: Clear frto_highmark only after process_frto that uses it
  [NET]: Remove notifier block from chain when register_netdevice_notifier fails
  [FS_ENET]: Fix module build.
  [TCP]: Make sure write_queue_from does not begin with NULL ptr
  [TCP]: Fix size calculation in sk_stream_alloc_pskb
  [S2IO]: Fixed memory leak when MSI-X vector allocation fails
  [BONDING]: Fix resource use after free
  [SYSCTL]: Fix warning for token-ring from sysctl checker
  [NET] random : secure_tcp_sequence_number should not assume CONFIG_KTIME_SCALAR
  [IWLWIFI]: Not correctly dealing with hotunplug.
  [TCP] FRTO: Plug potential LOST-bit leak
  [TCP] FRTO: Limit snd_cwnd if TCP was application limited
  [E1000]: Fix schedule while atomic when called from mii-tool.
  [NETX]: Fix build failure added by 2.6.24 statistics cleanup.
  [EP93xx_ETH]: Build fix after 2.6.24 NAPI changes.
  [PKT_SCHED]: Check subqueue status before calling hard_start_xmit
commit 6f37ac793d
@@ -1494,7 +1494,7 @@ __u32 secure_tcpv6_sequence_number(__be32 *saddr, __be32 *daddr,
         seq = twothirdsMD4Transform((const __u32 *)daddr, hash) & HASH_MASK;
         seq += keyptr->count;
 
-        seq += ktime_get_real().tv64;
+        seq += ktime_to_ns(ktime_get_real());
 
         return seq;
 }
@@ -1556,7 +1556,7 @@ __u32 secure_tcp_sequence_number(__be32 saddr, __be32 daddr,
          * overlaps less than one time per MSL (2 minutes).
          * Choosing a clock of 64 ns period is OK. (period of 274 s)
          */
-        seq += ktime_get_real().tv64 >> 6;
+        seq += ktime_to_ns(ktime_get_real()) >> 6;
 #if 0
         printk("init_seq(%lx, %lx, %d, %d) = %d\n",
                saddr, daddr, sport, dport, seq);
@@ -1616,7 +1616,7 @@ u64 secure_dccp_sequence_number(__be32 saddr, __be32 daddr,
         seq = half_md4_transform(hash, keyptr->secret);
         seq |= ((u64)keyptr->count) << (32 - HASH_BITS);
 
-        seq += ktime_get_real().tv64;
+        seq += ktime_to_ns(ktime_get_real());
         seq &= (1ull << 48) - 1;
 #if 0
         printk("dccp init_seq(%lx, %lx, %d, %d) = %d\n",
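
The three hunks above drop direct reads of ktime_get_real().tv64 in favour of ktime_to_ns(ktime_get_real()). As the commit title says, treating .tv64 as a nanosecond count assumes the scalar ktime representation (CONFIG_KTIME_SCALAR); ktime_to_ns() is correct for either representation. A minimal userspace sketch of the idea follows; the union and helper names are hypothetical and this is not kernel code.

/*
 * Userspace analogue: a timestamp stored either as one 64-bit nanosecond
 * scalar or as a sec/nsec pair, mirroring the two ktime_t layouts.
 * Reading .scalar is only a nanosecond count in the first layout; the
 * conversion helper is safe for both.
 */
#include <stdint.h>
#include <stdio.h>

union ts {
        int64_t scalar;                         /* "KTIME_SCALAR"-style layout */
        struct { int32_t sec, nsec; } tv;       /* split layout used elsewhere */
};

static int64_t ts_to_ns(union ts t, int is_scalar)
{
        return is_scalar ? t.scalar
                         : (int64_t)t.tv.sec * 1000000000LL + t.tv.nsec;
}

int main(void)
{
        union ts t;

        t.tv.sec = 2;
        t.tv.nsec = 500;
        /* t.scalar would NOT be 2000000500 here; only the helper is safe. */
        printf("%lld\n", (long long)ts_to_ns(t, 0));
        return 0;
}
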
@@ -109,7 +109,7 @@ void memcpy_fromshmem(int card, void *dest, const void *src, size_t n);
 int get_card_from_id(int driver);
 int indicate_status(int card, int event, ulong Channel, char *Data);
 irqreturn_t interrupt_handler(int interrupt, void *cardptr);
-int sndpkt(int devId, int channel, struct sk_buff *data);
+int sndpkt(int devId, int channel, int ack, struct sk_buff *data);
 void rcvpkt(int card, RspMessage *rcvmsg);
 int command(isdn_ctrl *cmd);
 int reset(int card);
@@ -20,7 +20,7 @@
 #include "message.h"
 #include "card.h"
 
-int sndpkt(int devId, int channel, struct sk_buff *data)
+int sndpkt(int devId, int channel, int ack, struct sk_buff *data)
 {
         LLData  ReqLnkWrite;
         int status;
@@ -50,7 +50,7 @@ void memcpy_toshmem(int card, void *dest, const void *src, size_t n)
 
         outb(((sc_adapter[card]->shmem_magic + ch * SRAM_PAGESIZE) >> 14) | 0x80,
                 sc_adapter[card]->ioport[sc_adapter[card]->shmem_pgport]);
-        memcpy_toio(sc_adapter[card]->rambase + dest_rem, src, n);
+        memcpy_toio((void __iomem *)(sc_adapter[card]->rambase + dest_rem), src, n);
         spin_unlock_irqrestore(&sc_adapter[card]->lock, flags);
         pr_debug("%s: set page to %#x\n",sc_adapter[card]->devicename,
                 ((sc_adapter[card]->shmem_magic + ch * SRAM_PAGESIZE)>>14)|0x80);
@@ -417,7 +417,7 @@ static irqreturn_t ep93xx_irq(int irq, void *dev_id)
 
         if (status & REG_INTSTS_RX) {
                 spin_lock(&ep->rx_lock);
-                if (likely(__netif_rx_schedule_prep(dev, &ep->napi))) {
+                if (likely(netif_rx_schedule_prep(dev, &ep->napi))) {
                         wrl(ep, REG_INTEN, REG_INTEN_TX);
                         __netif_rx_schedule(dev, &ep->napi);
                 }
@@ -1847,9 +1847,9 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
  */
 void bond_destroy(struct bonding *bond)
 {
-        unregister_netdevice(bond->dev);
         bond_deinit(bond->dev);
         bond_destroy_sysfs_entry(bond);
+        unregister_netdevice(bond->dev);
 }
 
 /*
@@ -4475,8 +4475,8 @@ static void bond_free_all(void)
                 bond_mc_list_destroy(bond);
                 /* Release the bonded slaves */
                 bond_release_all(bond_dev);
-                unregister_netdevice(bond_dev);
                 bond_deinit(bond_dev);
+                unregister_netdevice(bond_dev);
         }
 
 #ifdef CONFIG_PROC_FS
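
Both bonding hunks reorder teardown so that unregister_netdevice() runs last, after the cleanup that still dereferences the bonding device, which is the use-after-free the commit title refers to. Below is a minimal, generic sketch of that ordering rule; the types and names are illustrative, not the bonding API.

/* Teardown-ordering sketch (hypothetical types): anything that still
 * dereferences `owner` must run before the call that destroys it. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct owner {
        char name[16];
};

static void cleanup_dependents(struct owner *o)
{
        printf("cleaning up state for %s\n", o->name);  /* needs a live object */
}

static void destroy_owner(struct owner *o)
{
        free(o);                /* after this, o must not be touched */
}

int main(void)
{
        struct owner *o = malloc(sizeof(*o));

        if (!o)
                return 1;
        strcpy(o->name, "bond0");
        cleanup_dependents(o);  /* correct order: dependents first */
        destroy_owner(o);       /* owner last */
        return 0;
}
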
@@ -4804,6 +4804,7 @@ e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
                         spin_unlock_irqrestore(&adapter->stats_lock, flags);
                         return -EIO;
                 }
+                spin_unlock_irqrestore(&adapter->stats_lock, flags);
                 if (adapter->hw.media_type == e1000_media_type_copper) {
                         switch (data->reg_num) {
                         case PHY_CTRL:
@@ -4824,12 +4825,8 @@ e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
                                                 DUPLEX_HALF;
                                 retval = e1000_set_spd_dplx(adapter,
                                                             spddplx);
-                                if (retval) {
-                                        spin_unlock_irqrestore(
-                                                &adapter->stats_lock,
-                                                flags);
+                                if (retval)
                                         return retval;
-                                }
                         }
                         if (netif_running(adapter->netdev))
                                 e1000_reinit_locked(adapter);
@@ -4838,11 +4835,8 @@ e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
                         break;
                 case M88E1000_PHY_SPEC_CTRL:
                 case M88E1000_EXT_PHY_SPEC_CTRL:
-                        if (e1000_phy_reset(&adapter->hw)) {
-                                spin_unlock_irqrestore(
-                                        &adapter->stats_lock, flags);
+                        if (e1000_phy_reset(&adapter->hw))
                                 return -EIO;
-                        }
                         break;
                 }
         } else {
@@ -4857,7 +4851,6 @@ e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
                                 break;
                         }
                 }
-                spin_unlock_irqrestore(&adapter->stats_lock, flags);
                 break;
         default:
                 return -EOPNOTSUPP;
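
The e1000 hunks release stats_lock, a spinlock taken with IRQs saved, before calling into driver paths that may sleep (the speed/duplex, PHY-reset and reinit paths reached from this ioctl), which is what produced the "schedule while atomic" report from mii-tool. A small userspace analogue of the locking rule, with a pthread mutex standing in for the spinlock and sleep() for the blocking call; the names here are hypothetical.

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t stats_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long tx_packets;

static void read_shared_counter(unsigned long *out)
{
        pthread_mutex_lock(&stats_lock);
        *out = tx_packets;              /* keep the critical section short */
        pthread_mutex_unlock(&stats_lock);
}

static int slow_reconfigure(void)
{
        sleep(1);                       /* stands in for a call that may block */
        return 0;
}

int main(void)
{
        unsigned long snapshot;

        read_shared_counter(&snapshot); /* lock held only here */
        if (slow_reconfigure())         /* blocking work done with no lock held */
                return 1;
        printf("tx_packets snapshot: %lu\n", snapshot);
        return 0;
}
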
@@ -2,6 +2,7 @@ config FS_ENET
         tristate "Freescale Ethernet Driver"
         depends on CPM1 || CPM2
         select MII
+        select PHYLIB
 
 config FS_ENET_HAS_SCC
         bool "Chip has an SCC usable for ethernet"
@@ -11,11 +12,19 @@ config FS_ENET_HAS_SCC
 config FS_ENET_HAS_FCC
         bool "Chip has an FCC usable for ethernet"
         depends on FS_ENET && CPM2
-        select MDIO_BITBANG
         default y
 
 config FS_ENET_HAS_FEC
         bool "Chip has an FEC usable for ethernet"
         depends on FS_ENET && CPM1
+        select FS_ENET_MDIO_FEC
         default y
 
+config FS_ENET_MDIO_FEC
+        tristate "MDIO driver for FEC"
+        depends on FS_ENET && CPM1
+
+config FS_ENET_MDIO_FCC
+        tristate "MDIO driver for FCC"
+        depends on FS_ENET && CPM2
+        select MDIO_BITBANG
@@ -4,7 +4,16 @@
 
 obj-$(CONFIG_FS_ENET) += fs_enet.o
 
-obj-$(CONFIG_8xx) += mac-fec.o mac-scc.o mii-fec.o
-obj-$(CONFIG_CPM2) += mac-fcc.o mii-bitbang.o
+fs_enet-$(CONFIG_FS_ENET_HAS_SCC) += mac-scc.o
+fs_enet-$(CONFIG_FS_ENET_HAS_FEC) += mac-fec.o
+fs_enet-$(CONFIG_FS_ENET_HAS_FCC) += mac-fcc.o
 
-fs_enet-objs := fs_enet-main.o
+ifeq ($(CONFIG_PPC_CPM_NEW_BINDING),y)
+obj-$(CONFIG_FS_ENET_MDIO_FEC) += mii-fec.o
+obj-$(CONFIG_FS_ENET_MDIO_FCC) += mii-bitbang.o
+else
+fs_enet-$(CONFIG_FS_ENET_MDIO_FEC) += mii-fec.o
+fs_enet-$(CONFIG_FS_ENET_MDIO_FCC) += mii-bitbang.o
+endif
+
+fs_enet-objs := fs_enet-main.o $(fs_enet-m)
@@ -128,8 +128,8 @@ netx_eth_hard_start_xmit(struct sk_buff *skb, struct net_device *ndev)
                  FIFO_PTR_FRAMELEN(len));
 
         ndev->trans_start = jiffies;
-        dev->stats.tx_packets++;
-        dev->stats.tx_bytes += skb->len;
+        ndev->stats.tx_packets++;
+        ndev->stats.tx_bytes += skb->len;
 
         netif_stop_queue(ndev);
         spin_unlock_irq(&priv->lock);
@@ -155,7 +155,7 @@ static void netx_eth_receive(struct net_device *ndev)
         if (unlikely(skb == NULL)) {
                 printk(KERN_NOTICE "%s: Low memory, packet dropped.\n",
                         ndev->name);
-                dev->stats.rx_dropped++;
+                ndev->stats.rx_dropped++;
                 return;
         }
 
@@ -84,7 +84,7 @@
 #include "s2io.h"
 #include "s2io-regs.h"
 
-#define DRV_VERSION "2.0.26.5"
+#define DRV_VERSION "2.0.26.6"
 
 /* S2io Driver name & version. */
 static char s2io_driver_name[] = "Neterion";
@@ -3775,6 +3775,40 @@ static int __devinit s2io_test_msi(struct s2io_nic *sp)
 
         return err;
 }
+
+static void remove_msix_isr(struct s2io_nic *sp)
+{
+        int i;
+        u16 msi_control;
+
+        for (i = 0; i < MAX_REQUESTED_MSI_X; i++) {
+                if (sp->s2io_entries[i].in_use ==
+                        MSIX_REGISTERED_SUCCESS) {
+                        int vector = sp->entries[i].vector;
+                        void *arg = sp->s2io_entries[i].arg;
+                        free_irq(vector, arg);
+                }
+        }
+
+        kfree(sp->entries);
+        kfree(sp->s2io_entries);
+        sp->entries = NULL;
+        sp->s2io_entries = NULL;
+
+        pci_read_config_word(sp->pdev, 0x42, &msi_control);
+        msi_control &= 0xFFFE; /* Disable MSI */
+        pci_write_config_word(sp->pdev, 0x42, msi_control);
+
+        pci_disable_msix(sp->pdev);
+}
+
+static void remove_inta_isr(struct s2io_nic *sp)
+{
+        struct net_device *dev = sp->dev;
+
+        free_irq(sp->pdev->irq, dev);
+}
+
 /* ********************************************************* *
 * Functions defined below concern the OS part of the driver *
 * ********************************************************* */
@@ -3809,28 +3843,9 @@ static int s2io_open(struct net_device *dev)
                 int ret = s2io_enable_msi_x(sp);
 
                 if (!ret) {
-                        u16 msi_control;
-
                         ret = s2io_test_msi(sp);
-
                         /* rollback MSI-X, will re-enable during add_isr() */
-                        kfree(sp->entries);
-                        sp->mac_control.stats_info->sw_stat.mem_freed +=
-                                (MAX_REQUESTED_MSI_X *
-                                sizeof(struct msix_entry));
-                        kfree(sp->s2io_entries);
-                        sp->mac_control.stats_info->sw_stat.mem_freed +=
-                                (MAX_REQUESTED_MSI_X *
-                                sizeof(struct s2io_msix_entry));
-                        sp->entries = NULL;
-                        sp->s2io_entries = NULL;
-
-                        pci_read_config_word(sp->pdev, 0x42, &msi_control);
-                        msi_control &= 0xFFFE; /* Disable MSI */
-                        pci_write_config_word(sp->pdev, 0x42, msi_control);
-
-                        pci_disable_msix(sp->pdev);
-
+                        remove_msix_isr(sp);
                 }
                 if (ret) {
 
@@ -6719,15 +6734,22 @@ static int s2io_add_isr(struct s2io_nic * sp)
                                 }
                         }
                         if (err) {
+                                remove_msix_isr(sp);
                                 DBG_PRINT(ERR_DBG,"%s:MSI-X-%d registration "
                                           "failed\n", dev->name, i);
-                                DBG_PRINT(ERR_DBG, "Returned: %d\n", err);
-                                return -1;
+                                DBG_PRINT(ERR_DBG, "%s: defaulting to INTA\n",
+                                          dev->name);
+                                sp->config.intr_type = INTA;
+                                break;
                         }
                         sp->s2io_entries[i].in_use = MSIX_REGISTERED_SUCCESS;
                 }
-                printk("MSI-X-TX %d entries enabled\n",msix_tx_cnt);
-                printk("MSI-X-RX %d entries enabled\n",msix_rx_cnt);
+                if (!err) {
+                        printk(KERN_INFO "MSI-X-TX %d entries enabled\n",
+                                msix_tx_cnt);
+                        printk(KERN_INFO "MSI-X-RX %d entries enabled\n",
+                                msix_rx_cnt);
+                }
         }
         if (sp->config.intr_type == INTA) {
                 err = request_irq((int) sp->pdev->irq, s2io_isr, IRQF_SHARED,
@@ -6742,40 +6764,10 @@ static int s2io_add_isr(struct s2io_nic * sp)
 }
 static void s2io_rem_isr(struct s2io_nic * sp)
 {
-        struct net_device *dev = sp->dev;
-        struct swStat *stats = &sp->mac_control.stats_info->sw_stat;
-
-        if (sp->config.intr_type == MSI_X) {
-                int i;
-                u16 msi_control;
-
-                for (i=1; (sp->s2io_entries[i].in_use ==
-                        MSIX_REGISTERED_SUCCESS); i++) {
-                        int vector = sp->entries[i].vector;
-                        void *arg = sp->s2io_entries[i].arg;
-
-                        synchronize_irq(vector);
-                        free_irq(vector, arg);
-                }
-
-                kfree(sp->entries);
-                stats->mem_freed +=
-                        (MAX_REQUESTED_MSI_X * sizeof(struct msix_entry));
-                kfree(sp->s2io_entries);
-                stats->mem_freed +=
-                        (MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry));
-                sp->entries = NULL;
-                sp->s2io_entries = NULL;
-
-                pci_read_config_word(sp->pdev, 0x42, &msi_control);
-                msi_control &= 0xFFFE; /* Disable MSI */
-                pci_write_config_word(sp->pdev, 0x42, msi_control);
-
-                pci_disable_msix(sp->pdev);
-        } else {
-                synchronize_irq(sp->pdev->irq);
-                free_irq(sp->pdev->irq, dev);
-        }
+        if (sp->config.intr_type == MSI_X)
+                remove_msix_isr(sp);
+        else
+                remove_inta_isr(sp);
 }
 
 static void do_s2io_card_down(struct s2io_nic * sp, int do_io)
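
The s2io change factors the duplicated MSI-X/INTA teardown into remove_msix_isr() and remove_inta_isr() and calls the MSI-X helper from the rollback path in s2io_open() and from the registration-failure path in s2io_add_isr(), so a failed vector allocation no longer leaks the entries arrays. A compact sketch of the shared-cleanup-helper pattern; the names and types below are hypothetical, not the driver code.

#include <stdio.h>
#include <stdlib.h>

struct ctx {
        int *vectors;
};

static void release_vectors(struct ctx *c)
{
        free(c->vectors);
        c->vectors = NULL;
}

static int setup(struct ctx *c, int fail)
{
        c->vectors = calloc(16, sizeof(*c->vectors));
        if (!c->vectors)
                return -1;
        if (fail) {                     /* e.g. vector registration failed */
                release_vectors(c);     /* same helper as the teardown path */
                return -1;
        }
        return 0;
}

int main(void)
{
        struct ctx c = { 0 };

        if (setup(&c, 1))
                printf("setup failed, vectors freed: %s\n", c.vectors ? "no" : "yes");
        release_vectors(&c);            /* normal teardown is now a no-op */
        return 0;
}
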
@@ -4850,7 +4850,7 @@ static irqreturn_t iwl_isr(int irq, void *data)
         if ((inta == 0xFFFFFFFF) || ((inta & 0xFFFFFFF0) == 0xa5a5a5a0)) {
                 /* Hardware disappeared */
                 IWL_WARNING("HARDWARE GONE?? INTA == 0x%080x\n", inta);
-                goto none;
+                goto unplugged;
         }
 
         IWL_DEBUG_ISR("ISR inta 0x%08x, enabled 0x%08x, fh 0x%08x\n",
@@ -4858,6 +4858,7 @@ static irqreturn_t iwl_isr(int irq, void *data)
 
         /* iwl_irq_tasklet() will service interrupts and re-enable them */
         tasklet_schedule(&priv->irq_tasklet);
+ unplugged:
         spin_unlock(&priv->lock);
 
         return IRQ_HANDLED;
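
The iwlwifi hunks add an unplugged: label so that the hardware-gone branch still drops priv->lock and returns IRQ_HANDLED instead of taking the normal exit path. A userspace sketch of the jump-to-a-label-that-still-unlocks pattern, with a pthread mutex standing in for the spinlock; the names are hypothetical.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static int handle_event(bool device_gone)
{
        int handled = 0;

        pthread_mutex_lock(&lock);
        if (device_gone) {
                handled = 1;            /* nothing more to do ... */
                goto unplugged;         /* ... but the lock must still drop */
        }
        /* normal processing would go here */
        handled = 1;
unplugged:
        pthread_mutex_unlock(&lock);
        return handled;
}

int main(void)
{
        printf("%d\n", handle_event(true));
        return 0;
}
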
@@ -1235,14 +1235,16 @@ static inline struct sk_buff *sk_stream_alloc_pskb(struct sock *sk,
                                                    gfp_t gfp)
 {
         struct sk_buff *skb;
-        int hdr_len;
 
-        hdr_len = SKB_DATA_ALIGN(sk->sk_prot->max_header);
-        skb = alloc_skb_fclone(size + hdr_len, gfp);
+        skb = alloc_skb_fclone(size + sk->sk_prot->max_header, gfp);
         if (skb) {
                 skb->truesize += mem;
                 if (sk_stream_wmem_schedule(sk, skb->truesize)) {
-                        skb_reserve(skb, hdr_len);
+                        /*
+                         * Make sure that we have exactly size bytes
+                         * available to the caller, no more, no less.
+                         */
+                        skb_reserve(skb, skb_tailroom(skb) - size);
                         return skb;
                 }
                 __kfree_skb(skb);
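
The restored comment states the contract: the caller must end up with exactly size bytes of tailroom. Reserving skb_tailroom(skb) - size achieves that no matter how much the allocator actually handed back, whereas reserving only the aligned header length can leave extra tailroom behind. A plain-C arithmetic illustration with made-up numbers:

#include <stdio.h>

int main(void)
{
        unsigned int size = 1000;          /* bytes the caller asked for */
        unsigned int header = 120;         /* header estimate added on top */
        unsigned int allocated = 1184;     /* allocator rounded size+header up */

        unsigned int reserve = allocated - size;   /* 184: header plus rounding slack */

        printf("asked for %u+%u, got %u; reserving %u leaves exactly %u\n",
               size, header, allocated, reserve, allocated - reserve);
        return 0;
}
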
@@ -738,7 +738,7 @@ static struct trans_ctl_table trans_net_table[] = {
         { NET_ROSE, "rose", trans_net_rose_table },
         { NET_IPV6, "ipv6", trans_net_ipv6_table },
         { NET_X25, "x25", trans_net_x25_table },
-        { NET_TR, "tr", trans_net_tr_table },
+        { NET_TR, "token-ring", trans_net_tr_table },
         { NET_DECNET, "decnet", trans_net_decnet_table },
         /* NET_ECONET not used */
         { NET_SCTP, "sctp", trans_net_sctp_table },
@@ -1171,6 +1171,8 @@ rollback:
                         nb->notifier_call(nb, NETDEV_UNREGISTER, dev);
                 }
         }
+
+        raw_notifier_chain_unregister(&netdev_chain, nb);
         goto unlock;
 }
 
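
register_netdevice_notifier() replays NETDEV_REGISTER for devices that already exist; if one of those calls fails, the rollback path above now also removes the notifier from netdev_chain before returning, so the caller is not left half-registered. A self-contained sketch of that unwind-on-failure pattern; the tiny chain API below is hypothetical.

#include <stdio.h>

#define MAX_CB 8
typedef int (*callback_t)(int event);

static callback_t chain[MAX_CB];
static int nr_cb;

static void chain_unregister(callback_t cb)
{
        for (int i = 0; i < nr_cb; i++)
                if (chain[i] == cb)
                        chain[i] = chain[--nr_cb];
}

static int chain_register_and_replay(callback_t cb, int nr_existing)
{
        int err = 0;

        chain[nr_cb++] = cb;
        for (int i = 0; i < nr_existing && !err; i++)
                err = cb(i);            /* replay "register" for existing objects */
        if (err)
                chain_unregister(cb);   /* the fix: do not stay half-registered */
        return err;
}

static int failing_cb(int event)
{
        return event == 1 ? -1 : 0;
}

int main(void)
{
        int err = chain_register_and_replay(failing_cb, 3);

        printf("err=%d, callbacks left on chain=%d\n", err, nr_cb);
        return 0;
}
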
@@ -578,6 +578,9 @@ static void rt_check_expire(struct work_struct *work)
                 i = (i + 1) & rt_hash_mask;
                 rthp = &rt_hash_table[i].chain;
 
+                if (need_resched())
+                        cond_resched();
+
                 if (*rthp == NULL)
                         continue;
                 spin_lock_bh(rt_hash_lock_addr(i));
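
rt_check_expire() walks the entire route hash table, so the fix yields between buckets, at a point where no per-bucket lock is held. A userspace sketch of the same idea, with sched_yield() standing in for cond_resched():

#include <sched.h>
#include <stdio.h>

#define BUCKETS (1 << 20)

int main(void)
{
        unsigned long scanned = 0;

        for (unsigned int i = 0; i < BUCKETS; i++) {
                /* yield between buckets, never while holding the bucket lock */
                if ((i & 0xfff) == 0)
                        sched_yield();

                scanned++;              /* per-bucket work would happen here */
        }
        printf("scanned %lu buckets\n", scanned);
        return 0;
}
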
@@ -1269,6 +1269,9 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
         if (before(TCP_SKB_CB(ack_skb)->ack_seq, prior_snd_una - tp->max_window))
                 return 0;
 
+        if (!tp->packets_out)
+                goto out;
+
         /* SACK fastpath:
          * if the only SACK change is the increase of the end_seq of
          * the first block then only apply that SACK block
@@ -1515,6 +1518,8 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
             (!tp->frto_highmark || after(tp->snd_una, tp->frto_highmark)))
                 tcp_update_reordering(sk, tp->fackets_out - reord, 0);
 
+out:
+
 #if FASTRETRANS_DEBUG > 0
         BUG_TRAP((int)tp->sacked_out >= 0);
         BUG_TRAP((int)tp->lost_out >= 0);
@@ -1669,6 +1674,9 @@ void tcp_enter_frto(struct sock *sk)
         }
         tcp_verify_left_out(tp);
 
+        /* Too bad if TCP was application limited */
+        tp->snd_cwnd = min(tp->snd_cwnd, tcp_packets_in_flight(tp) + 1);
+
         /* Earlier loss recovery underway (see RFC4138; Appendix B).
          * The last condition is necessary at least in tp->frto_counter case.
          */
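
The clamp added to tcp_enter_frto() keeps an application-limited sender from entering FRTO with a congestion window far larger than what is actually outstanding. A small worked example of the arithmetic, in plain C with made-up numbers:

#include <stdio.h>

static unsigned int min_u32(unsigned int a, unsigned int b)
{
        return a < b ? a : b;
}

int main(void)
{
        unsigned int snd_cwnd = 20;     /* window the sender had built up */
        unsigned int in_flight = 3;     /* segments actually unacknowledged */

        snd_cwnd = min_u32(snd_cwnd, in_flight + 1);
        printf("cwnd entering FRTO: %u\n", snd_cwnd);   /* 4, not 20 */
        return 0;
}
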
@@ -1701,6 +1709,8 @@ static void tcp_enter_frto_loss(struct sock *sk, int allowed_segments, int flag)
         tcp_for_write_queue(skb, sk) {
                 if (skb == tcp_send_head(sk))
                         break;
+
+                TCP_SKB_CB(skb)->sacked &= ~TCPCB_LOST;
                 /*
                  * Count the retransmission made on RTO correctly (only when
                  * waiting for the first ACK and did not get it)...
@@ -1714,7 +1724,7 @@ static void tcp_enter_frto_loss(struct sock *sk, int allowed_segments, int flag)
                 } else {
                         if (TCP_SKB_CB(skb)->sacked & TCPCB_RETRANS)
                                 tp->undo_marker = 0;
-                        TCP_SKB_CB(skb)->sacked &= ~(TCPCB_LOST|TCPCB_SACKED_RETRANS);
+                        TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS;
                 }
 
                 /* Don't lost mark skbs that were fwd transmitted after RTO */
@@ -3103,11 +3113,11 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag)
         /* See if we can take anything off of the retransmit queue. */
         flag |= tcp_clean_rtx_queue(sk, &seq_rtt, prior_fackets);
 
+        if (tp->frto_counter)
+                frto_cwnd = tcp_process_frto(sk, flag);
         /* Guarantee sacktag reordering detection against wrap-arounds */
         if (before(tp->frto_highmark, tp->snd_una))
                 tp->frto_highmark = 0;
-        if (tp->frto_counter)
-                frto_cwnd = tcp_process_frto(sk, flag);
 
         if (tcp_ack_is_dubious(sk, flag)) {
                 /* Advance CWND, if state allows this. */
@@ -134,7 +134,7 @@ static inline int qdisc_restart(struct net_device *dev)
 {
         struct Qdisc *q = dev->qdisc;
         struct sk_buff *skb;
-        int ret;
+        int ret = NETDEV_TX_BUSY;
 
         /* Dequeue packet */
         if (unlikely((skb = dev_dequeue_skb(dev, q)) == NULL))
@@ -145,7 +145,8 @@ static inline int qdisc_restart(struct net_device *dev)
         spin_unlock(&dev->queue_lock);
 
         HARD_TX_LOCK(dev, smp_processor_id());
-        ret = dev_hard_start_xmit(skb, dev);
+        if (!netif_subqueue_stopped(dev, skb))
+                ret = dev_hard_start_xmit(skb, dev);
         HARD_TX_UNLOCK(dev);
 
         spin_lock(&dev->queue_lock);
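
With ret pre-set to NETDEV_TX_BUSY, qdisc_restart() only hands the packet to the driver when its subqueue is running; a stopped subqueue now falls through as busy and the packet is requeued instead of being pushed into a stopped ring. A plain-C sketch of that default-to-busy control flow; the names here are hypothetical.

#include <stdbool.h>
#include <stdio.h>

enum tx_status { TX_OK, TX_BUSY };

static enum tx_status try_transmit(bool subqueue_stopped)
{
        enum tx_status ret = TX_BUSY;           /* default: requeue later */

        if (!subqueue_stopped)
                ret = TX_OK;                    /* hand the packet to the driver */

        return ret;
}

int main(void)
{
        printf("stopped queue -> %s\n", try_transmit(true) == TX_BUSY ? "requeue" : "sent");
        printf("running queue -> %s\n", try_transmit(false) == TX_BUSY ? "requeue" : "sent");
        return 0;
}
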