mirror of
https://github.com/torvalds/linux.git
synced 2024-11-01 01:31:44 +00:00
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6: (39 commits) Revert "p54: Use SKB list handling helpers instead of by-hand code." sctp: fix warning at inet_sock_destruct() while release sctp socket tun/tap: Fix crashes if open() /dev/net/tun and then poll() it. dsa: fix 88e6xxx statistics counter snapshotting forcedeth: Fix NAPI race. drivers/net/smsc911x.c: Fix resource size off by 1 error pcnet_cs: add new id bnx2x: Fix the maximal values of coalescing timeouts. bnx2x: Disable HC coalescing when setting timeout to zero. tun: Fix device unregister race be2net: fix spurious interrupt handling in intx mode e1000e: disable K1 at 1000Mbps for 82577/82578 e1000e: delay second read of PHY_STATUS register on failure of first read e1000e: prevent NVM corruption on sectors larger than 4K e1000e: do not write SmartSpeed register bits on parts without support e1000e: delay after LCD reset and proper checks for PHY configuration done e1000e: PHY loopback broken on 82578 ixgbe: Not allow 8259x unsupported wol options change from ethtool ixgbe: fix inconsistent SFP/SFP+ failure results. ixgbe: fix regression on some 82598 adapters ...
This commit is contained in:
commit
eeaecb8619
@ -294,32 +294,33 @@ struct reply_t gigaset_tab_cid[] =
|
|||||||
{RSP_OK, 604,604, -1, 605, 5, {ACT_CMD+AT_MSN}},
|
{RSP_OK, 604,604, -1, 605, 5, {ACT_CMD+AT_MSN}},
|
||||||
{RSP_OK, 605,605, -1, 606, 5, {ACT_CMD+AT_ISO}},
|
{RSP_OK, 605,605, -1, 606, 5, {ACT_CMD+AT_ISO}},
|
||||||
{RSP_NULL, 605,605, -1, 606, 5, {ACT_CMD+AT_ISO}},
|
{RSP_NULL, 605,605, -1, 606, 5, {ACT_CMD+AT_ISO}},
|
||||||
{RSP_OK, 606,606, -1, 607, 5, {0}, "+VLS=17\r"}, /* set "Endgeraetemodus" */
|
{RSP_OK, 606,606, -1, 607, 5, {0}, "+VLS=17\r"},
|
||||||
{RSP_OK, 607,607, -1, 608,-1},
|
{RSP_OK, 607,607, -1, 608,-1},
|
||||||
//{RSP_ZSAU, 608,608,ZSAU_PROCEEDING, 608, 0, {ACT_ERROR}},//DELETE
|
|
||||||
{RSP_ZSAU, 608,608,ZSAU_PROCEEDING, 609, 5, {ACT_CMD+AT_DIAL}},
|
{RSP_ZSAU, 608,608,ZSAU_PROCEEDING, 609, 5, {ACT_CMD+AT_DIAL}},
|
||||||
{RSP_OK, 609,609, -1, 650, 0, {ACT_DIALING}},
|
{RSP_OK, 609,609, -1, 650, 0, {ACT_DIALING}},
|
||||||
|
|
||||||
{RSP_ZVLS, 608,608, 17, -1,-1, {ACT_DEBUG}},
|
|
||||||
{RSP_ZCTP, 609,609, -1, -1,-1, {ACT_DEBUG}},
|
|
||||||
{RSP_ZCPN, 609,609, -1, -1,-1, {ACT_DEBUG}},
|
|
||||||
{RSP_ERROR, 601,609, -1, 0, 0, {ACT_ABORTDIAL}},
|
{RSP_ERROR, 601,609, -1, 0, 0, {ACT_ABORTDIAL}},
|
||||||
{EV_TIMEOUT, 601,609, -1, 0, 0, {ACT_ABORTDIAL}},
|
{EV_TIMEOUT, 601,609, -1, 0, 0, {ACT_ABORTDIAL}},
|
||||||
|
|
||||||
/* dialing */
|
/* optional dialing responses */
|
||||||
{RSP_ZCTP, 650,650, -1, -1,-1, {ACT_DEBUG}},
|
{EV_BC_OPEN, 650,650, -1, 651,-1},
|
||||||
{RSP_ZCPN, 650,650, -1, -1,-1, {ACT_DEBUG}},
|
{RSP_ZVLS, 608,651, 17, -1,-1, {ACT_DEBUG}},
|
||||||
{RSP_ZSAU, 650,650,ZSAU_CALL_DELIVERED, -1,-1, {ACT_DEBUG}}, /* some devices don't send this */
|
{RSP_ZCTP, 609,651, -1, -1,-1, {ACT_DEBUG}},
|
||||||
|
{RSP_ZCPN, 609,651, -1, -1,-1, {ACT_DEBUG}},
|
||||||
|
{RSP_ZSAU, 650,651,ZSAU_CALL_DELIVERED, -1,-1, {ACT_DEBUG}},
|
||||||
|
|
||||||
/* connection established */
|
/* connect */
|
||||||
{RSP_ZSAU, 650,650,ZSAU_ACTIVE, 800,-1, {ACT_CONNECT}}, //FIXME -> DLE1
|
{RSP_ZSAU, 650,650,ZSAU_ACTIVE, 800,-1, {ACT_CONNECT}},
|
||||||
{RSP_ZSAU, 750,750,ZSAU_ACTIVE, 800,-1, {ACT_CONNECT}}, //FIXME -> DLE1
|
{RSP_ZSAU, 651,651,ZSAU_ACTIVE, 800,-1, {ACT_CONNECT,
|
||||||
|
ACT_NOTIFY_BC_UP}},
|
||||||
{EV_BC_OPEN, 800,800, -1, 800,-1, {ACT_NOTIFY_BC_UP}}, //FIXME new constate + timeout
|
{RSP_ZSAU, 750,750,ZSAU_ACTIVE, 800,-1, {ACT_CONNECT}},
|
||||||
|
{RSP_ZSAU, 751,751,ZSAU_ACTIVE, 800,-1, {ACT_CONNECT,
|
||||||
|
ACT_NOTIFY_BC_UP}},
|
||||||
|
{EV_BC_OPEN, 800,800, -1, 800,-1, {ACT_NOTIFY_BC_UP}},
|
||||||
|
|
||||||
/* remote hangup */
|
/* remote hangup */
|
||||||
{RSP_ZSAU, 650,650,ZSAU_DISCONNECT_IND, 0, 0, {ACT_REMOTEREJECT}},
|
{RSP_ZSAU, 650,651,ZSAU_DISCONNECT_IND, 0, 0, {ACT_REMOTEREJECT}},
|
||||||
{RSP_ZSAU, 750,750,ZSAU_DISCONNECT_IND, 0, 0, {ACT_REMOTEHUP}},
|
{RSP_ZSAU, 750,751,ZSAU_DISCONNECT_IND, 0, 0, {ACT_REMOTEHUP}},
|
||||||
{RSP_ZSAU, 800,800,ZSAU_DISCONNECT_IND, 0, 0, {ACT_REMOTEHUP}},
|
{RSP_ZSAU, 800,800,ZSAU_DISCONNECT_IND, 0, 0, {ACT_REMOTEHUP}},
|
||||||
|
|
||||||
/* hangup */
|
/* hangup */
|
||||||
@ -358,7 +359,8 @@ struct reply_t gigaset_tab_cid[] =
|
|||||||
{RSP_ZSAU, 700,729,ZSAU_ACTIVE, 0, 0, {ACT_ABORTACCEPT}},
|
{RSP_ZSAU, 700,729,ZSAU_ACTIVE, 0, 0, {ACT_ABORTACCEPT}},
|
||||||
{RSP_ZSAU, 700,729,ZSAU_DISCONNECT_IND, 0, 0, {ACT_ABORTACCEPT}},
|
{RSP_ZSAU, 700,729,ZSAU_DISCONNECT_IND, 0, 0, {ACT_ABORTACCEPT}},
|
||||||
|
|
||||||
{EV_TIMEOUT, 750,750, -1, 0, 0, {ACT_CONNTIMEOUT}},
|
{EV_BC_OPEN, 750,750, -1, 751,-1},
|
||||||
|
{EV_TIMEOUT, 750,751, -1, 0, 0, {ACT_CONNTIMEOUT}},
|
||||||
|
|
||||||
/* B channel closed (general case) */
|
/* B channel closed (general case) */
|
||||||
{EV_BC_CLOSED, -1, -1, -1, -1,-1, {ACT_NOTIFY_BC_DOWN}}, //FIXME
|
{EV_BC_CLOSED, -1, -1, -1, -1,-1, {ACT_NOTIFY_BC_DOWN}}, //FIXME
|
||||||
@ -876,12 +878,6 @@ static void bchannel_down(struct bc_state *bcs)
|
|||||||
|
|
||||||
static void bchannel_up(struct bc_state *bcs)
|
static void bchannel_up(struct bc_state *bcs)
|
||||||
{
|
{
|
||||||
if (!(bcs->chstate & CHS_D_UP)) {
|
|
||||||
dev_notice(bcs->cs->dev, "%s: D channel not up\n", __func__);
|
|
||||||
bcs->chstate |= CHS_D_UP;
|
|
||||||
gigaset_i4l_channel_cmd(bcs, ISDN_STAT_DCONN);
|
|
||||||
}
|
|
||||||
|
|
||||||
if (bcs->chstate & CHS_B_UP) {
|
if (bcs->chstate & CHS_B_UP) {
|
||||||
dev_notice(bcs->cs->dev, "%s: B channel already up\n",
|
dev_notice(bcs->cs->dev, "%s: B channel already up\n",
|
||||||
__func__);
|
__func__);
|
||||||
|
@ -174,12 +174,6 @@ int gigaset_isowbuf_getbytes(struct isowbuf_t *iwb, int size)
|
|||||||
pr_err("invalid size %d\n", size);
|
pr_err("invalid size %d\n", size);
|
||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
}
|
}
|
||||||
src = iwb->read;
|
|
||||||
if (unlikely(limit >= BAS_OUTBUFSIZE + BAS_OUTBUFPAD ||
|
|
||||||
(read < src && limit >= src))) {
|
|
||||||
pr_err("isoc write buffer frame reservation violated\n");
|
|
||||||
return -EFAULT;
|
|
||||||
}
|
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
if (read < write) {
|
if (read < write) {
|
||||||
|
@ -55,6 +55,10 @@
|
|||||||
#define MEMBAR_CTRL_INT_CTRL_PFUNC_MASK 0x7 /* bits 26 - 28 */
|
#define MEMBAR_CTRL_INT_CTRL_PFUNC_MASK 0x7 /* bits 26 - 28 */
|
||||||
#define MEMBAR_CTRL_INT_CTRL_PFUNC_SHIFT 26
|
#define MEMBAR_CTRL_INT_CTRL_PFUNC_SHIFT 26
|
||||||
|
|
||||||
|
/********* ISR0 Register offset **********/
|
||||||
|
#define CEV_ISR0_OFFSET 0xC18
|
||||||
|
#define CEV_ISR_SIZE 4
|
||||||
|
|
||||||
/********* Event Q door bell *************/
|
/********* Event Q door bell *************/
|
||||||
#define DB_EQ_OFFSET DB_CQ_OFFSET
|
#define DB_EQ_OFFSET DB_CQ_OFFSET
|
||||||
#define DB_EQ_RING_ID_MASK 0x1FF /* bits 0 - 8 */
|
#define DB_EQ_RING_ID_MASK 0x1FF /* bits 0 - 8 */
|
||||||
|
@ -1274,15 +1274,17 @@ static irqreturn_t be_intx(int irq, void *dev)
|
|||||||
{
|
{
|
||||||
struct be_adapter *adapter = dev;
|
struct be_adapter *adapter = dev;
|
||||||
struct be_ctrl_info *ctrl = &adapter->ctrl;
|
struct be_ctrl_info *ctrl = &adapter->ctrl;
|
||||||
int rx, tx;
|
int isr;
|
||||||
|
|
||||||
tx = event_handle(ctrl, &adapter->tx_eq);
|
isr = ioread32(ctrl->csr + CEV_ISR0_OFFSET +
|
||||||
rx = event_handle(ctrl, &adapter->rx_eq);
|
ctrl->pci_func * CEV_ISR_SIZE);
|
||||||
|
if (!isr)
|
||||||
|
return IRQ_NONE;
|
||||||
|
|
||||||
if (rx || tx)
|
event_handle(ctrl, &adapter->tx_eq);
|
||||||
return IRQ_HANDLED;
|
event_handle(ctrl, &adapter->rx_eq);
|
||||||
else
|
|
||||||
return IRQ_NONE;
|
return IRQ_HANDLED;
|
||||||
}
|
}
|
||||||
|
|
||||||
static irqreturn_t be_msix_rx(int irq, void *dev)
|
static irqreturn_t be_msix_rx(int irq, void *dev)
|
||||||
|
@ -902,6 +902,8 @@ struct bnx2x {
|
|||||||
u16 rx_quick_cons_trip;
|
u16 rx_quick_cons_trip;
|
||||||
u16 rx_ticks_int;
|
u16 rx_ticks_int;
|
||||||
u16 rx_ticks;
|
u16 rx_ticks;
|
||||||
|
/* Maximal coalescing timeout in us */
|
||||||
|
#define BNX2X_MAX_COALESCE_TOUT (0xf0*12)
|
||||||
|
|
||||||
u32 lin_cnt;
|
u32 lin_cnt;
|
||||||
|
|
||||||
|
@ -4434,7 +4434,7 @@ static void bnx2x_update_coalesce(struct bnx2x *bp)
|
|||||||
REG_WR16(bp, BAR_USTRORM_INTMEM +
|
REG_WR16(bp, BAR_USTRORM_INTMEM +
|
||||||
USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
|
USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
|
||||||
U_SB_ETH_RX_CQ_INDEX),
|
U_SB_ETH_RX_CQ_INDEX),
|
||||||
bp->rx_ticks ? 0 : 1);
|
(bp->rx_ticks/12) ? 0 : 1);
|
||||||
|
|
||||||
/* HC_INDEX_C_ETH_TX_CQ_CONS */
|
/* HC_INDEX_C_ETH_TX_CQ_CONS */
|
||||||
REG_WR8(bp, BAR_CSTRORM_INTMEM +
|
REG_WR8(bp, BAR_CSTRORM_INTMEM +
|
||||||
@ -4444,7 +4444,7 @@ static void bnx2x_update_coalesce(struct bnx2x *bp)
|
|||||||
REG_WR16(bp, BAR_CSTRORM_INTMEM +
|
REG_WR16(bp, BAR_CSTRORM_INTMEM +
|
||||||
CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
|
CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
|
||||||
C_SB_ETH_TX_CQ_INDEX),
|
C_SB_ETH_TX_CQ_INDEX),
|
||||||
bp->tx_ticks ? 0 : 1);
|
(bp->tx_ticks/12) ? 0 : 1);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -9069,12 +9069,12 @@ static int bnx2x_set_coalesce(struct net_device *dev,
|
|||||||
struct bnx2x *bp = netdev_priv(dev);
|
struct bnx2x *bp = netdev_priv(dev);
|
||||||
|
|
||||||
bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
|
bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
|
||||||
if (bp->rx_ticks > 3000)
|
if (bp->rx_ticks > BNX2X_MAX_COALESCE_TOUT)
|
||||||
bp->rx_ticks = 3000;
|
bp->rx_ticks = BNX2X_MAX_COALESCE_TOUT;
|
||||||
|
|
||||||
bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
|
bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
|
||||||
if (bp->tx_ticks > 0x3000)
|
if (bp->tx_ticks > BNX2X_MAX_COALESCE_TOUT)
|
||||||
bp->tx_ticks = 0x3000;
|
bp->tx_ticks = BNX2X_MAX_COALESCE_TOUT;
|
||||||
|
|
||||||
if (netif_running(dev))
|
if (netif_running(dev))
|
||||||
bnx2x_update_coalesce(bp);
|
bnx2x_update_coalesce(bp);
|
||||||
|
@ -238,6 +238,7 @@
|
|||||||
#define E1000_STATUS_SPEED_100 0x00000040 /* Speed 100Mb/s */
|
#define E1000_STATUS_SPEED_100 0x00000040 /* Speed 100Mb/s */
|
||||||
#define E1000_STATUS_SPEED_1000 0x00000080 /* Speed 1000Mb/s */
|
#define E1000_STATUS_SPEED_1000 0x00000080 /* Speed 1000Mb/s */
|
||||||
#define E1000_STATUS_LAN_INIT_DONE 0x00000200 /* Lan Init Completion by NVM */
|
#define E1000_STATUS_LAN_INIT_DONE 0x00000200 /* Lan Init Completion by NVM */
|
||||||
|
#define E1000_STATUS_PHYRA 0x00000400 /* PHY Reset Asserted */
|
||||||
#define E1000_STATUS_GIO_MASTER_ENABLE 0x00080000 /* Status of Master requests. */
|
#define E1000_STATUS_GIO_MASTER_ENABLE 0x00080000 /* Status of Master requests. */
|
||||||
|
|
||||||
/* Constants used to interpret the masked PCI-X bus speed. */
|
/* Constants used to interpret the masked PCI-X bus speed. */
|
||||||
@ -575,6 +576,8 @@
|
|||||||
#define PHY_1000T_STATUS 0x0A /* 1000Base-T Status Reg */
|
#define PHY_1000T_STATUS 0x0A /* 1000Base-T Status Reg */
|
||||||
#define PHY_EXT_STATUS 0x0F /* Extended Status Reg */
|
#define PHY_EXT_STATUS 0x0F /* Extended Status Reg */
|
||||||
|
|
||||||
|
#define PHY_CONTROL_LB 0x4000 /* PHY Loopback bit */
|
||||||
|
|
||||||
/* NVM Control */
|
/* NVM Control */
|
||||||
#define E1000_EECD_SK 0x00000001 /* NVM Clock */
|
#define E1000_EECD_SK 0x00000001 /* NVM Clock */
|
||||||
#define E1000_EECD_CS 0x00000002 /* NVM Chip Select */
|
#define E1000_EECD_CS 0x00000002 /* NVM Chip Select */
|
||||||
|
@ -215,6 +215,7 @@ enum e1e_registers {
|
|||||||
E1000_SWSM = 0x05B50, /* SW Semaphore */
|
E1000_SWSM = 0x05B50, /* SW Semaphore */
|
||||||
E1000_FWSM = 0x05B54, /* FW Semaphore */
|
E1000_FWSM = 0x05B54, /* FW Semaphore */
|
||||||
E1000_SWSM2 = 0x05B58, /* Driver-only SW semaphore */
|
E1000_SWSM2 = 0x05B58, /* Driver-only SW semaphore */
|
||||||
|
E1000_CRC_OFFSET = 0x05F50, /* CRC Offset register */
|
||||||
E1000_HICR = 0x08F00, /* Host Interface Control */
|
E1000_HICR = 0x08F00, /* Host Interface Control */
|
||||||
};
|
};
|
||||||
|
|
||||||
@ -302,6 +303,9 @@ enum e1e_registers {
|
|||||||
#define E1000_KMRNCTRLSTA_REN 0x00200000
|
#define E1000_KMRNCTRLSTA_REN 0x00200000
|
||||||
#define E1000_KMRNCTRLSTA_DIAG_OFFSET 0x3 /* Kumeran Diagnostic */
|
#define E1000_KMRNCTRLSTA_DIAG_OFFSET 0x3 /* Kumeran Diagnostic */
|
||||||
#define E1000_KMRNCTRLSTA_DIAG_NELPBK 0x1000 /* Nearend Loopback mode */
|
#define E1000_KMRNCTRLSTA_DIAG_NELPBK 0x1000 /* Nearend Loopback mode */
|
||||||
|
#define E1000_KMRNCTRLSTA_K1_CONFIG 0x7
|
||||||
|
#define E1000_KMRNCTRLSTA_K1_ENABLE 0x140E
|
||||||
|
#define E1000_KMRNCTRLSTA_K1_DISABLE 0x1400
|
||||||
|
|
||||||
#define IFE_PHY_EXTENDED_STATUS_CONTROL 0x10
|
#define IFE_PHY_EXTENDED_STATUS_CONTROL 0x10
|
||||||
#define IFE_PHY_SPECIAL_CONTROL 0x11 /* 100BaseTx PHY Special Control */
|
#define IFE_PHY_SPECIAL_CONTROL 0x11 /* 100BaseTx PHY Special Control */
|
||||||
|
@ -338,6 +338,7 @@ static s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw)
|
|||||||
{
|
{
|
||||||
struct e1000_nvm_info *nvm = &hw->nvm;
|
struct e1000_nvm_info *nvm = &hw->nvm;
|
||||||
struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
|
struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
|
||||||
|
union ich8_hws_flash_status hsfsts;
|
||||||
u32 gfpreg;
|
u32 gfpreg;
|
||||||
u32 sector_base_addr;
|
u32 sector_base_addr;
|
||||||
u32 sector_end_addr;
|
u32 sector_end_addr;
|
||||||
@ -374,6 +375,20 @@ static s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw)
|
|||||||
/* Adjust to word count */
|
/* Adjust to word count */
|
||||||
nvm->flash_bank_size /= sizeof(u16);
|
nvm->flash_bank_size /= sizeof(u16);
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Make sure the flash bank size does not overwrite the 4k
|
||||||
|
* sector ranges. We may have 64k allotted to us but we only care
|
||||||
|
* about the first 2 4k sectors. Therefore, if we have anything less
|
||||||
|
* than 64k set in the HSFSTS register, we will reduce the bank size
|
||||||
|
* down to 4k and let the rest remain unused. If berasesz == 3, then
|
||||||
|
* we are working in 64k mode. Otherwise we are not.
|
||||||
|
*/
|
||||||
|
if (nvm->flash_bank_size > E1000_ICH8_SHADOW_RAM_WORDS) {
|
||||||
|
hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
|
||||||
|
if (hsfsts.hsf_status.berasesz != 3)
|
||||||
|
nvm->flash_bank_size = E1000_ICH8_SHADOW_RAM_WORDS;
|
||||||
|
}
|
||||||
|
|
||||||
nvm->word_size = E1000_ICH8_SHADOW_RAM_WORDS;
|
nvm->word_size = E1000_ICH8_SHADOW_RAM_WORDS;
|
||||||
|
|
||||||
/* Clear shadow ram */
|
/* Clear shadow ram */
|
||||||
@ -446,6 +461,95 @@ static s32 e1000_init_mac_params_ich8lan(struct e1000_adapter *adapter)
|
|||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* e1000_check_for_copper_link_ich8lan - Check for link (Copper)
|
||||||
|
* @hw: pointer to the HW structure
|
||||||
|
*
|
||||||
|
* Checks to see of the link status of the hardware has changed. If a
|
||||||
|
* change in link status has been detected, then we read the PHY registers
|
||||||
|
* to get the current speed/duplex if link exists.
|
||||||
|
**/
|
||||||
|
static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
|
||||||
|
{
|
||||||
|
struct e1000_mac_info *mac = &hw->mac;
|
||||||
|
s32 ret_val;
|
||||||
|
bool link;
|
||||||
|
|
||||||
|
/*
|
||||||
|
* We only want to go out to the PHY registers to see if Auto-Neg
|
||||||
|
* has completed and/or if our link status has changed. The
|
||||||
|
* get_link_status flag is set upon receiving a Link Status
|
||||||
|
* Change or Rx Sequence Error interrupt.
|
||||||
|
*/
|
||||||
|
if (!mac->get_link_status) {
|
||||||
|
ret_val = 0;
|
||||||
|
goto out;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (hw->mac.type == e1000_pchlan) {
|
||||||
|
ret_val = e1000e_write_kmrn_reg(hw,
|
||||||
|
E1000_KMRNCTRLSTA_K1_CONFIG,
|
||||||
|
E1000_KMRNCTRLSTA_K1_ENABLE);
|
||||||
|
if (ret_val)
|
||||||
|
goto out;
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* First we want to see if the MII Status Register reports
|
||||||
|
* link. If so, then we want to get the current speed/duplex
|
||||||
|
* of the PHY.
|
||||||
|
*/
|
||||||
|
ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link);
|
||||||
|
if (ret_val)
|
||||||
|
goto out;
|
||||||
|
|
||||||
|
if (!link)
|
||||||
|
goto out; /* No link detected */
|
||||||
|
|
||||||
|
mac->get_link_status = false;
|
||||||
|
|
||||||
|
if (hw->phy.type == e1000_phy_82578) {
|
||||||
|
ret_val = e1000_link_stall_workaround_hv(hw);
|
||||||
|
if (ret_val)
|
||||||
|
goto out;
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Check if there was DownShift, must be checked
|
||||||
|
* immediately after link-up
|
||||||
|
*/
|
||||||
|
e1000e_check_downshift(hw);
|
||||||
|
|
||||||
|
/*
|
||||||
|
* If we are forcing speed/duplex, then we simply return since
|
||||||
|
* we have already determined whether we have link or not.
|
||||||
|
*/
|
||||||
|
if (!mac->autoneg) {
|
||||||
|
ret_val = -E1000_ERR_CONFIG;
|
||||||
|
goto out;
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Auto-Neg is enabled. Auto Speed Detection takes care
|
||||||
|
* of MAC speed/duplex configuration. So we only need to
|
||||||
|
* configure Collision Distance in the MAC.
|
||||||
|
*/
|
||||||
|
e1000e_config_collision_dist(hw);
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Configure Flow Control now that Auto-Neg has completed.
|
||||||
|
* First, we need to restore the desired flow control
|
||||||
|
* settings because we may have had to re-autoneg with a
|
||||||
|
* different link partner.
|
||||||
|
*/
|
||||||
|
ret_val = e1000e_config_fc_after_link_up(hw);
|
||||||
|
if (ret_val)
|
||||||
|
hw_dbg(hw, "Error configuring flow control\n");
|
||||||
|
|
||||||
|
out:
|
||||||
|
return ret_val;
|
||||||
|
}
|
||||||
|
|
||||||
static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter)
|
static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter)
|
||||||
{
|
{
|
||||||
struct e1000_hw *hw = &adapter->hw;
|
struct e1000_hw *hw = &adapter->hw;
|
||||||
@ -693,6 +797,38 @@ static s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw)
|
|||||||
return ret_val;
|
return ret_val;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* e1000_lan_init_done_ich8lan - Check for PHY config completion
|
||||||
|
* @hw: pointer to the HW structure
|
||||||
|
*
|
||||||
|
* Check the appropriate indication the MAC has finished configuring the
|
||||||
|
* PHY after a software reset.
|
||||||
|
**/
|
||||||
|
static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw)
|
||||||
|
{
|
||||||
|
u32 data, loop = E1000_ICH8_LAN_INIT_TIMEOUT;
|
||||||
|
|
||||||
|
/* Wait for basic configuration completes before proceeding */
|
||||||
|
do {
|
||||||
|
data = er32(STATUS);
|
||||||
|
data &= E1000_STATUS_LAN_INIT_DONE;
|
||||||
|
udelay(100);
|
||||||
|
} while ((!data) && --loop);
|
||||||
|
|
||||||
|
/*
|
||||||
|
* If basic configuration is incomplete before the above loop
|
||||||
|
* count reaches 0, loading the configuration from NVM will
|
||||||
|
* leave the PHY in a bad state possibly resulting in no link.
|
||||||
|
*/
|
||||||
|
if (loop == 0)
|
||||||
|
hw_dbg(hw, "LAN_INIT_DONE not set, increase timeout\n");
|
||||||
|
|
||||||
|
/* Clear the Init Done bit for the next init event */
|
||||||
|
data = er32(STATUS);
|
||||||
|
data &= ~E1000_STATUS_LAN_INIT_DONE;
|
||||||
|
ew32(STATUS, data);
|
||||||
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* e1000_phy_hw_reset_ich8lan - Performs a PHY reset
|
* e1000_phy_hw_reset_ich8lan - Performs a PHY reset
|
||||||
* @hw: pointer to the HW structure
|
* @hw: pointer to the HW structure
|
||||||
@ -707,13 +843,15 @@ static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw)
|
|||||||
u32 i;
|
u32 i;
|
||||||
u32 data, cnf_size, cnf_base_addr, sw_cfg_mask;
|
u32 data, cnf_size, cnf_base_addr, sw_cfg_mask;
|
||||||
s32 ret_val;
|
s32 ret_val;
|
||||||
u16 loop = E1000_ICH8_LAN_INIT_TIMEOUT;
|
|
||||||
u16 word_addr, reg_data, reg_addr, phy_page = 0;
|
u16 word_addr, reg_data, reg_addr, phy_page = 0;
|
||||||
|
|
||||||
ret_val = e1000e_phy_hw_reset_generic(hw);
|
ret_val = e1000e_phy_hw_reset_generic(hw);
|
||||||
if (ret_val)
|
if (ret_val)
|
||||||
return ret_val;
|
return ret_val;
|
||||||
|
|
||||||
|
/* Allow time for h/w to get to a quiescent state after reset */
|
||||||
|
mdelay(10);
|
||||||
|
|
||||||
if (hw->mac.type == e1000_pchlan) {
|
if (hw->mac.type == e1000_pchlan) {
|
||||||
ret_val = e1000_hv_phy_workarounds_ich8lan(hw);
|
ret_val = e1000_hv_phy_workarounds_ich8lan(hw);
|
||||||
if (ret_val)
|
if (ret_val)
|
||||||
@ -741,26 +879,8 @@ static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw)
|
|||||||
if (!(data & sw_cfg_mask))
|
if (!(data & sw_cfg_mask))
|
||||||
return 0;
|
return 0;
|
||||||
|
|
||||||
/* Wait for basic configuration completes before proceeding*/
|
/* Wait for basic configuration completes before proceeding */
|
||||||
do {
|
e1000_lan_init_done_ich8lan(hw);
|
||||||
data = er32(STATUS);
|
|
||||||
data &= E1000_STATUS_LAN_INIT_DONE;
|
|
||||||
udelay(100);
|
|
||||||
} while ((!data) && --loop);
|
|
||||||
|
|
||||||
/*
|
|
||||||
* If basic configuration is incomplete before the above loop
|
|
||||||
* count reaches 0, loading the configuration from NVM will
|
|
||||||
* leave the PHY in a bad state possibly resulting in no link.
|
|
||||||
*/
|
|
||||||
if (loop == 0) {
|
|
||||||
hw_dbg(hw, "LAN_INIT_DONE not set, increase timeout\n");
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Clear the Init Done bit for the next init event */
|
|
||||||
data = er32(STATUS);
|
|
||||||
data &= ~E1000_STATUS_LAN_INIT_DONE;
|
|
||||||
ew32(STATUS, data);
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Make sure HW does not configure LCD from PHY
|
* Make sure HW does not configure LCD from PHY
|
||||||
@ -961,12 +1081,14 @@ static s32 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
|
|||||||
phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU;
|
phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU;
|
||||||
ew32(PHY_CTRL, phy_ctrl);
|
ew32(PHY_CTRL, phy_ctrl);
|
||||||
|
|
||||||
|
if (phy->type != e1000_phy_igp_3)
|
||||||
|
return 0;
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Call gig speed drop workaround on LPLU before accessing
|
* Call gig speed drop workaround on LPLU before accessing
|
||||||
* any PHY registers
|
* any PHY registers
|
||||||
*/
|
*/
|
||||||
if ((hw->mac.type == e1000_ich8lan) &&
|
if (hw->mac.type == e1000_ich8lan)
|
||||||
(hw->phy.type == e1000_phy_igp_3))
|
|
||||||
e1000e_gig_downshift_workaround_ich8lan(hw);
|
e1000e_gig_downshift_workaround_ich8lan(hw);
|
||||||
|
|
||||||
/* When LPLU is enabled, we should disable SmartSpeed */
|
/* When LPLU is enabled, we should disable SmartSpeed */
|
||||||
@ -979,6 +1101,9 @@ static s32 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
|
|||||||
phy_ctrl &= ~E1000_PHY_CTRL_D0A_LPLU;
|
phy_ctrl &= ~E1000_PHY_CTRL_D0A_LPLU;
|
||||||
ew32(PHY_CTRL, phy_ctrl);
|
ew32(PHY_CTRL, phy_ctrl);
|
||||||
|
|
||||||
|
if (phy->type != e1000_phy_igp_3)
|
||||||
|
return 0;
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* LPLU and SmartSpeed are mutually exclusive. LPLU is used
|
* LPLU and SmartSpeed are mutually exclusive. LPLU is used
|
||||||
* during Dx states where the power conservation is most
|
* during Dx states where the power conservation is most
|
||||||
@ -1038,6 +1163,10 @@ static s32 e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
|
|||||||
if (!active) {
|
if (!active) {
|
||||||
phy_ctrl &= ~E1000_PHY_CTRL_NOND0A_LPLU;
|
phy_ctrl &= ~E1000_PHY_CTRL_NOND0A_LPLU;
|
||||||
ew32(PHY_CTRL, phy_ctrl);
|
ew32(PHY_CTRL, phy_ctrl);
|
||||||
|
|
||||||
|
if (phy->type != e1000_phy_igp_3)
|
||||||
|
return 0;
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* LPLU and SmartSpeed are mutually exclusive. LPLU is used
|
* LPLU and SmartSpeed are mutually exclusive. LPLU is used
|
||||||
* during Dx states where the power conservation is most
|
* during Dx states where the power conservation is most
|
||||||
@ -1073,12 +1202,14 @@ static s32 e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
|
|||||||
phy_ctrl |= E1000_PHY_CTRL_NOND0A_LPLU;
|
phy_ctrl |= E1000_PHY_CTRL_NOND0A_LPLU;
|
||||||
ew32(PHY_CTRL, phy_ctrl);
|
ew32(PHY_CTRL, phy_ctrl);
|
||||||
|
|
||||||
|
if (phy->type != e1000_phy_igp_3)
|
||||||
|
return 0;
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Call gig speed drop workaround on LPLU before accessing
|
* Call gig speed drop workaround on LPLU before accessing
|
||||||
* any PHY registers
|
* any PHY registers
|
||||||
*/
|
*/
|
||||||
if ((hw->mac.type == e1000_ich8lan) &&
|
if (hw->mac.type == e1000_ich8lan)
|
||||||
(hw->phy.type == e1000_phy_igp_3))
|
|
||||||
e1000e_gig_downshift_workaround_ich8lan(hw);
|
e1000e_gig_downshift_workaround_ich8lan(hw);
|
||||||
|
|
||||||
/* When LPLU is enabled, we should disable SmartSpeed */
|
/* When LPLU is enabled, we should disable SmartSpeed */
|
||||||
@ -1905,7 +2036,7 @@ static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank)
|
|||||||
break;
|
break;
|
||||||
case 1:
|
case 1:
|
||||||
sector_size = ICH_FLASH_SEG_SIZE_4K;
|
sector_size = ICH_FLASH_SEG_SIZE_4K;
|
||||||
iteration = flash_bank_size / ICH_FLASH_SEG_SIZE_4K;
|
iteration = 1;
|
||||||
break;
|
break;
|
||||||
case 2:
|
case 2:
|
||||||
if (hw->mac.type == e1000_ich9lan) {
|
if (hw->mac.type == e1000_ich9lan) {
|
||||||
@ -1917,7 +2048,7 @@ static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank)
|
|||||||
break;
|
break;
|
||||||
case 3:
|
case 3:
|
||||||
sector_size = ICH_FLASH_SEG_SIZE_64K;
|
sector_size = ICH_FLASH_SEG_SIZE_64K;
|
||||||
iteration = flash_bank_size / ICH_FLASH_SEG_SIZE_64K;
|
iteration = 1;
|
||||||
break;
|
break;
|
||||||
default:
|
default:
|
||||||
return -E1000_ERR_NVM;
|
return -E1000_ERR_NVM;
|
||||||
@ -2143,6 +2274,12 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
|
|||||||
ctrl = er32(CTRL);
|
ctrl = er32(CTRL);
|
||||||
|
|
||||||
if (!e1000_check_reset_block(hw)) {
|
if (!e1000_check_reset_block(hw)) {
|
||||||
|
/* Clear PHY Reset Asserted bit */
|
||||||
|
if (hw->mac.type >= e1000_pchlan) {
|
||||||
|
u32 status = er32(STATUS);
|
||||||
|
ew32(STATUS, status & ~E1000_STATUS_PHYRA);
|
||||||
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* PHY HW reset requires MAC CORE reset at the same
|
* PHY HW reset requires MAC CORE reset at the same
|
||||||
* time to make sure the interface between MAC and the
|
* time to make sure the interface between MAC and the
|
||||||
@ -2156,22 +2293,33 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
|
|||||||
ew32(CTRL, (ctrl | E1000_CTRL_RST));
|
ew32(CTRL, (ctrl | E1000_CTRL_RST));
|
||||||
msleep(20);
|
msleep(20);
|
||||||
|
|
||||||
if (!ret_val) {
|
if (!ret_val)
|
||||||
/* release the swflag because it is not reset by
|
|
||||||
* hardware reset
|
|
||||||
*/
|
|
||||||
e1000_release_swflag_ich8lan(hw);
|
e1000_release_swflag_ich8lan(hw);
|
||||||
|
|
||||||
|
if (ctrl & E1000_CTRL_PHY_RST)
|
||||||
|
ret_val = hw->phy.ops.get_cfg_done(hw);
|
||||||
|
|
||||||
|
if (hw->mac.type >= e1000_ich10lan) {
|
||||||
|
e1000_lan_init_done_ich8lan(hw);
|
||||||
|
} else {
|
||||||
|
ret_val = e1000e_get_auto_rd_done(hw);
|
||||||
|
if (ret_val) {
|
||||||
|
/*
|
||||||
|
* When auto config read does not complete, do not
|
||||||
|
* return with an error. This can happen in situations
|
||||||
|
* where there is no eeprom and prevents getting link.
|
||||||
|
*/
|
||||||
|
hw_dbg(hw, "Auto Read Done did not complete\n");
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
ret_val = e1000e_get_auto_rd_done(hw);
|
/*
|
||||||
if (ret_val) {
|
* For PCH, this write will make sure that any noise
|
||||||
/*
|
* will be detected as a CRC error and be dropped rather than show up
|
||||||
* When auto config read does not complete, do not
|
* as a bad packet to the DMA engine.
|
||||||
* return with an error. This can happen in situations
|
*/
|
||||||
* where there is no eeprom and prevents getting link.
|
if (hw->mac.type == e1000_pchlan)
|
||||||
*/
|
ew32(CRC_OFFSET, 0x65656565);
|
||||||
hw_dbg(hw, "Auto Read Done did not complete\n");
|
|
||||||
}
|
|
||||||
|
|
||||||
ew32(IMC, 0xffffffff);
|
ew32(IMC, 0xffffffff);
|
||||||
icr = er32(ICR);
|
icr = er32(ICR);
|
||||||
@ -2222,6 +2370,18 @@ static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
|
|||||||
for (i = 0; i < mac->mta_reg_count; i++)
|
for (i = 0; i < mac->mta_reg_count; i++)
|
||||||
E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);
|
E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);
|
||||||
|
|
||||||
|
/*
|
||||||
|
* The 82578 Rx buffer will stall if wakeup is enabled in host and
|
||||||
|
* the ME. Reading the BM_WUC register will clear the host wakeup bit.
|
||||||
|
* Reset the phy after disabling host wakeup to reset the Rx buffer.
|
||||||
|
*/
|
||||||
|
if (hw->phy.type == e1000_phy_82578) {
|
||||||
|
hw->phy.ops.read_phy_reg(hw, BM_WUC, &i);
|
||||||
|
ret_val = e1000_phy_hw_reset_ich8lan(hw);
|
||||||
|
if (ret_val)
|
||||||
|
return ret_val;
|
||||||
|
}
|
||||||
|
|
||||||
/* Setup link and flow control */
|
/* Setup link and flow control */
|
||||||
ret_val = e1000_setup_link_ich8lan(hw);
|
ret_val = e1000_setup_link_ich8lan(hw);
|
||||||
|
|
||||||
@ -2253,16 +2413,6 @@ static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
|
|||||||
ctrl_ext |= E1000_CTRL_EXT_RO_DIS;
|
ctrl_ext |= E1000_CTRL_EXT_RO_DIS;
|
||||||
ew32(CTRL_EXT, ctrl_ext);
|
ew32(CTRL_EXT, ctrl_ext);
|
||||||
|
|
||||||
/*
|
|
||||||
* The 82578 Rx buffer will stall if wakeup is enabled in host and
|
|
||||||
* the ME. Reading the BM_WUC register will clear the host wakeup bit.
|
|
||||||
* Reset the phy after disabling host wakeup to reset the Rx buffer.
|
|
||||||
*/
|
|
||||||
if (hw->phy.type == e1000_phy_82578) {
|
|
||||||
e1e_rphy(hw, BM_WUC, &i);
|
|
||||||
e1000e_phy_hw_reset_generic(hw);
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Clear all of the statistics registers (clear on read). It is
|
* Clear all of the statistics registers (clear on read). It is
|
||||||
* important that we do this after we have tried to establish link
|
* important that we do this after we have tried to establish link
|
||||||
@ -2485,6 +2635,14 @@ static s32 e1000_get_link_up_info_ich8lan(struct e1000_hw *hw, u16 *speed,
|
|||||||
if (ret_val)
|
if (ret_val)
|
||||||
return ret_val;
|
return ret_val;
|
||||||
|
|
||||||
|
if ((hw->mac.type == e1000_pchlan) && (*speed == SPEED_1000)) {
|
||||||
|
ret_val = e1000e_write_kmrn_reg(hw,
|
||||||
|
E1000_KMRNCTRLSTA_K1_CONFIG,
|
||||||
|
E1000_KMRNCTRLSTA_K1_DISABLE);
|
||||||
|
if (ret_val)
|
||||||
|
return ret_val;
|
||||||
|
}
|
||||||
|
|
||||||
if ((hw->mac.type == e1000_ich8lan) &&
|
if ((hw->mac.type == e1000_ich8lan) &&
|
||||||
(hw->phy.type == e1000_phy_igp_3) &&
|
(hw->phy.type == e1000_phy_igp_3) &&
|
||||||
(*speed == SPEED_1000)) {
|
(*speed == SPEED_1000)) {
|
||||||
@ -2850,6 +3008,16 @@ static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw)
|
|||||||
{
|
{
|
||||||
u32 bank = 0;
|
u32 bank = 0;
|
||||||
|
|
||||||
|
if (hw->mac.type >= e1000_pchlan) {
|
||||||
|
u32 status = er32(STATUS);
|
||||||
|
|
||||||
|
if (status & E1000_STATUS_PHYRA)
|
||||||
|
ew32(STATUS, status & ~E1000_STATUS_PHYRA);
|
||||||
|
else
|
||||||
|
hw_dbg(hw,
|
||||||
|
"PHY Reset Asserted not set - needs delay\n");
|
||||||
|
}
|
||||||
|
|
||||||
e1000e_get_cfg_done(hw);
|
e1000e_get_cfg_done(hw);
|
||||||
|
|
||||||
/* If EEPROM is not marked present, init the IGP 3 PHY manually */
|
/* If EEPROM is not marked present, init the IGP 3 PHY manually */
|
||||||
@ -2921,7 +3089,7 @@ static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw)
|
|||||||
static struct e1000_mac_operations ich8_mac_ops = {
|
static struct e1000_mac_operations ich8_mac_ops = {
|
||||||
.id_led_init = e1000e_id_led_init,
|
.id_led_init = e1000e_id_led_init,
|
||||||
.check_mng_mode = e1000_check_mng_mode_ich8lan,
|
.check_mng_mode = e1000_check_mng_mode_ich8lan,
|
||||||
.check_for_link = e1000e_check_for_copper_link,
|
.check_for_link = e1000_check_for_copper_link_ich8lan,
|
||||||
/* cleanup_led dependent on mac type */
|
/* cleanup_led dependent on mac type */
|
||||||
.clear_hw_cntrs = e1000_clear_hw_cntrs_ich8lan,
|
.clear_hw_cntrs = e1000_clear_hw_cntrs_ich8lan,
|
||||||
.get_bus_info = e1000_get_bus_info_ich8lan,
|
.get_bus_info = e1000_get_bus_info_ich8lan,
|
||||||
|
@ -378,12 +378,6 @@ s32 e1000e_check_for_copper_link(struct e1000_hw *hw)
|
|||||||
|
|
||||||
mac->get_link_status = 0;
|
mac->get_link_status = 0;
|
||||||
|
|
||||||
if (hw->phy.type == e1000_phy_82578) {
|
|
||||||
ret_val = e1000_link_stall_workaround_hv(hw);
|
|
||||||
if (ret_val)
|
|
||||||
return ret_val;
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Check if there was DownShift, must be checked
|
* Check if there was DownShift, must be checked
|
||||||
* immediately after link-up
|
* immediately after link-up
|
||||||
|
@ -1531,7 +1531,12 @@ s32 e1000e_phy_has_link_generic(struct e1000_hw *hw, u32 iterations,
|
|||||||
*/
|
*/
|
||||||
ret_val = e1e_rphy(hw, PHY_STATUS, &phy_status);
|
ret_val = e1e_rphy(hw, PHY_STATUS, &phy_status);
|
||||||
if (ret_val)
|
if (ret_val)
|
||||||
break;
|
/*
|
||||||
|
* If the first read fails, another entity may have
|
||||||
|
* ownership of the resources, wait and try again to
|
||||||
|
* see if they have relinquished the resources yet.
|
||||||
|
*/
|
||||||
|
udelay(usec_interval);
|
||||||
ret_val = e1e_rphy(hw, PHY_STATUS, &phy_status);
|
ret_val = e1e_rphy(hw, PHY_STATUS, &phy_status);
|
||||||
if (ret_val)
|
if (ret_val)
|
||||||
break;
|
break;
|
||||||
@ -2737,6 +2742,11 @@ s32 e1000_link_stall_workaround_hv(struct e1000_hw *hw)
|
|||||||
if (hw->phy.type != e1000_phy_82578)
|
if (hw->phy.type != e1000_phy_82578)
|
||||||
goto out;
|
goto out;
|
||||||
|
|
||||||
|
/* Do not apply workaround if in PHY loopback bit 14 set */
|
||||||
|
hw->phy.ops.read_phy_reg(hw, PHY_CONTROL, &data);
|
||||||
|
if (data & PHY_CONTROL_LB)
|
||||||
|
goto out;
|
||||||
|
|
||||||
/* check if link is up and at 1Gbps */
|
/* check if link is up and at 1Gbps */
|
||||||
ret_val = hw->phy.ops.read_phy_reg(hw, BM_CS_STATUS, &data);
|
ret_val = hw->phy.ops.read_phy_reg(hw, BM_CS_STATUS, &data);
|
||||||
if (ret_val)
|
if (ret_val)
|
||||||
|
@ -3514,11 +3514,13 @@ static irqreturn_t nv_nic_irq(int foo, void *data)
|
|||||||
nv_msi_workaround(np);
|
nv_msi_workaround(np);
|
||||||
|
|
||||||
#ifdef CONFIG_FORCEDETH_NAPI
|
#ifdef CONFIG_FORCEDETH_NAPI
|
||||||
napi_schedule(&np->napi);
|
if (napi_schedule_prep(&np->napi)) {
|
||||||
|
/*
|
||||||
/* Disable furthur irq's
|
* Disable further irq's (msix not enabled with napi)
|
||||||
(msix not enabled with napi) */
|
*/
|
||||||
writel(0, base + NvRegIrqMask);
|
writel(0, base + NvRegIrqMask);
|
||||||
|
__napi_schedule(&np->napi);
|
||||||
|
}
|
||||||
|
|
||||||
#else
|
#else
|
||||||
do
|
do
|
||||||
@ -3615,12 +3617,13 @@ static irqreturn_t nv_nic_irq_optimized(int foo, void *data)
|
|||||||
nv_msi_workaround(np);
|
nv_msi_workaround(np);
|
||||||
|
|
||||||
#ifdef CONFIG_FORCEDETH_NAPI
|
#ifdef CONFIG_FORCEDETH_NAPI
|
||||||
napi_schedule(&np->napi);
|
if (napi_schedule_prep(&np->napi)) {
|
||||||
|
/*
|
||||||
/* Disable furthur irq's
|
* Disable further irq's (msix not enabled with napi)
|
||||||
(msix not enabled with napi) */
|
*/
|
||||||
writel(0, base + NvRegIrqMask);
|
writel(0, base + NvRegIrqMask);
|
||||||
|
__napi_schedule(&np->napi);
|
||||||
|
}
|
||||||
#else
|
#else
|
||||||
do
|
do
|
||||||
{
|
{
|
||||||
|
@ -1830,7 +1830,6 @@ static int ixgbe_wol_exclusion(struct ixgbe_adapter *adapter,
|
|||||||
break;
|
break;
|
||||||
default:
|
default:
|
||||||
wol->supported = 0;
|
wol->supported = 0;
|
||||||
retval = 0;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
return retval;
|
return retval;
|
||||||
|
@ -2697,19 +2697,23 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
|
|||||||
|
|
||||||
/*
|
/*
|
||||||
* For hot-pluggable SFP+ devices, a new SFP+ module may have
|
* For hot-pluggable SFP+ devices, a new SFP+ module may have
|
||||||
* arrived before interrupts were enabled. We need to kick off
|
* arrived before interrupts were enabled but after probe. Such
|
||||||
* the SFP+ module setup first, then try to bring up link.
|
* devices wouldn't have their type identified yet. We need to
|
||||||
|
* kick off the SFP+ module setup first, then try to bring up link.
|
||||||
* If we're not hot-pluggable SFP+, we just need to configure link
|
* If we're not hot-pluggable SFP+, we just need to configure link
|
||||||
* and bring it up.
|
* and bring it up.
|
||||||
*/
|
*/
|
||||||
err = hw->phy.ops.identify(hw);
|
if (hw->phy.type == ixgbe_phy_unknown) {
|
||||||
if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
|
err = hw->phy.ops.identify(hw);
|
||||||
dev_err(&adapter->pdev->dev, "failed to initialize because "
|
if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
|
||||||
"an unsupported SFP+ module type was detected.\n"
|
/*
|
||||||
"Reload the driver after installing a supported "
|
* Take the device down and schedule the sfp tasklet
|
||||||
"module.\n");
|
* which will unregister_netdev and log it.
|
||||||
ixgbe_down(adapter);
|
*/
|
||||||
return err;
|
ixgbe_down(adapter);
|
||||||
|
schedule_work(&adapter->sfp_config_module_task);
|
||||||
|
return err;
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if (ixgbe_is_sfp(hw)) {
|
if (ixgbe_is_sfp(hw)) {
|
||||||
@ -3724,7 +3728,7 @@ static void ixgbe_sfp_task(struct work_struct *work)
|
|||||||
if ((hw->phy.type == ixgbe_phy_nl) &&
|
if ((hw->phy.type == ixgbe_phy_nl) &&
|
||||||
(hw->phy.sfp_type == ixgbe_sfp_type_not_present)) {
|
(hw->phy.sfp_type == ixgbe_sfp_type_not_present)) {
|
||||||
s32 ret = hw->phy.ops.identify_sfp(hw);
|
s32 ret = hw->phy.ops.identify_sfp(hw);
|
||||||
if (ret)
|
if (ret == IXGBE_ERR_SFP_NOT_PRESENT)
|
||||||
goto reschedule;
|
goto reschedule;
|
||||||
ret = hw->phy.ops.reset(hw);
|
ret = hw->phy.ops.reset(hw);
|
||||||
if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
|
if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
|
||||||
@ -4534,13 +4538,17 @@ static void ixgbe_sfp_config_module_task(struct work_struct *work)
|
|||||||
u32 err;
|
u32 err;
|
||||||
|
|
||||||
adapter->flags |= IXGBE_FLAG_IN_SFP_MOD_TASK;
|
adapter->flags |= IXGBE_FLAG_IN_SFP_MOD_TASK;
|
||||||
|
|
||||||
|
/* Time for electrical oscillations to settle down */
|
||||||
|
msleep(100);
|
||||||
err = hw->phy.ops.identify_sfp(hw);
|
err = hw->phy.ops.identify_sfp(hw);
|
||||||
|
|
||||||
if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
|
if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
|
||||||
dev_err(&adapter->pdev->dev, "failed to initialize because "
|
dev_err(&adapter->pdev->dev, "failed to initialize because "
|
||||||
"an unsupported SFP+ module type was detected.\n"
|
"an unsupported SFP+ module type was detected.\n"
|
||||||
"Reload the driver after installing a supported "
|
"Reload the driver after installing a supported "
|
||||||
"module.\n");
|
"module.\n");
|
||||||
ixgbe_down(adapter);
|
unregister_netdev(adapter->netdev);
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
hw->mac.ops.setup_sfp(hw);
|
hw->mac.ops.setup_sfp(hw);
|
||||||
|
@ -60,6 +60,7 @@ s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw)
|
|||||||
|
|
||||||
if (hw->phy.type == ixgbe_phy_unknown) {
|
if (hw->phy.type == ixgbe_phy_unknown) {
|
||||||
for (phy_addr = 0; phy_addr < IXGBE_MAX_PHY_ADDR; phy_addr++) {
|
for (phy_addr = 0; phy_addr < IXGBE_MAX_PHY_ADDR; phy_addr++) {
|
||||||
|
hw->phy.mdio.prtad = phy_addr;
|
||||||
if (mdio45_probe(&hw->phy.mdio, phy_addr) == 0) {
|
if (mdio45_probe(&hw->phy.mdio, phy_addr) == 0) {
|
||||||
ixgbe_get_phy_id(hw);
|
ixgbe_get_phy_id(hw);
|
||||||
hw->phy.type =
|
hw->phy.type =
|
||||||
@ -68,6 +69,8 @@ s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw)
|
|||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
/* clear value if nothing found */
|
||||||
|
hw->phy.mdio.prtad = 0;
|
||||||
} else {
|
} else {
|
||||||
status = 0;
|
status = 0;
|
||||||
}
|
}
|
||||||
|
@ -60,7 +60,18 @@
|
|||||||
#define _NETXEN_NIC_LINUX_SUBVERSION 30
|
#define _NETXEN_NIC_LINUX_SUBVERSION 30
|
||||||
#define NETXEN_NIC_LINUX_VERSIONID "4.0.30"
|
#define NETXEN_NIC_LINUX_VERSIONID "4.0.30"
|
||||||
|
|
||||||
#define NETXEN_VERSION_CODE(a, b, c) (((a) << 16) + ((b) << 8) + (c))
|
#define NETXEN_VERSION_CODE(a, b, c) (((a) << 24) + ((b) << 16) + (c))
|
||||||
|
#define _major(v) (((v) >> 24) & 0xff)
|
||||||
|
#define _minor(v) (((v) >> 16) & 0xff)
|
||||||
|
#define _build(v) ((v) & 0xffff)
|
||||||
|
|
||||||
|
/* version in image has weird encoding:
|
||||||
|
* 7:0 - major
|
||||||
|
* 15:8 - minor
|
||||||
|
* 31:16 - build (little endian)
|
||||||
|
*/
|
||||||
|
#define NETXEN_DECODE_VERSION(v) \
|
||||||
|
NETXEN_VERSION_CODE(((v) & 0xff), (((v) >> 8) & 0xff), ((v) >> 16))
|
||||||
|
|
||||||
#define NETXEN_NUM_FLASH_SECTORS (64)
|
#define NETXEN_NUM_FLASH_SECTORS (64)
|
||||||
#define NETXEN_FLASH_SECTOR_SIZE (64 * 1024)
|
#define NETXEN_FLASH_SECTOR_SIZE (64 * 1024)
|
||||||
@ -614,6 +625,7 @@ struct netxen_new_user_info {
|
|||||||
#define NX_P2_MN_ROMIMAGE 0
|
#define NX_P2_MN_ROMIMAGE 0
|
||||||
#define NX_P3_CT_ROMIMAGE 1
|
#define NX_P3_CT_ROMIMAGE 1
|
||||||
#define NX_P3_MN_ROMIMAGE 2
|
#define NX_P3_MN_ROMIMAGE 2
|
||||||
|
#define NX_FLASH_ROMIMAGE 3
|
||||||
|
|
||||||
#define NETXEN_USER_START_OLD NETXEN_PXE_START /* for backward compatibility */
|
#define NETXEN_USER_START_OLD NETXEN_PXE_START /* for backward compatibility */
|
||||||
|
|
||||||
@ -1243,7 +1255,7 @@ struct netxen_adapter {
|
|||||||
u32 resv3;
|
u32 resv3;
|
||||||
|
|
||||||
u8 has_link_events;
|
u8 has_link_events;
|
||||||
u8 resv1;
|
u8 fw_type;
|
||||||
u16 tx_context_id;
|
u16 tx_context_id;
|
||||||
u16 mtu;
|
u16 mtu;
|
||||||
u16 is_up;
|
u16 is_up;
|
||||||
@ -1387,6 +1399,7 @@ void netxen_free_adapter_offload(struct netxen_adapter *adapter);
|
|||||||
int netxen_initialize_adapter_offload(struct netxen_adapter *adapter);
|
int netxen_initialize_adapter_offload(struct netxen_adapter *adapter);
|
||||||
int netxen_phantom_init(struct netxen_adapter *adapter, int pegtune_val);
|
int netxen_phantom_init(struct netxen_adapter *adapter, int pegtune_val);
|
||||||
int netxen_load_firmware(struct netxen_adapter *adapter);
|
int netxen_load_firmware(struct netxen_adapter *adapter);
|
||||||
|
int netxen_need_fw_reset(struct netxen_adapter *adapter);
|
||||||
void netxen_request_firmware(struct netxen_adapter *adapter);
|
void netxen_request_firmware(struct netxen_adapter *adapter);
|
||||||
void netxen_release_firmware(struct netxen_adapter *adapter);
|
void netxen_release_firmware(struct netxen_adapter *adapter);
|
||||||
int netxen_pinit_from_rom(struct netxen_adapter *adapter, int verbose);
|
int netxen_pinit_from_rom(struct netxen_adapter *adapter, int verbose);
|
||||||
|
@ -853,6 +853,7 @@ enum {
|
|||||||
#define NX_PEG_TUNE_CAPABILITY (NETXEN_CAM_RAM(0x02c))
|
#define NX_PEG_TUNE_CAPABILITY (NETXEN_CAM_RAM(0x02c))
|
||||||
|
|
||||||
#define NETXEN_CAM_RAM_DMA_WATCHDOG_CTRL (0x14)
|
#define NETXEN_CAM_RAM_DMA_WATCHDOG_CTRL (0x14)
|
||||||
|
#define NETXEN_PEG_ALIVE_COUNTER (NETXEN_CAM_RAM(0xb0))
|
||||||
|
|
||||||
#define ISR_MSI_INT_TRIGGER(FUNC) (NETXEN_PCIX_PS_REG(PCIX_MSI_F(FUNC)))
|
#define ISR_MSI_INT_TRIGGER(FUNC) (NETXEN_PCIX_PS_REG(PCIX_MSI_F(FUNC)))
|
||||||
#define ISR_LEGACY_INT_TRIGGERED(VAL) (((VAL) & 0x300) == 0x200)
|
#define ISR_LEGACY_INT_TRIGGERED(VAL) (((VAL) & 0x300) == 0x200)
|
||||||
|
@ -683,12 +683,85 @@ int netxen_pinit_from_rom(struct netxen_adapter *adapter, int verbose)
|
|||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
int
|
||||||
|
netxen_need_fw_reset(struct netxen_adapter *adapter)
|
||||||
|
{
|
||||||
|
u32 count, old_count;
|
||||||
|
u32 val, version, major, minor, build;
|
||||||
|
int i, timeout;
|
||||||
|
u8 fw_type;
|
||||||
|
|
||||||
|
/* NX2031 firmware doesn't support heartbit */
|
||||||
|
if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
|
||||||
|
return 1;
|
||||||
|
|
||||||
|
/* last attempt had failed */
|
||||||
|
if (NXRD32(adapter, CRB_CMDPEG_STATE) == PHAN_INITIALIZE_FAILED)
|
||||||
|
return 1;
|
||||||
|
|
||||||
|
old_count = count = NXRD32(adapter, NETXEN_PEG_ALIVE_COUNTER);
|
||||||
|
|
||||||
|
for (i = 0; i < 10; i++) {
|
||||||
|
|
||||||
|
timeout = msleep_interruptible(200);
|
||||||
|
if (timeout) {
|
||||||
|
NXWR32(adapter, CRB_CMDPEG_STATE,
|
||||||
|
PHAN_INITIALIZE_FAILED);
|
||||||
|
return -EINTR;
|
||||||
|
}
|
||||||
|
|
||||||
|
count = NXRD32(adapter, NETXEN_PEG_ALIVE_COUNTER);
|
||||||
|
if (count != old_count)
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
/* firmware is dead */
|
||||||
|
if (count == old_count)
|
||||||
|
return 1;
|
||||||
|
|
||||||
|
/* check if we have got newer or different file firmware */
|
||||||
|
if (adapter->fw) {
|
||||||
|
|
||||||
|
const struct firmware *fw = adapter->fw;
|
||||||
|
|
||||||
|
val = cpu_to_le32(*(u32 *)&fw->data[NX_FW_VERSION_OFFSET]);
|
||||||
|
version = NETXEN_DECODE_VERSION(val);
|
||||||
|
|
||||||
|
major = NXRD32(adapter, NETXEN_FW_VERSION_MAJOR);
|
||||||
|
minor = NXRD32(adapter, NETXEN_FW_VERSION_MINOR);
|
||||||
|
build = NXRD32(adapter, NETXEN_FW_VERSION_SUB);
|
||||||
|
|
||||||
|
if (version > NETXEN_VERSION_CODE(major, minor, build))
|
||||||
|
return 1;
|
||||||
|
|
||||||
|
if (version == NETXEN_VERSION_CODE(major, minor, build)) {
|
||||||
|
|
||||||
|
val = NXRD32(adapter, NETXEN_MIU_MN_CONTROL);
|
||||||
|
fw_type = (val & 0x4) ?
|
||||||
|
NX_P3_CT_ROMIMAGE : NX_P3_MN_ROMIMAGE;
|
||||||
|
|
||||||
|
if (adapter->fw_type != fw_type)
|
||||||
|
return 1;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
static char *fw_name[] = {
|
||||||
|
"nxromimg.bin", "nx3fwct.bin", "nx3fwmn.bin", "flash",
|
||||||
|
};
|
||||||
|
|
||||||
int
|
int
|
||||||
netxen_load_firmware(struct netxen_adapter *adapter)
|
netxen_load_firmware(struct netxen_adapter *adapter)
|
||||||
{
|
{
|
||||||
u64 *ptr64;
|
u64 *ptr64;
|
||||||
u32 i, flashaddr, size;
|
u32 i, flashaddr, size;
|
||||||
const struct firmware *fw = adapter->fw;
|
const struct firmware *fw = adapter->fw;
|
||||||
|
struct pci_dev *pdev = adapter->pdev;
|
||||||
|
|
||||||
|
dev_info(&pdev->dev, "loading firmware from %s\n",
|
||||||
|
fw_name[adapter->fw_type]);
|
||||||
|
|
||||||
if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
|
if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
|
||||||
NXWR32(adapter, NETXEN_ROMUSB_GLB_CAS_RST, 1);
|
NXWR32(adapter, NETXEN_ROMUSB_GLB_CAS_RST, 1);
|
||||||
@ -756,7 +829,7 @@ static int
|
|||||||
netxen_validate_firmware(struct netxen_adapter *adapter, const char *fwname)
|
netxen_validate_firmware(struct netxen_adapter *adapter, const char *fwname)
|
||||||
{
|
{
|
||||||
__le32 val;
|
__le32 val;
|
||||||
u32 major, minor, build, ver, min_ver, bios;
|
u32 ver, min_ver, bios;
|
||||||
struct pci_dev *pdev = adapter->pdev;
|
struct pci_dev *pdev = adapter->pdev;
|
||||||
const struct firmware *fw = adapter->fw;
|
const struct firmware *fw = adapter->fw;
|
||||||
|
|
||||||
@ -768,21 +841,18 @@ netxen_validate_firmware(struct netxen_adapter *adapter, const char *fwname)
|
|||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
|
|
||||||
val = cpu_to_le32(*(u32 *)&fw->data[NX_FW_VERSION_OFFSET]);
|
val = cpu_to_le32(*(u32 *)&fw->data[NX_FW_VERSION_OFFSET]);
|
||||||
major = (__force u32)val & 0xff;
|
|
||||||
minor = ((__force u32)val >> 8) & 0xff;
|
|
||||||
build = (__force u32)val >> 16;
|
|
||||||
|
|
||||||
if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
|
if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
|
||||||
min_ver = NETXEN_VERSION_CODE(4, 0, 216);
|
min_ver = NETXEN_VERSION_CODE(4, 0, 216);
|
||||||
else
|
else
|
||||||
min_ver = NETXEN_VERSION_CODE(3, 4, 216);
|
min_ver = NETXEN_VERSION_CODE(3, 4, 216);
|
||||||
|
|
||||||
ver = NETXEN_VERSION_CODE(major, minor, build);
|
ver = NETXEN_DECODE_VERSION(val);
|
||||||
|
|
||||||
if ((major > _NETXEN_NIC_LINUX_MAJOR) || (ver < min_ver)) {
|
if ((_major(ver) > _NETXEN_NIC_LINUX_MAJOR) || (ver < min_ver)) {
|
||||||
dev_err(&pdev->dev,
|
dev_err(&pdev->dev,
|
||||||
"%s: firmware version %d.%d.%d unsupported\n",
|
"%s: firmware version %d.%d.%d unsupported\n",
|
||||||
fwname, major, minor, build);
|
fwname, _major(ver), _minor(ver), _build(ver));
|
||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -798,22 +868,21 @@ netxen_validate_firmware(struct netxen_adapter *adapter, const char *fwname)
|
|||||||
if (netxen_rom_fast_read(adapter,
|
if (netxen_rom_fast_read(adapter,
|
||||||
NX_FW_VERSION_OFFSET, (int *)&val))
|
NX_FW_VERSION_OFFSET, (int *)&val))
|
||||||
return -EIO;
|
return -EIO;
|
||||||
major = (__force u32)val & 0xff;
|
val = NETXEN_DECODE_VERSION(val);
|
||||||
minor = ((__force u32)val >> 8) & 0xff;
|
if (val > ver) {
|
||||||
build = (__force u32)val >> 16;
|
dev_info(&pdev->dev, "%s: firmware is older than flash\n",
|
||||||
if (NETXEN_VERSION_CODE(major, minor, build) > ver)
|
fwname);
|
||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
|
}
|
||||||
|
|
||||||
NXWR32(adapter, NETXEN_CAM_RAM(0x1fc), NETXEN_BDINFO_MAGIC);
|
NXWR32(adapter, NETXEN_CAM_RAM(0x1fc), NETXEN_BDINFO_MAGIC);
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
static char *fw_name[] = { "nxromimg.bin", "nx3fwct.bin", "nx3fwmn.bin" };
|
|
||||||
|
|
||||||
void netxen_request_firmware(struct netxen_adapter *adapter)
|
void netxen_request_firmware(struct netxen_adapter *adapter)
|
||||||
{
|
{
|
||||||
u32 capability, flashed_ver;
|
u32 capability, flashed_ver;
|
||||||
int fw_type;
|
u8 fw_type;
|
||||||
struct pci_dev *pdev = adapter->pdev;
|
struct pci_dev *pdev = adapter->pdev;
|
||||||
int rc = 0;
|
int rc = 0;
|
||||||
|
|
||||||
@ -830,6 +899,8 @@ request_mn:
|
|||||||
|
|
||||||
netxen_rom_fast_read(adapter,
|
netxen_rom_fast_read(adapter,
|
||||||
NX_FW_VERSION_OFFSET, (int *)&flashed_ver);
|
NX_FW_VERSION_OFFSET, (int *)&flashed_ver);
|
||||||
|
flashed_ver = NETXEN_DECODE_VERSION(flashed_ver);
|
||||||
|
|
||||||
if (flashed_ver >= NETXEN_VERSION_CODE(4, 0, 220)) {
|
if (flashed_ver >= NETXEN_VERSION_CODE(4, 0, 220)) {
|
||||||
capability = NXRD32(adapter, NX_PEG_TUNE_CAPABILITY);
|
capability = NXRD32(adapter, NX_PEG_TUNE_CAPABILITY);
|
||||||
if (capability & NX_PEG_TUNE_MN_PRESENT) {
|
if (capability & NX_PEG_TUNE_MN_PRESENT) {
|
||||||
@ -838,6 +909,10 @@ request_mn:
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
fw_type = NX_FLASH_ROMIMAGE;
|
||||||
|
adapter->fw = NULL;
|
||||||
|
goto done;
|
||||||
|
|
||||||
request_fw:
|
request_fw:
|
||||||
rc = request_firmware(&adapter->fw, fw_name[fw_type], &pdev->dev);
|
rc = request_firmware(&adapter->fw, fw_name[fw_type], &pdev->dev);
|
||||||
if (rc != 0) {
|
if (rc != 0) {
|
||||||
@ -846,6 +921,7 @@ request_fw:
|
|||||||
goto request_mn;
|
goto request_mn;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
fw_type = NX_FLASH_ROMIMAGE;
|
||||||
adapter->fw = NULL;
|
adapter->fw = NULL;
|
||||||
goto done;
|
goto done;
|
||||||
}
|
}
|
||||||
@ -859,16 +935,13 @@ request_fw:
|
|||||||
goto request_mn;
|
goto request_mn;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
fw_type = NX_FLASH_ROMIMAGE;
|
||||||
adapter->fw = NULL;
|
adapter->fw = NULL;
|
||||||
goto done;
|
goto done;
|
||||||
}
|
}
|
||||||
|
|
||||||
done:
|
done:
|
||||||
if (adapter->fw)
|
adapter->fw_type = fw_type;
|
||||||
dev_info(&pdev->dev, "loading firmware from file %s\n",
|
|
||||||
fw_name[fw_type]);
|
|
||||||
else
|
|
||||||
dev_info(&pdev->dev, "loading firmware from flash\n");
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
@ -718,6 +718,10 @@ netxen_start_firmware(struct netxen_adapter *adapter, int request_fw)
|
|||||||
if (request_fw)
|
if (request_fw)
|
||||||
netxen_request_firmware(adapter);
|
netxen_request_firmware(adapter);
|
||||||
|
|
||||||
|
err = netxen_need_fw_reset(adapter);
|
||||||
|
if (err <= 0)
|
||||||
|
return err;
|
||||||
|
|
||||||
if (first_boot != 0x55555555) {
|
if (first_boot != 0x55555555) {
|
||||||
NXWR32(adapter, CRB_CMDPEG_STATE, 0);
|
NXWR32(adapter, CRB_CMDPEG_STATE, 0);
|
||||||
netxen_pinit_from_rom(adapter, 0);
|
netxen_pinit_from_rom(adapter, 0);
|
||||||
|
@ -1727,6 +1727,7 @@ static struct pcmcia_device_id pcnet_ids[] = {
|
|||||||
PCMCIA_DEVICE_PROD_ID12("PRETEC", "Ethernet CompactLAN 10BaseT 3.3V", 0xebf91155, 0x7f5a4f50),
|
PCMCIA_DEVICE_PROD_ID12("PRETEC", "Ethernet CompactLAN 10BaseT 3.3V", 0xebf91155, 0x7f5a4f50),
|
||||||
PCMCIA_DEVICE_PROD_ID12("Psion Dacom", "Gold Card Ethernet", 0xf5f025c2, 0x3a30e110),
|
PCMCIA_DEVICE_PROD_ID12("Psion Dacom", "Gold Card Ethernet", 0xf5f025c2, 0x3a30e110),
|
||||||
PCMCIA_DEVICE_PROD_ID12("=RELIA==", "Ethernet", 0xcdd0644a, 0x00b2e941),
|
PCMCIA_DEVICE_PROD_ID12("=RELIA==", "Ethernet", 0xcdd0644a, 0x00b2e941),
|
||||||
|
PCMCIA_DEVICE_PROD_ID12("RIOS Systems Co.", "PC CARD3 ETHERNET", 0x7dd33481, 0x10b41826),
|
||||||
PCMCIA_DEVICE_PROD_ID12("RP", "1625B Ethernet NE2000 Compatible", 0xe3e66e22, 0xb96150df),
|
PCMCIA_DEVICE_PROD_ID12("RP", "1625B Ethernet NE2000 Compatible", 0xe3e66e22, 0xb96150df),
|
||||||
PCMCIA_DEVICE_PROD_ID12("RPTI", "EP400 Ethernet NE2000 Compatible", 0xdc6f88fd, 0x4a7e2ae0),
|
PCMCIA_DEVICE_PROD_ID12("RPTI", "EP400 Ethernet NE2000 Compatible", 0xdc6f88fd, 0x4a7e2ae0),
|
||||||
PCMCIA_DEVICE_PROD_ID12("RPTI", "EP401 Ethernet NE2000 Compatible", 0xdc6f88fd, 0x4bcbd7fd),
|
PCMCIA_DEVICE_PROD_ID12("RPTI", "EP401 Ethernet NE2000 Compatible", 0xdc6f88fd, 0x4bcbd7fd),
|
||||||
|
@ -928,13 +928,32 @@ static void phy_state_machine(struct work_struct *work)
|
|||||||
* Otherwise, it's 0, and we're
|
* Otherwise, it's 0, and we're
|
||||||
* still waiting for AN */
|
* still waiting for AN */
|
||||||
if (err > 0) {
|
if (err > 0) {
|
||||||
phydev->state = PHY_RUNNING;
|
err = phy_read_status(phydev);
|
||||||
|
if (err)
|
||||||
|
break;
|
||||||
|
|
||||||
|
if (phydev->link) {
|
||||||
|
phydev->state = PHY_RUNNING;
|
||||||
|
netif_carrier_on(phydev->attached_dev);
|
||||||
|
} else
|
||||||
|
phydev->state = PHY_NOLINK;
|
||||||
|
phydev->adjust_link(phydev->attached_dev);
|
||||||
} else {
|
} else {
|
||||||
phydev->state = PHY_AN;
|
phydev->state = PHY_AN;
|
||||||
phydev->link_timeout = PHY_AN_TIMEOUT;
|
phydev->link_timeout = PHY_AN_TIMEOUT;
|
||||||
}
|
}
|
||||||
} else
|
} else {
|
||||||
phydev->state = PHY_RUNNING;
|
err = phy_read_status(phydev);
|
||||||
|
if (err)
|
||||||
|
break;
|
||||||
|
|
||||||
|
if (phydev->link) {
|
||||||
|
phydev->state = PHY_RUNNING;
|
||||||
|
netif_carrier_on(phydev->attached_dev);
|
||||||
|
} else
|
||||||
|
phydev->state = PHY_NOLINK;
|
||||||
|
phydev->adjust_link(phydev->attached_dev);
|
||||||
|
}
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1607,6 +1607,8 @@ int ql_mb_get_fw_state(struct ql_adapter *qdev);
|
|||||||
int ql_cam_route_initialize(struct ql_adapter *qdev);
|
int ql_cam_route_initialize(struct ql_adapter *qdev);
|
||||||
int ql_read_mpi_reg(struct ql_adapter *qdev, u32 reg, u32 *data);
|
int ql_read_mpi_reg(struct ql_adapter *qdev, u32 reg, u32 *data);
|
||||||
int ql_mb_about_fw(struct ql_adapter *qdev);
|
int ql_mb_about_fw(struct ql_adapter *qdev);
|
||||||
|
void ql_link_on(struct ql_adapter *qdev);
|
||||||
|
void ql_link_off(struct ql_adapter *qdev);
|
||||||
|
|
||||||
#if 1
|
#if 1
|
||||||
#define QL_ALL_DUMP
|
#define QL_ALL_DUMP
|
||||||
|
@@ -59,7 +59,7 @@ static int ql_update_ring_coalescing(struct ql_adapter *qdev)
		cqicb->pkt_delay =
			cpu_to_le16(qdev->tx_max_coalesced_frames);
		cqicb->flags = FLAGS_LI;
-		status = ql_write_cfg(qdev, cqicb, sizeof(cqicb),
+		status = ql_write_cfg(qdev, cqicb, sizeof(*cqicb),
				      CFG_LCQ, rx_ring->cq_id);
		if (status) {
			QPRINTK(qdev, IFUP, ERR,
@@ -82,7 +82,7 @@ static int ql_update_ring_coalescing(struct ql_adapter *qdev)
		cqicb->pkt_delay =
			cpu_to_le16(qdev->rx_max_coalesced_frames);
		cqicb->flags = FLAGS_LI;
-		status = ql_write_cfg(qdev, cqicb, sizeof(cqicb),
+		status = ql_write_cfg(qdev, cqicb, sizeof(*cqicb),
				      CFG_LCQ, rx_ring->cq_id);
		if (status) {
			QPRINTK(qdev, IFUP, ERR,
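The two ql_write_cfg() fixes above, and the matching memset()/ql_write_cfg() fixes in the qlge hunks further down, all correct the same C pitfall: sizeof applied to a pointer variable yields the size of the pointer (4 or 8 bytes), not the size of the structure it points to, so only a few bytes of the control block were being written or cleared. A small stand-alone illustration, using a generic struct rather than the driver's actual layout:

#include <stdio.h>
#include <string.h>

struct cfg_block {
	unsigned int flags;
	unsigned short delay;
	unsigned char payload[64];
};

int main(void)
{
	struct cfg_block block;
	struct cfg_block *cqicb = &block;

	/* Wrong: clears only sizeof(void *) bytes, not the whole block. */
	memset(cqicb, 0, sizeof(cqicb));
	printf("sizeof(cqicb)  = %zu\n", sizeof(cqicb));

	/* Right: clears the structure the pointer refers to. */
	memset(cqicb, 0, sizeof(*cqicb));
	printf("sizeof(*cqicb) = %zu\n", sizeof(*cqicb));
	return 0;
}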
@@ -214,6 +214,10 @@ int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
		return -ENOMEM;
	}

+	status = ql_sem_spinlock(qdev, SEM_ICB_MASK);
+	if (status)
+		return status;
+
	status = ql_wait_cfg(qdev, bit);
	if (status) {
		QPRINTK(qdev, IFUP, ERR,
@@ -221,12 +225,8 @@ int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
		goto exit;
	}

-	status = ql_sem_spinlock(qdev, SEM_ICB_MASK);
-	if (status)
-		goto exit;
	ql_write32(qdev, ICB_L, (u32) map);
	ql_write32(qdev, ICB_H, (u32) (map >> 32));
-	ql_sem_unlock(qdev, SEM_ICB_MASK);	/* does flush too */

	mask = CFG_Q_MASK | (bit << 16);
	value = bit | (q_id << CFG_Q_SHIFT);
@@ -237,6 +237,7 @@ int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
	 */
	status = ql_wait_cfg(qdev, bit);
 exit:
+	ql_sem_unlock(qdev, SEM_ICB_MASK);	/* does flush too */
	pci_unmap_single(qdev->pdev, map, size, direction);
	return status;
 }
@@ -412,6 +413,57 @@ exit:
	return status;
 }

+/* Set or clear MAC address in hardware. We sometimes
+ * have to clear it to prevent wrong frame routing
+ * especially in a bonding environment.
+ */
+static int ql_set_mac_addr(struct ql_adapter *qdev, int set)
+{
+	int status;
+	char zero_mac_addr[ETH_ALEN];
+	char *addr;
+
+	if (set) {
+		addr = &qdev->ndev->dev_addr[0];
+		QPRINTK(qdev, IFUP, DEBUG,
+			"Set Mac addr %02x:%02x:%02x:%02x:%02x:%02x\n",
+			addr[0], addr[1], addr[2], addr[3],
+			addr[4], addr[5]);
+	} else {
+		memset(zero_mac_addr, 0, ETH_ALEN);
+		addr = &zero_mac_addr[0];
+		QPRINTK(qdev, IFUP, DEBUG,
+			"Clearing MAC address on %s\n",
+			qdev->ndev->name);
+	}
+	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
+	if (status)
+		return status;
+	status = ql_set_mac_addr_reg(qdev, (u8 *) addr,
+			MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
+	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
+	if (status)
+		QPRINTK(qdev, IFUP, ERR, "Failed to init mac "
+			"address.\n");
+	return status;
+}
+
+void ql_link_on(struct ql_adapter *qdev)
+{
+	QPRINTK(qdev, LINK, ERR, "%s: Link is up.\n",
+		qdev->ndev->name);
+	netif_carrier_on(qdev->ndev);
+	ql_set_mac_addr(qdev, 1);
+}
+
+void ql_link_off(struct ql_adapter *qdev)
+{
+	QPRINTK(qdev, LINK, ERR, "%s: Link is down.\n",
+		qdev->ndev->name);
+	netif_carrier_off(qdev->ndev);
+	ql_set_mac_addr(qdev, 0);
+}
+
 /* Get a specific frame routing value from the CAM.
  * Used for debug and reg dump.
  */
@@ -1628,7 +1680,7 @@ static void ql_process_mac_tx_intr(struct ql_adapter *qdev,
	tx_ring = &qdev->tx_ring[mac_rsp->txq_idx];
	tx_ring_desc = &tx_ring->q[mac_rsp->tid];
	ql_unmap_send(qdev, tx_ring_desc, tx_ring_desc->map_cnt);
-	qdev->stats.tx_bytes += tx_ring_desc->map_cnt;
+	qdev->stats.tx_bytes += (tx_ring_desc->skb)->len;
	qdev->stats.tx_packets++;
	dev_kfree_skb(tx_ring_desc->skb);
	tx_ring_desc->skb = NULL;
@@ -1660,13 +1712,13 @@ static void ql_process_mac_tx_intr(struct ql_adapter *qdev,
 /* Fire up a handler to reset the MPI processor. */
 void ql_queue_fw_error(struct ql_adapter *qdev)
 {
-	netif_carrier_off(qdev->ndev);
+	ql_link_off(qdev);
	queue_delayed_work(qdev->workqueue, &qdev->mpi_reset_work, 0);
 }

 void ql_queue_asic_error(struct ql_adapter *qdev)
 {
-	netif_carrier_off(qdev->ndev);
+	ql_link_off(qdev);
	ql_disable_interrupts(qdev);
	/* Clear adapter up bit to signal the recovery
	 * process that it shouldn't kill the reset worker
@@ -2104,7 +2156,7 @@ static int qlge_send(struct sk_buff *skb, struct net_device *ndev)
	}
	tx_ring_desc = &tx_ring->q[tx_ring->prod_idx];
	mac_iocb_ptr = tx_ring_desc->queue_entry;
-	memset((void *)mac_iocb_ptr, 0, sizeof(mac_iocb_ptr));
+	memset((void *)mac_iocb_ptr, 0, sizeof(*mac_iocb_ptr));

	mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB;
	mac_iocb_ptr->tid = tx_ring_desc->index;
@@ -2743,7 +2795,7 @@ static int ql_start_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)

	ql_init_tx_ring(qdev, tx_ring);

-	err = ql_write_cfg(qdev, wqicb, sizeof(wqicb), CFG_LRQ,
+	err = ql_write_cfg(qdev, wqicb, sizeof(*wqicb), CFG_LRQ,
			   (u16) tx_ring->wq_id);
	if (err) {
		QPRINTK(qdev, IFUP, ERR, "Failed to load tx_ring.\n");
@@ -3008,7 +3060,7 @@ static int ql_start_rss(struct ql_adapter *qdev)
	int i;
	u8 *hash_id = (u8 *) ricb->hash_cq_id;

-	memset((void *)ricb, 0, sizeof(ricb));
+	memset((void *)ricb, 0, sizeof(*ricb));

	ricb->base_cq = qdev->rss_ring_first_cq_id | RSS_L4K;
	ricb->flags =
@@ -3030,7 +3082,7 @@ static int ql_start_rss(struct ql_adapter *qdev)

	QPRINTK(qdev, IFUP, DEBUG, "Initializing RSS.\n");

-	status = ql_write_cfg(qdev, ricb, sizeof(ricb), CFG_LR, 0);
+	status = ql_write_cfg(qdev, ricb, sizeof(*ricb), CFG_LR, 0);
	if (status) {
		QPRINTK(qdev, IFUP, ERR, "Failed to load RICB.\n");
		return status;
@@ -3039,25 +3091,40 @@ static int ql_start_rss(struct ql_adapter *qdev)
	return status;
 }

+static int ql_clear_routing_entries(struct ql_adapter *qdev)
+{
+	int i, status = 0;
+
+	status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
+	if (status)
+		return status;
+	/* Clear all the entries in the routing table. */
+	for (i = 0; i < 16; i++) {
+		status = ql_set_routing_reg(qdev, i, 0, 0);
+		if (status) {
+			QPRINTK(qdev, IFUP, ERR,
+				"Failed to init routing register for CAM "
+				"packets.\n");
+			break;
+		}
+	}
+	ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
+	return status;
+}
+
 /* Initialize the frame-to-queue routing. */
 static int ql_route_initialize(struct ql_adapter *qdev)
 {
	int status = 0;
-	int i;

	status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
	if (status)
		return status;

	/* Clear all the entries in the routing table. */
-	for (i = 0; i < 16; i++) {
-		status = ql_set_routing_reg(qdev, i, 0, 0);
-		if (status) {
-			QPRINTK(qdev, IFUP, ERR,
-				"Failed to init routing register for CAM packets.\n");
-			goto exit;
-		}
-	}
+	status = ql_clear_routing_entries(qdev);
+	if (status)
+		goto exit;

	status = ql_set_routing_reg(qdev, RT_IDX_ALL_ERR_SLOT, RT_IDX_ERR, 1);
	if (status) {
@@ -3096,14 +3163,15 @@ exit:

 int ql_cam_route_initialize(struct ql_adapter *qdev)
 {
-	int status;
+	int status, set;

-	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
-	if (status)
-		return status;
-	status = ql_set_mac_addr_reg(qdev, (u8 *) qdev->ndev->perm_addr,
-			MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
-	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
+	/* If check if the link is up and use to
+	 * determine if we are setting or clearing
+	 * the MAC address in the CAM.
+	 */
+	set = ql_read32(qdev, STS);
+	set &= qdev->port_link_up;
+	status = ql_set_mac_addr(qdev, set);
	if (status) {
		QPRINTK(qdev, IFUP, ERR, "Failed to init mac address.\n");
		return status;
@@ -3210,9 +3278,17 @@ static int ql_adapter_reset(struct ql_adapter *qdev)
 {
	u32 value;
	int status = 0;
-	unsigned long end_jiffies = jiffies +
-		max((unsigned long)1, usecs_to_jiffies(30));
+	unsigned long end_jiffies;

+	/* Clear all the entries in the routing table. */
+	status = ql_clear_routing_entries(qdev);
+	if (status) {
+		QPRINTK(qdev, IFUP, ERR, "Failed to clear routing bits.\n");
+		return status;
+	}
+
+	end_jiffies = jiffies +
+		max((unsigned long)1, usecs_to_jiffies(30));
	ql_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR);

	do {
@@ -3252,7 +3328,7 @@ static int ql_adapter_down(struct ql_adapter *qdev)
	int i, status = 0;
	struct rx_ring *rx_ring;

-	netif_carrier_off(qdev->ndev);
+	ql_link_off(qdev);

	/* Don't kill the reset worker thread if we
	 * are in the process of recovery.
@@ -3319,8 +3395,12 @@ static int ql_adapter_up(struct ql_adapter *qdev)
	}
	set_bit(QL_ADAPTER_UP, &qdev->flags);
	ql_alloc_rx_buffers(qdev);
-	if ((ql_read32(qdev, STS) & qdev->port_init))
-		netif_carrier_on(qdev->ndev);
+	/* If the port is initialized and the
+	 * link is up the turn on the carrier.
+	 */
+	if ((ql_read32(qdev, STS) & qdev->port_init) &&
+			(ql_read32(qdev, STS) & qdev->port_link_up))
+		ql_link_on(qdev);
	ql_enable_interrupts(qdev);
	ql_enable_all_completion_interrupts(qdev);
	netif_tx_start_all_queues(qdev->ndev);
@@ -3346,11 +3426,6 @@ static int ql_get_adapter_resources(struct ql_adapter *qdev)
		return -ENOMEM;
	}
	status = ql_request_irq(qdev);
-	if (status)
-		goto err_irq;
-	return status;
-err_irq:
-	ql_free_mem_resources(qdev);
	return status;
 }

@@ -3414,7 +3489,7 @@ static int ql_configure_rings(struct ql_adapter *qdev)

	for (i = 0; i < qdev->tx_ring_count; i++) {
		tx_ring = &qdev->tx_ring[i];
-		memset((void *)tx_ring, 0, sizeof(tx_ring));
+		memset((void *)tx_ring, 0, sizeof(*tx_ring));
		tx_ring->qdev = qdev;
		tx_ring->wq_id = i;
		tx_ring->wq_len = qdev->tx_ring_size;
@@ -3430,7 +3505,7 @@ static int ql_configure_rings(struct ql_adapter *qdev)

	for (i = 0; i < qdev->rx_ring_count; i++) {
		rx_ring = &qdev->rx_ring[i];
-		memset((void *)rx_ring, 0, sizeof(rx_ring));
+		memset((void *)rx_ring, 0, sizeof(*rx_ring));
		rx_ring->qdev = qdev;
		rx_ring->cq_id = i;
		rx_ring->cpu = i % cpu_cnt;	/* CPU to run handler on. */
@@ -3789,7 +3864,7 @@ static int __devinit ql_init_device(struct pci_dev *pdev,
	int pos, err = 0;
	u16 val16;

-	memset((void *)qdev, 0, sizeof(qdev));
+	memset((void *)qdev, 0, sizeof(*qdev));
	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "PCI device enable failed.\n");
@@ -3976,7 +4051,7 @@ static int __devinit qlge_probe(struct pci_dev *pdev,
		pci_disable_device(pdev);
		return err;
	}
-	netif_carrier_off(ndev);
+	ql_link_off(qdev);
	ql_display_dev_info(ndev);
	cards_found++;
	return 0;
@@ -238,7 +238,7 @@ static void ql_link_up(struct ql_adapter *qdev, struct mbox_params *mbcp)
				&qdev->mpi_port_cfg_work, 0);
	}

-	netif_carrier_on(qdev->ndev);
+	ql_link_on(qdev);
 }

 static void ql_link_down(struct ql_adapter *qdev, struct mbox_params *mbcp)
@@ -251,7 +251,7 @@ static void ql_link_down(struct ql_adapter *qdev, struct mbox_params *mbcp)
	if (status)
		QPRINTK(qdev, DRV, ERR, "Link down AEN broken!\n");

-	netif_carrier_off(qdev->ndev);
+	ql_link_off(qdev);
 }

 static int ql_sfp_in(struct ql_adapter *qdev, struct mbox_params *mbcp)
@@ -849,7 +849,7 @@ void ql_mpi_idc_work(struct work_struct *work)
	case MB_CMD_PORT_RESET:
	case MB_CMD_SET_PORT_CFG:
	case MB_CMD_STOP_FW:
-		netif_carrier_off(qdev->ndev);
+		ql_link_off(qdev);
		/* Signal the resulting link up AEN
		 * that the frame routing and mac addr
		 * needs to be set.
@@ -1938,7 +1938,7 @@ static int __devexit smsc911x_drv_remove(struct platform_device *pdev)
	if (!res)
		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);

-	release_mem_region(res->start, res->end - res->start);
+	release_mem_region(res->start, resource_size(res));

	iounmap(pdata->ioaddr);

@@ -1976,7 +1976,7 @@ static int __devinit smsc911x_drv_probe(struct platform_device *pdev)
		retval = -ENODEV;
		goto out_0;
	}
-	res_size = res->end - res->start + 1;
+	res_size = resource_size(res);

	irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!irq_res) {
@@ -2104,7 +2104,7 @@ out_unmap_io_3:
 out_free_netdev_2:
	free_netdev(dev);
 out_release_io_1:
-	release_mem_region(res->start, res->end - res->start);
+	release_mem_region(res->start, resource_size(res));
 out_0:
	return retval;
 }
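The smsc911x hunks above are the "resource size off by 1" fix: resource ranges are inclusive on both ends, so the length is end - start + 1, and the release_mem_region() calls that passed plain end - start were one byte short. The kernel's resource_size() helper encapsulates the inclusive arithmetic. A hedged, stand-alone sketch of that arithmetic, using a toy struct rather than the kernel's struct resource:

#include <stdio.h>

/* Toy model of an inclusive [start, end] memory resource. */
struct toy_resource {
	unsigned long start;
	unsigned long end;
};

/* Same arithmetic as resource_size(): inclusive bounds need the +1. */
static unsigned long toy_resource_size(const struct toy_resource *res)
{
	return res->end - res->start + 1;
}

int main(void)
{
	struct toy_resource res = { .start = 0x10000000, .end = 0x10000fff };

	printf("end - start     = %lu (one byte short)\n", res.end - res.start);
	printf("resource_size() = %lu\n", toy_resource_size(&res));
	return 0;
}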
@@ -486,12 +486,14 @@ static unsigned int tun_chr_poll(struct file *file, poll_table * wait)
 {
	struct tun_file *tfile = file->private_data;
	struct tun_struct *tun = __tun_get(tfile);
-	struct sock *sk = tun->sk;
+	struct sock *sk;
	unsigned int mask = 0;

	if (!tun)
		return POLLERR;

+	sk = tun->sk;
+
	DBG(KERN_INFO "%s: tun_chr_poll\n", tun->dev->name);

	poll_wait(file, &tun->socket.wait, wait);
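The tun_chr_poll() fix above is an ordering bug: the old code dereferenced tun->sk in the variable initializer, before the `if (!tun)` check, so polling a freshly opened but unattached /dev/net/tun could crash. The safe pattern is to declare first and dereference only after the NULL check. A generic illustration in plain C, with toy names rather than the tun driver's types:

#include <stddef.h>
#include <stdio.h>

struct conn { int sk; };

/* Returns -1 (a POLLERR stand-in) when no connection is attached yet. */
static int poll_conn(struct conn *c)
{
	int sk;			/* declare only; do not touch c yet */

	if (!c)
		return -1;	/* the buggy version had already read c->sk */

	sk = c->sk;		/* safe: c is known to be non-NULL here */
	return sk;
}

int main(void)
{
	struct conn c = { .sk = 42 };

	printf("%d\n", poll_conn(NULL));	/* -1, no crash */
	printf("%d\n", poll_conn(&c));		/* 42 */
	return 0;
}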
@@ -1324,20 +1326,22 @@ static int tun_chr_close(struct inode *inode, struct file *file)
	struct tun_file *tfile = file->private_data;
	struct tun_struct *tun;

-	rtnl_lock();
	tun = __tun_get(tfile);
	if (tun) {
-		DBG(KERN_INFO "%s: tun_chr_close\n", tun->dev->name);
+		struct net_device *dev = tun->dev;
+
+		DBG(KERN_INFO "%s: tun_chr_close\n", dev->name);

		__tun_detach(tun);

		/* If desireable, unregister the netdevice. */
-		if (!(tun->flags & TUN_PERSIST))
-			unregister_netdevice(tun->dev);
+		if (!(tun->flags & TUN_PERSIST)) {
+			rtnl_lock();
+			if (dev->reg_state == NETREG_REGISTERED)
+				unregister_netdevice(dev);
+			rtnl_unlock();
+		}
	}
-	rtnl_unlock();

	tun = tfile->tun;
	if (tun)
@@ -823,30 +823,30 @@ void p54_free_skb(struct ieee80211_hw *dev, struct sk_buff *skb)
	struct p54_tx_info *range;
	unsigned long flags;

-	if (unlikely(!skb || !dev || skb_queue_empty(&priv->tx_queue)))
+	if (unlikely(!skb || !dev || !skb_queue_len(&priv->tx_queue)))
		return;

-	/* There used to be a check here to see if the SKB was on the
-	 * TX queue or not. This can never happen because all SKBs we
-	 * see here successfully went through p54_assign_address()
-	 * which means the SKB is on the ->tx_queue.
+	/*
+	 * don't try to free an already unlinked skb
	 */
+	if (unlikely((!skb->next) || (!skb->prev)))
+		return;
+
	spin_lock_irqsave(&priv->tx_queue.lock, flags);
	info = IEEE80211_SKB_CB(skb);
	range = (void *)info->rate_driver_data;
-	if (!skb_queue_is_first(&priv->tx_queue, skb)) {
+	if (skb->prev != (struct sk_buff *)&priv->tx_queue) {
		struct ieee80211_tx_info *ni;
		struct p54_tx_info *mr;

-		ni = IEEE80211_SKB_CB(skb_queue_prev(&priv->tx_queue, skb));
+		ni = IEEE80211_SKB_CB(skb->prev);
		mr = (struct p54_tx_info *)ni->rate_driver_data;
	}
-	if (!skb_queue_is_last(&priv->tx_queue, skb)) {
+	if (skb->next != (struct sk_buff *)&priv->tx_queue) {
		struct ieee80211_tx_info *ni;
		struct p54_tx_info *mr;

-		ni = IEEE80211_SKB_CB(skb_queue_next(&priv->tx_queue, skb));
+		ni = IEEE80211_SKB_CB(skb->next);
		mr = (struct p54_tx_info *)ni->rate_driver_data;
	}
	__skb_unlink(skb, &priv->tx_queue);
@@ -864,13 +864,15 @@ static struct sk_buff *p54_find_tx_entry(struct ieee80211_hw *dev,
	unsigned long flags;

	spin_lock_irqsave(&priv->tx_queue.lock, flags);
-	skb_queue_walk(&priv->tx_queue, entry) {
+	entry = priv->tx_queue.next;
+	while (entry != (struct sk_buff *)&priv->tx_queue) {
		struct p54_hdr *hdr = (struct p54_hdr *) entry->data;

		if (hdr->req_id == req_id) {
			spin_unlock_irqrestore(&priv->tx_queue.lock, flags);
			return entry;
		}
+		entry = entry->next;
	}
	spin_unlock_irqrestore(&priv->tx_queue.lock, flags);
	return NULL;
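The p54 hunks in this revert drop the skb_queue_walk()/skb_queue_next() helpers and go back to by-hand traversal, where the queue head itself serves as the end-of-list sentinel: iteration starts at head->next and stops once the cursor points back at the head, cast to the element type as shown in the diff. A minimal stand-alone model of that sentinel walk, with toy types standing in for sk_buff and sk_buff_head:

#include <stdio.h>

/* Toy circular doubly linked list: the head doubles as the sentinel. */
struct node {
	struct node *next, *prev;
	int id;
};

static void list_init(struct node *head)
{
	head->next = head->prev = head;
}

static void list_append(struct node *head, struct node *n)
{
	n->prev = head->prev;
	n->next = head;
	head->prev->next = n;
	head->prev = n;
}

int main(void)
{
	struct node head, a = { .id = 1 }, b = { .id = 2 };
	struct node *entry;

	list_init(&head);
	list_append(&head, &a);
	list_append(&head, &b);

	/* By-hand walk, as in the reverted driver code: stop at the sentinel. */
	entry = head.next;
	while (entry != &head) {
		printf("entry %d\n", entry->id);
		entry = entry->next;
	}
	return 0;
}

In the kernel case the head is a different type from the elements, which is why the driver compares against the head pointer cast to struct sk_buff *.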
@@ -888,22 +890,24 @@ static void p54_rx_frame_sent(struct ieee80211_hw *dev, struct sk_buff *skb)
	int count, idx;

	spin_lock_irqsave(&priv->tx_queue.lock, flags);
-	skb_queue_walk(&priv->tx_queue, entry) {
+	entry = (struct sk_buff *) priv->tx_queue.next;
+	while (entry != (struct sk_buff *)&priv->tx_queue) {
		struct ieee80211_tx_info *info = IEEE80211_SKB_CB(entry);
		struct p54_hdr *entry_hdr;
		struct p54_tx_data *entry_data;
		unsigned int pad = 0, frame_len;

		range = (void *)info->rate_driver_data;
-		if (range->start_addr != addr)
+		if (range->start_addr != addr) {
+			entry = entry->next;
			continue;
+		}

-		if (!skb_queue_is_last(&priv->tx_queue, entry)) {
+		if (entry->next != (struct sk_buff *)&priv->tx_queue) {
			struct ieee80211_tx_info *ni;
			struct p54_tx_info *mr;

-			ni = IEEE80211_SKB_CB(skb_queue_next(&priv->tx_queue,
-							     entry));
+			ni = IEEE80211_SKB_CB(entry->next);
			mr = (struct p54_tx_info *)ni->rate_driver_data;
		}

@@ -1164,21 +1168,23 @@ static int p54_assign_address(struct ieee80211_hw *dev, struct sk_buff *skb,
		}
	}

-	skb_queue_walk(&priv->tx_queue, entry) {
+	entry = priv->tx_queue.next;
+	while (left--) {
		u32 hole_size;
		info = IEEE80211_SKB_CB(entry);
		range = (void *)info->rate_driver_data;
		hole_size = range->start_addr - last_addr;
		if (!target_skb && hole_size >= len) {
-			target_skb = skb_queue_prev(&priv->tx_queue, entry);
+			target_skb = entry->prev;
			hole_size -= len;
			target_addr = last_addr;
		}
		largest_hole = max(largest_hole, hole_size);
		last_addr = range->end_addr;
+		entry = entry->next;
	}
	if (!target_skb && priv->rx_end - last_addr >= len) {
-		target_skb = skb_peek_tail(&priv->tx_queue);
+		target_skb = priv->tx_queue.prev;
		largest_hole = max(largest_hole, priv->rx_end - last_addr - len);
		if (!skb_queue_empty(&priv->tx_queue)) {
			info = IEEE80211_SKB_CB(target_skb);
@@ -2084,6 +2090,7 @@ out:
 static void p54_stop(struct ieee80211_hw *dev)
 {
	struct p54_common *priv = dev->priv;
+	struct sk_buff *skb;

	mutex_lock(&priv->conf_mutex);
	priv->mode = NL80211_IFTYPE_UNSPECIFIED;
@@ -2098,7 +2105,8 @@ static void p54_stop(struct ieee80211_hw *dev)
		p54_tx_cancel(dev, priv->cached_beacon);

	priv->stop(dev);
-	skb_queue_purge(&priv->tx_queue);
+	while ((skb = skb_dequeue(&priv->tx_queue)))
+		kfree_skb(skb);
	priv->cached_beacon = NULL;
	priv->tsf_high32 = priv->tsf_low32 = 0;
	mutex_unlock(&priv->conf_mutex);
@@ -78,6 +78,7 @@
 #define ETH_P_PAE 0x888E /* Port Access Entity (IEEE 802.1X) */
 #define ETH_P_AOE 0x88A2 /* ATA over Ethernet */
 #define ETH_P_TIPC 0x88CA /* TIPC */
+#define ETH_P_1588 0x88F7 /* IEEE 1588 Timesync */
 #define ETH_P_FCOE 0x8906 /* Fibre Channel over Ethernet */
 #define ETH_P_FIP 0x8914 /* FCoE Initialization Protocol */
 #define ETH_P_EDSA 0xDADA /* Ethertype DSA [ NOT AN OFFICIALLY REGISTERED ID ] */
@@ -735,12 +735,14 @@ static int parse_opts(char *params, struct p9_fd_opts *opts)
		if (!*p)
			continue;
		token = match_token(p, tokens, args);
-		r = match_int(&args[0], &option);
-		if (r < 0) {
-			P9_DPRINTK(P9_DEBUG_ERROR,
-				"integer field, but no integer?\n");
-			ret = r;
-			continue;
+		if (token != Opt_err) {
+			r = match_int(&args[0], &option);
+			if (r < 0) {
+				P9_DPRINTK(P9_DEBUG_ERROR,
+					"integer field, but no integer?\n");
+				ret = r;
+				continue;
+			}
		}
		switch (token) {
		case Opt_port:
@@ -418,7 +418,7 @@ static int mv88e6xxx_stats_wait(struct dsa_switch *ds)
	int i;

	for (i = 0; i < 10; i++) {
-		ret = REG_READ(REG_GLOBAL2, 0x1d);
+		ret = REG_READ(REG_GLOBAL, 0x1d);
		if ((ret & 0x8000) == 0)
			return 0;
	}
@@ -136,7 +136,8 @@ _decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse)
		case IPPROTO_TCP:
		case IPPROTO_SCTP:
		case IPPROTO_DCCP:
-			if (pskb_may_pull(skb, xprth + 4 - skb->data)) {
+			if (xprth + 4 < skb->data ||
+			    pskb_may_pull(skb, xprth + 4 - skb->data)) {
				__be16 *ports = (__be16 *)xprth;

				fl->fl_ip_sport = ports[!!reverse];
@@ -1916,8 +1916,32 @@ ok:
				update_lft = 1;
			else if (stored_lft <= MIN_VALID_LIFETIME) {
				/* valid_lft <= stored_lft is always true */
-				/* XXX: IPsec */
-				update_lft = 0;
+				/*
+				 * RFC 4862 Section 5.5.3e:
+				 * "Note that the preferred lifetime of
+				 * the corresponding address is always
+				 * reset to the Preferred Lifetime in
+				 * the received Prefix Information
+				 * option, regardless of whether the
+				 * valid lifetime is also reset or
+				 * ignored."
+				 *
+				 * So if the preferred lifetime in
+				 * this advertisement is different
+				 * than what we have stored, but the
+				 * valid lifetime is invalid, just
+				 * reset prefered_lft.
+				 *
+				 * We must set the valid lifetime
+				 * to the stored lifetime since we'll
+				 * be updating the timestamp below,
+				 * else we'll set it back to the
+				 * minumum.
+				 */
+				if (prefered_lft != ifp->prefered_lft) {
+					valid_lft = stored_lft;
+					update_lft = 1;
+				}
			} else {
				valid_lft = MIN_VALID_LIFETIME;
				if (valid_lft < prefered_lft)
@@ -3085,7 +3109,7 @@ restart:
				spin_unlock(&ifp->lock);
				continue;
			} else if (age >= ifp->prefered_lft) {
-				/* jiffies - ifp->tsamp > age >= ifp->prefered_lft */
+				/* jiffies - ifp->tstamp > age >= ifp->prefered_lft */
				int deprecate = 0;

				if (!(ifp->flags&IFA_F_DEPRECATED)) {
@@ -157,7 +157,8 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse)
	ipv6_addr_copy(&fl->fl6_dst, reverse ? &hdr->saddr : &hdr->daddr);
	ipv6_addr_copy(&fl->fl6_src, reverse ? &hdr->daddr : &hdr->saddr);

-	while (pskb_may_pull(skb, nh + offset + 1 - skb->data)) {
+	while (nh + offset + 1 < skb->data ||
+	       pskb_may_pull(skb, nh + offset + 1 - skb->data)) {
		nh = skb_network_header(skb);
		exthdr = (struct ipv6_opt_hdr *)(nh + offset);

@@ -177,7 +178,8 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse)
		case IPPROTO_TCP:
		case IPPROTO_SCTP:
		case IPPROTO_DCCP:
-			if (!onlyproto && pskb_may_pull(skb, nh + offset + 4 - skb->data)) {
+			if (!onlyproto && (nh + offset + 4 < skb->data ||
+			    pskb_may_pull(skb, nh + offset + 4 - skb->data))) {
				__be16 *ports = (__be16 *)exthdr;

				fl->fl_ip_sport = ports[!!reverse];
@@ -6652,21 +6652,6 @@ static void sctp_wait_for_close(struct sock *sk, long timeout)
	finish_wait(sk->sk_sleep, &wait);
 }

-static void sctp_sock_rfree_frag(struct sk_buff *skb)
-{
-	struct sk_buff *frag;
-
-	if (!skb->data_len)
-		goto done;
-
-	/* Don't forget the fragments. */
-	skb_walk_frags(skb, frag)
-		sctp_sock_rfree_frag(frag);
-
-done:
-	sctp_sock_rfree(skb);
-}
-
 static void sctp_skb_set_owner_r_frag(struct sk_buff *skb, struct sock *sk)
 {
	struct sk_buff *frag;
@@ -6776,7 +6761,6 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
	sctp_skb_for_each(skb, &oldsk->sk_receive_queue, tmp) {
		event = sctp_skb2event(skb);
		if (event->asoc == assoc) {
-			sctp_sock_rfree_frag(skb);
			__skb_unlink(skb, &oldsk->sk_receive_queue);
			__skb_queue_tail(&newsk->sk_receive_queue, skb);
			sctp_skb_set_owner_r_frag(skb, newsk);
@@ -6807,7 +6791,6 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
	sctp_skb_for_each(skb, &oldsp->pd_lobby, tmp) {
		event = sctp_skb2event(skb);
		if (event->asoc == assoc) {
-			sctp_sock_rfree_frag(skb);
			__skb_unlink(skb, &oldsp->pd_lobby);
			__skb_queue_tail(queue, skb);
			sctp_skb_set_owner_r_frag(skb, newsk);
@@ -6822,15 +6805,11 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,

	}

-	sctp_skb_for_each(skb, &assoc->ulpq.reasm, tmp) {
-		sctp_sock_rfree_frag(skb);
+	sctp_skb_for_each(skb, &assoc->ulpq.reasm, tmp)
		sctp_skb_set_owner_r_frag(skb, newsk);
-	}

-	sctp_skb_for_each(skb, &assoc->ulpq.lobby, tmp) {
-		sctp_sock_rfree_frag(skb);
+	sctp_skb_for_each(skb, &assoc->ulpq.lobby, tmp)
		sctp_skb_set_owner_r_frag(skb, newsk);
-	}

	/* Set the type of socket to indicate that it is peeled off from the
	 * original UDP-style socket or created with the accept() call on a