Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

drivers/net/ethernet/cadence/macb_main.c
  5cebb40bc9 ("net: macb: Fix PTP one step sync support")
  138badbc21 ("net: macb: use NAPI for TX completion path")
https://lore.kernel.org/all/20220523111021.31489367@canb.auug.org.au/

net/smc/af_smc.c
  75c1edf23b ("net/smc: postpone sk_refcnt increment in connect()")
  3aba103006 ("net/smc: align the connect behaviour with TCP")
https://lore.kernel.org/all/20220524114408.4bf1af38@canb.auug.org.au/

Signed-off-by: Jakub Kicinski <kuba@kernel.org>
commit 677fb75253
@@ -382,6 +382,15 @@ option is set to SOCK_TXREHASH_DEFAULT (i. e. not overridden by setsockopt).
If set to 1 (default), hash rethink is performed on listening socket.
If set to 0, hash rethink is not performed.

gro_normal_batch
----------------

Maximum number of the segments to batch up on output of GRO. When a packet
exits GRO, either as a coalesced superframe or as an original packet which
GRO has decided not to coalesce, it is placed on a per-NAPI list. This
list is then passed to the stack when the number of segments reaches the
gro_normal_batch limit.

2. /proc/sys/net/unix - Parameters for Unix domain sockets
----------------------------------------------------------
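
For context, a minimal userspace sketch (not part of this commit) that reads the gro_normal_batch value documented above; the path /proc/sys/net/core/gro_normal_batch is an assumption based on the sysctl's usual location and is not stated in this diff:

#include <stdio.h>

int main(void)
{
	/* Assumed location of the knob described in the hunk above. */
	FILE *f = fopen("/proc/sys/net/core/gro_normal_batch", "r");
	unsigned int batch;

	if (!f)
		return 1;
	if (fscanf(f, "%u", &batch) == 1)
		printf("gro_normal_batch = %u segments\n", batch);
	fclose(f);
	return 0;
}
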
@@ -943,7 +943,7 @@ static void amt_req_work(struct work_struct *work)
if (amt->status < AMT_STATUS_RECEIVED_ADVERTISEMENT)
goto out;

if (amt->req_cnt++ > AMT_MAX_REQ_COUNT) {
if (amt->req_cnt > AMT_MAX_REQ_COUNT) {
netdev_dbg(amt->dev, "Gateway is not ready");
amt->qi = AMT_INIT_REQ_TIMEOUT;
amt->ready4 = false;
@@ -951,13 +951,15 @@ static void amt_req_work(struct work_struct *work)
amt->remote_ip = 0;
__amt_update_gw_status(amt, AMT_STATUS_INIT, false);
amt->req_cnt = 0;
goto out;
}
spin_unlock_bh(&amt->lock);

amt_send_request(amt, false);
amt_send_request(amt, true);
amt_update_gw_status(amt, AMT_STATUS_SENT_REQUEST, true);
spin_lock_bh(&amt->lock);
__amt_update_gw_status(amt, AMT_STATUS_SENT_REQUEST, true);
amt->req_cnt++;
out:
exp = min_t(u32, (1 * (1 << amt->req_cnt)), AMT_MAX_REQ_TIMEOUT);
mod_delayed_work(amt_wq, &amt->req_wq, msecs_to_jiffies(exp * 1000));
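
The retry interval computed at the end of this hunk doubles with every unanswered request and is clamped to AMT_MAX_REQ_TIMEOUT. A standalone sketch of that arithmetic follows; the cap value of 120 seconds is an assumption for illustration, not taken from this diff:

#include <stdio.h>

#define AMT_MAX_REQ_TIMEOUT 120 /* assumed cap, in seconds */

int main(void)
{
	unsigned int req_cnt, exp;

	for (req_cnt = 0; req_cnt <= 8; req_cnt++) {
		/* mirrors: exp = min_t(u32, 1 << req_cnt, AMT_MAX_REQ_TIMEOUT) */
		exp = 1u << req_cnt;
		if (exp > AMT_MAX_REQ_TIMEOUT)
			exp = AMT_MAX_REQ_TIMEOUT;
		printf("req_cnt=%u -> next request in %u s\n", req_cnt, exp);
	}
	return 0;
}
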
@@ -2696,9 +2698,8 @@ static int amt_rcv(struct sock *sk, struct sk_buff *skb)
err = true;
goto drop;
}
if (amt_advertisement_handler(amt, skb))
amt->dev->stats.rx_dropped++;
goto out;
err = amt_advertisement_handler(amt, skb);
break;
case AMT_MSG_MULTICAST_DATA:
if (iph->saddr != amt->remote_ip) {
netdev_dbg(amt->dev, "Invalid Relay IP\n");
@@ -5591,16 +5591,23 @@ static int bond_ethtool_get_ts_info(struct net_device *bond_dev,
const struct ethtool_ops *ops;
struct net_device *real_dev;
struct phy_device *phydev;
int ret = 0;

rcu_read_lock();
real_dev = bond_option_active_slave_get_rcu(bond);
dev_hold(real_dev);
rcu_read_unlock();

if (real_dev) {
ops = real_dev->ethtool_ops;
phydev = real_dev->phydev;

if (phy_has_tsinfo(phydev)) {
return phy_ts_info(phydev, info);
ret = phy_ts_info(phydev, info);
goto out;
} else if (ops->get_ts_info) {
return ops->get_ts_info(real_dev, info);
ret = ops->get_ts_info(real_dev, info);
goto out;
}
}

@@ -5608,7 +5615,9 @@ static int bond_ethtool_get_ts_info(struct net_device *bond_dev,
SOF_TIMESTAMPING_SOFTWARE;
info->phc_index = -1;

return 0;
out:
dev_put(real_dev);
return ret;
}

static const struct ethtool_ops bond_ethtool_ops = {
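
The bonding hunk above replaces early returns with a single out: label so the reference taken with dev_hold() under rcu_read_lock() is always released by dev_put(). A self-contained sketch of that single-exit discipline follows; the refcounted device and helpers are stand-ins, not the kernel API:

#include <stdio.h>

struct fake_dev {
	int refcnt;
};

static void hold_dev(struct fake_dev *d) { if (d) d->refcnt++; }
static void put_dev(struct fake_dev *d)  { if (d) d->refcnt--; }

static int query_ts_info(struct fake_dev *dev)
{
	int ret = 0;

	hold_dev(dev);		/* pin the device for the whole call */

	if (!dev)
		goto out;	/* error paths still reach the single exit */

	ret = dev->refcnt;	/* stand-in for the real query */
out:
	put_dev(dev);		/* reference is always balanced */
	return ret;
}

int main(void)
{
	struct fake_dev d = { .refcnt = 1 };
	int ret = query_ts_info(&d);

	printf("query_ts_info() = %d, refcnt after = %d\n", ret, d.refcnt);
	return 0;
}
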
@@ -72,7 +72,6 @@ source "drivers/net/dsa/realtek/Kconfig"

config NET_DSA_SMSC_LAN9303
tristate
depends on VLAN_8021Q || VLAN_8021Q=n
select NET_DSA_TAG_LAN9303
select REGMAP
help
@@ -82,6 +81,7 @@ config NET_DSA_SMSC_LAN9303
config NET_DSA_SMSC_LAN9303_I2C
tristate "SMSC/Microchip LAN9303 3-ports 10/100 ethernet switch in I2C managed mode"
depends on I2C
depends on VLAN_8021Q || VLAN_8021Q=n
select NET_DSA_SMSC_LAN9303
select REGMAP_I2C
help
@@ -91,6 +91,7 @@ config NET_DSA_SMSC_LAN9303_MDIO
config NET_DSA_SMSC_LAN9303_MDIO
tristate "SMSC/Microchip LAN9303 3-ports 10/100 ethernet switch in MDIO managed mode"
select NET_DSA_SMSC_LAN9303
depends on VLAN_8021Q || VLAN_8021Q=n
help
Enable access functions if the SMSC/Microchip LAN9303 is configured
for MDIO managed mode.
@@ -36,6 +36,7 @@
#include <linux/iopoll.h>
#include <linux/phy/phy.h>
#include <linux/pm_runtime.h>
#include <linux/ptp_classify.h>
#include <linux/reset.h>
#include "macb.h"

@@ -1122,6 +1123,36 @@ static void macb_tx_error_task(struct work_struct *work)
napi_enable(&queue->napi_tx);
}

static bool ptp_one_step_sync(struct sk_buff *skb)
{
struct ptp_header *hdr;
unsigned int ptp_class;
u8 msgtype;

/* No need to parse packet if PTP TS is not involved */
if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)))
goto not_oss;

/* Identify and return whether PTP one step sync is being processed */
ptp_class = ptp_classify_raw(skb);
if (ptp_class == PTP_CLASS_NONE)
goto not_oss;

hdr = ptp_parse_header(skb, ptp_class);
if (!hdr)
goto not_oss;

if (hdr->flag_field[0] & PTP_FLAG_TWOSTEP)
goto not_oss;

msgtype = ptp_get_msgtype(hdr, ptp_class);
if (msgtype == PTP_MSGTYPE_SYNC)
return true;

not_oss:
return false;
}

static int macb_tx_complete(struct macb_queue *queue, int budget)
{
struct macb *bp = queue->bp;
@@ -1158,8 +1189,8 @@ static int macb_tx_complete(struct macb_queue *queue, int budget)

/* First, update TX stats if needed */
if (skb) {
if (unlikely(skb_shinfo(skb)->tx_flags &
SKBTX_HW_TSTAMP) &&
if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
!ptp_one_step_sync(skb) &&
gem_ptp_do_txstamp(queue, skb, desc) == 0) {
/* skb now belongs to timestamp buffer
* and will be removed later
@@ -2063,7 +2094,8 @@ static unsigned int macb_tx_map(struct macb *bp,
ctrl |= MACB_BF(TX_LSO, lso_ctrl);
ctrl |= MACB_BF(TX_TCP_SEQ_SRC, seq_ctrl);
if ((bp->dev->features & NETIF_F_HW_CSUM) &&
skb->ip_summed != CHECKSUM_PARTIAL && !lso_ctrl)
skb->ip_summed != CHECKSUM_PARTIAL && !lso_ctrl &&
!ptp_one_step_sync(skb))
ctrl |= MACB_BIT(TX_NOCRC);
} else
/* Only set MSS/MFS on payload descriptors
@@ -2161,7 +2193,7 @@ static int macb_pad_and_fcs(struct sk_buff **skb, struct net_device *ndev)

if (!(ndev->features & NETIF_F_HW_CSUM) ||
!((*skb)->ip_summed != CHECKSUM_PARTIAL) ||
skb_shinfo(*skb)->gso_size) /* Not available for GSO */
skb_shinfo(*skb)->gso_size || ptp_one_step_sync(*skb))
return 0;

if (padlen <= 0) {
@@ -470,8 +470,10 @@ int gem_set_hwtst(struct net_device *dev, struct ifreq *ifr, int cmd)
case HWTSTAMP_TX_ONESTEP_SYNC:
if (gem_ptp_set_one_step_sync(bp, 1) != 0)
return -ERANGE;
fallthrough;
tx_bd_control = TSTAMP_ALL_FRAMES;
break;
case HWTSTAMP_TX_ON:
gem_ptp_set_one_step_sync(bp, 0);
tx_bd_control = TSTAMP_ALL_FRAMES;
break;
default:
@@ -1097,6 +1097,7 @@ static void dpaa2_eth_free_tx_fd(struct dpaa2_eth_priv *priv,
u32 fd_len = dpaa2_fd_get_len(fd);
struct dpaa2_sg_entry *sgt;
int should_free_skb = 1;
void *tso_hdr;
int i;

fd_addr = dpaa2_fd_get_addr(fd);
@@ -1135,20 +1136,21 @@ static void dpaa2_eth_free_tx_fd(struct dpaa2_eth_priv *priv,
sgt = (struct dpaa2_sg_entry *)(buffer_start +
priv->tx_data_offset);

/* Unmap the SGT buffer */
dma_unmap_single(dev, fd_addr, swa->tso.sgt_size,
DMA_BIDIRECTIONAL);

/* Unmap and free the header */
tso_hdr = dpaa2_iova_to_virt(priv->iommu_domain, dpaa2_sg_get_addr(sgt));
dma_unmap_single(dev, dpaa2_sg_get_addr(sgt), TSO_HEADER_SIZE,
DMA_TO_DEVICE);
kfree(dpaa2_iova_to_virt(priv->iommu_domain, dpaa2_sg_get_addr(sgt)));
kfree(tso_hdr);

/* Unmap the other SG entries for the data */
for (i = 1; i < swa->tso.num_sg; i++)
dma_unmap_single(dev, dpaa2_sg_get_addr(&sgt[i]),
dpaa2_sg_get_len(&sgt[i]), DMA_TO_DEVICE);

/* Unmap the SGT buffer */
dma_unmap_single(dev, fd_addr, swa->sg.sgt_size,
DMA_BIDIRECTIONAL);

if (!swa->tso.is_last_fd)
should_free_skb = 0;
} else {
@@ -3876,9 +3876,11 @@ fec_probe(struct platform_device *pdev)
mutex_init(&fep->ptp_clk_mutex);

/* clk_ref is optional, depends on board */
fep->clk_ref = devm_clk_get(&pdev->dev, "enet_clk_ref");
if (IS_ERR(fep->clk_ref))
fep->clk_ref = NULL;
fep->clk_ref = devm_clk_get_optional(&pdev->dev, "enet_clk_ref");
if (IS_ERR(fep->clk_ref)) {
ret = PTR_ERR(fep->clk_ref);
goto failed_clk;
}
fep->clk_ref_rate = clk_get_rate(fep->clk_ref);

/* clk_2x_txclk is optional, depends on board */
@@ -386,7 +386,7 @@ static int alloc_wqes_shadow(struct hinic_wq *wq)
return -ENOMEM;

wq->shadow_idx = devm_kcalloc(&pdev->dev, wq->num_q_pages,
sizeof(wq->prod_idx), GFP_KERNEL);
sizeof(*wq->shadow_idx), GFP_KERNEL);
if (!wq->shadow_idx)
goto err_shadow_idx;
@@ -1084,8 +1084,9 @@ static int stmmac_test_rxp(struct stmmac_priv *priv)
unsigned char addr[ETH_ALEN] = {0xde, 0xad, 0xbe, 0xef, 0x00, 0x00};
struct tc_cls_u32_offload cls_u32 = { };
struct stmmac_packet_attrs attr = { };
struct tc_action **actions, *act;
struct tc_action **actions;
struct tc_u32_sel *sel;
struct tcf_gact *gact;
struct tcf_exts *exts;
int ret, i, nk = 1;

@@ -1110,8 +1111,8 @@ static int stmmac_test_rxp(struct stmmac_priv *priv)
goto cleanup_exts;
}

act = kcalloc(nk, sizeof(*act), GFP_KERNEL);
if (!act) {
gact = kcalloc(nk, sizeof(*gact), GFP_KERNEL);
if (!gact) {
ret = -ENOMEM;
goto cleanup_actions;
}
@@ -1126,9 +1127,7 @@ static int stmmac_test_rxp(struct stmmac_priv *priv)
exts->nr_actions = nk;
exts->actions = actions;
for (i = 0; i < nk; i++) {
struct tcf_gact *gact = to_gact(&act[i]);

actions[i] = &act[i];
actions[i] = (struct tc_action *)&gact[i];
gact->tcf_action = TC_ACT_SHOT;
}

@@ -1152,7 +1151,7 @@ static int stmmac_test_rxp(struct stmmac_priv *priv)
stmmac_tc_setup_cls_u32(priv, priv, &cls_u32);

cleanup_act:
kfree(act);
kfree(gact);
cleanup_actions:
kfree(actions);
cleanup_exts:
@@ -2643,7 +2643,10 @@ static int netvsc_suspend(struct hv_device *dev)

/* Save the current config info */
ndev_ctx->saved_netvsc_dev_info = netvsc_devinfo_get(nvdev);

if (!ndev_ctx->saved_netvsc_dev_info) {
ret = -ENOMEM;
goto out;
}
ret = netvsc_detach(net, nvdev);
out:
rtnl_unlock();
@@ -241,7 +241,7 @@ int st21nfca_hci_se_io(struct nfc_hci_dev *hdev, u32 se_idx,
}
EXPORT_SYMBOL(st21nfca_hci_se_io);

static void st21nfca_se_wt_timeout(struct timer_list *t)
static void st21nfca_se_wt_work(struct work_struct *work)
{
/*
* No answer from the secure element
@@ -254,8 +254,9 @@ static void st21nfca_se_wt_timeout(struct timer_list *t)
*/
/* hardware reset managed through VCC_UICC_OUT power supply */
u8 param = 0x01;
struct st21nfca_hci_info *info = from_timer(info, t,
se_info.bwi_timer);
struct st21nfca_hci_info *info = container_of(work,
struct st21nfca_hci_info,
se_info.timeout_work);

info->se_info.bwi_active = false;

@@ -271,6 +272,13 @@ static void st21nfca_se_wt_timeout(struct timer_list *t)
info->se_info.cb(info->se_info.cb_context, NULL, 0, -ETIME);
}

static void st21nfca_se_wt_timeout(struct timer_list *t)
{
struct st21nfca_hci_info *info = from_timer(info, t, se_info.bwi_timer);

schedule_work(&info->se_info.timeout_work);
}

static void st21nfca_se_activation_timeout(struct timer_list *t)
{
struct st21nfca_hci_info *info = from_timer(info, t,
@@ -360,6 +368,7 @@ int st21nfca_apdu_reader_event_received(struct nfc_hci_dev *hdev,
switch (event) {
case ST21NFCA_EVT_TRANSMIT_DATA:
del_timer_sync(&info->se_info.bwi_timer);
cancel_work_sync(&info->se_info.timeout_work);
info->se_info.bwi_active = false;
r = nfc_hci_send_event(hdev, ST21NFCA_DEVICE_MGNT_GATE,
ST21NFCA_EVT_SE_END_OF_APDU_TRANSFER, NULL, 0);
@@ -389,6 +398,7 @@ void st21nfca_se_init(struct nfc_hci_dev *hdev)
struct st21nfca_hci_info *info = nfc_hci_get_clientdata(hdev);

init_completion(&info->se_info.req_completion);
INIT_WORK(&info->se_info.timeout_work, st21nfca_se_wt_work);
/* initialize timers */
timer_setup(&info->se_info.bwi_timer, st21nfca_se_wt_timeout, 0);
info->se_info.bwi_active = false;
@@ -416,6 +426,7 @@ void st21nfca_se_deinit(struct nfc_hci_dev *hdev)
if (info->se_info.se_active)
del_timer_sync(&info->se_info.se_active_timer);

cancel_work_sync(&info->se_info.timeout_work);
info->se_info.bwi_active = false;
info->se_info.se_active = false;
}
@@ -141,6 +141,7 @@ struct st21nfca_se_info {

se_io_cb_t cb;
void *cb_context;
struct work_struct timeout_work;
};

struct st21nfca_hci_info {
@@ -43,6 +43,9 @@
#define OFF_PTP_SOURCE_UUID 22 /* PTPv1 only */
#define OFF_PTP_SEQUENCE_ID 30

/* PTP header flag fields */
#define PTP_FLAG_TWOSTEP BIT(1)

/* Below defines should actually be removed at some point in time. */
#define IP6_HLEN 40
#define UDP_HLEN 8
@@ -1330,7 +1330,7 @@ TRACE_EVENT(rxrpc_call_reset,
__entry->call_serial = call->rx_serial;
__entry->conn_serial = call->conn->hi_serial;
__entry->tx_seq = call->tx_hard_ack;
__entry->rx_seq = call->ackr_seen;
__entry->rx_seq = call->rx_hard_ack;
),

TP_printk("c=%08x %08x:%08x r=%08x/%08x tx=%08x rx=%08x",
@@ -943,10 +943,11 @@ static void create_le_conn_complete(struct hci_dev *hdev, void *data, int err)

bt_dev_err(hdev, "request failed to create LE connection: err %d", err);

if (!conn)
/* Check if connection is still pending */
if (conn != hci_lookup_le_connect(hdev))
goto done;

hci_le_conn_failed(conn, err);
hci_conn_failed(conn, err);

done:
hci_dev_unlock(hdev);
@@ -5632,10 +5632,12 @@ static void le_conn_complete_evt(struct hci_dev *hdev, u8 status,
status = HCI_ERROR_INVALID_PARAMETERS;
}

if (status) {
hci_conn_failed(conn, status);
/* All connection failure handling is taken care of by the
* hci_conn_failed function which is triggered by the HCI
* request completion callbacks used for connecting.
*/
if (status)
goto unlock;
}

if (conn->dst_type == ADDR_LE_DEV_PUBLIC)
addr_type = BDADDR_LE_PUBLIC;
@@ -668,13 +668,12 @@ struct rxrpc_call {

spinlock_t input_lock; /* Lock for packet input to this call */

/* receive-phase ACK management */
/* Receive-phase ACK management (ACKs we send). */
u8 ackr_reason; /* reason to ACK */
rxrpc_serial_t ackr_serial; /* serial of packet being ACK'd */
rxrpc_serial_t ackr_first_seq; /* first sequence number received */
rxrpc_seq_t ackr_prev_seq; /* previous sequence number received */
rxrpc_seq_t ackr_consumed; /* Highest packet shown consumed */
rxrpc_seq_t ackr_seen; /* Highest packet shown seen */
rxrpc_seq_t ackr_highest_seq; /* Higest sequence number received */
atomic_t ackr_nr_unacked; /* Number of unacked packets */
atomic_t ackr_nr_consumed; /* Number of packets needing hard ACK */

/* RTT management */
rxrpc_serial_t rtt_serial[4]; /* Serial number of DATA or PING sent */
@@ -684,8 +683,10 @@ struct rxrpc_call {
#define RXRPC_CALL_RTT_AVAIL_MASK 0xf
#define RXRPC_CALL_RTT_PEND_SHIFT 8

/* transmission-phase ACK management */
/* Transmission-phase ACK management (ACKs we've received). */
ktime_t acks_latest_ts; /* Timestamp of latest ACK received */
rxrpc_seq_t acks_first_seq; /* first sequence number received */
rxrpc_seq_t acks_prev_seq; /* Highest previousPacket received */
rxrpc_seq_t acks_lowest_nak; /* Lowest NACK in the buffer (or ==tx_hard_ack) */
rxrpc_seq_t acks_lost_top; /* tx_top at the time lost-ack ping sent */
rxrpc_serial_t acks_lost_ping; /* Serial number of probe ACK */
@@ -406,7 +406,8 @@ recheck_state:
goto recheck_state;
}

if (test_and_clear_bit(RXRPC_CALL_EV_RESEND, &call->events)) {
if (test_and_clear_bit(RXRPC_CALL_EV_RESEND, &call->events) &&
call->state != RXRPC_CALL_CLIENT_RECV_REPLY) {
rxrpc_resend(call, now);
goto recheck_state;
}
@@ -412,8 +412,8 @@ static void rxrpc_input_data(struct rxrpc_call *call, struct sk_buff *skb)
{
struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
enum rxrpc_call_state state;
unsigned int j, nr_subpackets;
rxrpc_serial_t serial = sp->hdr.serial, ack_serial = 0;
unsigned int j, nr_subpackets, nr_unacked = 0;
rxrpc_serial_t serial = sp->hdr.serial, ack_serial = serial;
rxrpc_seq_t seq0 = sp->hdr.seq, hard_ack;
bool immediate_ack = false, jumbo_bad = false;
u8 ack = 0;
@@ -453,7 +453,6 @@ static void rxrpc_input_data(struct rxrpc_call *call, struct sk_buff *skb)
!rxrpc_receiving_reply(call))
goto unlock;

call->ackr_prev_seq = seq0;
hard_ack = READ_ONCE(call->rx_hard_ack);

nr_subpackets = sp->nr_subpackets;
@@ -534,6 +533,9 @@ static void rxrpc_input_data(struct rxrpc_call *call, struct sk_buff *skb)
ack_serial = serial;
}

if (after(seq0, call->ackr_highest_seq))
call->ackr_highest_seq = seq0;

/* Queue the packet. We use a couple of memory barriers here as need
* to make sure that rx_top is perceived to be set after the buffer
* pointer and that the buffer pointer is set after the annotation and
@@ -567,6 +569,8 @@ static void rxrpc_input_data(struct rxrpc_call *call, struct sk_buff *skb)
sp = NULL;
}

nr_unacked++;

if (last) {
set_bit(RXRPC_CALL_RX_LAST, &call->flags);
if (!ack) {
@@ -586,9 +590,14 @@ static void rxrpc_input_data(struct rxrpc_call *call, struct sk_buff *skb)
}
call->rx_expect_next = seq + 1;
}
if (!ack)
ack_serial = serial;
}

ack:
if (atomic_add_return(nr_unacked, &call->ackr_nr_unacked) > 2 && !ack)
ack = RXRPC_ACK_IDLE;

if (ack)
rxrpc_propose_ACK(call, ack, ack_serial,
immediate_ack, true,
@@ -812,7 +821,7 @@ static void rxrpc_input_soft_acks(struct rxrpc_call *call, u8 *acks,
static bool rxrpc_is_ack_valid(struct rxrpc_call *call,
rxrpc_seq_t first_pkt, rxrpc_seq_t prev_pkt)
{
rxrpc_seq_t base = READ_ONCE(call->ackr_first_seq);
rxrpc_seq_t base = READ_ONCE(call->acks_first_seq);

if (after(first_pkt, base))
return true; /* The window advanced */
@@ -820,7 +829,7 @@ static bool rxrpc_is_ack_valid(struct rxrpc_call *call,
if (before(first_pkt, base))
return false; /* firstPacket regressed */

if (after_eq(prev_pkt, call->ackr_prev_seq))
if (after_eq(prev_pkt, call->acks_prev_seq))
return true; /* previousPacket hasn't regressed. */

/* Some rx implementations put a serial number in previousPacket. */
@@ -933,8 +942,8 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb)
/* Discard any out-of-order or duplicate ACKs (outside lock). */
if (!rxrpc_is_ack_valid(call, first_soft_ack, prev_pkt)) {
trace_rxrpc_rx_discard_ack(call->debug_id, ack_serial,
first_soft_ack, call->ackr_first_seq,
prev_pkt, call->ackr_prev_seq);
first_soft_ack, call->acks_first_seq,
prev_pkt, call->acks_prev_seq);
return;
}

@@ -949,14 +958,14 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb)
/* Discard any out-of-order or duplicate ACKs (inside lock). */
if (!rxrpc_is_ack_valid(call, first_soft_ack, prev_pkt)) {
trace_rxrpc_rx_discard_ack(call->debug_id, ack_serial,
first_soft_ack, call->ackr_first_seq,
prev_pkt, call->ackr_prev_seq);
first_soft_ack, call->acks_first_seq,
prev_pkt, call->acks_prev_seq);
goto out;
}
call->acks_latest_ts = skb->tstamp;

call->ackr_first_seq = first_soft_ack;
call->ackr_prev_seq = prev_pkt;
call->acks_first_seq = first_soft_ack;
call->acks_prev_seq = prev_pkt;

/* Parse rwind and mtu sizes if provided. */
if (buf.info.rxMTU)
@@ -74,11 +74,18 @@ static size_t rxrpc_fill_out_ack(struct rxrpc_connection *conn,
u8 reason)
{
rxrpc_serial_t serial;
unsigned int tmp;
rxrpc_seq_t hard_ack, top, seq;
int ix;
u32 mtu, jmax;
u8 *ackp = pkt->acks;

tmp = atomic_xchg(&call->ackr_nr_unacked, 0);
tmp |= atomic_xchg(&call->ackr_nr_consumed, 0);
if (!tmp && (reason == RXRPC_ACK_DELAY ||
reason == RXRPC_ACK_IDLE))
return 0;

/* Barrier against rxrpc_input_data(). */
serial = call->ackr_serial;
hard_ack = READ_ONCE(call->rx_hard_ack);
@@ -89,7 +96,7 @@ static size_t rxrpc_fill_out_ack(struct rxrpc_connection *conn,
pkt->ack.bufferSpace = htons(8);
pkt->ack.maxSkew = htons(0);
pkt->ack.firstPacket = htonl(hard_ack + 1);
pkt->ack.previousPacket = htonl(call->ackr_prev_seq);
pkt->ack.previousPacket = htonl(call->ackr_highest_seq);
pkt->ack.serial = htonl(serial);
pkt->ack.reason = reason;
pkt->ack.nAcks = top - hard_ack;
@@ -223,6 +230,10 @@ int rxrpc_send_ack_packet(struct rxrpc_call *call, bool ping,
n = rxrpc_fill_out_ack(conn, call, pkt, &hard_ack, &top, reason);

spin_unlock_bh(&call->lock);
if (n == 0) {
kfree(pkt);
return 0;
}

iov[0].iov_base = pkt;
iov[0].iov_len = sizeof(pkt->whdr) + sizeof(pkt->ack) + n;
@@ -259,13 +270,6 @@ int rxrpc_send_ack_packet(struct rxrpc_call *call, bool ping,
ntohl(pkt->ack.serial),
false, true,
rxrpc_propose_ack_retry_tx);
} else {
spin_lock_bh(&call->lock);
if (after(hard_ack, call->ackr_consumed))
call->ackr_consumed = hard_ack;
if (after(top, call->ackr_seen))
call->ackr_seen = top;
spin_unlock_bh(&call->lock);
}

rxrpc_set_keepalive(call);
@@ -260,11 +260,9 @@ static void rxrpc_rotate_rx_window(struct rxrpc_call *call)
rxrpc_end_rx_phase(call, serial);
} else {
/* Check to see if there's an ACK that needs sending. */
if (after_eq(hard_ack, call->ackr_consumed + 2) ||
after_eq(top, call->ackr_seen + 2) ||
(hard_ack == top && after(hard_ack, call->ackr_consumed)))
rxrpc_propose_ACK(call, RXRPC_ACK_DELAY, serial,
true, true,
if (atomic_inc_return(&call->ackr_nr_consumed) > 2)
rxrpc_propose_ACK(call, RXRPC_ACK_IDLE, serial,
true, false,
rxrpc_propose_ack_rotate_rx);
if (call->ackr_reason && call->ackr_reason != RXRPC_ACK_DELAY)
rxrpc_send_ack_packet(call, false, NULL);
@@ -12,7 +12,7 @@

static struct ctl_table_header *rxrpc_sysctl_reg_table;
static const unsigned int four = 4;
static const unsigned int thirtytwo = 32;
static const unsigned int max_backlog = RXRPC_BACKLOG_MAX - 1;
static const unsigned int n_65535 = 65535;
static const unsigned int n_max_acks = RXRPC_RXTX_BUFF_SIZE - 1;
static const unsigned long one_jiffy = 1;
@@ -89,7 +89,7 @@ static struct ctl_table rxrpc_sysctl_table[] = {
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = (void *)&four,
.extra2 = (void *)&thirtytwo,
.extra2 = (void *)&max_backlog,
},
{
.procname = "rx_window_size",
@@ -1584,11 +1584,11 @@ static int smc_connect(struct socket *sock, struct sockaddr *addr,
if (rc && rc != -EINPROGRESS)
goto out;

sock_hold(&smc->sk); /* sock put in passive closing */
if (smc->use_fallback) {
sock->state = rc ? SS_CONNECTING : SS_CONNECTED;
goto out;
}
sock_hold(&smc->sk); /* sock put in passive closing */
if (flags & O_NONBLOCK) {
if (queue_work(smc_hs_wq, &smc->connect_work))
smc->connect_nonblock = 1;
@@ -2118,13 +2118,13 @@ static int smc_listen_rdma_reg(struct smc_sock *new_smc, bool local_first)
return 0;
}

static void smc_find_rdma_v2_device_serv(struct smc_sock *new_smc,
struct smc_clc_msg_proposal *pclc,
struct smc_init_info *ini)
static int smc_find_rdma_v2_device_serv(struct smc_sock *new_smc,
struct smc_clc_msg_proposal *pclc,
struct smc_init_info *ini)
{
struct smc_clc_v2_extension *smc_v2_ext;
u8 smcr_version;
int rc;
int rc = 0;

if (!(ini->smcr_version & SMC_V2) || !smcr_indicated(ini->smc_type_v2))
goto not_found;
@@ -2142,26 +2142,31 @@ static void smc_find_rdma_v2_device_serv(struct smc_sock *new_smc,
ini->smcrv2.saddr = new_smc->clcsock->sk->sk_rcv_saddr;
ini->smcrv2.daddr = smc_ib_gid_to_ipv4(smc_v2_ext->roce);
rc = smc_find_rdma_device(new_smc, ini);
if (rc) {
smc_find_ism_store_rc(rc, ini);
if (rc)
goto not_found;
}

if (!ini->smcrv2.uses_gateway)
memcpy(ini->smcrv2.nexthop_mac, pclc->lcl.mac, ETH_ALEN);

smcr_version = ini->smcr_version;
ini->smcr_version = SMC_V2;
rc = smc_listen_rdma_init(new_smc, ini);
if (!rc)
rc = smc_listen_rdma_reg(new_smc, ini->first_contact_local);
if (!rc)
return;
ini->smcr_version = smcr_version;
smc_find_ism_store_rc(rc, ini);
if (rc) {
ini->smcr_version = smcr_version;
goto not_found;
}
rc = smc_listen_rdma_reg(new_smc, ini->first_contact_local);
if (rc) {
ini->smcr_version = smcr_version;
goto not_found;
}
return 0;

not_found:
rc = rc ?: SMC_CLC_DECL_NOSMCDEV;
ini->smcr_version &= ~SMC_V2;
ini->check_smcrv2 = false;
return rc;
}

static int smc_find_rdma_v1_device_serv(struct smc_sock *new_smc,
@@ -2194,6 +2199,7 @@ static int smc_listen_find_device(struct smc_sock *new_smc,
struct smc_init_info *ini)
{
int prfx_rc;
int rc;

/* check for ISM device matching V2 proposed device */
smc_find_ism_v2_device_serv(new_smc, pclc, ini);
@@ -2221,14 +2227,18 @@ static int smc_listen_find_device(struct smc_sock *new_smc,
return ini->rc ?: SMC_CLC_DECL_NOSMCDDEV;

/* check if RDMA V2 is available */
smc_find_rdma_v2_device_serv(new_smc, pclc, ini);
if (ini->smcrv2.ib_dev_v2)
rc = smc_find_rdma_v2_device_serv(new_smc, pclc, ini);
if (!rc)
return 0;

/* skip V1 check if V2 is unavailable for non-Device reason */
if (rc != SMC_CLC_DECL_NOSMCDEV &&
rc != SMC_CLC_DECL_NOSMCRDEV &&
rc != SMC_CLC_DECL_NOSMCDDEV)
return rc;

/* check if RDMA V1 is available */
if (!prfx_rc) {
int rc;

rc = smc_find_rdma_v1_device_serv(new_smc, pclc, ini);
smc_find_ism_store_rc(rc, ini);
return (!rc) ? 0 : ini->rc;