net/smc: use local struct sock variables consistently
Cleanup to consistently use the local struct sock definitions. No functional change.

Signed-off-by: Ursula Braun <ubraun@linux.vnet.ibm.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
commit 3163c5071f
parent 9d5fd927d2
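The whole patch follows one pattern: a function that repeatedly writes smc->sk.<field> (or lsmc->sk.<field>) takes the address of the embedded struct sock once, stores it in a local pointer (sk or lsk), and uses that pointer everywhere else. The sketch below only illustrates that before/after shape; it uses simplified stand-in types (my_smc_sock, my_sock and the state/error values are hypothetical, not the kernel's struct smc_sock and struct sock) and is not code from the patch.

/*
 * Hypothetical, simplified illustration of the refactoring pattern in this
 * commit. my_sock, my_smc_sock and the state values are stand-ins, not the
 * kernel's struct sock / struct smc_sock.
 */
#include <stdio.h>

enum my_state { MY_ACTIVE, MY_PEERABORTWAIT };

struct my_sock {
        enum my_state sk_state;
        int sk_err;
};

struct my_smc_sock {
        struct my_sock sk;      /* embedded socket, like smc_sock.sk */
};

/* Before: every access spells out smc->sk.<field>. */
static void abort_old_style(struct my_smc_sock *smc)
{
        smc->sk.sk_err = 103;                   /* stand-in for ECONNABORTED */
        smc->sk.sk_state = MY_PEERABORTWAIT;
}

/* After: take the embedded sock's address once, then use the local pointer. */
static void abort_new_style(struct my_smc_sock *smc)
{
        struct my_sock *sk = &smc->sk;

        sk->sk_err = 103;                       /* stand-in for ECONNABORTED */
        sk->sk_state = MY_PEERABORTWAIT;
}

int main(void)
{
        struct my_smc_sock a = { { MY_ACTIVE, 0 } };
        struct my_smc_sock b = { { MY_ACTIVE, 0 } };

        abort_old_style(&a);
        abort_new_style(&b);
        printf("old style: state=%d err=%d, new style: state=%d err=%d\n",
               a.sk.sk_state, a.sk.sk_err, b.sk.sk_state, b.sk.sk_err);
        return 0;
}

Both helpers leave their argument in the same state; the second form simply avoids repeating the &smc->sk expression, which is the only kind of change this commit makes.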
--- a/net/smc/af_smc.c
+++ b/net/smc/af_smc.c
@@ -581,39 +581,39 @@ out_err:
 static int smc_clcsock_accept(struct smc_sock *lsmc, struct smc_sock **new_smc)
 {
-        struct sock *sk = &lsmc->sk;
-        struct socket *new_clcsock;
+        struct socket *new_clcsock = NULL;
+        struct sock *lsk = &lsmc->sk;
         struct sock *new_sk;
         int rc;
 
-        release_sock(&lsmc->sk);
-        new_sk = smc_sock_alloc(sock_net(sk), NULL);
+        release_sock(lsk);
+        new_sk = smc_sock_alloc(sock_net(lsk), NULL);
         if (!new_sk) {
                 rc = -ENOMEM;
-                lsmc->sk.sk_err = ENOMEM;
+                lsk->sk_err = ENOMEM;
                 *new_smc = NULL;
-                lock_sock(&lsmc->sk);
+                lock_sock(lsk);
                 goto out;
         }
         *new_smc = smc_sk(new_sk);
 
         rc = kernel_accept(lsmc->clcsock, &new_clcsock, 0);
-        lock_sock(&lsmc->sk);
+        lock_sock(lsk);
         if (rc < 0) {
-                lsmc->sk.sk_err = -rc;
+                lsk->sk_err = -rc;
                 new_sk->sk_state = SMC_CLOSED;
                 sock_set_flag(new_sk, SOCK_DEAD);
-                sk->sk_prot->unhash(new_sk);
+                new_sk->sk_prot->unhash(new_sk);
                 sock_put(new_sk);
                 *new_smc = NULL;
                 goto out;
         }
-        if (lsmc->sk.sk_state == SMC_CLOSED) {
+        if (lsk->sk_state == SMC_CLOSED) {
                 if (new_clcsock)
                         sock_release(new_clcsock);
                 new_sk->sk_state = SMC_CLOSED;
                 sock_set_flag(new_sk, SOCK_DEAD);
-                sk->sk_prot->unhash(new_sk);
+                new_sk->sk_prot->unhash(new_sk);
                 sock_put(new_sk);
                 *new_smc = NULL;
                 goto out;
@@ -936,11 +936,12 @@ static void smc_tcp_listen_work(struct work_struct *work)
 {
         struct smc_sock *lsmc = container_of(work, struct smc_sock,
                                              tcp_listen_work);
+        struct sock *lsk = &lsmc->sk;
         struct smc_sock *new_smc;
         int rc = 0;
 
-        lock_sock(&lsmc->sk);
-        while (lsmc->sk.sk_state == SMC_LISTEN) {
+        lock_sock(lsk);
+        while (lsk->sk_state == SMC_LISTEN) {
                 rc = smc_clcsock_accept(lsmc, &new_smc);
                 if (rc)
                         goto out;
@@ -949,15 +950,15 @@ static void smc_tcp_listen_work(struct work_struct *work)
 
                 new_smc->listen_smc = lsmc;
                 new_smc->use_fallback = false; /* assume rdma capability first*/
-                sock_hold(&lsmc->sk); /* sock_put in smc_listen_work */
+                sock_hold(lsk); /* sock_put in smc_listen_work */
                 INIT_WORK(&new_smc->smc_listen_work, smc_listen_work);
                 smc_copy_sock_settings_to_smc(new_smc);
                 schedule_work(&new_smc->smc_listen_work);
         }
 
 out:
-        release_sock(&lsmc->sk);
-        lsmc->sk.sk_data_ready(&lsmc->sk); /* no more listening, wake accept */
+        release_sock(lsk);
+        lsk->sk_data_ready(lsk); /* no more listening, wake accept */
 }
 
 static int smc_listen(struct socket *sock, int backlog)
--- a/net/smc/smc_close.c
+++ b/net/smc/smc_close.c
@@ -115,36 +115,38 @@ static int smc_close_abort(struct smc_connection *conn)
  */
 static void smc_close_active_abort(struct smc_sock *smc)
 {
+        struct sock *sk = &smc->sk;
+
         struct smc_cdc_conn_state_flags *txflags =
                 &smc->conn.local_tx_ctrl.conn_state_flags;
 
-        smc->sk.sk_err = ECONNABORTED;
+        sk->sk_err = ECONNABORTED;
         if (smc->clcsock && smc->clcsock->sk) {
                 smc->clcsock->sk->sk_err = ECONNABORTED;
                 smc->clcsock->sk->sk_state_change(smc->clcsock->sk);
         }
-        switch (smc->sk.sk_state) {
+        switch (sk->sk_state) {
         case SMC_INIT:
         case SMC_ACTIVE:
-                smc->sk.sk_state = SMC_PEERABORTWAIT;
+                sk->sk_state = SMC_PEERABORTWAIT;
                 break;
         case SMC_APPCLOSEWAIT1:
         case SMC_APPCLOSEWAIT2:
                 txflags->peer_conn_abort = 1;
                 sock_release(smc->clcsock);
                 if (!smc_cdc_rxed_any_close(&smc->conn))
-                        smc->sk.sk_state = SMC_PEERABORTWAIT;
+                        sk->sk_state = SMC_PEERABORTWAIT;
                 else
-                        smc->sk.sk_state = SMC_CLOSED;
+                        sk->sk_state = SMC_CLOSED;
                 break;
         case SMC_PEERCLOSEWAIT1:
         case SMC_PEERCLOSEWAIT2:
                 if (!txflags->peer_conn_closed) {
-                        smc->sk.sk_state = SMC_PEERABORTWAIT;
+                        sk->sk_state = SMC_PEERABORTWAIT;
                         txflags->peer_conn_abort = 1;
                         sock_release(smc->clcsock);
                 } else {
-                        smc->sk.sk_state = SMC_CLOSED;
+                        sk->sk_state = SMC_CLOSED;
                 }
                 break;
         case SMC_PROCESSABORT:
@@ -153,7 +155,7 @@ static void smc_close_active_abort(struct smc_sock *smc)
                         txflags->peer_conn_abort = 1;
                         sock_release(smc->clcsock);
                 }
-                smc->sk.sk_state = SMC_CLOSED;
+                sk->sk_state = SMC_CLOSED;
                 break;
         case SMC_PEERFINCLOSEWAIT:
         case SMC_PEERABORTWAIT:
@@ -161,8 +163,8 @@ static void smc_close_active_abort(struct smc_sock *smc)
                 break;
         }
 
-        sock_set_flag(&smc->sk, SOCK_DEAD);
-        smc->sk.sk_state_change(&smc->sk);
+        sock_set_flag(sk, SOCK_DEAD);
+        sk->sk_state_change(sk);
 }
 
 static inline bool smc_close_sent_any_close(struct smc_connection *conn)
@@ -278,7 +280,7 @@ again:
         }
 
         if (old_state != sk->sk_state)
-                sk->sk_state_change(&smc->sk);
+                sk->sk_state_change(sk);
         return rc;
 }
 
@@ -331,7 +333,7 @@ static void smc_close_passive_work(struct work_struct *work)
         struct sock *sk = &smc->sk;
         int old_state;
 
-        lock_sock(&smc->sk);
+        lock_sock(sk);
         old_state = sk->sk_state;
 
         if (!conn->alert_token_local) {
@@ -340,7 +342,7 @@ static void smc_close_passive_work(struct work_struct *work)
                 goto wakeup;
         }
 
-        rxflags = &smc->conn.local_rx_ctrl.conn_state_flags;
+        rxflags = &conn->local_rx_ctrl.conn_state_flags;
         if (rxflags->peer_conn_abort) {
                 smc_close_passive_abort_received(smc);
                 goto wakeup;
@@ -348,7 +350,7 @@ static void smc_close_passive_work(struct work_struct *work)
 
         switch (sk->sk_state) {
         case SMC_INIT:
-                if (atomic_read(&smc->conn.bytes_to_rcv) ||
+                if (atomic_read(&conn->bytes_to_rcv) ||
                     (rxflags->peer_done_writing &&
                      !smc_cdc_rxed_any_close(conn)))
                         sk->sk_state = SMC_APPCLOSEWAIT1;
@@ -365,7 +367,7 @@ static void smc_close_passive_work(struct work_struct *work)
                 /* to check for closing */
         case SMC_PEERCLOSEWAIT2:
         case SMC_PEERFINCLOSEWAIT:
-                if (!smc_cdc_rxed_any_close(&smc->conn))
+                if (!smc_cdc_rxed_any_close(conn))
                         break;
                 if (sock_flag(sk, SOCK_DEAD) &&
                     smc_close_sent_any_close(conn)) {
@@ -394,12 +396,12 @@ wakeup:
                 sk->sk_state_change(sk);
                 if ((sk->sk_state == SMC_CLOSED) &&
                     (sock_flag(sk, SOCK_DEAD) || !sk->sk_socket)) {
-                        smc_conn_free(&smc->conn);
+                        smc_conn_free(conn);
                         schedule_delayed_work(&smc->sock_put_work,
                                               SMC_CLOSE_SOCK_PUT_DELAY);
                 }
         }
-        release_sock(&smc->sk);
+        release_sock(sk);
 }
 
 void smc_close_sock_put_work(struct work_struct *work)
@@ -462,7 +464,7 @@ again:
         }
 
         if (old_state != sk->sk_state)
-                sk->sk_state_change(&smc->sk);
+                sk->sk_state_change(sk);
         return rc;
 }