ksmbd: add refcnt to ksmbd_conn struct

When sending an oplock break request, opinfo->conn is used, but on a
multichannel setup the connection may already have been freed, so a
freed ->conn can be dereferenced. This patch adds a reference count to
the ksmbd_conn struct so that the connection is only freed once it is
no longer in use.

Signed-off-by: Namjae Jeon <linkinjeon@kernel.org>
Signed-off-by: Steve French <stfrench@microsoft.com>
Author:    Namjae Jeon <linkinjeon@kernel.org>  2024-09-03 20:28:08 +09:00
Committer: Steve French <stfrench@microsoft.com>
commit ee426bfb9d
parent 2fb9b5dc80
4 changed files with 23 additions and 40 deletions
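
For orientation, the refcnt lifetime rule that the diff below open-codes looks
roughly like this. This is an illustrative sketch only: the stub type and the
conn_stub_get()/conn_stub_put() helper names are hypothetical and not part of
ksmbd, while atomic_set(), atomic_inc(), atomic_dec_and_test(), kzalloc() and
kfree() are the kernel primitives the patch actually uses.

/*
 * Illustrative-only sketch of the conn->refcnt rule added by this patch.
 * The stub type and helper names are hypothetical; the patch itself
 * open-codes the same atomic calls at each call site shown in the diff.
 */
#include <linux/atomic.h>
#include <linux/slab.h>

struct conn_stub {
	atomic_t refcnt;
	/* ... the rest of struct ksmbd_conn ... */
};

static struct conn_stub *conn_stub_alloc(void)
{
	struct conn_stub *c = kzalloc(sizeof(*c), GFP_KERNEL);

	if (c)
		atomic_set(&c->refcnt, 1);	/* reference owned by the connection itself */
	return c;
}

/* Taken whenever an oplock_info starts pointing at the connection
 * (alloc_opinfo(), ksmbd_reopen_durable_fd()).
 */
static void conn_stub_get(struct conn_stub *c)
{
	atomic_inc(&c->refcnt);
}

/* Dropped when that pointer goes away (free_opinfo(), session_fd_check())
 * and once more by ksmbd_conn_free(); the last put frees the struct.
 */
static void conn_stub_put(struct conn_stub *c)
{
	if (c && atomic_dec_and_test(&c->refcnt))
		kfree(c);
}

With this rule in place the oplock code no longer needs the per-request
r_count get/put dance, which is why opinfo_conn_put() collapses into
opinfo_put() in the oplock.c hunks below.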

fs/smb/server/connection.c

@@ -39,7 +39,8 @@ void ksmbd_conn_free(struct ksmbd_conn *conn)
 	xa_destroy(&conn->sessions);
 	kvfree(conn->request_buf);
 	kfree(conn->preauth_info);
-	kfree(conn);
+	if (atomic_dec_and_test(&conn->refcnt))
+		kfree(conn);
 }
 
 /**
@@ -68,6 +69,7 @@ struct ksmbd_conn *ksmbd_conn_alloc(void)
 	conn->um = NULL;
 	atomic_set(&conn->req_running, 0);
 	atomic_set(&conn->r_count, 0);
+	atomic_set(&conn->refcnt, 1);
 	conn->total_credits = 1;
 	conn->outstanding_credits = 0;

fs/smb/server/connection.h

@@ -106,6 +106,7 @@ struct ksmbd_conn {
 	bool signing_negotiated;
 	__le16 signing_algorithm;
 	bool binding;
+	atomic_t refcnt;
 };
 
 struct ksmbd_conn_ops {

fs/smb/server/oplock.c

@@ -51,6 +51,7 @@ static struct oplock_info *alloc_opinfo(struct ksmbd_work *work,
 	init_waitqueue_head(&opinfo->oplock_brk);
 	atomic_set(&opinfo->refcount, 1);
 	atomic_set(&opinfo->breaking_cnt, 0);
+	atomic_inc(&opinfo->conn->refcnt);
 	return opinfo;
 }
@@ -124,6 +125,8 @@ static void free_opinfo(struct oplock_info *opinfo)
 {
 	if (opinfo->is_lease)
 		free_lease(opinfo);
+	if (opinfo->conn && atomic_dec_and_test(&opinfo->conn->refcnt))
+		kfree(opinfo->conn);
 	kfree(opinfo);
 }
@@ -163,9 +166,7 @@ static struct oplock_info *opinfo_get_list(struct ksmbd_inode *ci)
 			    !atomic_inc_not_zero(&opinfo->refcount))
 				opinfo = NULL;
 			else {
-				atomic_inc(&opinfo->conn->r_count);
 				if (ksmbd_conn_releasing(opinfo->conn)) {
-					atomic_dec(&opinfo->conn->r_count);
 					atomic_dec(&opinfo->refcount);
 					opinfo = NULL;
 				}
@@ -177,26 +178,11 @@ static struct oplock_info *opinfo_get_list(struct ksmbd_inode *ci)
 	return opinfo;
 }
 
-static void opinfo_conn_put(struct oplock_info *opinfo)
+void opinfo_put(struct oplock_info *opinfo)
 {
-	struct ksmbd_conn *conn;
-
 	if (!opinfo)
 		return;
 
-	conn = opinfo->conn;
-	/*
-	 * Checking waitqueue to dropping pending requests on
-	 * disconnection. waitqueue_active is safe because it
-	 * uses atomic operation for condition.
-	 */
-	if (!atomic_dec_return(&conn->r_count) && waitqueue_active(&conn->r_count_q))
-		wake_up(&conn->r_count_q);
-	opinfo_put(opinfo);
-}
-
-void opinfo_put(struct oplock_info *opinfo)
-{
 	if (!atomic_dec_and_test(&opinfo->refcount))
 		return;
@@ -1127,14 +1113,11 @@ void smb_send_parent_lease_break_noti(struct ksmbd_file *fp,
 			if (!atomic_inc_not_zero(&opinfo->refcount))
 				continue;
 
-			atomic_inc(&opinfo->conn->r_count);
-			if (ksmbd_conn_releasing(opinfo->conn)) {
-				atomic_dec(&opinfo->conn->r_count);
+			if (ksmbd_conn_releasing(opinfo->conn))
 				continue;
-			}
 
 			oplock_break(opinfo, SMB2_OPLOCK_LEVEL_NONE);
-			opinfo_conn_put(opinfo);
+			opinfo_put(opinfo);
 		}
 	}
 	up_read(&p_ci->m_lock);
@@ -1167,13 +1150,10 @@ void smb_lazy_parent_lease_break_close(struct ksmbd_file *fp)
 			if (!atomic_inc_not_zero(&opinfo->refcount))
 				continue;
 
-			atomic_inc(&opinfo->conn->r_count);
-			if (ksmbd_conn_releasing(opinfo->conn)) {
-				atomic_dec(&opinfo->conn->r_count);
+			if (ksmbd_conn_releasing(opinfo->conn))
 				continue;
-			}
 			oplock_break(opinfo, SMB2_OPLOCK_LEVEL_NONE);
-			opinfo_conn_put(opinfo);
+			opinfo_put(opinfo);
 		}
 	}
 	up_read(&p_ci->m_lock);
@@ -1252,7 +1232,7 @@ int smb_grant_oplock(struct ksmbd_work *work, int req_op_level, u64 pid,
 	prev_opinfo = opinfo_get_list(ci);
 	if (!prev_opinfo ||
 	    (prev_opinfo->level == SMB2_OPLOCK_LEVEL_NONE && lctx)) {
-		opinfo_conn_put(prev_opinfo);
+		opinfo_put(prev_opinfo);
 		goto set_lev;
 	}
 	prev_op_has_lease = prev_opinfo->is_lease;
@@ -1262,19 +1242,19 @@ int smb_grant_oplock(struct ksmbd_work *work, int req_op_level, u64 pid,
 	if (share_ret < 0 &&
 	    prev_opinfo->level == SMB2_OPLOCK_LEVEL_EXCLUSIVE) {
 		err = share_ret;
-		opinfo_conn_put(prev_opinfo);
+		opinfo_put(prev_opinfo);
 		goto err_out;
 	}
 
 	if (prev_opinfo->level != SMB2_OPLOCK_LEVEL_BATCH &&
 	    prev_opinfo->level != SMB2_OPLOCK_LEVEL_EXCLUSIVE) {
-		opinfo_conn_put(prev_opinfo);
+		opinfo_put(prev_opinfo);
 		goto op_break_not_needed;
 	}
 
 	list_add(&work->interim_entry, &prev_opinfo->interim_list);
 	err = oplock_break(prev_opinfo, SMB2_OPLOCK_LEVEL_II);
-	opinfo_conn_put(prev_opinfo);
+	opinfo_put(prev_opinfo);
 	if (err == -ENOENT)
 		goto set_lev;
 	/* Check all oplock was freed by close */
@@ -1337,14 +1317,14 @@ static void smb_break_all_write_oplock(struct ksmbd_work *work,
 		return;
 	if (brk_opinfo->level != SMB2_OPLOCK_LEVEL_BATCH &&
 	    brk_opinfo->level != SMB2_OPLOCK_LEVEL_EXCLUSIVE) {
-		opinfo_conn_put(brk_opinfo);
+		opinfo_put(brk_opinfo);
 		return;
 	}
 
 	brk_opinfo->open_trunc = is_trunc;
 	list_add(&work->interim_entry, &brk_opinfo->interim_list);
 	oplock_break(brk_opinfo, SMB2_OPLOCK_LEVEL_II);
-	opinfo_conn_put(brk_opinfo);
+	opinfo_put(brk_opinfo);
 }
 
 /**
@@ -1376,11 +1356,8 @@ void smb_break_all_levII_oplock(struct ksmbd_work *work, struct ksmbd_file *fp,
 		if (!atomic_inc_not_zero(&brk_op->refcount))
 			continue;
 
-		atomic_inc(&brk_op->conn->r_count);
-		if (ksmbd_conn_releasing(brk_op->conn)) {
-			atomic_dec(&brk_op->conn->r_count);
+		if (ksmbd_conn_releasing(brk_op->conn))
 			continue;
-		}
 
 		rcu_read_unlock();
 		if (brk_op->is_lease && (brk_op->o_lease->state &
@@ -1411,7 +1388,7 @@ void smb_break_all_levII_oplock(struct ksmbd_work *work, struct ksmbd_file *fp,
 		brk_op->open_trunc = is_trunc;
 		oplock_break(brk_op, SMB2_OPLOCK_LEVEL_NONE);
 next:
-		opinfo_conn_put(brk_op);
+		opinfo_put(brk_op);
 		rcu_read_lock();
 	}
 	rcu_read_unlock();

fs/smb/server/vfs_cache.c

@@ -863,6 +863,8 @@ static bool session_fd_check(struct ksmbd_tree_connect *tcon,
 	list_for_each_entry_rcu(op, &ci->m_op_list, op_entry) {
 		if (op->conn != conn)
 			continue;
+		if (op->conn && atomic_dec_and_test(&op->conn->refcnt))
+			kfree(op->conn);
 		op->conn = NULL;
 	}
 	up_write(&ci->m_lock);
@@ -965,6 +967,7 @@ int ksmbd_reopen_durable_fd(struct ksmbd_work *work, struct ksmbd_file *fp)
 		if (op->conn)
 			continue;
 		op->conn = fp->conn;
+		atomic_inc(&op->conn->refcnt);
 	}
 	up_write(&ci->m_lock);