From dc9b0dc4561dedd44b2bf4b8e5ef1a8a040b2424 Mon Sep 17 00:00:00 2001
From: Ilya Dryomov
Date: Sat, 12 Mar 2022 11:09:34 +0100
Subject: [PATCH 1/4] libceph: disambiguate cluster/pool full log message

Signed-off-by: Ilya Dryomov
---
 net/ceph/osd_client.c | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index 1c5815530e0d..83eb97c94e83 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -2385,7 +2385,11 @@ again:
 		if (ceph_test_opt(osdc->client, ABORT_ON_FULL)) {
 			err = -ENOSPC;
 		} else {
-			pr_warn_ratelimited("FULL or reached pool quota\n");
+			if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL))
+				pr_warn_ratelimited("cluster is full (osdmap FULL)\n");
+			else
+				pr_warn_ratelimited("pool %lld is full or reached quota\n",
+						    req->r_t.base_oloc.pool);
 			req->r_t.paused = true;
 			maybe_request_map(osdc);
 		}

From 7f47f7f3b3c33fd2b4a662cd43cd1af96e1a297e Mon Sep 17 00:00:00 2001
From: Niels Dossche
Date: Tue, 15 Mar 2022 16:29:47 +0100
Subject: [PATCH 2/4] ceph: get snap_rwsem read lock in handle_cap_export for ceph_add_cap

ceph_add_cap says in its function documentation that the caller should
hold the read lock on the session snap_rwsem. Furthermore, not only does
ceph_add_cap need that lock: when it calls ceph_lookup_snap_realm it
eventually calls ceph_get_snap_realm, which asserts via lockdep that
snap_rwsem must be held. handle_cap_export calls ceph_add_cap without
mdsc->snap_rwsem held. Thus, since ceph_get_snap_realm and ceph_add_cap
both need the lock, the common place to acquire it is inside
handle_cap_export.

Signed-off-by: Niels Dossche
Reviewed-by: Xiubo Li
Reviewed-by: Jeff Layton
Signed-off-by: Ilya Dryomov
---
 fs/ceph/caps.c | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index f1ad6884d4da..31204cdc2fbf 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -3870,6 +3870,7 @@ static void handle_cap_export(struct inode *inode, struct ceph_mds_caps *ex,
 	dout("handle_cap_export inode %p ci %p mds%d mseq %d target %d\n",
 	     inode, ci, mds, mseq, target);
 retry:
+	down_read(&mdsc->snap_rwsem);
 	spin_lock(&ci->i_ceph_lock);
 	cap = __get_cap_for_mds(ci, mds);
 	if (!cap || cap->cap_id != le64_to_cpu(ex->cap_id))
@@ -3933,6 +3934,7 @@ retry:
 	}
 
 	spin_unlock(&ci->i_ceph_lock);
+	up_read(&mdsc->snap_rwsem);
 	mutex_unlock(&session->s_mutex);
 
 	/* open target session */
@@ -3958,6 +3960,7 @@ retry:
 
 out_unlock:
 	spin_unlock(&ci->i_ceph_lock);
+	up_read(&mdsc->snap_rwsem);
 	mutex_unlock(&session->s_mutex);
 	if (tsession) {
 		mutex_unlock(&tsession->s_mutex);

From 396ea1681892211dd3fbc1a58bfc2c2c971433e3 Mon Sep 17 00:00:00 2001
From: Xiubo Li
Date: Tue, 22 Mar 2022 11:03:13 +0800
Subject: [PATCH 3/4] ceph: remove incorrect session state check

Once the session is opened, s->s_ttl will be set. When a new mdsmap is
received and the MDS map has changed, some sessions may be closed and
new ones opened, so sessions can legitimately be in the CLOSING state
even without unmounting.
URL: https://tracker.ceph.com/issues/54979
Signed-off-by: Xiubo Li
Reviewed-by: Jeff Layton
Signed-off-by: Ilya Dryomov
---
 fs/ceph/mds_client.c | 6 ------
 1 file changed, 6 deletions(-)

diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index fa38c013126d..00c3de177dd6 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -4434,8 +4434,6 @@ static void maybe_recover_session(struct ceph_mds_client *mdsc)
 
 bool check_session_state(struct ceph_mds_session *s)
 {
-	struct ceph_fs_client *fsc = s->s_mdsc->fsc;
-
 	switch (s->s_state) {
 	case CEPH_MDS_SESSION_OPEN:
 		if (s->s_ttl && time_after(jiffies, s->s_ttl)) {
@@ -4444,10 +4442,6 @@ bool check_session_state(struct ceph_mds_session *s)
 		}
 		break;
 	case CEPH_MDS_SESSION_CLOSING:
-		/* Should never reach this when not force unmounting */
-		WARN_ON_ONCE(s->s_ttl &&
-			     READ_ONCE(fsc->mount_state) != CEPH_MOUNT_SHUTDOWN);
-		fallthrough;
 	case CEPH_MDS_SESSION_NEW:
 	case CEPH_MDS_SESSION_RESTARTING:
 	case CEPH_MDS_SESSION_CLOSED:

From 7acae6183cf37c48b8da48bbbdb78820fb3913f3 Mon Sep 17 00:00:00 2001
From: Xiubo Li
Date: Thu, 14 Apr 2022 09:07:21 +0800
Subject: [PATCH 4/4] ceph: fix possible NULL pointer dereference for req->r_session

The request is inserted into ci->i_unsafe_dirops before req->r_session
is assigned, so it's possible to hit a NULL pointer dereference here.

Cc: stable@vger.kernel.org
URL: https://tracker.ceph.com/issues/55327
Signed-off-by: Xiubo Li
Reviewed-by: Jeff Layton
Tested-by: Aaron Tomlin
Signed-off-by: Ilya Dryomov
---
 fs/ceph/caps.c | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index 31204cdc2fbf..5c14ef04e474 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -2274,6 +2274,8 @@ retry:
 		list_for_each_entry(req, &ci->i_unsafe_dirops,
 				    r_unsafe_dir_item) {
 			s = req->r_session;
+			if (!s)
+				continue;
 			if (unlikely(s->s_mds >= max_sessions)) {
 				spin_unlock(&ci->i_unsafe_lock);
 				for (i = 0; i < max_sessions; i++) {
@@ -2294,6 +2296,8 @@ retry:
 		list_for_each_entry(req, &ci->i_unsafe_iops,
 				    r_unsafe_target_item) {
 			s = req->r_session;
+			if (!s)
+				continue;
 			if (unlikely(s->s_mds >= max_sessions)) {
 				spin_unlock(&ci->i_unsafe_lock);
 				for (i = 0; i < max_sessions; i++) {
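
A note on the last fix: it is the usual "skip entries whose fields are not yet
published" pattern for walking a list whose members become visible before all
of their pointers are assigned. Below is a minimal, self-contained userspace C
sketch of that pattern only; the struct and function names (pending_req,
walk_pending) are invented for illustration and are not the kernel structures
touched by the patch.

#include <stddef.h>
#include <stdio.h>

/* Hypothetical stand-ins for the kernel structures; names are made up. */
struct session {
	int mds;
};

struct pending_req {
	struct session *r_session;	/* may still be NULL when the entry is visible */
};

/* Walk the pending requests, skipping entries whose session is not assigned yet. */
static void walk_pending(struct pending_req *reqs, size_t n)
{
	for (size_t i = 0; i < n; i++) {
		struct session *s = reqs[i].r_session;

		if (!s)
			continue;	/* entry published before r_session was set */
		printf("request %zu belongs to mds%d\n", i, s->mds);
	}
}

int main(void)
{
	struct session s0 = { .mds = 0 };
	struct pending_req reqs[] = {
		{ .r_session = &s0 },
		{ .r_session = NULL },	/* inserted, session not yet assigned */
	};

	walk_pending(reqs, sizeof(reqs) / sizeof(reqs[0]));
	return 0;
}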