drbd: factor out master_bio completion and drbd_request destruction paths

In preparation for multiple connections and reference counting, separate
the code paths for completion of the master bio and destruction of the
request object.

Signed-off-by: Philipp Reisner <philipp.reisner@linbit.com>
Signed-off-by: Lars Ellenberg <lars.ellenberg@linbit.com>
commit 6870ca6d46
parent 8d6cdd7848
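For orientation, here is a minimal user-space sketch (not the kernel code) of the call structure this patch establishes, and of why the split matters for reference counting: completing the master bio and destroying the request object become two independently triggerable steps. The flag values, struct mock_request, the simplified completion condition, and the printf() stand-ins are all assumptions for illustration; only the two-step shape mirrors the patch.

/* mock of the completion/destruction split; compile with: cc -o demo demo.c */
#include <stdio.h>

#define RQ_POSTPONED      (1 << 1)
#define RQ_LOCAL_PENDING  (1 << 2)
#define RQ_NET_DONE       (1 << 3)
#define RQ_NET_MASK       (0xf << 3)

struct mock_request {
        unsigned long rq_state;
        void *master_bio;               /* non-NULL: not yet completed */
};

/* Destruction path: may only run once the master bio is gone (or the
 * request is postponed) and nothing local or network-side is pending. */
static void req_may_be_done(struct mock_request *req)
{
        const unsigned long s = req->rq_state;

        if (req->master_bio && !(s & RQ_POSTPONED))
                return;                 /* not yet completed */
        if (s & RQ_LOCAL_PENDING)
                return;                 /* local I/O still in flight */
        if ((s & RQ_NET_MASK) == 0 || (s & RQ_NET_DONE))
                printf("destroying request\n");  /* _req_is_done() in the patch */
}

/* Completion path: completes the master bio when possible (the real
 * gating is richer than this), then asks the destruction path whether
 * the request object may go away too. */
static void req_may_be_completed(struct mock_request *req)
{
        if (req->master_bio && !(req->rq_state & RQ_LOCAL_PENDING)) {
                printf("completing master bio\n");
                req->master_bio = NULL;
        }
        req_may_be_done(req);
}

int main(void)
{
        struct mock_request req = { .rq_state = RQ_NET_DONE, .master_bio = &req };

        req_may_be_completed(&req);     /* completes the bio, then destroys */
        return 0;
}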
@@ -222,13 +222,44 @@ static void maybe_wakeup_conflicting_requests(struct drbd_request *req)
 		wake_up(&req->w.mdev->misc_wait);
 }
 
+static
+void req_may_be_done(struct drbd_request *req)
+{
+	const unsigned long s = req->rq_state;
+	struct drbd_conf *mdev = req->w.mdev;
+	int rw = req->rq_state & RQ_WRITE ? WRITE : READ;
+
+	/* req->master_bio still present means: Not yet completed.
+	 *
+	 * Unless this is RQ_POSTPONED, which will cause _req_is_done() to
+	 * queue it on the retry workqueue instead of destroying it.
+	 */
+	if (req->master_bio && !(s & RQ_POSTPONED))
+		return;
+
+	/* Local still pending, even though master_bio is already completed?
+	 * may happen for RQ_LOCAL_ABORTED requests. */
+	if (s & RQ_LOCAL_PENDING)
+		return;
+
+	if ((s & RQ_NET_MASK) == 0 || (s & RQ_NET_DONE)) {
+		/* this is disconnected (local only) operation,
+		 * or protocol A, B, or C P_BARRIER_ACK,
+		 * or killed from the transfer log due to connection loss. */
+		_req_is_done(mdev, req, rw);
+	}
+	/* else: network part and not DONE yet. that is
+	 * protocol A, B, or C, barrier ack still pending... */
+}
+
 /* Helper for __req_mod().
  * Set m->bio to the master bio, if it is fit to be completed,
  * or leave it alone (it is initialized to NULL in __req_mod),
  * if it has already been completed, or cannot be completed yet.
  * If m->bio is set, the error status to be returned is placed in m->error.
  */
-void _req_may_be_done(struct drbd_request *req, struct bio_and_error *m)
+static
+void req_may_be_completed(struct drbd_request *req, struct bio_and_error *m)
 {
 	const unsigned long s = req->rq_state;
 	struct drbd_conf *mdev = req->w.mdev;
@@ -309,26 +340,15 @@ void _req_may_be_done(struct drbd_request *req, struct bio_and_error *m)
 			D_ASSERT(s & RQ_NET_DONE);
 		}
 	}
-
-	if (s & RQ_LOCAL_PENDING)
-		return;
-
-	if ((s & RQ_NET_MASK) == 0 || (s & RQ_NET_DONE)) {
-		/* this is disconnected (local only) operation,
-		 * or protocol A, B, or C P_BARRIER_ACK,
-		 * or killed from the transfer log due to connection loss. */
-		_req_is_done(mdev, req, rw);
-	}
-	/* else: network part and not DONE yet. that is
-	 * protocol A, B, or C, barrier ack still pending... */
+	req_may_be_done(req);
 }
 
-static void _req_may_be_done_not_susp(struct drbd_request *req, struct bio_and_error *m)
+static void req_may_be_completed_not_susp(struct drbd_request *req, struct bio_and_error *m)
 {
 	struct drbd_conf *mdev = req->w.mdev;
 
 	if (!drbd_suspended(mdev))
-		_req_may_be_done(req, m);
+		req_may_be_completed(req, m);
 }
 
 /* obviously this could be coded as many single functions
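A side note on the wrapper renamed in the hunk above: nearly every event handler in __req_mod() goes through the _not_susp variant, so the master bio is never completed while I/O is suspended. A small continuation of the user-space sketch from the top makes the gate explicit (it reuses that sketch's mock types and is not compilable on its own; io_suspended is a made-up stand-in for drbd_suspended(mdev)):

static int io_suspended;        /* mock stand-in for drbd_suspended(mdev) */

/* Completion is gated on suspension; the destruction path
 * (req_may_be_done) is reached only via the completion path here. */
static void req_may_be_completed_not_susp(struct mock_request *req)
{
        if (!io_suspended)
                req_may_be_completed(req);
}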
@@ -395,14 +415,14 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
 		req->rq_state &= ~RQ_LOCAL_PENDING;
 
 		maybe_wakeup_conflicting_requests(req);
-		_req_may_be_done_not_susp(req, m);
+		req_may_be_completed_not_susp(req, m);
 		put_ldev(mdev);
 		break;
 
 	case ABORT_DISK_IO:
 		req->rq_state |= RQ_LOCAL_ABORTED;
 		if (req->rq_state & RQ_WRITE)
-			_req_may_be_done_not_susp(req, m);
+			req_may_be_completed_not_susp(req, m);
 		else
 			goto goto_queue_for_net_read;
 		break;
@@ -413,7 +433,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
 
 		__drbd_chk_io_error(mdev, false);
 		maybe_wakeup_conflicting_requests(req);
-		_req_may_be_done_not_susp(req, m);
+		req_may_be_completed_not_susp(req, m);
 		put_ldev(mdev);
 		break;
 
@@ -421,7 +441,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
 		/* it is legal to fail READA */
 		req->rq_state |= RQ_LOCAL_COMPLETED;
 		req->rq_state &= ~RQ_LOCAL_PENDING;
-		_req_may_be_done_not_susp(req, m);
+		req_may_be_completed_not_susp(req, m);
 		put_ldev(mdev);
 		break;
 
@@ -441,7 +461,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
 		/* no point in retrying if there is no good remote data,
 		 * or we have no connection. */
 		if (mdev->state.pdsk != D_UP_TO_DATE) {
-			_req_may_be_done_not_susp(req, m);
+			req_may_be_completed_not_susp(req, m);
 			break;
 		}
 
@@ -458,8 +478,9 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
 		/* from __drbd_make_request
 		 * or from bio_endio during read io-error recovery */
 
-		/* so we can verify the handle in the answer packet
-		 * corresponding hlist_del is in _req_may_be_done() */
+		/* So we can verify the handle in the answer packet.
+		 * Corresponding drbd_remove_request_interval is in
+		 * req_may_be_completed() */
 		D_ASSERT(drbd_interval_empty(&req->i));
 		drbd_insert_interval(&mdev->read_requests, &req->i);
 
@@ -477,7 +498,8 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
 		/* assert something? */
 		/* from __drbd_make_request only */
 
-		/* corresponding hlist_del is in _req_may_be_done() */
+		/* Corresponding drbd_remove_request_interval is in
+		 * req_may_be_completed() */
 		D_ASSERT(drbd_interval_empty(&req->i));
 		drbd_insert_interval(&mdev->write_requests, &req->i);
 
@@ -539,7 +561,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
 		req->rq_state &= ~RQ_NET_QUEUED;
 		/* if we did it right, tl_clear should be scheduled only after
 		 * this, so this should not be necessary! */
-		_req_may_be_done_not_susp(req, m);
+		req_may_be_completed_not_susp(req, m);
 		break;
 
 	case HANDED_OVER_TO_NETWORK:
@@ -562,7 +584,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
 		}
 		req->rq_state &= ~RQ_NET_QUEUED;
 		req->rq_state |= RQ_NET_SENT;
-		_req_may_be_done_not_susp(req, m);
+		req_may_be_completed_not_susp(req, m);
 		break;
 
 	case OOS_HANDED_TO_NETWORK:
@@ -570,7 +592,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
 		 * as far as this connection is concerned. */
 		req->rq_state &= ~RQ_NET_QUEUED;
 		req->rq_state |= RQ_NET_DONE;
-		_req_may_be_done_not_susp(req, m);
+		req_may_be_completed_not_susp(req, m);
 		break;
 
 	case CONNECTION_LOST_WHILE_PENDING:
@@ -591,7 +613,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
 		if (!(req->rq_state & RQ_NET_QUEUED)) {
 			if (p)
 				goto goto_read_retry_local;
-			_req_may_be_done(req, m); /* Allowed while state.susp */
+			req_may_be_completed(req, m); /* Allowed while state.susp */
 		}
 		break;
 
@@ -624,7 +646,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
 			atomic_sub(req->i.size >> 9, &mdev->ap_in_flight);
 		req->rq_state &= ~RQ_NET_PENDING;
 		maybe_wakeup_conflicting_requests(req);
-		_req_may_be_done_not_susp(req, m);
+		req_may_be_completed_not_susp(req, m);
 		break;
 
 	case POSTPONE_WRITE:
@@ -636,7 +658,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
 		D_ASSERT(req->rq_state & RQ_NET_PENDING);
 		req->rq_state |= RQ_POSTPONED;
 		maybe_wakeup_conflicting_requests(req);
-		_req_may_be_done_not_susp(req, m);
+		req_may_be_completed_not_susp(req, m);
 		break;
 
 	case NEG_ACKED:
@@ -654,13 +676,13 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
 			goto goto_read_retry_local;
 
 		maybe_wakeup_conflicting_requests(req);
-		_req_may_be_done_not_susp(req, m);
+		req_may_be_completed_not_susp(req, m);
 		/* else: done by HANDED_OVER_TO_NETWORK */
 		break;
 
 	goto_read_retry_local:
 		if (!drbd_may_do_local_read(mdev, req->i.sector, req->i.size)) {
-			_req_may_be_done_not_susp(req, m);
+			req_may_be_completed_not_susp(req, m);
 			break;
 		}
 		D_ASSERT(!(req->rq_state & RQ_LOCAL_PENDING));
@@ -675,7 +697,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
 		if (!(req->rq_state & RQ_LOCAL_COMPLETED))
 			break;
 
-		_req_may_be_done(req, m); /* Allowed while state.susp */
+		req_may_be_completed(req, m); /* Allowed while state.susp */
 		break;
 
 	case RESTART_FROZEN_DISK_IO:
@@ -696,8 +718,8 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
 	case RESEND:
 		/* If RQ_NET_OK is already set, we got a P_WRITE_ACK or P_RECV_ACK
 		   before the connection loss (B&C only); only P_BARRIER_ACK was missing.
-		   Trowing them out of the TL here by pretending we got a BARRIER_ACK
-		   We ensure that the peer was not rebooted */
+		   Throwing them out of the TL here by pretending we got a BARRIER_ACK.
+		   During connection handshake, we ensure that the peer was not rebooted. */
 		if (!(req->rq_state & RQ_NET_OK)) {
 			if (req->w.cb) {
 				drbd_queue_work(&mdev->tconn->data.work, &req->w);
@@ -723,7 +745,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
 			if (!(req->rq_state & (RQ_EXP_RECEIVE_ACK | RQ_EXP_WRITE_ACK)))
 				atomic_sub(req->i.size>>9, &mdev->ap_in_flight);
 		}
-		_req_may_be_done(req, m); /* Allowed while state.susp */
+		req_may_be_done(req); /* Allowed while state.susp */
 		break;
 
 	case DATA_RECEIVED:
@@ -731,7 +753,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
 		dec_ap_pending(mdev);
 		req->rq_state &= ~RQ_NET_PENDING;
 		req->rq_state |= (RQ_NET_OK|RQ_NET_DONE);
-		_req_may_be_done_not_susp(req, m);
+		req_may_be_completed_not_susp(req, m);
 		break;
 	};
 
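The call site whose behavior visibly changes is in the @@ -723,7 +745,7 hunk: on BARRIER_ACKED the code now invokes req_may_be_done() directly, so a barrier ack can only destroy the request object; it no longer attempts master-bio completion. Continuing the same mock sketch to make the contrast explicit (event names come from __req_mod(); the flag updates are simplified assumptions, and this fragment reuses the earlier mock definitions):

/* On a barrier ack, only destruction is attempted; a still-present
 * master_bio is deliberately left for the completion path. */
static void on_barrier_acked(struct mock_request *req)
{
        req->rq_state |= RQ_NET_DONE;
        req_may_be_done(req);                   /* destruction only */
}

/* Ordinary events may complete the master bio first, then destroy. */
static void on_data_received(struct mock_request *req)
{
        req->rq_state |= RQ_NET_DONE;
        req_may_be_completed_not_susp(req);     /* completion, then destruction */
}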