forked from Minki/linux
drbd: Track the numbers of sectors in flight
Signed-off-by: Philipp Reisner <philipp.reisner@linbit.com>
Signed-off-by: Lars Ellenberg <lars.ellenberg@linbit.com>
This commit is contained in:
parent
688593c5a8
commit
759fbdfba6
@ -1102,6 +1102,7 @@ struct drbd_conf {
|
||||
struct fifo_buffer rs_plan_s; /* correction values of resync planer */
|
||||
int rs_in_flight; /* resync sectors in flight (to proxy, in proxy and from proxy) */
|
||||
int rs_planed; /* resync sectors already planed */
|
||||
atomic_t ap_in_flight; /* App sectors in flight (waiting for ack) */
|
||||
};
|
||||
|
||||
static inline struct drbd_conf *minor_to_mdev(unsigned int minor)
|
||||
|
@ -2799,6 +2799,7 @@ void drbd_init_set_defaults(struct drbd_conf *mdev)
|
||||
atomic_set(&mdev->pp_in_use_by_net, 0);
|
||||
atomic_set(&mdev->rs_sect_in, 0);
|
||||
atomic_set(&mdev->rs_sect_ev, 0);
|
||||
atomic_set(&mdev->ap_in_flight, 0);
|
||||
|
||||
mutex_init(&mdev->md_io_mutex);
|
||||
mutex_init(&mdev->data.mutex);
|
||||
|
@ -558,6 +558,9 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
|
||||
|
||||
case handed_over_to_network:
|
||||
/* assert something? */
|
||||
if (bio_data_dir(req->master_bio) == WRITE)
|
||||
atomic_add(req->size>>9, &mdev->ap_in_flight);
|
||||
|
||||
if (bio_data_dir(req->master_bio) == WRITE &&
|
||||
mdev->net_conf->wire_protocol == DRBD_PROT_A) {
|
||||
/* this is what is dangerous about protocol A:
|
||||
@ -591,6 +594,9 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
|
||||
dec_ap_pending(mdev);
|
||||
req->rq_state &= ~(RQ_NET_OK|RQ_NET_PENDING);
|
||||
req->rq_state |= RQ_NET_DONE;
|
||||
if (req->rq_state & RQ_NET_SENT && req->rq_state & RQ_WRITE)
|
||||
atomic_sub(req->size>>9, &mdev->ap_in_flight);
|
||||
|
||||
/* if it is still queued, we may not complete it here.
|
||||
* it will be canceled soon. */
|
||||
if (!(req->rq_state & RQ_NET_QUEUED))
|
||||
@ -628,14 +634,17 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
|
||||
req->rq_state |= RQ_NET_OK;
|
||||
D_ASSERT(req->rq_state & RQ_NET_PENDING);
|
||||
dec_ap_pending(mdev);
|
||||
atomic_sub(req->size>>9, &mdev->ap_in_flight);
|
||||
req->rq_state &= ~RQ_NET_PENDING;
|
||||
_req_may_be_done_not_susp(req, m);
|
||||
break;
|
||||
|
||||
case neg_acked:
|
||||
/* assert something? */
|
||||
if (req->rq_state & RQ_NET_PENDING)
|
||||
if (req->rq_state & RQ_NET_PENDING) {
|
||||
dec_ap_pending(mdev);
|
||||
atomic_sub(req->size>>9, &mdev->ap_in_flight);
|
||||
}
|
||||
req->rq_state &= ~(RQ_NET_OK|RQ_NET_PENDING);
|
||||
|
||||
req->rq_state |= RQ_NET_DONE;
|
||||
@ -692,6 +701,8 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
|
||||
}
|
||||
D_ASSERT(req->rq_state & RQ_NET_SENT);
|
||||
req->rq_state |= RQ_NET_DONE;
|
||||
if (mdev->net_conf->wire_protocol == DRBD_PROT_A)
|
||||
atomic_sub(req->size>>9, &mdev->ap_in_flight);
|
||||
_req_may_be_done(req, m); /* Allowed while state.susp */
|
||||
break;
|
||||
|
||||
|
@ -338,19 +338,21 @@ static inline int _req_mod(struct drbd_request *req, enum drbd_req_event what)
|
||||
return rv;
|
||||
}
|
||||
|
||||
/* completion of master bio is outside of spinlock.
|
||||
* If you need it irqsave, do it your self!
|
||||
* Which means: don't use from bio endio callback. */
|
||||
/* completion of master bio is outside of our spinlock.
|
||||
* We still may or may not be inside some irqs disabled section
|
||||
* of the lower level driver completion callback, so we need to
|
||||
* spin_lock_irqsave here. */
|
||||
static inline int req_mod(struct drbd_request *req,
|
||||
enum drbd_req_event what)
|
||||
{
|
||||
unsigned long flags;
|
||||
struct drbd_conf *mdev = req->mdev;
|
||||
struct bio_and_error m;
|
||||
int rv;
|
||||
|
||||
spin_lock_irq(&mdev->req_lock);
|
||||
spin_lock_irqsave(&mdev->req_lock, flags);
|
||||
rv = __req_mod(req, what, &m);
|
||||
spin_unlock_irq(&mdev->req_lock);
|
||||
spin_unlock_irqrestore(&mdev->req_lock, flags);
|
||||
|
||||
if (m.bio)
|
||||
complete_master_bio(mdev, &m);
|
||||
|
Loading…
Reference in New Issue
Block a user