drbd: announce FLUSH/FUA capability to upper layers
In 8.4, we may have bios spanning two activity log extents.
Fixup drbd_al_begin_io() and drbd_al_complete_io() to deal with
zero sized bios.

Signed-off-by: Philipp Reisner <philipp.reisner@linbit.com>
Signed-off-by: Lars Ellenberg <lars.ellenberg@linbit.com>
commit 81a3537a97
parent 58ffa580a7
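Why the activity log math needed a guard: an empty REQ_FLUSH has i->size == 0, and the old computation of the last touched AL extent then underflows. A minimal standalone sketch (user-space C, not DRBD code; AL_EXTENT_SHIFT is 22 in drbd_int.h, so one extent covers 4 MiB, i.e. 2^(22-9) = 8192 sectors of 512 bytes):

/* sketch.c -- illustration only, not part of this patch */
#include <stdio.h>

#define AL_EXTENT_SHIFT 22

int main(void)
{
	unsigned long long sector = 0;	/* empty REQ_FLUSH at sector 0 */
	unsigned int size = 0;		/* zero-sized bio: no payload */

	unsigned first = sector >> (AL_EXTENT_SHIFT - 9);

	/* old: sector + (size >> 9) - 1 wraps to 2^64 - 1 when size == 0 */
	unsigned old_last = (sector + (size >> 9) - 1) >> (AL_EXTENT_SHIFT - 9);

	/* new: a zero-sized bio touches exactly one extent */
	unsigned new_last = size == 0 ? first :
		(sector + (size >> 9) - 1) >> (AL_EXTENT_SHIFT - 9);

	printf("first=%u old_last=%u new_last=%u\n", first, old_last, new_last);
	return 0;
}

This prints first=0 old_last=4294967295 new_last=0: the old `for (enr = first; enr <= last; enr++)` loop would have walked billions of extents, which is why the hunks below clamp last to first for zero-sized bios and assert first <= last.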
--- a/drivers/block/drbd/drbd_actlog.c
+++ b/drivers/block/drbd/drbd_actlog.c
@@ -248,11 +248,12 @@ void drbd_al_begin_io(struct drbd_conf *mdev, struct drbd_interval *i)
 	/* for bios crossing activity log extent boundaries,
 	 * we may need to activate two extents in one go */
 	unsigned first = i->sector >> (AL_EXTENT_SHIFT-9);
-	unsigned last = (i->sector + (i->size >> 9) - 1) >> (AL_EXTENT_SHIFT-9);
+	unsigned last = i->size == 0 ? first : (i->sector + (i->size >> 9) - 1) >> (AL_EXTENT_SHIFT-9);
 	unsigned enr;
 	bool locked = false;
 
+	D_ASSERT(first <= last);
 	D_ASSERT(atomic_read(&mdev->local_cnt) > 0);
 
 	for (enr = first; enr <= last; enr++)
@@ -305,11 +306,12 @@ void drbd_al_complete_io(struct drbd_conf *mdev, struct drbd_interval *i)
 	/* for bios crossing activity log extent boundaries,
 	 * we may need to activate two extents in one go */
 	unsigned first = i->sector >> (AL_EXTENT_SHIFT-9);
-	unsigned last = (i->sector + (i->size >> 9) - 1) >> (AL_EXTENT_SHIFT-9);
+	unsigned last = i->size == 0 ? first : (i->sector + (i->size >> 9) - 1) >> (AL_EXTENT_SHIFT-9);
 	unsigned enr;
 	struct lc_element *extent;
 	unsigned long flags;
 
+	D_ASSERT(first <= last);
 	spin_lock_irqsave(&mdev->al_lock, flags);
 
 	for (enr = first; enr <= last; enr++) {
@@ -756,7 +758,11 @@ int __drbd_set_out_of_sync(struct drbd_conf *mdev, sector_t sector, int size,
 	unsigned int enr, count = 0;
 	struct lc_element *e;
 
-	if (size <= 0 || !IS_ALIGNED(size, 512) || size > DRBD_MAX_BIO_SIZE) {
+	/* this should be an empty REQ_FLUSH */
+	if (size == 0)
+		return 0;
+
+	if (size < 0 || !IS_ALIGNED(size, 512) || size > DRBD_MAX_BIO_SIZE) {
 		dev_err(DEV, "sector: %llus, size: %d\n",
 			(unsigned long long)sector, size);
 		return 0;
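An empty flush carries no payload, so there is nothing to mark out of sync; splitting the size check lets the size == 0 case return quietly while negative, unaligned, or oversized values still hit the dev_err() path.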
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -2640,6 +2640,7 @@ enum drbd_ret_code conn_new_minor(struct drbd_tconn *tconn, unsigned int minor,
 	q->backing_dev_info.congested_data = mdev;
 
 	blk_queue_make_request(q, drbd_make_request);
+	blk_queue_flush(q, REQ_FLUSH | REQ_FUA);
 	/* Setting the max_hw_sectors to an odd value of 8kibyte here
 	   This triggers a max_bio_size message upon first attach or connect */
 	blk_queue_max_hw_sectors(q, DRBD_MAX_BIO_SIZE_SAFE >> 8);
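The blk_queue_flush() call is the headline change: DRBD now announces to the block layer that it services REQ_FLUSH and REQ_FUA, so empty flushes actually reach drbd_make_request() instead of being handled (or dropped) above it. A hedged usage sketch against the block API of this era; flush_drbd_example is a hypothetical helper for illustration, not part of this patch:

#include <linux/blkdev.h>

/* Hypothetical helper: push an empty flush down to a DRBD device.
 * blkdev_issue_flush() builds a bio with no payload and REQ_FLUSH
 * set -- exactly the i->size == 0 case the surrounding hunks teach
 * DRBD to handle. */
static int flush_drbd_example(struct block_device *bdev)
{
	return blkdev_issue_flush(bdev, GFP_KERNEL, NULL);
}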
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -295,6 +295,9 @@ static void drbd_free_pages(struct drbd_conf *mdev, struct page *page, int is_ne
 	atomic_t *a = is_net ? &mdev->pp_in_use_by_net : &mdev->pp_in_use;
 	int i;
 
+	if (page == NULL)
+		return;
+
 	if (drbd_pp_vacant > (DRBD_MAX_BIO_SIZE/PAGE_SIZE) * minor_count)
 		i = page_chain_free(page);
 	else {
@@ -331,7 +334,7 @@ drbd_alloc_peer_req(struct drbd_conf *mdev, u64 id, sector_t sector,
 		    unsigned int data_size, gfp_t gfp_mask) __must_hold(local)
 {
 	struct drbd_peer_request *peer_req;
-	struct page *page;
+	struct page *page = NULL;
 	unsigned nr_pages = (data_size + PAGE_SIZE -1) >> PAGE_SHIFT;
 
 	if (drbd_insert_fault(mdev, DRBD_FAULT_AL_EE))
@@ -344,9 +347,11 @@ drbd_alloc_peer_req(struct drbd_conf *mdev, u64 id, sector_t sector,
 		return NULL;
 	}
 
-	page = drbd_alloc_pages(mdev, nr_pages, (gfp_mask & __GFP_WAIT));
-	if (!page)
-		goto fail;
+	if (data_size) {
+		page = drbd_alloc_pages(mdev, nr_pages, (gfp_mask & __GFP_WAIT));
+		if (!page)
+			goto fail;
+	}
 
 	drbd_clear_interval(&peer_req->i);
 	peer_req->i.size = data_size;
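Together with the `page = NULL` initialization above, a zero-sized allocation now succeeds with an empty page chain instead of failing inside drbd_alloc_pages(). A sketch of the resulting caller contract (illustrative call, not a line from the patch):

/* Illustration: a peer request for an empty flush comes back with no
 * page chain attached; receive_Data() asserts this invariant below. */
struct drbd_peer_request *peer_req =
	drbd_alloc_peer_req(mdev, id, sector, 0 /* data_size */, GFP_NOIO);
if (peer_req)
	D_ASSERT(peer_req->pages == NULL);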
@@ -1513,8 +1518,6 @@ read_in_block(struct drbd_conf *mdev, u64 id, sector_t sector,
 		data_size -= dgs;
 	}
 
-	if (!expect(data_size != 0))
-		return NULL;
 	if (!expect(IS_ALIGNED(data_size, 512)))
 		return NULL;
 	if (!expect(data_size <= DRBD_MAX_BIO_SIZE))
@@ -1537,6 +1540,9 @@ read_in_block(struct drbd_conf *mdev, u64 id, sector_t sector,
 	if (!peer_req)
 		return NULL;
 
+	if (!data_size)
+		return peer_req;
+
 	ds = data_size;
 	page = peer_req->pages;
 	page_chain_for_each(page) {
@@ -2199,6 +2205,10 @@ static int receive_Data(struct drbd_tconn *tconn, struct packet_info *pi)
 
 	dp_flags = be32_to_cpu(p->dp_flags);
 	rw |= wire_flags_to_bio(mdev, dp_flags);
+	if (peer_req->pages == NULL) {
+		D_ASSERT(peer_req->i.size == 0);
+		D_ASSERT(dp_flags & DP_FLUSH);
+	}
 
 	if (dp_flags & DP_MAY_SET_IN_SYNC)
 		peer_req->flags |= EE_MAY_SET_IN_SYNC;
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -1097,7 +1097,6 @@ int drbd_make_request(struct request_queue *q, struct bio *bio)
 	/*
 	 * what we "blindly" assume:
 	 */
-	D_ASSERT(bio->bi_size > 0);
 	D_ASSERT(IS_ALIGNED(bio->bi_size, 512));
 
 	inc_ap_bio(mdev);
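With the bi_size > 0 assertion gone, drbd_make_request() accepts the empty REQ_FLUSH bios that the blk_queue_flush() announcement above invites.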