drbd: use bio op accessors
Separate the op from the rq_flag_bits and have drbd set and get the bio op using bio_set_op_attrs()/bio_op().

Signed-off-by: Mike Christie <mchristi@redhat.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Hannes Reinecke <hare@suse.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
commit bb3cc85e16
parent ad0d9e76a4
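For context on the idiom the diff below applies everywhere: the old interface packed both the operation and the modifier flags into one bi_rw bitmask, so code tested `rw & WRITE`; with the op split out, the operation is a small code read and written through the bio_set_op_attrs()/bio_op() accessors, and direction tests become equality comparisons. The following is a minimal standalone sketch of that idiom; bio_set_op_attrs() and bio_op() are the real accessor names from this series, but the REQ_* values, struct bio layout, and accessor bodies are illustrative stand-ins, not the kernel's actual encoding.

/*
 * Sketch: op is a code, modifier flags stay a bitmask.  Simplified
 * stand-ins for the real kernel definitions.
 */
#include <stdio.h>

enum req_op { REQ_OP_READ, REQ_OP_WRITE, REQ_OP_DISCARD }; /* codes, not bits */

#define REQ_SYNC  (1u << 0)  /* modifier flags remain mask-testable */
#define REQ_FUA   (1u << 1)
#define REQ_FLUSH (1u << 2)

struct bio { unsigned int op; unsigned int op_flags; };

/* stand-in bodies for the real bio_set_op_attrs()/bio_op() accessors */
static void bio_set_op_attrs(struct bio *bio, unsigned int op, unsigned int op_flags)
{
        bio->op = op;
        bio->op_flags = op_flags;
}

static unsigned int bio_op(const struct bio *bio)
{
        return bio->op;
}

int main(void)
{
        struct bio bio;

        /* was: bio->bi_rw = WRITE | REQ_FUA | REQ_FLUSH | REQ_SYNC; */
        bio_set_op_attrs(&bio, REQ_OP_WRITE, REQ_FUA | REQ_FLUSH | REQ_SYNC);

        /* was: if (rw & WRITE) ... -- ops are codes, so compare for equality */
        if (bio_op(&bio) == REQ_OP_WRITE)
                printf("write, flags 0x%x\n", bio.op_flags);
        return 0;
}

This is why every `rw & WRITE` and `rw & REQ_DISCARD` test in the hunks below turns into `op == REQ_OP_WRITE` or `op == REQ_OP_DISCARD`.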
drivers/block/drbd/drbd_actlog.c
@@ -137,19 +137,19 @@ void wait_until_done_or_force_detached(struct drbd_device *device, struct drbd_b
 static int _drbd_md_sync_page_io(struct drbd_device *device,
                                  struct drbd_backing_dev *bdev,
-                                 sector_t sector, int rw)
+                                 sector_t sector, int op)
 {
        struct bio *bio;
        /* we do all our meta data IO in aligned 4k blocks. */
        const int size = 4096;
-       int err;
+       int err, op_flags = 0;
 
        device->md_io.done = 0;
        device->md_io.error = -ENODEV;
 
-       if ((rw & WRITE) && !test_bit(MD_NO_FUA, &device->flags))
-               rw |= REQ_FUA | REQ_FLUSH;
-       rw |= REQ_SYNC | REQ_NOIDLE;
+       if ((op == REQ_OP_WRITE) && !test_bit(MD_NO_FUA, &device->flags))
+               op_flags |= REQ_FUA | REQ_FLUSH;
+       op_flags |= REQ_SYNC | REQ_NOIDLE;
 
        bio = bio_alloc_drbd(GFP_NOIO);
        bio->bi_bdev = bdev->md_bdev;
@@ -159,9 +159,9 @@ static int _drbd_md_sync_page_io(struct drbd_device *device,
                goto out;
        bio->bi_private = device;
        bio->bi_end_io = drbd_md_endio;
-       bio->bi_rw = rw;
+       bio_set_op_attrs(bio, op, op_flags);
 
-       if (!(rw & WRITE) && device->state.disk == D_DISKLESS && device->ldev == NULL)
+       if (op != REQ_OP_WRITE && device->state.disk == D_DISKLESS && device->ldev == NULL)
                /* special case, drbd_md_read() during drbd_adm_attach(): no get_ldev */
                ;
        else if (!get_ldev_if_state(device, D_ATTACHING)) {
@@ -174,7 +174,7 @@ static int _drbd_md_sync_page_io(struct drbd_device *device,
        bio_get(bio); /* one bio_put() is in the completion handler */
        atomic_inc(&device->md_io.in_use); /* drbd_md_put_buffer() is in the completion handler */
        device->md_io.submit_jif = jiffies;
-       if (drbd_insert_fault(device, (rw & WRITE) ? DRBD_FAULT_MD_WR : DRBD_FAULT_MD_RD))
+       if (drbd_insert_fault(device, (op == REQ_OP_WRITE) ? DRBD_FAULT_MD_WR : DRBD_FAULT_MD_RD))
                bio_io_error(bio);
        else
                submit_bio(bio);
@@ -188,7 +188,7 @@ static int _drbd_md_sync_page_io(struct drbd_device *device,
 }
 
 int drbd_md_sync_page_io(struct drbd_device *device, struct drbd_backing_dev *bdev,
-                        sector_t sector, int rw)
+                        sector_t sector, int op)
 {
        int err;
        D_ASSERT(device, atomic_read(&device->md_io.in_use) == 1);
@@ -197,19 +197,21 @@ int drbd_md_sync_page_io(struct drbd_device *device, struct drbd_backing_dev *bd
 
        dynamic_drbd_dbg(device, "meta_data io: %s [%d]:%s(,%llus,%s) %pS\n",
             current->comm, current->pid, __func__,
-            (unsigned long long)sector, (rw & WRITE) ? "WRITE" : "READ",
+            (unsigned long long)sector, (op == REQ_OP_WRITE) ? "WRITE" : "READ",
             (void*)_RET_IP_ );
 
        if (sector < drbd_md_first_sector(bdev) ||
            sector + 7 > drbd_md_last_sector(bdev))
                drbd_alert(device, "%s [%d]:%s(,%llus,%s) out of range md access!\n",
                     current->comm, current->pid, __func__,
-                    (unsigned long long)sector, (rw & WRITE) ? "WRITE" : "READ");
+                    (unsigned long long)sector,
+                    (op == REQ_OP_WRITE) ? "WRITE" : "READ");
 
-       err = _drbd_md_sync_page_io(device, bdev, sector, rw);
+       err = _drbd_md_sync_page_io(device, bdev, sector, op);
        if (err) {
                drbd_err(device, "drbd_md_sync_page_io(,%llus,%s) failed with error %d\n",
-                   (unsigned long long)sector, (rw & WRITE) ? "WRITE" : "READ", err);
+                   (unsigned long long)sector,
+                   (op == REQ_OP_WRITE) ? "WRITE" : "READ", err);
        }
        return err;
 }
drivers/block/drbd/drbd_bitmap.c
@@ -980,7 +980,7 @@ static void bm_page_io_async(struct drbd_bm_aio_ctx *ctx, int page_nr) __must_ho
        struct drbd_bitmap *b = device->bitmap;
        struct page *page;
        unsigned int len;
-       unsigned int rw = (ctx->flags & BM_AIO_READ) ? READ : WRITE;
+       unsigned int op = (ctx->flags & BM_AIO_READ) ? REQ_OP_READ : REQ_OP_WRITE;
 
        sector_t on_disk_sector =
                device->ldev->md.md_offset + device->ldev->md.bm_offset;
@@ -1011,9 +1011,9 @@ static void bm_page_io_async(struct drbd_bm_aio_ctx *ctx, int page_nr) __must_ho
        bio_add_page(bio, page, len, 0);
        bio->bi_private = ctx;
        bio->bi_end_io = drbd_bm_endio;
-       bio->bi_rw = rw;
+       bio_set_op_attrs(bio, op, 0);
 
-       if (drbd_insert_fault(device, (rw & WRITE) ? DRBD_FAULT_MD_WR : DRBD_FAULT_MD_RD)) {
+       if (drbd_insert_fault(device, (op == REQ_OP_WRITE) ? DRBD_FAULT_MD_WR : DRBD_FAULT_MD_RD)) {
                bio_io_error(bio);
        } else {
                submit_bio(bio);
drivers/block/drbd/drbd_int.h
@@ -1507,7 +1507,7 @@ extern int drbd_resync_finished(struct drbd_device *device);
 extern void *drbd_md_get_buffer(struct drbd_device *device, const char *intent);
 extern void drbd_md_put_buffer(struct drbd_device *device);
 extern int drbd_md_sync_page_io(struct drbd_device *device,
-               struct drbd_backing_dev *bdev, sector_t sector, int rw);
+               struct drbd_backing_dev *bdev, sector_t sector, int op);
 extern void drbd_ov_out_of_sync_found(struct drbd_device *, sector_t, int);
 extern void wait_until_done_or_force_detached(struct drbd_device *device,
                struct drbd_backing_dev *bdev, unsigned int *done);
@@ -1557,7 +1557,7 @@ extern bool drbd_rs_should_slow_down(struct drbd_device *device, sector_t sector
                bool throttle_if_app_is_waiting);
 extern int drbd_submit_peer_request(struct drbd_device *,
                                    struct drbd_peer_request *, const unsigned,
-                                   const int);
+                                   const unsigned, const int);
 extern int drbd_free_peer_reqs(struct drbd_device *, struct list_head *);
 extern struct drbd_peer_request *drbd_alloc_peer_req(struct drbd_peer_device *, u64,
                                                     sector_t, unsigned int,
drivers/block/drbd/drbd_main.c
@@ -1603,15 +1603,16 @@ static int _drbd_send_zc_ee(struct drbd_peer_device *peer_device,
        return 0;
 }
 
-static u32 bio_flags_to_wire(struct drbd_connection *connection, unsigned long bi_rw)
+static u32 bio_flags_to_wire(struct drbd_connection *connection,
+                             struct bio *bio)
 {
        if (connection->agreed_pro_version >= 95)
-               return (bi_rw & REQ_SYNC ? DP_RW_SYNC : 0) |
-                       (bi_rw & REQ_FUA ? DP_FUA : 0) |
-                       (bi_rw & REQ_FLUSH ? DP_FLUSH : 0) |
-                       (bi_rw & REQ_DISCARD ? DP_DISCARD : 0);
+               return (bio->bi_rw & REQ_SYNC ? DP_RW_SYNC : 0) |
+                       (bio->bi_rw & REQ_FUA ? DP_FUA : 0) |
+                       (bio->bi_rw & REQ_FLUSH ? DP_FLUSH : 0) |
+                       (bio_op(bio) == REQ_OP_DISCARD ? DP_DISCARD : 0);
        else
-               return bi_rw & REQ_SYNC ? DP_RW_SYNC : 0;
+               return bio->bi_rw & REQ_SYNC ? DP_RW_SYNC : 0;
 }
 
 /* Used to send write or TRIM aka REQ_DISCARD requests
@@ -1636,7 +1637,7 @@ int drbd_send_dblock(struct drbd_peer_device *peer_device, struct drbd_request *
        p->sector = cpu_to_be64(req->i.sector);
        p->block_id = (unsigned long)req;
        p->seq_num = cpu_to_be32(atomic_inc_return(&device->packet_seq));
-       dp_flags = bio_flags_to_wire(peer_device->connection, req->master_bio->bi_rw);
+       dp_flags = bio_flags_to_wire(peer_device->connection, req->master_bio);
        if (device->state.conn >= C_SYNC_SOURCE &&
            device->state.conn <= C_PAUSED_SYNC_T)
                dp_flags |= DP_MAY_SET_IN_SYNC;
@@ -3061,7 +3062,7 @@ void drbd_md_write(struct drbd_device *device, void *b)
        D_ASSERT(device, drbd_md_ss(device->ldev) == device->ldev->md.md_offset);
        sector = device->ldev->md.md_offset;
 
-       if (drbd_md_sync_page_io(device, device->ldev, sector, WRITE)) {
+       if (drbd_md_sync_page_io(device, device->ldev, sector, REQ_OP_WRITE)) {
                /* this was a try anyways ... */
                drbd_err(device, "meta data update failed!\n");
                drbd_chk_io_error(device, 1, DRBD_META_IO_ERROR);
@@ -3263,7 +3264,8 @@ int drbd_md_read(struct drbd_device *device, struct drbd_backing_dev *bdev)
         * Affects the paranoia out-of-range access check in drbd_md_sync_page_io(). */
        bdev->md.md_size_sect = 8;
 
-       if (drbd_md_sync_page_io(device, bdev, bdev->md.md_offset, READ)) {
+       if (drbd_md_sync_page_io(device, bdev, bdev->md.md_offset,
+                                REQ_OP_READ)) {
                /* NOTE: can't do normal error processing here as this is
                   called BEFORE disk is attached */
                drbd_err(device, "Error while reading metadata.\n");
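The drbd_main.c hunks show why bio_flags_to_wire() now takes the whole bio: REQ_SYNC/REQ_FUA/REQ_FLUSH remain mask-testable bits in bi_rw, but discard is an op code only reachable through bio_op(). Below is a compilable sketch of that sender-side mapping; the DP_* values, struct bio layout, and helper bodies are illustrative stand-ins (DRBD's real protocol constants live elsewhere), and the agreed_pro_version < 95 fallback is omitted.

/*
 * Sender-side sketch: bio state -> wire dp_flags.  Flags are mask-tested,
 * the discard op is compared for equality via bio_op().  All values here
 * are stand-ins, not DRBD's real wire protocol.
 */
#include <stdio.h>

enum req_op { REQ_OP_READ, REQ_OP_WRITE, REQ_OP_DISCARD };

#define REQ_SYNC   (1u << 0)
#define REQ_FUA    (1u << 1)
#define REQ_FLUSH  (1u << 2)

#define DP_RW_SYNC (1u << 1)  /* stand-in wire flags */
#define DP_FUA     (1u << 4)
#define DP_FLUSH   (1u << 5)
#define DP_DISCARD (1u << 6)

struct bio { unsigned int op; unsigned int bi_rw; };
static unsigned int bio_op(const struct bio *bio) { return bio->op; }

static unsigned int bio_flags_to_wire(const struct bio *bio)
{
        return (bio->bi_rw & REQ_SYNC  ? DP_RW_SYNC : 0) |
               (bio->bi_rw & REQ_FUA   ? DP_FUA     : 0) |
               (bio->bi_rw & REQ_FLUSH ? DP_FLUSH   : 0) |
               (bio_op(bio) == REQ_OP_DISCARD ? DP_DISCARD : 0);
}

int main(void)
{
        struct bio bio = { .op = REQ_OP_DISCARD, .bi_rw = REQ_SYNC };

        /* a discarded, synchronous request maps to DP_DISCARD | DP_RW_SYNC */
        printf("dp_flags = 0x%x\n", bio_flags_to_wire(&bio));
        return 0;
}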
drivers/block/drbd/drbd_receiver.c
@@ -1398,7 +1398,8 @@ void drbd_bump_write_ordering(struct drbd_resource *resource, struct drbd_backin
 /* TODO allocate from our own bio_set. */
 int drbd_submit_peer_request(struct drbd_device *device,
                             struct drbd_peer_request *peer_req,
-                            const unsigned rw, const int fault_type)
+                            const unsigned op, const unsigned op_flags,
+                            const int fault_type)
 {
        struct bio *bios = NULL;
        struct bio *bio;
@@ -1450,7 +1451,7 @@ next_bio:
        /* > peer_req->i.sector, unless this is the first bio */
        bio->bi_iter.bi_sector = sector;
        bio->bi_bdev = device->ldev->backing_bdev;
-       bio->bi_rw = rw;
+       bio_set_op_attrs(bio, op, op_flags);
        bio->bi_private = peer_req;
        bio->bi_end_io = drbd_peer_request_endio;
 
@@ -1458,7 +1459,7 @@ next_bio:
        bios = bio;
        ++n_bios;
 
-       if (rw & REQ_DISCARD) {
+       if (op == REQ_OP_DISCARD) {
                bio->bi_iter.bi_size = data_size;
                goto submit;
        }
@@ -1830,7 +1831,8 @@ static int recv_resync_read(struct drbd_peer_device *peer_device, sector_t secto
        spin_unlock_irq(&device->resource->req_lock);
 
        atomic_add(pi->size >> 9, &device->rs_sect_ev);
-       if (drbd_submit_peer_request(device, peer_req, WRITE, DRBD_FAULT_RS_WR) == 0)
+       if (drbd_submit_peer_request(device, peer_req, REQ_OP_WRITE, 0,
+                                    DRBD_FAULT_RS_WR) == 0)
                return 0;
 
        /* don't care for the reason here */
@@ -2152,12 +2154,19 @@ static int wait_for_and_update_peer_seq(struct drbd_peer_device *peer_device, co
 /* see also bio_flags_to_wire()
  * DRBD_REQ_*, because we need to semantically map the flags to data packet
  * flags and back. We may replicate to other kernel versions. */
-static unsigned long wire_flags_to_bio(u32 dpf)
+static unsigned long wire_flags_to_bio_flags(u32 dpf)
 {
        return (dpf & DP_RW_SYNC ? REQ_SYNC : 0) |
                (dpf & DP_FUA ? REQ_FUA : 0) |
-               (dpf & DP_FLUSH ? REQ_FLUSH : 0) |
-               (dpf & DP_DISCARD ? REQ_DISCARD : 0);
+               (dpf & DP_FLUSH ? REQ_FLUSH : 0);
+}
+
+static unsigned long wire_flags_to_bio_op(u32 dpf)
+{
+       if (dpf & DP_DISCARD)
+               return REQ_OP_DISCARD;
+       else
+               return REQ_OP_WRITE;
 }
 
 static void fail_postponed_requests(struct drbd_device *device, sector_t sector,
@@ -2303,7 +2312,7 @@ static int receive_Data(struct drbd_connection *connection, struct packet_info *
        struct drbd_peer_request *peer_req;
        struct p_data *p = pi->data;
        u32 peer_seq = be32_to_cpu(p->seq_num);
-       int rw = WRITE;
+       int op, op_flags;
        u32 dp_flags;
        int err, tp;
 
@@ -2342,14 +2351,15 @@ static int receive_Data(struct drbd_connection *connection, struct packet_info *
                peer_req->flags |= EE_APPLICATION;
 
        dp_flags = be32_to_cpu(p->dp_flags);
-       rw |= wire_flags_to_bio(dp_flags);
+       op = wire_flags_to_bio_op(dp_flags);
+       op_flags = wire_flags_to_bio_flags(dp_flags);
        if (pi->cmd == P_TRIM) {
                struct request_queue *q = bdev_get_queue(device->ldev->backing_bdev);
                peer_req->flags |= EE_IS_TRIM;
                if (!blk_queue_discard(q))
                        peer_req->flags |= EE_IS_TRIM_USE_ZEROOUT;
                D_ASSERT(peer_device, peer_req->i.size > 0);
-               D_ASSERT(peer_device, rw & REQ_DISCARD);
+               D_ASSERT(peer_device, op == REQ_OP_DISCARD);
                D_ASSERT(peer_device, peer_req->pages == NULL);
        } else if (peer_req->pages == NULL) {
                D_ASSERT(device, peer_req->i.size == 0);
@@ -2433,7 +2443,8 @@ static int receive_Data(struct drbd_connection *connection, struct packet_info *
                peer_req->flags |= EE_CALL_AL_COMPLETE_IO;
        }
 
-       err = drbd_submit_peer_request(device, peer_req, rw, DRBD_FAULT_DT_WR);
+       err = drbd_submit_peer_request(device, peer_req, op, op_flags,
+                                      DRBD_FAULT_DT_WR);
        if (!err)
                return 0;
 
@@ -2723,7 +2734,8 @@ submit_for_resync:
 submit:
        update_receiver_timing_details(connection, drbd_submit_peer_request);
        inc_unacked(device);
-       if (drbd_submit_peer_request(device, peer_req, READ, fault_type) == 0)
+       if (drbd_submit_peer_request(device, peer_req, REQ_OP_READ, 0,
+                                    fault_type) == 0)
                return 0;
 
        /* don't care for the reason here */
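On the receiver side, the old wire_flags_to_bio() returned one bitmask that could no longer carry the operation, hence the split above into an op decoder and a flags decoder. A standalone sketch of that decode path follows, using the same illustrative DP_*/REQ_* stand-ins as the sender-side sketch; the real constants and types differ.

/*
 * Receiver-side sketch: wire dp_flags -> (op, op_flags) pair, mirroring
 * wire_flags_to_bio_op()/wire_flags_to_bio_flags().  Stand-in values only.
 */
#include <stdio.h>

enum req_op { REQ_OP_READ, REQ_OP_WRITE, REQ_OP_DISCARD };

#define REQ_SYNC   (1u << 0)
#define REQ_FUA    (1u << 1)
#define REQ_FLUSH  (1u << 2)

#define DP_RW_SYNC (1u << 1)
#define DP_FUA     (1u << 4)
#define DP_FLUSH   (1u << 5)
#define DP_DISCARD (1u << 6)

static unsigned int wire_flags_to_bio_flags(unsigned int dpf)
{
        return (dpf & DP_RW_SYNC ? REQ_SYNC  : 0) |
               (dpf & DP_FUA     ? REQ_FUA   : 0) |
               (dpf & DP_FLUSH   ? REQ_FLUSH : 0);
}

static unsigned int wire_flags_to_bio_op(unsigned int dpf)
{
        /* writes are the default; DP_DISCARD selects the discard op */
        return (dpf & DP_DISCARD) ? REQ_OP_DISCARD : REQ_OP_WRITE;
}

int main(void)
{
        unsigned int dpf = DP_RW_SYNC | DP_DISCARD;

        /* was: rw |= wire_flags_to_bio(dpf); -- op and flags now decode separately */
        printf("op=%u flags=0x%x\n", wire_flags_to_bio_op(dpf),
               wire_flags_to_bio_flags(dpf));
        return 0;
}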
drivers/block/drbd/drbd_worker.c
@@ -174,7 +174,7 @@ void drbd_peer_request_endio(struct bio *bio)
        struct drbd_peer_request *peer_req = bio->bi_private;
        struct drbd_device *device = peer_req->peer_device->device;
        int is_write = bio_data_dir(bio) == WRITE;
-       int is_discard = !!(bio->bi_rw & REQ_DISCARD);
+       int is_discard = !!(bio_op(bio) == REQ_OP_DISCARD);
 
        if (bio->bi_error && __ratelimit(&drbd_ratelimit_state))
                drbd_warn(device, "%s: error=%d s=%llus\n",
@@ -248,7 +248,7 @@ void drbd_request_endio(struct bio *bio)
 
        /* to avoid recursion in __req_mod */
        if (unlikely(bio->bi_error)) {
-               if (bio->bi_rw & REQ_DISCARD)
+               if (bio_op(bio) == REQ_OP_DISCARD)
                        what = (bio->bi_error == -EOPNOTSUPP)
                                ? DISCARD_COMPLETED_NOTSUPP
                                : DISCARD_COMPLETED_WITH_ERROR;
@@ -397,7 +397,8 @@ static int read_for_csum(struct drbd_peer_device *peer_device, sector_t sector,
        spin_unlock_irq(&device->resource->req_lock);
 
        atomic_add(size >> 9, &device->rs_sect_ev);
-       if (drbd_submit_peer_request(device, peer_req, READ, DRBD_FAULT_RS_RD) == 0)
+       if (drbd_submit_peer_request(device, peer_req, REQ_OP_READ, 0,
+                                    DRBD_FAULT_RS_RD) == 0)
                return 0;
 
        /* If it failed because of ENOMEM, retry should help. If it failed