Merge branch 'for-3.14/core' of git://git.kernel.dk/linux-block
Pull core block IO changes from Jens Axboe:
"The major piece in here is the immutable bio_ve series from Kent, the
rest is fairly minor. It was supposed to go in last round, but
various issues pushed it to this release instead. The pull request
contains:
- Various smaller blk-mq fixes from different folks. Nothing major
here, just minor fixes and cleanups.
- Fix for a memory leak in the error path in the block ioctl code
from Christian Engelmayer.
- Header export fix from CaiZhiyong.
- Finally the immutable biovec changes from Kent Overstreet. This
enables some nice future work on making arbitrarily sized bios
possible, and splitting more efficient. Related fixes to immutable
bio_vecs:
- dm-cache immutable fixup from Mike Snitzer.
- btrfs immutable fixup from Muthu Kumar.
- bio-integrity fix from Nic Bellinger, which is also going to stable"
* 'for-3.14/core' of git://git.kernel.dk/linux-block: (44 commits)
xtensa: fixup simdisk driver to work with immutable bio_vecs
block/blk-mq-cpu.c: use hotcpu_notifier()
blk-mq: for_each_* macro correctness
block: Fix memory leak in rw_copy_check_uvector() handling
bio-integrity: Fix bio_integrity_verify segment start bug
block: remove unrelated header files and export symbol
blk-mq: uses page->list incorrectly
blk-mq: use __smp_call_function_single directly
btrfs: fix missing increment of bi_remaining
Revert "block: Warn and free bio if bi_end_io is not set"
block: Warn and free bio if bi_end_io is not set
blk-mq: fix initializing request's start time
block: blk-mq: don't export blk_mq_free_queue()
block: blk-mq: make blk_sync_queue support mq
block: blk-mq: support draining mq queue
dm cache: increment bi_remaining when bi_end_io is restored
block: fixup for generic bio chaining
block: Really silence spurious compiler warnings
block: Silence spurious compiler warnings
block: Kill bio_pair_split()
...
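For illustration only (not part of the commit): the driver-side change the immutable
biovec series asks for is that bios are walked through a copy of bio->bi_iter, with
bio_for_each_segment() yielding a struct bio_vec by value, instead of indexing
bi_io_vec with bi_idx. A minimal sketch, assuming the post-3.14 <linux/bio.h> API:

	/* Illustrative only: old vs. new segment walk in a driver. */
	#include <linux/bio.h>

	static unsigned int count_bytes(struct bio *bio)
	{
		struct bio_vec bv;		/* by value, not a pointer */
		struct bvec_iter iter;		/* replaces the old int index */
		unsigned int bytes = 0;

		/* Old form was: struct bio_vec *bv; int i;
		 *               bio_for_each_segment(bv, bio, i)
		 *                       bytes += bv->bv_len;
		 */
		bio_for_each_segment(bv, bio, iter)
			bytes += bv.bv_len;	/* iter tracks sector, size, index */

		return bytes;			/* equals bio->bi_iter.bi_size */
	}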
[Diff: the web rendering of the patch lost its file headers and +/- markers, so only a
summary of the recoverable changes is kept here. The series converts the block drivers
from the old indexed bio_vec interface (bio->bi_sector, bio->bi_size, bio->bi_idx,
bio_for_each_segment() with an integer index and a struct bio_vec pointer) to the
immutable bvec_iter interface (bio->bi_iter.bi_sector, bio->bi_iter.bi_size,
bio_for_each_segment() with a struct bvec_iter and a struct bio_vec by value,
bio_advance_iter(), bio_iter_last(), bio_chain() and bio_split()).

Affected drivers, inferred from the hunk contents:

  aoe:          struct buf and struct frame drop resid/bv_resid/sector/bv/bcnt/bv_off in
                favour of a struct bvec_iter; skb_fillup() and bvcpy() iterate with
                __bio_for_each_segment(); aoecmd_ata_rw() advances buf->iter with
                bio_advance_iter() instead of walking bio_vecs by hand; completion and
                failure paths (aoe_end_request, aoe_failbuf) use iter.bi_size.
  brd:          brd_make_request() iterates with a bvec_iter and uses bi_iter.bi_sector
                and bi_iter.bi_size.
  drbd:         all bi_sector/bi_size users switch to bi_iter; _drbd_send_bio(),
                _drbd_send_zc_bio(), drbd_csum_bio() and recv_dless_read() iterate with
                a bvec_iter and use bio_iter_last() to decide on MSG_MORE.
  floppy:       buffer_chain_size() and copy_buffer() use a struct bio_vec by value with
                rq_for_each_segment(); __floppy_read_block_0() fills bio.bi_iter.
  loop:         lo_send()/lo_receive() iterate with a bvec_iter; do_bio_filebacked()
                uses bi_iter.bi_sector/bi_size.
  mtip32xx:     mtip_make_request() builds its scatterlist from a bvec_iter walk and
                uses bi_iter.bi_sector.
  nbd:          nbd_send_req()/nbd_read_stat() use a struct bio_vec by value and
                rq_iter_last(bvec, iter).
  nvme:         the open-coded nvme_bio_pair split machinery is deleted;
                nvme_split_and_submit() now uses bio_split() + bio_chain(), and
                nvme_map_bio() iterates with a bvec_iter.
  pktcdvd:      all sector/size users switch to bi_iter; pkt_make_request() is split
                into pkt_make_request_read()/pkt_make_request_write() and splits over
                zone boundaries with bio_split() + bio_chain() instead of bio_pair.
  ps3disk,
  ps3vram:      rq_for_each_segment()/bio_for_each_segment() with a bvec by value.
  rbd:          zero_bio_chain() iterates with a bvec_iter; bio_clone_range() is reduced
                to bio_clone() + bio_advance() plus trimming bi_iter.bi_size.
  rsxx:         rsxx_make_request()/rsxx_dma_queue_bio() use bi_iter and a bvec_iter walk.
  umem:         struct cardinfo tracks a struct bvec_iter instead of
                current_idx/current_sector; add_bio()/process_page() advance it with
                bio_advance_iter().
  xen-blkback,
  xen-blkfront: bi_sector becomes bi_iter.bi_sector; blkif_recover() uses bio_sectors().]