block-5.5-20200103


Merge tag 'block-5.5-20200103' of git://git.kernel.dk/linux-block

Pull block fixes from Jens Axboe:
 "Three fixes in here:

   - Fix for a missing split on default memory boundary mask (4G) (Ming)

   - Fix for multi-page read bio truncate (Ming)

   - Fix for null_blk zone close request handling (Damien)"

* tag 'block-5.5-20200103' of git://git.kernel.dk/linux-block:
  null_blk: Fix REQ_OP_ZONE_CLOSE handling
  block: fix splitting segments on boundary masks
  block: add bio_truncate to fix guard_bio_eod
commit b6b4aafc99 by Linus Torvalds, 2020-01-03 12:11:30 -08:00
5 changed files with 54 additions and 34 deletions
diff --git a/block/bio.c b/block/bio.c

@@ -538,6 +538,45 @@ void zero_fill_bio_iter(struct bio *bio, struct bvec_iter start)
 }
 EXPORT_SYMBOL(zero_fill_bio_iter);
 
+void bio_truncate(struct bio *bio, unsigned new_size)
+{
+        struct bio_vec bv;
+        struct bvec_iter iter;
+        unsigned int done = 0;
+        bool truncated = false;
+
+        if (new_size >= bio->bi_iter.bi_size)
+                return;
+
+        if (bio_data_dir(bio) != READ)
+                goto exit;
+
+        bio_for_each_segment(bv, bio, iter) {
+                if (done + bv.bv_len > new_size) {
+                        unsigned offset;
+
+                        if (!truncated)
+                                offset = new_size - done;
+                        else
+                                offset = 0;
+                        zero_user(bv.bv_page, offset, bv.bv_len - offset);
+                        truncated = true;
+                }
+                done += bv.bv_len;
+        }
+
+ exit:
+        /*
+         * Don't touch bvec table here and make it really immutable, since
+         * fs bio user has to retrieve all pages via bio_for_each_segment_all
+         * in its .end_bio() callback.
+         *
+         * It is enough to truncate bio by updating .bi_size since we can make
+         * correct bvec with the updated .bi_size for drivers.
+         */
+        bio->bi_iter.bi_size = new_size;
+}
+
 /**
  * bio_put - release a reference to a bio
  * @bio: bio to release reference to
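The new helper only zeroes the tail of a short read and then shrinks .bi_size; the bvec table itself is deliberately left untouched. A minimal user-space sketch of that truncate-and-zero pattern (struct seg and truncate_segs are hypothetical stand-ins, not kernel API):

/* Everything past new_size in a segmented read buffer is cleared,
 * then the logical size is reduced, mirroring bio_truncate(). */
#include <stdio.h>
#include <string.h>

struct seg {                    /* stand-in for a bio_vec */
        char *buf;
        unsigned len;
};

static void truncate_segs(struct seg *segs, int nsegs, unsigned size,
                          unsigned new_size)
{
        unsigned done = 0;
        int truncated = 0, i;

        if (new_size >= size)
                return;

        for (i = 0; i < nsegs; i++) {
                if (done + segs[i].len > new_size) {
                        /* first affected segment keeps its head; every
                         * later segment is cleared entirely */
                        unsigned offset = truncated ? 0 : new_size - done;

                        memset(segs[i].buf + offset, 0, segs[i].len - offset);
                        truncated = 1;
                }
                done += segs[i].len;
        }
}

int main(void)
{
        char a[4] = "abcd", b[4] = "efgh";
        struct seg segs[] = { { a, 4 }, { b, 4 } };

        truncate_segs(segs, 2, 8, 5);   /* keep 5 of 8 bytes */
        printf("%.4s %.4s\n", a, b);    /* "abcd e" - tail zeroed */
        return 0;
}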

diff --git a/block/blk-merge.c b/block/blk-merge.c

@@ -157,16 +157,14 @@ static inline unsigned get_max_io_size(struct request_queue *q,
        return sectors & (lbs - 1);
 }
 
-static unsigned get_max_segment_size(const struct request_queue *q,
-                                     unsigned offset)
+static inline unsigned get_max_segment_size(const struct request_queue *q,
+                                            struct page *start_page,
+                                            unsigned long offset)
 {
        unsigned long mask = queue_segment_boundary(q);
 
-       /* default segment boundary mask means no boundary limit */
-       if (mask == BLK_SEG_BOUNDARY_MASK)
-               return queue_max_segment_size(q);
-
-       return min_t(unsigned long, mask - (mask & offset) + 1,
+       offset = mask & (page_to_phys(start_page) + offset);
+       return min_t(unsigned long, mask - offset + 1,
                     queue_max_segment_size(q));
 }
 
@@ -201,7 +199,8 @@ static bool bvec_split_segs(const struct request_queue *q,
        unsigned seg_size = 0;
 
        while (len && *nsegs < max_segs) {
-               seg_size = get_max_segment_size(q, bv->bv_offset + total_len);
+               seg_size = get_max_segment_size(q, bv->bv_page,
+                                               bv->bv_offset + total_len);
                seg_size = min(seg_size, len);
 
                (*nsegs)++;
@@ -419,7 +418,8 @@ static unsigned blk_bvec_map_sg(struct request_queue *q,
 
        while (nbytes > 0) {
                unsigned offset = bvec->bv_offset + total;
-               unsigned len = min(get_max_segment_size(q, offset), nbytes);
+               unsigned len = min(get_max_segment_size(q, bvec->bv_page,
+                                       offset), nbytes);
                struct page *page = bvec->bv_page;
 
                /*
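The point of this change: with the default segment boundary mask (4G - 1) the old code skipped the boundary check entirely, yet a multi-page bvec can still cross a 4G physical boundary. Folding page_to_phys(start_page) into the offset makes the limit apply to the real physical address. A self-contained sketch of the fixed arithmetic (seg_boundary_limit and both constants are illustrative, not kernel code):

/* Bytes allowed in one segment starting at physical address phys+off,
 * given a boundary mask and a per-segment size cap. */
#include <stdio.h>

#define SEG_BOUNDARY_MASK 0xffffffffUL  /* default: 4G - 1 */
#define MAX_SEG_SIZE      0x10000UL     /* 64K, example cap */

static unsigned long seg_boundary_limit(unsigned long phys, unsigned long off)
{
        unsigned long mask = SEG_BOUNDARY_MASK;
        unsigned long left;

        /* distance to the next boundary, as in the fixed kernel code */
        off = mask & (phys + off);
        left = mask - off + 1;
        return left < MAX_SEG_SIZE ? left : MAX_SEG_SIZE;
}

int main(void)
{
        /* a page starting 4K below the 4G line: at most 4K fits */
        printf("%lu\n", seg_boundary_limit(0xfffff000UL, 0));  /* 4096 */
        /* a page far from any boundary: the full 64K is allowed */
        printf("%lu\n", seg_boundary_limit(0x10000000UL, 0));  /* 65536 */
        return 0;
}

With a page 4K below the 4G line the segment is capped at 4096 bytes, which forces exactly the split the old code missed.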

diff --git a/drivers/block/null_blk_zoned.c b/drivers/block/null_blk_zoned.c

@@ -186,7 +186,10 @@ static blk_status_t null_zone_mgmt(struct nullb_cmd *cmd, enum req_opf op,
                if (zone->cond == BLK_ZONE_COND_FULL)
                        return BLK_STS_IOERR;
 
-               zone->cond = BLK_ZONE_COND_CLOSED;
+               if (zone->wp == zone->start)
+                       zone->cond = BLK_ZONE_COND_EMPTY;
+               else
+                       zone->cond = BLK_ZONE_COND_CLOSED;
                break;
        case REQ_OP_ZONE_FINISH:
                if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
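The null_blk fix follows the zoned-device state model: closing a zone whose write pointer never advanced must leave it EMPTY, not CLOSED. A simplified sketch of that transition (types are stand-ins for the kernel's zone structures):

/* Close transition for a zoned device: a zone with wp still at its
 * start goes back to EMPTY instead of CLOSED. */
#include <assert.h>

enum zone_cond { ZONE_EMPTY, ZONE_OPEN, ZONE_CLOSED, ZONE_FULL };

struct zone {
        unsigned long long start, wp;   /* first sector, write pointer */
        enum zone_cond cond;
};

static void zone_close(struct zone *z)
{
        if (z->wp == z->start)          /* nothing written: stay empty */
                z->cond = ZONE_EMPTY;
        else
                z->cond = ZONE_CLOSED;
}

int main(void)
{
        struct zone z = { .start = 0, .wp = 0, .cond = ZONE_OPEN };

        zone_close(&z);
        assert(z.cond == ZONE_EMPTY);   /* the fixed behaviour */
        z.wp = 8;
        zone_close(&z);
        assert(z.cond == ZONE_CLOSED);
        return 0;
}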

diff --git a/fs/buffer.c b/fs/buffer.c

@@ -3034,8 +3034,6 @@ static void end_bio_bh_io_sync(struct bio *bio)
 void guard_bio_eod(int op, struct bio *bio)
 {
        sector_t maxsector;
-       struct bio_vec *bvec = bio_last_bvec_all(bio);
-       unsigned truncated_bytes;
        struct hd_struct *part;
 
        rcu_read_lock();
@@ -3061,28 +3059,7 @@ void guard_bio_eod(int op, struct bio *bio)
        if (likely((bio->bi_iter.bi_size >> 9) <= maxsector))
                return;
 
-       /* Uhhuh. We've got a bio that straddles the device size! */
-       truncated_bytes = bio->bi_iter.bi_size - (maxsector << 9);
-
-       /*
-        * The bio contains more than one segment which spans EOD, just return
-        * and let IO layer turn it into an EIO
-        */
-       if (truncated_bytes > bvec->bv_len)
-               return;
-
-       /* Truncate the bio.. */
-       bio->bi_iter.bi_size -= truncated_bytes;
-       bvec->bv_len -= truncated_bytes;
-
-       /* ..and clear the end of the buffer for reads */
-       if (op == REQ_OP_READ) {
-               struct bio_vec bv;
-
-               mp_bvec_last_segment(bvec, &bv);
-               zero_user(bv.bv_page, bv.bv_offset + bv.bv_len,
-                               truncated_bytes);
-       }
+       bio_truncate(bio, maxsector << 9);
 }
 
 static int submit_bh_wbc(int op, int op_flags, struct buffer_head *bh,
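guard_bio_eod() now reduces to one computation: how many bytes of the bio still fit before the end of the device, with everything past that handed to bio_truncate(). A simplified sketch of the clipping arithmetic (eod_clip is a hypothetical name; the real function also resolves the partition and subtracts the bio's start sector from its size):

/* How many bytes of an I/O starting at 'sector' fit on a device of
 * 'capacity' sectors; anything larger is clipped to the device end. */
#include <stdio.h>

#define SECTOR_SHIFT 9

static unsigned long long eod_clip(unsigned long long capacity,
                                   unsigned long long sector,
                                   unsigned long long bytes)
{
        unsigned long long maxsector = capacity - sector; /* sectors left */

        if ((bytes >> SECTOR_SHIFT) <= maxsector)
                return bytes;                   /* fits: leave it alone */
        return maxsector << SECTOR_SHIFT;       /* clip to end of device */
}

int main(void)
{
        /* 8 sectors requested at sector 1021 of a 1024-sector device:
         * only 3 sectors (1536 bytes) remain, so the bio is truncated. */
        printf("%llu\n", eod_clip(1024, 1021, 8ULL << SECTOR_SHIFT));
        return 0;
}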

diff --git a/include/linux/bio.h b/include/linux/bio.h

@@ -470,6 +470,7 @@ extern struct bio *bio_copy_user_iov(struct request_queue *,
                                                 gfp_t);
 extern int bio_uncopy_user(struct bio *);
 void zero_fill_bio_iter(struct bio *bio, struct bvec_iter iter);
+void bio_truncate(struct bio *bio, unsigned new_size);
 
 static inline void zero_fill_bio(struct bio *bio)
 {