block-5.17-2022-01-21
-----BEGIN PGP SIGNATURE-----

iQJEBAABCAAuFiEEwPw5LcreJtl1+l5K99NY+ylx4KYFAmHqtecQHGF4Ym9lQGtl
cm5lbC5kawAKCRD301j7KXHgph8iD/9nahzCdiPYRE+POHneiZbfaEnBEVFH7cz1
rbEjiAR5EbkLxGZohEkIjbHuZyiF8cP6l8f1D5aEmqiFZfiuib8UOVURk9ZQdEMU
lXnOhEuRopQnGGyzSs0yXdx8rZ8xvijmg2UDjwl/VZ4UMgkyD4NjFqNEjdXkmQPP
pWWDkg4CQJIJ9jYeIKtfwijfeyi2LMkYniZFuwiYTAf+9Zt8OIrg7LtDkHulhMqk
V/c5TSho9p22Hv0q6edQSbWhdm6QZ+MRz71Nsycr9cdvvO1jKoLKlcuXwlhqEB1q
BMkwuJI4hhcauqKtwIqNIM+ulNj8HsPqRxP6n9b4RL017dhDLIrbeiOL0qG3PUNi
VbC7EGvQIqTNp0zeyeIV3xM9jaBMbh+FpCqtzdT1ZKlPI4jOB89x7lXKpG30ixA2
8nWXOiRE+UxXT96EbP6cLS/ykfvMiPqbVOSXdPl9d78R1j+xQVnBdMQoX2Yp/j1Y
qN40Lp2mQgNJjkIiLOZxncx2xSx1/EVTDW1OPEm2Atv/NGxSK5vaN1P+X9DKB3e7
pjpKHhvJuNy6c3yeJs5tyZrBu1zZl1dCMxC3fhK8XNTTWJ3zBiUxicDCsGN7YCwR
5VJ+FbVATrzauBPtT7uQYRFnFePu1RxY5xTCdbg04hgGZmSSIqmJvZSpqp5Nn90s
M0NbwyQrLg==
=cebW
-----END PGP SIGNATURE-----

Merge tag 'block-5.17-2022-01-21' of git://git.kernel.dk/linux-block

Pull block fixes from Jens Axboe:
 "Various little minor fixes that should go into this release:

   - Fix issue with cloned bios and IO accounting (Christoph)

   - Remove redundant assignments (Colin, GuoYong)

   - Fix an issue with the mq-deadline async_depth sysfs interface (me)

   - Fix brd module loading race (Tetsuo)

   - Shared tag map wakeup fix (Laibin)

   - End of bdev read fix (OGAWA)

   - srcu leak fix (Ming)"

* tag 'block-5.17-2022-01-21' of git://git.kernel.dk/linux-block:
  block: fix async_depth sysfs interface for mq-deadline
  block: Fix wrong offset in bio_truncate()
  block: assign bi_bdev for cloned bios in blk_rq_prep_clone
  block: cleanup q->srcu
  block: Remove unnecessary variable assignment
  brd: remove brd_devices_mutex mutex
  aoe: remove redundant assignment on variable n
  loop: remove redundant initialization of pointer node
  blk-mq: fix tag_get wait task can't be awakened
commit 3c7c25038b
@@ -569,7 +569,8 @@ static void bio_truncate(struct bio *bio, unsigned new_size)
 				offset = new_size - done;
 			else
 				offset = 0;
-			zero_user(bv.bv_page, offset, bv.bv_len - offset);
+			zero_user(bv.bv_page, bv.bv_offset + offset,
+				  bv.bv_len - offset);
 			truncated = true;
 		}
 		done += bv.bv_len;
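The hunk above (bio_truncate() in block/bio.c) fixes the zeroing offset for the partial segment at the truncation point: zero_user() takes an offset within the page, so zeroing must start at bv_offset + offset rather than at offset alone, otherwise bytes before the segment's start in the page are clobbered while the real tail survives. A minimal userspace sketch of the corrected arithmetic, with made-up page size and bvec values (not kernel code):

#include <stdio.h>
#include <string.h>

/* Userspace model of a bio_vec: a segment within a page. */
struct bvec { char *page; unsigned int offset; unsigned int len; };

/* Model of zero_user(): zero 'len' bytes starting 'off' bytes into the page. */
static void zero_user(char *page, unsigned int off, unsigned int len)
{
	memset(page + off, 0, len);
}

int main(void)
{
	char page[64];
	/* The segment starts 16 bytes into the page and is 32 bytes long. */
	struct bvec bv = { page, 16, 32 };
	unsigned int done = 0, new_size = 8;	/* keep only 8 bytes of the segment */
	unsigned int offset = new_size - done;

	memset(page, 'A', sizeof(page));

	/* Buggy form: zeroes page[8..39], i.e. bytes *before* the segment starts. */
	/* zero_user(bv.page, offset, bv.len - offset); */

	/* Fixed form: zeroes page[24..47], the tail of the segment itself. */
	zero_user(bv.page, bv.offset + offset, bv.len - offset);

	printf("byte 8=%c (untouched), byte 24=%d (zeroed)\n", page[8], page[24]);
	return 0;
}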
@@ -16,6 +16,21 @@
 #include "blk-mq-sched.h"
 #include "blk-mq-tag.h"
 
+/*
+ * Recalculate wakeup batch when tag is shared by hctx.
+ */
+static void blk_mq_update_wake_batch(struct blk_mq_tags *tags,
+		unsigned int users)
+{
+	if (!users)
+		return;
+
+	sbitmap_queue_recalculate_wake_batch(&tags->bitmap_tags,
+			users);
+	sbitmap_queue_recalculate_wake_batch(&tags->breserved_tags,
+			users);
+}
+
 /*
  * If a previously inactive queue goes active, bump the active user count.
  * We need to do this before try to allocate driver tag, then even if fail
@@ -24,18 +39,26 @@
  */
 bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
 {
+	unsigned int users;
+
 	if (blk_mq_is_shared_tags(hctx->flags)) {
 		struct request_queue *q = hctx->queue;
 
-		if (!test_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags) &&
-		    !test_and_set_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags))
-			atomic_inc(&hctx->tags->active_queues);
+		if (test_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags) ||
+		    test_and_set_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags)) {
+			return true;
+		}
 	} else {
-		if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state) &&
-		    !test_and_set_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
-			atomic_inc(&hctx->tags->active_queues);
+		if (test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state) ||
+		    test_and_set_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state)) {
+			return true;
+		}
 	}
 
+	users = atomic_inc_return(&hctx->tags->active_queues);
+
+	blk_mq_update_wake_batch(hctx->tags, users);
+
 	return true;
 }
 
@@ -56,6 +79,7 @@ void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool include_reserve)
 void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
 {
 	struct blk_mq_tags *tags = hctx->tags;
+	unsigned int users;
 
 	if (blk_mq_is_shared_tags(hctx->flags)) {
 		struct request_queue *q = hctx->queue;
@@ -68,7 +92,9 @@ void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
 			return;
 	}
 
-	atomic_dec(&tags->active_queues);
+	users = atomic_dec_return(&tags->active_queues);
+
+	blk_mq_update_wake_batch(tags, users);
 
 	blk_mq_tag_wakeup_all(tags, false);
 }
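The three hunks above (blk-mq-tag.c) carry the shared-tag wakeup fix: instead of bumping active_queues only on the 0->1 transition of the per-queue flag and never touching the sbitmap wake batch, the code now returns early when the queue is already marked active, otherwise increments the count atomically and recomputes the wake batch for the new number of sharers, with the mirror-image operation on idle. A rough userspace model of that pattern using C11 atomics, with simplified names and no real sbitmap (an illustration only, not the kernel API):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define SBQ_WAKE_BATCH 8	/* assumed kernel default; used here only for the model */

struct tags { atomic_uint active_queues; unsigned int wake_batch; };
struct queue { atomic_flag active; struct tags *tags; };

/* Shrink the wake batch as more queues share the tag space. */
static void update_wake_batch(struct tags *tags, unsigned int users)
{
	unsigned int batch;

	if (!users)
		return;
	batch = (128 /* depth */ + users - 1) / users;
	if (batch < 4)
		batch = 4;
	if (batch > SBQ_WAKE_BATCH)
		batch = SBQ_WAKE_BATCH;
	tags->wake_batch = batch;
}

/* Mark a queue active exactly once; only the winner bumps the count. */
static bool tag_busy(struct queue *q)
{
	unsigned int users;

	if (atomic_flag_test_and_set(&q->active))
		return true;	/* already accounted for */

	users = atomic_fetch_add(&q->tags->active_queues, 1) + 1;
	update_wake_batch(q->tags, users);
	return true;
}

int main(void)
{
	struct tags t = { 0, SBQ_WAKE_BATCH };
	struct queue q1 = { ATOMIC_FLAG_INIT, &t }, q2 = { ATOMIC_FLAG_INIT, &t };

	tag_busy(&q1);
	tag_busy(&q1);	/* second call is a no-op for accounting */
	tag_busy(&q2);
	printf("active=%u wake_batch=%u\n", atomic_load(&t.active_queues), t.wake_batch);
	return 0;
}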
@@ -2976,6 +2976,7 @@ int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
 		bio = bio_clone_fast(bio_src, gfp_mask, bs);
 		if (!bio)
 			goto free_and_out;
+		bio->bi_bdev = rq->q->disk->part0;
 
 		if (bio_ctr && bio_ctr(bio, bio_src, data))
 			goto free_and_out;
@@ -811,6 +811,9 @@ static void blk_release_queue(struct kobject *kobj)
 
 	bioset_exit(&q->bio_split);
 
+	if (blk_queue_has_srcu(q))
+		cleanup_srcu_struct(q->srcu);
+
 	ida_simple_remove(&blk_queue_ida, q->id);
 	call_rcu(&q->rcu_head, blk_free_queue_rcu);
 }
@@ -887,7 +890,6 @@ int blk_register_queue(struct gendisk *disk)
 		kobject_uevent(&q->elevator->kobj, KOBJ_ADD);
 	mutex_unlock(&q->sysfs_lock);
 
-	ret = 0;
 unlock:
 	mutex_unlock(&q->sysfs_dir_lock);
 
@@ -865,7 +865,7 @@ SHOW_JIFFIES(deadline_write_expire_show, dd->fifo_expire[DD_WRITE]);
 SHOW_JIFFIES(deadline_prio_aging_expire_show, dd->prio_aging_expire);
 SHOW_INT(deadline_writes_starved_show, dd->writes_starved);
 SHOW_INT(deadline_front_merges_show, dd->front_merges);
-SHOW_INT(deadline_async_depth_show, dd->front_merges);
+SHOW_INT(deadline_async_depth_show, dd->async_depth);
 SHOW_INT(deadline_fifo_batch_show, dd->fifo_batch);
 #undef SHOW_INT
 #undef SHOW_JIFFIES
@@ -895,7 +895,7 @@ STORE_JIFFIES(deadline_write_expire_store, &dd->fifo_expire[DD_WRITE], 0, INT_MAX);
 STORE_JIFFIES(deadline_prio_aging_expire_store, &dd->prio_aging_expire, 0, INT_MAX);
 STORE_INT(deadline_writes_starved_store, &dd->writes_starved, INT_MIN, INT_MAX);
 STORE_INT(deadline_front_merges_store, &dd->front_merges, 0, 1);
-STORE_INT(deadline_async_depth_store, &dd->front_merges, 1, INT_MAX);
+STORE_INT(deadline_async_depth_store, &dd->async_depth, 1, INT_MAX);
 STORE_INT(deadline_fifo_batch_store, &dd->fifo_batch, 0, INT_MAX);
 #undef STORE_FUNCTION
 #undef STORE_INT
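Both mq-deadline hunks fix the same copy-and-paste slip: the async_depth show and store entries in the sysfs attribute table pointed at dd->front_merges, so reading or writing the async_depth attribute silently operated on the wrong field. A toy model of that table-of-macros pattern, with invented names, showing how the mismatch stays type-correct and compiles cleanly:

#include <stdio.h>

/* Toy scheduler data, loosely modeled on struct deadline_data. */
struct sched_data { int front_merges; int async_depth; };

/* Each SHOW_INT expansion defines one "sysfs show" helper for one field. */
#define SHOW_INT(name, field)					\
static int name(const struct sched_data *d)			\
{								\
	return d->field;					\
}

SHOW_INT(front_merges_show, front_merges)
/* Buggy: SHOW_INT(async_depth_show, front_merges) -- wrong field, still compiles. */
SHOW_INT(async_depth_show, async_depth)	/* fixed: reads the field it names */
#undef SHOW_INT

int main(void)
{
	struct sched_data d = { .front_merges = 1, .async_depth = 48 };

	printf("front_merges=%d async_depth=%d\n",
	       front_merges_show(&d), async_depth_show(&d));
	return 0;
}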
@@ -122,7 +122,7 @@ newtag(struct aoedev *d)
 	register ulong n;
 
 	n = jiffies & 0xffff;
-	return n |= (++d->lasttag & 0x7fff) << 16;
+	return n | (++d->lasttag & 0x7fff) << 16;
 }
 
 static u32
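The aoe change is purely cosmetic: n is a local about to be returned, so "return n |= ..." assigns to a variable that is immediately discarded, while "return n | ..." computes the same tag. A small standalone version of the tag construction (the helper and the values are illustrative, not the driver's real state):

#include <stdio.h>

/* Build a 32-bit tag: low 16 bits from a clock sample, upper bits from a
 * per-device rolling counter, loosely following aoe's newtag(). */
static unsigned int newtag(unsigned long jiffies, unsigned int *lasttag)
{
	unsigned long n = jiffies & 0xffff;

	/* "return n |= x" and "return n | x" return the same value; the
	 * compound assignment only writes to a local that dies here. */
	return n | (++*lasttag & 0x7fff) << 16;
}

int main(void)
{
	unsigned int lasttag = 0;

	printf("tag=0x%08x\n", newtag(123456UL, &lasttag));
	printf("tag=0x%08x\n", newtag(123789UL, &lasttag));
	return 0;
}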
@@ -362,7 +362,6 @@ __setup("ramdisk_size=", ramdisk_size);
  * (should share code eventually).
  */
 static LIST_HEAD(brd_devices);
-static DEFINE_MUTEX(brd_devices_mutex);
 static struct dentry *brd_debugfs_dir;
 
 static int brd_alloc(int i)
@@ -372,21 +371,14 @@ static int brd_alloc(int i)
 	char buf[DISK_NAME_LEN];
 	int err = -ENOMEM;
 
-	mutex_lock(&brd_devices_mutex);
-	list_for_each_entry(brd, &brd_devices, brd_list) {
-		if (brd->brd_number == i) {
-			mutex_unlock(&brd_devices_mutex);
+	list_for_each_entry(brd, &brd_devices, brd_list)
+		if (brd->brd_number == i)
 			return -EEXIST;
-		}
-	}
 	brd = kzalloc(sizeof(*brd), GFP_KERNEL);
-	if (!brd) {
-		mutex_unlock(&brd_devices_mutex);
+	if (!brd)
 		return -ENOMEM;
-	}
 	brd->brd_number = i;
 	list_add_tail(&brd->brd_list, &brd_devices);
-	mutex_unlock(&brd_devices_mutex);
 
 	spin_lock_init(&brd->brd_lock);
 	INIT_RADIX_TREE(&brd->brd_pages, GFP_ATOMIC);
@@ -429,9 +421,7 @@ static int brd_alloc(int i)
 out_cleanup_disk:
 	blk_cleanup_disk(disk);
 out_free_dev:
-	mutex_lock(&brd_devices_mutex);
 	list_del(&brd->brd_list);
-	mutex_unlock(&brd_devices_mutex);
 	kfree(brd);
 	return err;
 }
@@ -441,15 +431,19 @@ static void brd_probe(dev_t dev)
 	brd_alloc(MINOR(dev) / max_part);
 }
 
-static void brd_del_one(struct brd_device *brd)
+static void brd_cleanup(void)
 {
-	del_gendisk(brd->brd_disk);
-	blk_cleanup_disk(brd->brd_disk);
-	brd_free_pages(brd);
-	mutex_lock(&brd_devices_mutex);
-	list_del(&brd->brd_list);
-	mutex_unlock(&brd_devices_mutex);
-	kfree(brd);
+	struct brd_device *brd, *next;
+
+	debugfs_remove_recursive(brd_debugfs_dir);
+
+	list_for_each_entry_safe(brd, next, &brd_devices, brd_list) {
+		del_gendisk(brd->brd_disk);
+		blk_cleanup_disk(brd->brd_disk);
+		brd_free_pages(brd);
+		list_del(&brd->brd_list);
+		kfree(brd);
+	}
 }
 
 static inline void brd_check_and_reset_par(void)
@@ -473,9 +467,18 @@ static inline void brd_check_and_reset_par(void)
 
 static int __init brd_init(void)
 {
-	struct brd_device *brd, *next;
 	int err, i;
 
+	brd_check_and_reset_par();
+
+	brd_debugfs_dir = debugfs_create_dir("ramdisk_pages", NULL);
+
+	for (i = 0; i < rd_nr; i++) {
+		err = brd_alloc(i);
+		if (err)
+			goto out_free;
+	}
+
 	/*
 	 * brd module now has a feature to instantiate underlying device
 	 * structure on-demand, provided that there is an access dev node.
@@ -491,28 +494,16 @@ static int __init brd_init(void)
 	 * dynamically.
 	 */
-
-	if (__register_blkdev(RAMDISK_MAJOR, "ramdisk", brd_probe))
-		return -EIO;
-
-	brd_check_and_reset_par();
-
-	brd_debugfs_dir = debugfs_create_dir("ramdisk_pages", NULL);
-
-	for (i = 0; i < rd_nr; i++) {
-		err = brd_alloc(i);
-		if (err)
-			goto out_free;
+	if (__register_blkdev(RAMDISK_MAJOR, "ramdisk", brd_probe)) {
+		err = -EIO;
+		goto out_free;
 	}
 
 	pr_info("brd: module loaded\n");
 	return 0;
 
 out_free:
 	unregister_blkdev(RAMDISK_MAJOR, "ramdisk");
-	debugfs_remove_recursive(brd_debugfs_dir);
-
-	list_for_each_entry_safe(brd, next, &brd_devices, brd_list)
-		brd_del_one(brd);
+	brd_cleanup();
 
 	pr_info("brd: module NOT loaded !!!\n");
 	return err;
@@ -520,13 +511,9 @@ out_free:
 
 static void __exit brd_exit(void)
 {
-	struct brd_device *brd, *next;
 
 	unregister_blkdev(RAMDISK_MAJOR, "ramdisk");
-	debugfs_remove_recursive(brd_debugfs_dir);
-
-	list_for_each_entry_safe(brd, next, &brd_devices, brd_list)
-		brd_del_one(brd);
+	brd_cleanup();
 
 	pr_info("brd: module unloaded\n");
 }
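Taken together, the brd hunks drop brd_devices_mutex and rework module init so that __register_blkdev() -- which publishes brd_probe() and with it on-demand device creation -- only happens after the debugfs directory and the rd_nr preallocated disks exist, with a single brd_cleanup() unwinding everything on failure. A condensed sketch of the resulting brd_init() control flow, with stubbed-out helpers and error values taken from the diff (not a drop-in for the driver):

#include <stdio.h>

/* Stubs standing in for the real brd helpers. */
static int brd_alloc(int i)              { printf("alloc ram%d\n", i); return 0; }
static void brd_cleanup(void)            { printf("cleanup all devices\n"); }
static int register_probe_hook(void)     { printf("register probe hook\n"); return 0; }
static void unregister_probe_hook(void)  { printf("unregister probe hook\n"); }

static int rd_nr = 2;

/* Order matters: allocate the static devices first, publish the probe
 * hook last, and unwind through one cleanup path on any failure. */
static int brd_init(void)
{
	int err, i;

	for (i = 0; i < rd_nr; i++) {
		err = brd_alloc(i);
		if (err)
			goto out_free;
	}

	if (register_probe_hook()) {
		err = -5;	/* -EIO in the kernel */
		goto out_free;
	}

	printf("brd: module loaded\n");
	return 0;

out_free:
	unregister_probe_hook();
	brd_cleanup();
	printf("brd: module NOT loaded\n");
	return err;
}

int main(void) { return brd_init() ? 1 : 0; }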
@@ -820,7 +820,7 @@ static inline int queue_on_root_worker(struct cgroup_subsys_state *css)
 
 static void loop_queue_work(struct loop_device *lo, struct loop_cmd *cmd)
 {
-	struct rb_node **node = &(lo->worker_tree.rb_node), *parent = NULL;
+	struct rb_node **node, *parent = NULL;
 	struct loop_worker *cur_worker, *worker = NULL;
 	struct work_struct *work;
 	struct list_head *cmd_list;
@@ -415,6 +415,17 @@ static inline void sbitmap_queue_free(struct sbitmap_queue *sbq)
 	sbitmap_free(&sbq->sb);
 }
 
+/**
+ * sbitmap_queue_recalculate_wake_batch() - Recalculate wake batch
+ * @sbq: Bitmap queue to recalculate wake batch.
+ * @users: Number of shares.
+ *
+ * Like sbitmap_queue_update_wake_batch(), this will calculate wake batch
+ * by depth. This interface is for HCTX shared tags or queue shared tags.
+ */
+void sbitmap_queue_recalculate_wake_batch(struct sbitmap_queue *sbq,
+					    unsigned int users);
+
 /**
  * sbitmap_queue_resize() - Resize a &struct sbitmap_queue.
  * @sbq: Bitmap queue to resize.
@@ -457,10 +457,9 @@ int sbitmap_queue_init_node(struct sbitmap_queue *sbq, unsigned int depth,
 }
 EXPORT_SYMBOL_GPL(sbitmap_queue_init_node);
 
-static void sbitmap_queue_update_wake_batch(struct sbitmap_queue *sbq,
-					    unsigned int depth)
+static inline void __sbitmap_queue_update_wake_batch(struct sbitmap_queue *sbq,
+					    unsigned int wake_batch)
 {
-	unsigned int wake_batch = sbq_calc_wake_batch(sbq, depth);
 	int i;
 
 	if (sbq->wake_batch != wake_batch) {
@@ -476,6 +475,26 @@ static void sbitmap_queue_update_wake_batch(struct sbitmap_queue *sbq,
 	}
 }
 
+static void sbitmap_queue_update_wake_batch(struct sbitmap_queue *sbq,
+					    unsigned int depth)
+{
+	unsigned int wake_batch;
+
+	wake_batch = sbq_calc_wake_batch(sbq, depth);
+	__sbitmap_queue_update_wake_batch(sbq, wake_batch);
+}
+
+void sbitmap_queue_recalculate_wake_batch(struct sbitmap_queue *sbq,
+					    unsigned int users)
+{
+	unsigned int wake_batch;
+
+	wake_batch = clamp_val((sbq->sb.depth + users - 1) /
+			users, 4, SBQ_WAKE_BATCH);
+	__sbitmap_queue_update_wake_batch(sbq, wake_batch);
+}
+EXPORT_SYMBOL_GPL(sbitmap_queue_recalculate_wake_batch);
+
 void sbitmap_queue_resize(struct sbitmap_queue *sbq, unsigned int depth)
 {
 	sbitmap_queue_update_wake_batch(sbq, depth);
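The new sbitmap_queue_recalculate_wake_batch() sizes the wake batch as roughly depth divided by the number of sharers, clamped between 4 and SBQ_WAKE_BATCH, so a queue that only ever owns a handful of tags can still accumulate enough frees to trigger a wakeup. A quick worked calculation of that clamp (assuming SBQ_WAKE_BATCH is 8, as in the kernel source at the time):

#include <stdio.h>

#define SBQ_WAKE_BATCH 8	/* kernel default at the time; an assumption for this sketch */

/* clamp(ceil(depth / users), 4, SBQ_WAKE_BATCH), as in the diff above. */
static unsigned int recalc_wake_batch(unsigned int depth, unsigned int users)
{
	unsigned int wake_batch = (depth + users - 1) / users;

	if (wake_batch < 4)
		wake_batch = 4;
	if (wake_batch > SBQ_WAKE_BATCH)
		wake_batch = SBQ_WAKE_BATCH;
	return wake_batch;
}

int main(void)
{
	unsigned int users;

	/* 256 tags shared by 1 through 128 queues (powers of two). */
	for (users = 1; users <= 128; users *= 2)
		printf("users=%3u -> wake_batch=%u\n", users, recalc_wake_batch(256, users));
	return 0;
}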