bdi: replace BDI_CAP_STABLE_WRITES with a queue and a sb flag
BDI_CAP_STABLE_WRITES is one of the few bits of information in the backing_dev_info shared between the block drivers and the writeback code. To help untangle that dependency, replace it with a queue flag and a superblock flag derived from it. This also helps with the case of e.g. a file system requiring stable writes due to its own checksumming, but not forcing it on other users of the block device such as the swap code.

One downside is that we can't support the stable_pages_required bdi attribute in sysfs anymore. It is replaced with a queue attribute, which is also writable for easier testing.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Jan Kara <jack@suse.cz>
Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
commit 1cb039f3dc (parent 5115db10a8)
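Taken together, the new convention is: block drivers toggle QUEUE_FLAG_STABLE_WRITES on their request queue, and consumers test either the queue flag (swap, stacking drivers) or the derived superblock flag (page cache writeback). The sketch below is illustrative only and is not part of the commit; my_driver_setup_queue and my_consumer_check are hypothetical helpers that merely show the calls introduced or used by the hunks that follow.

#include <linux/blkdev.h>

/* Hypothetical driver-side helper: mark (or unmark) a queue as needing
 * stable pages, e.g. when the transport or device checksums in-flight data. */
static void my_driver_setup_queue(struct request_queue *q, bool needs_stable)
{
	if (needs_stable)
		blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, q);
	else
		blk_queue_flag_clear(QUEUE_FLAG_STABLE_WRITES, q);
}

/* Hypothetical consumer-side check: users sitting directly on the block
 * device (such as the swap code below) test the queue flag. */
static bool my_consumer_check(struct block_device *bdev)
{
	return blk_queue_stable_writes(bdev->bd_disk->queue);
}

The writeback path no longer looks at the bdi at all: set_bdev_super() copies the queue flag into SB_I_STABLE_WRITES, and wait_for_stable_page() tests only that superblock flag. The old stable_pages_required bdi attribute stays but always reports 0; the live knob is the queue's stable_writes attribute, which is writable for testing.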
@@ -408,7 +408,7 @@ void blk_integrity_register(struct gendisk *disk, struct blk_integrity *template
 	bi->tuple_size = template->tuple_size;
 	bi->tag_size = template->tag_size;
 
-	disk->queue->backing_dev_info->capabilities |= BDI_CAP_STABLE_WRITES;
+	blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, disk->queue);
 
 #ifdef CONFIG_BLK_INLINE_ENCRYPTION
 	if (disk->queue->ksm) {
@@ -428,7 +428,7 @@ EXPORT_SYMBOL(blk_integrity_register);
  */
 void blk_integrity_unregister(struct gendisk *disk)
 {
-	disk->queue->backing_dev_info->capabilities &= ~BDI_CAP_STABLE_WRITES;
+	blk_queue_flag_clear(QUEUE_FLAG_STABLE_WRITES, disk->queue);
 	memset(&disk->queue->integrity, 0, sizeof(struct blk_integrity));
 }
 EXPORT_SYMBOL(blk_integrity_unregister);
@@ -116,6 +116,7 @@ static const char *const blk_queue_flag_name[] = {
 	QUEUE_FLAG_NAME(SAME_FORCE),
 	QUEUE_FLAG_NAME(DEAD),
 	QUEUE_FLAG_NAME(INIT_DONE),
+	QUEUE_FLAG_NAME(STABLE_WRITES),
 	QUEUE_FLAG_NAME(POLL),
 	QUEUE_FLAG_NAME(WC),
 	QUEUE_FLAG_NAME(FUA),
@@ -287,6 +287,7 @@ queue_##name##_store(struct request_queue *q, const char *page, size_t count) \
 QUEUE_SYSFS_BIT_FNS(nonrot, NONROT, 1);
 QUEUE_SYSFS_BIT_FNS(random, ADD_RANDOM, 0);
 QUEUE_SYSFS_BIT_FNS(iostats, IO_STAT, 0);
+QUEUE_SYSFS_BIT_FNS(stable_writes, STABLE_WRITES, 0);
 #undef QUEUE_SYSFS_BIT_FNS
 
 static ssize_t queue_zoned_show(struct request_queue *q, char *page)
@@ -613,6 +614,7 @@ static struct queue_sysfs_entry queue_hw_sector_size_entry = {
 QUEUE_RW_ENTRY(queue_nonrot, "rotational");
 QUEUE_RW_ENTRY(queue_iostats, "iostats");
 QUEUE_RW_ENTRY(queue_random, "add_random");
+QUEUE_RW_ENTRY(queue_stable_writes, "stable_writes");
 
 static struct attribute *queue_attrs[] = {
 	&queue_requests_entry.attr,
@@ -645,6 +647,7 @@ static struct attribute *queue_attrs[] = {
 	&queue_nomerges_entry.attr,
 	&queue_rq_affinity_entry.attr,
 	&queue_iostats_entry.attr,
+	&queue_stable_writes_entry.attr,
 	&queue_random_entry.attr,
 	&queue_poll_entry.attr,
 	&queue_wc_entry.attr,
@@ -5022,7 +5022,7 @@ static int rbd_init_disk(struct rbd_device *rbd_dev)
 	}
 
 	if (!ceph_test_opt(rbd_dev->rbd_client->client, NOCRC))
-		q->backing_dev_info->capabilities |= BDI_CAP_STABLE_WRITES;
+		blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, q);
 
 	/*
 	 * disk_release() expects a queue ref from add_disk() and will
@@ -1955,7 +1955,7 @@ static int zram_add(void)
 	if (ZRAM_LOGICAL_BLOCK_SIZE == PAGE_SIZE)
 		blk_queue_max_write_zeroes_sectors(zram->disk->queue, UINT_MAX);
 
-	zram->disk->queue->backing_dev_info->capabilities |= BDI_CAP_STABLE_WRITES;
+	blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, zram->disk->queue);
 	device_add_disk(NULL, zram->disk, zram_disk_attr_groups);
 
 	strlcpy(zram->compressor, default_compressor, sizeof(zram->compressor));
@@ -1815,7 +1815,7 @@ static int device_requires_stable_pages(struct dm_target *ti,
 {
 	struct request_queue *q = bdev_get_queue(dev->bdev);
 
-	return q && bdi_cap_stable_pages_required(q->backing_dev_info);
+	return q && blk_queue_stable_writes(q);
 }
 
 /*
@@ -1900,9 +1900,9 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
 	 * because they do their own checksumming.
 	 */
 	if (dm_table_requires_stable_pages(t))
-		q->backing_dev_info->capabilities |= BDI_CAP_STABLE_WRITES;
+		blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, q);
 	else
-		q->backing_dev_info->capabilities &= ~BDI_CAP_STABLE_WRITES;
+		blk_queue_flag_clear(QUEUE_FLAG_STABLE_WRITES, q);
 
 	/*
 	 * Determine whether or not this queue's I/O timings contribute
@@ -6638,14 +6638,14 @@ raid5_store_skip_copy(struct mddev *mddev, const char *page, size_t len)
 	if (!conf)
 		err = -ENODEV;
 	else if (new != conf->skip_copy) {
+		struct request_queue *q = mddev->queue;
+
 		mddev_suspend(mddev);
 		conf->skip_copy = new;
 		if (new)
-			mddev->queue->backing_dev_info->capabilities |=
-				BDI_CAP_STABLE_WRITES;
+			blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, q);
 		else
-			mddev->queue->backing_dev_info->capabilities &=
-				~BDI_CAP_STABLE_WRITES;
+			blk_queue_flag_clear(QUEUE_FLAG_STABLE_WRITES, q);
 		mddev_resume(mddev);
 	}
 	mddev_unlock(mddev);
@@ -472,8 +472,7 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card)
 	}
 
 	if (mmc_host_is_spi(host) && host->use_spi_crc)
-		mq->queue->backing_dev_info->capabilities |=
-			BDI_CAP_STABLE_WRITES;
+		blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, mq->queue);
 
 	mq->queue->queuedata = mq;
 	blk_queue_rq_timeout(mq->queue, 60 * HZ);
@@ -3926,8 +3926,7 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
 		goto out_free_ns;
 
 	if (ctrl->opts && ctrl->opts->data_digest)
-		ns->queue->backing_dev_info->capabilities
-			|= BDI_CAP_STABLE_WRITES;
+		blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, ns->queue);
 
 	blk_queue_flag_set(QUEUE_FLAG_NONROT, ns->queue);
 	if (ctrl->ops->flags & NVME_F_PCI_P2PDMA)
@@ -673,13 +673,9 @@ void nvme_mpath_add_disk(struct nvme_ns *ns, struct nvme_id_ns *id)
 		nvme_mpath_set_live(ns);
 	}
 
-	if (bdi_cap_stable_pages_required(ns->queue->backing_dev_info)) {
-		struct gendisk *disk = ns->head->disk;
-
-		if (disk)
-			disk->queue->backing_dev_info->capabilities |=
-				BDI_CAP_STABLE_WRITES;
-	}
+	if (blk_queue_stable_writes(ns->queue) && ns->head->disk)
+		blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES,
+				   ns->head->disk->queue);
 }
 
 void nvme_mpath_remove_disk(struct nvme_ns_head *head)
@@ -962,8 +962,8 @@ static int iscsi_sw_tcp_slave_configure(struct scsi_device *sdev)
 	struct iscsi_conn *conn = session->leadconn;
 
 	if (conn->datadgst_en)
-		sdev->request_queue->backing_dev_info->capabilities
-			|= BDI_CAP_STABLE_WRITES;
+		blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES,
+				   sdev->request_queue);
 	blk_queue_dma_alignment(sdev->request_queue, 0);
 	return 0;
 }
@@ -1256,6 +1256,8 @@ static int set_bdev_super(struct super_block *s, void *data)
 	s->s_dev = s->s_bdev->bd_dev;
 	s->s_bdi = bdi_get(s->s_bdev->bd_bdi);
 
+	if (blk_queue_stable_writes(s->s_bdev->bd_disk->queue))
+		s->s_iflags |= SB_I_STABLE_WRITES;
 	return 0;
 }
 
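The superblock flag is derived from the queue here, but nothing prevents a file system from setting it on its own. As the commit message notes, a file system that does its own checksumming can request stable pages without forcing them on other users of the same block device. A minimal sketch of that pattern (the my_csum_fs_fill_super callback is hypothetical and not part of this commit):

#include <linux/fs.h>

/* Hypothetical fill_super for a checksumming file system: ask for stable
 * pages for its own writeback without touching the queue flag, so swap or
 * other users of the device are not affected. */
static int my_csum_fs_fill_super(struct super_block *sb, void *data, int silent)
{
	sb->s_iflags |= SB_I_STABLE_WRITES;
	/* ... the rest of normal superblock setup ... */
	return 0;
}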
@@ -126,7 +126,6 @@ int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned int max_ratio);
 #define BDI_CAP_NO_ACCT_DIRTY	0x00000001
 #define BDI_CAP_NO_WRITEBACK	0x00000002
 #define BDI_CAP_NO_ACCT_WB	0x00000004
-#define BDI_CAP_STABLE_WRITES	0x00000008
 #define BDI_CAP_STRICTLIMIT	0x00000010
 #define BDI_CAP_CGROUP_WRITEBACK	0x00000020
 
@@ -170,11 +169,6 @@ static inline int wb_congested(struct bdi_writeback *wb, int cong_bits)
 long congestion_wait(int sync, long timeout);
 long wait_iff_congested(int sync, long timeout);
 
-static inline bool bdi_cap_stable_pages_required(struct backing_dev_info *bdi)
-{
-	return bdi->capabilities & BDI_CAP_STABLE_WRITES;
-}
-
 static inline bool bdi_cap_writeback_dirty(struct backing_dev_info *bdi)
 {
 	return !(bdi->capabilities & BDI_CAP_NO_WRITEBACK);
@@ -606,6 +606,7 @@ struct request_queue {
 #define QUEUE_FLAG_SAME_FORCE	12	/* force complete on same CPU */
 #define QUEUE_FLAG_DEAD		13	/* queue tear-down finished */
 #define QUEUE_FLAG_INIT_DONE	14	/* queue is initialized */
+#define QUEUE_FLAG_STABLE_WRITES 15	/* don't modify blks until WB is done */
 #define QUEUE_FLAG_POLL		16	/* IO polling enabled if set */
 #define QUEUE_FLAG_WC		17	/* Write back caching */
 #define QUEUE_FLAG_FUA		18	/* device supports FUA writes */
@@ -635,6 +636,8 @@ bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q);
 #define blk_queue_noxmerges(q)	\
 	test_bit(QUEUE_FLAG_NOXMERGES, &(q)->queue_flags)
 #define blk_queue_nonrot(q)	test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags)
+#define blk_queue_stable_writes(q) \
+	test_bit(QUEUE_FLAG_STABLE_WRITES, &(q)->queue_flags)
 #define blk_queue_io_stat(q)	test_bit(QUEUE_FLAG_IO_STAT, &(q)->queue_flags)
 #define blk_queue_add_random(q)	test_bit(QUEUE_FLAG_ADD_RANDOM, &(q)->queue_flags)
 #define blk_queue_discard(q)	test_bit(QUEUE_FLAG_DISCARD, &(q)->queue_flags)
@@ -1385,6 +1385,7 @@ extern int send_sigurg(struct fown_struct *fown);
 #define SB_I_CGROUPWB	0x00000001	/* cgroup-aware writeback enabled */
 #define SB_I_NOEXEC	0x00000002	/* Ignore executables on this fs */
 #define SB_I_NODEV	0x00000004	/* Ignore devices on this fs */
+#define SB_I_STABLE_WRITES 0x00000008	/* don't modify blks until WB is done */
 
 /* sb->s_iflags to limit user namespace mounts */
 #define SB_I_USERNS_VISIBLE	0x00000010 /* fstype already mounted */
@@ -204,10 +204,9 @@ static ssize_t stable_pages_required_show(struct device *dev,
 					  struct device_attribute *attr,
 					  char *page)
 {
-	struct backing_dev_info *bdi = dev_get_drvdata(dev);
-
-	return snprintf(page, PAGE_SIZE-1, "%d\n",
-			bdi_cap_stable_pages_required(bdi) ? 1 : 0);
+	dev_warn_once(dev,
+		"the stable_pages_required attribute has been removed. Use the stable_writes queue attribute instead.\n");
+	return snprintf(page, PAGE_SIZE-1, "%d\n", 0);
 }
 static DEVICE_ATTR_RO(stable_pages_required);
 
@@ -2849,7 +2849,7 @@ EXPORT_SYMBOL_GPL(wait_on_page_writeback);
  */
 void wait_for_stable_page(struct page *page)
 {
-	if (bdi_cap_stable_pages_required(inode_to_bdi(page->mapping->host)))
+	if (page->mapping->host->i_sb->s_iflags & SB_I_STABLE_WRITES)
 		wait_on_page_writeback(page);
 }
 EXPORT_SYMBOL_GPL(wait_for_stable_page);
@@ -3237,7 +3237,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
 		goto bad_swap_unlock_inode;
 	}
 
-	if (bdi_cap_stable_pages_required(inode_to_bdi(inode)))
+	if (p->bdev && blk_queue_stable_writes(p->bdev->bd_disk->queue))
 		p->flags |= SWP_STABLE_WRITES;
 
 	if (p->bdev && p->bdev->bd_disk->fops->rw_page)