md/raid5: introduce configuration option rmw_level
Depending on the available coding we allow optimized rmw logic for write
operations. To support easier testing, this patch allows manual control
of the rmw/rcw decision through the interface /sys/block/mdX/md/rmw_level.

The configuration can handle three levels of control:

rmw_level=0: Disable rmw for all RAID types. Hardware-assisted P/Q
calculation has no implementation path yet to factor in/out chunks of a
syndrome. Enforcing this level can be beneficial for slow CPUs with
hardware syndrome support and fast SSDs.

rmw_level=1: Estimate rmw IOs and rcw IOs. Execute rmw only if we will
save IOs. This equals the "old" unpatched behaviour and will be the
default.

rmw_level=2: Execute rmw even if the calculated IOs for rmw and rcw are
equal. We might have higher CPU consumption because we calculate the
parity twice, but it can be beneficial otherwise, e.g. for RAID4 with a
fast dedicated parity disk/SSD. The option is implemented just to be
forward-looking and will ONLY work with this patch!

Signed-off-by: Markus Stockhausen <stockhausen@collogia.de>
Signed-off-by: NeilBrown <neilb@suse.de>
commit d06f191f8e (parent 584acdd49c)
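For illustration, here is a minimal userspace sketch (not part of the
patch) that sets the level through the sysfs attribute added below. The
array name md0 is an assumption; substitute your own mdX device.

/*
 * Hypothetical usage sketch: write a new rmw_level through sysfs.
 * Assumes an array named md0; not part of this patch.
 */
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	const char *path = "/sys/block/md0/md/rmw_level";
	FILE *f = fopen(path, "w");

	if (!f) {
		perror(path);
		return EXIT_FAILURE;
	}
	/* 0 = disable rmw, 1 = cost-based choice (default), 2 = prefer rmw */
	if (fprintf(f, "2\n") < 0)
		perror("write");
	fclose(f);
	return EXIT_SUCCESS;
}

The kernel-side parser (kstrtoul in the store handler below) tolerates
the trailing newline, so writing "2\n" as echo would is fine.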
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -5879,6 +5879,49 @@ raid5_stripecache_size = __ATTR(stripe_cache_size, S_IRUGO | S_IWUSR,
 				raid5_show_stripe_cache_size,
 				raid5_store_stripe_cache_size);
+
+static ssize_t
+raid5_show_rmw_level(struct mddev *mddev, char *page)
+{
+	struct r5conf *conf = mddev->private;
+	if (conf)
+		return sprintf(page, "%d\n", conf->rmw_level);
+	else
+		return 0;
+}
+
+static ssize_t
+raid5_store_rmw_level(struct mddev *mddev, const char *page, size_t len)
+{
+	struct r5conf *conf = mddev->private;
+	unsigned long new;
+
+	if (!conf)
+		return -ENODEV;
+
+	if (len >= PAGE_SIZE)
+		return -EINVAL;
+
+	if (kstrtoul(page, 10, &new))
+		return -EINVAL;
+
+	if (new != PARITY_DISABLE_RMW && !raid6_call.xor_syndrome)
+		return -EINVAL;
+
+	if (new != PARITY_DISABLE_RMW &&
+	    new != PARITY_ENABLE_RMW &&
+	    new != PARITY_PREFER_RMW)
+		return -EINVAL;
+
+	conf->rmw_level = new;
+	return len;
+}
+
+static struct md_sysfs_entry
+raid5_rmw_level = __ATTR(rmw_level, S_IRUGO | S_IWUSR,
+			 raid5_show_rmw_level,
+			 raid5_store_rmw_level);
+
 
 static ssize_t
 raid5_show_preread_threshold(struct mddev *mddev, char *page)
 {
@@ -6065,6 +6108,7 @@ static struct attribute *raid5_attrs[] = {
 	&raid5_preread_bypass_threshold.attr,
 	&raid5_group_thread_cnt.attr,
 	&raid5_skip_copy.attr,
+	&raid5_rmw_level.attr,
 	NULL,
 };
 static struct attribute_group raid5_attrs_group = {
--- a/drivers/md/raid5.h
+++ b/drivers/md/raid5.h
@@ -362,6 +362,7 @@ enum {
 enum {
 	PARITY_DISABLE_RMW = 0,
 	PARITY_ENABLE_RMW,
+	PARITY_PREFER_RMW,
 };
 
 /*
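The three PARITY_* values map one-to-one onto the levels described in
the commit message. A simplified sketch of the decision they gate,
based purely on that description and not on the kernel's actual
stripe-handling code:

/*
 * Illustrative sketch only: how the three levels could gate the
 * rmw-vs-rcw choice, per the commit message. rmw_ios and rcw_ios
 * stand for the estimated IO counts of each strategy.
 */
enum {	/* mirrors the raid5.h definition above */
	PARITY_DISABLE_RMW = 0,
	PARITY_ENABLE_RMW,
	PARITY_PREFER_RMW,
};

static int want_rmw(int rmw_level, int rmw_ios, int rcw_ios)
{
	switch (rmw_level) {
	case PARITY_DISABLE_RMW:	/* 0: never read-modify-write */
		return 0;
	case PARITY_ENABLE_RMW:		/* 1: rmw only when it saves IOs */
		return rmw_ios < rcw_ios;
	case PARITY_PREFER_RMW:		/* 2: rmw also on an IO-count tie */
		return rmw_ios <= rcw_ios;
	default:
		return 0;
	}
}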