dm,dax: Add dax zero_page_range operation
This patch adds support for dax zero_page_range operation to dm targets.

Signed-off-by: Vivek Goyal <vgoyal@redhat.com>
Acked-by: Mike Snitzer <snitzer@redhat.com>
Link: https://lore.kernel.org/r/20200228163456.1587-5-vgoyal@redhat.com
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
commit cdf6cdcd3b (parent 79fa974ff6)
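For context, a minimal sketch (not part of this patch; the helper name zero_dax_pages is made up) of how a caller zeroes a page-aligned range on a DAX device once this operation is in place: for a device-mapper dax_device the call lands in dm_dax_zero_page_range() added below, which forwards to the target's ->dax_zero_page_range().

/*
 * Illustrative sketch only -- not part of the patch. zero_dax_pages() is a
 * hypothetical helper; dax_read_lock()/dax_read_unlock() and
 * dax_zero_page_range() are the dax core interfaces this series builds on.
 */
#include <linux/dax.h>

static int zero_dax_pages(struct dax_device *dax_dev, pgoff_t pgoff,
                          size_t nr_pages)
{
        int id, ret;

        id = dax_read_lock();   /* pin the dax_device while it is in use */
        /* For a dm device this dispatches to dm_dax_zero_page_range(). */
        ret = dax_zero_page_range(dax_dev, pgoff, nr_pages);
        dax_read_unlock(id);
        return ret;
}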
--- a/drivers/md/dm-linear.c
+++ b/drivers/md/dm-linear.c
@@ -201,10 +201,27 @@ static size_t linear_dax_copy_to_iter(struct dm_target *ti, pgoff_t pgoff,
 	return dax_copy_to_iter(dax_dev, pgoff, addr, bytes, i);
 }
 
+static int linear_dax_zero_page_range(struct dm_target *ti, pgoff_t pgoff,
+				      size_t nr_pages)
+{
+	int ret;
+	struct linear_c *lc = ti->private;
+	struct block_device *bdev = lc->dev->bdev;
+	struct dax_device *dax_dev = lc->dev->dax_dev;
+	sector_t dev_sector, sector = pgoff * PAGE_SECTORS;
+
+	dev_sector = linear_map_sector(ti, sector);
+	ret = bdev_dax_pgoff(bdev, dev_sector, nr_pages << PAGE_SHIFT, &pgoff);
+	if (ret)
+		return ret;
+	return dax_zero_page_range(dax_dev, pgoff, nr_pages);
+}
+
 #else
 #define linear_dax_direct_access NULL
 #define linear_dax_copy_from_iter NULL
 #define linear_dax_copy_to_iter NULL
+#define linear_dax_zero_page_range NULL
 #endif
 
 static struct target_type linear_target = {
@@ -226,6 +243,7 @@ static struct target_type linear_target = {
 	.direct_access = linear_dax_direct_access,
 	.dax_copy_from_iter = linear_dax_copy_from_iter,
 	.dax_copy_to_iter = linear_dax_copy_to_iter,
+	.dax_zero_page_range = linear_dax_zero_page_range,
 };
 
 int __init dm_linear_init(void)
--- a/drivers/md/dm-log-writes.c
+++ b/drivers/md/dm-log-writes.c
@@ -994,10 +994,26 @@ static size_t log_writes_dax_copy_to_iter(struct dm_target *ti,
 	return dax_copy_to_iter(lc->dev->dax_dev, pgoff, addr, bytes, i);
 }
 
+static int log_writes_dax_zero_page_range(struct dm_target *ti, pgoff_t pgoff,
+					  size_t nr_pages)
+{
+	int ret;
+	struct log_writes_c *lc = ti->private;
+	sector_t sector = pgoff * PAGE_SECTORS;
+
+	ret = bdev_dax_pgoff(lc->dev->bdev, sector, nr_pages << PAGE_SHIFT,
+			     &pgoff);
+	if (ret)
+		return ret;
+	return dax_zero_page_range(lc->dev->dax_dev, pgoff,
+				   nr_pages << PAGE_SHIFT);
+}
+
 #else
 #define log_writes_dax_direct_access NULL
 #define log_writes_dax_copy_from_iter NULL
 #define log_writes_dax_copy_to_iter NULL
+#define log_writes_dax_zero_page_range NULL
 #endif
 
 static struct target_type log_writes_target = {
@@ -1016,6 +1032,7 @@ static struct target_type log_writes_target = {
 	.direct_access = log_writes_dax_direct_access,
 	.dax_copy_from_iter = log_writes_dax_copy_from_iter,
 	.dax_copy_to_iter = log_writes_dax_copy_to_iter,
+	.dax_zero_page_range = log_writes_dax_zero_page_range,
 };
 
 static int __init dm_log_writes_init(void)
--- a/drivers/md/dm-stripe.c
+++ b/drivers/md/dm-stripe.c
@@ -360,10 +360,32 @@ static size_t stripe_dax_copy_to_iter(struct dm_target *ti, pgoff_t pgoff,
 	return dax_copy_to_iter(dax_dev, pgoff, addr, bytes, i);
 }
 
+static int stripe_dax_zero_page_range(struct dm_target *ti, pgoff_t pgoff,
+				      size_t nr_pages)
+{
+	int ret;
+	sector_t dev_sector, sector = pgoff * PAGE_SECTORS;
+	struct stripe_c *sc = ti->private;
+	struct dax_device *dax_dev;
+	struct block_device *bdev;
+	uint32_t stripe;
+
+	stripe_map_sector(sc, sector, &stripe, &dev_sector);
+	dev_sector += sc->stripe[stripe].physical_start;
+	dax_dev = sc->stripe[stripe].dev->dax_dev;
+	bdev = sc->stripe[stripe].dev->bdev;
+
+	ret = bdev_dax_pgoff(bdev, dev_sector, nr_pages << PAGE_SHIFT, &pgoff);
+	if (ret)
+		return ret;
+	return dax_zero_page_range(dax_dev, pgoff, nr_pages);
+}
+
 #else
 #define stripe_dax_direct_access NULL
 #define stripe_dax_copy_from_iter NULL
 #define stripe_dax_copy_to_iter NULL
+#define stripe_dax_zero_page_range NULL
 #endif
 
 /*
@@ -486,6 +508,7 @@ static struct target_type stripe_target = {
 	.direct_access = stripe_dax_direct_access,
 	.dax_copy_from_iter = stripe_dax_copy_from_iter,
 	.dax_copy_to_iter = stripe_dax_copy_to_iter,
+	.dax_zero_page_range = stripe_dax_zero_page_range,
 };
 
 int __init dm_stripe_init(void)
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1198,6 +1198,35 @@ static size_t dm_dax_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff,
 	return ret;
 }
 
+static int dm_dax_zero_page_range(struct dax_device *dax_dev, pgoff_t pgoff,
+				  size_t nr_pages)
+{
+	struct mapped_device *md = dax_get_private(dax_dev);
+	sector_t sector = pgoff * PAGE_SECTORS;
+	struct dm_target *ti;
+	int ret = -EIO;
+	int srcu_idx;
+
+	ti = dm_dax_get_live_target(md, sector, &srcu_idx);
+
+	if (!ti)
+		goto out;
+	if (WARN_ON(!ti->type->dax_zero_page_range)) {
+		/*
+		 * ->zero_page_range() is mandatory dax operation. If we are
+		 * here, something is wrong.
+		 */
+		dm_put_live_table(md, srcu_idx);
+		goto out;
+	}
+	ret = ti->type->dax_zero_page_range(ti, pgoff, nr_pages);
+
+ out:
+	dm_put_live_table(md, srcu_idx);
+
+	return ret;
+}
+
 /*
  * A target may call dm_accept_partial_bio only from the map routine. It is
  * allowed for all bio types except REQ_PREFLUSH, REQ_OP_ZONE_RESET,
@@ -3199,6 +3228,7 @@ static const struct dax_operations dm_dax_ops = {
 	.dax_supported = dm_dax_supported,
 	.copy_from_iter = dm_dax_copy_from_iter,
 	.copy_to_iter = dm_dax_copy_to_iter,
+	.zero_page_range = dm_dax_zero_page_range,
 };
 
 /*
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -141,6 +141,8 @@ typedef long (*dm_dax_direct_access_fn) (struct dm_target *ti, pgoff_t pgoff,
 		long nr_pages, void **kaddr, pfn_t *pfn);
 typedef size_t (*dm_dax_copy_iter_fn)(struct dm_target *ti, pgoff_t pgoff,
 		void *addr, size_t bytes, struct iov_iter *i);
+typedef int (*dm_dax_zero_page_range_fn)(struct dm_target *ti, pgoff_t pgoff,
+		size_t nr_pages);
 #define PAGE_SECTORS (PAGE_SIZE / 512)
 
 void dm_error(const char *message);
@@ -195,6 +197,7 @@ struct target_type {
 	dm_dax_direct_access_fn direct_access;
 	dm_dax_copy_iter_fn dax_copy_from_iter;
 	dm_dax_copy_iter_fn dax_copy_to_iter;
+	dm_dax_zero_page_range_fn dax_zero_page_range;
 
 	/* For internal device-mapper use. */
 	struct list_head list;
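To illustrate the hook defined above, here is a rough sketch (a hypothetical "example" target and struct example_c, not from the patch) of how a dm target implements and advertises dax_zero_page_range, following the same pattern the patch applies to the linear, stripe and log-writes targets:

/*
 * Illustrative only -- "example"/struct example_c are made-up names.
 */
#include <linux/device-mapper.h>
#include <linux/dax.h>

struct example_c {
        struct dm_dev *dev;     /* underlying dax-capable device */
};

static int example_dax_zero_page_range(struct dm_target *ti, pgoff_t pgoff,
                                       size_t nr_pages)
{
        struct example_c *ec = ti->private;
        sector_t sector = pgoff * PAGE_SECTORS;
        int ret;

        /* Translate the target-relative sector into a pgoff on the device. */
        ret = bdev_dax_pgoff(ec->dev->bdev, sector, nr_pages << PAGE_SHIFT,
                             &pgoff);
        if (ret)
                return ret;
        return dax_zero_page_range(ec->dev->dax_dev, pgoff, nr_pages);
}

static struct target_type example_target = {
        .name                 = "example",
        .version              = {1, 0, 0},
        .module               = THIS_MODULE,
        /* .ctr/.dtr/.map and the other dax ops omitted */
        .dax_zero_page_range  = example_dax_zero_page_range,
};

The pattern is the same in every target: map the target-relative sector onto the underlying device, convert it back to a page offset with bdev_dax_pgoff(), and delegate to dax_zero_page_range() on that device's dax_device.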