Merge branch 'for-4.7/dax' into libnvdimm-for-next

commit 594d6d96ea
drivers/nvdimm/Kconfig
@@ -88,4 +88,17 @@ config NVDIMM_PFN
          Select Y if unsure

+config NVDIMM_DAX
+        bool "NVDIMM DAX: Raw access to persistent memory"
+        default LIBNVDIMM
+        depends on NVDIMM_PFN
+        help
+          Support raw device dax access to a persistent memory
+          namespace.  For environments that want to hard partition
+          persistent memory, this capability provides a mechanism to
+          sub-divide a namespace into character devices that can only be
+          accessed via DAX (mmap(2)).
+
+          Select Y if unsure
+
 endif
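The help text above describes the user-visible model: a device-dax namespace is exposed as a character device that supports mmap(2) but not regular read/write. As a minimal sketch of how an application would consume such a device (the /dev/dax0.0 path and the 2MB mapping size are hypothetical; alignment requirements depend on the namespace configuration):

        /* map a device-dax character device; error handling kept minimal */
        #include <fcntl.h>
        #include <string.h>
        #include <sys/mman.h>
        #include <unistd.h>

        int main(void)
        {
                int fd = open("/dev/dax0.0", O_RDWR);
                if (fd < 0)
                        return 1;

                /* device-dax mappings must be MAP_SHARED and page aligned */
                void *addr = mmap(NULL, 2UL << 20, PROT_READ | PROT_WRITE,
                                MAP_SHARED, fd, 0);
                if (addr == MAP_FAILED)
                        return 1;

                memset(addr, 0, 4096);  /* loads/stores go straight to pmem */
                munmap(addr, 2UL << 20);
                close(fd);
                return 0;
        }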
drivers/nvdimm/Makefile
@@ -23,3 +23,4 @@ libnvdimm-y += label.o
 libnvdimm-$(CONFIG_ND_CLAIM) += claim.o
 libnvdimm-$(CONFIG_BTT) += btt_devs.o
 libnvdimm-$(CONFIG_NVDIMM_PFN) += pfn_devs.o
+libnvdimm-$(CONFIG_NVDIMM_DAX) += dax_devs.o
drivers/nvdimm/blk.c
@@ -21,19 +21,19 @@
 #include <linux/sizes.h>
 #include "nd.h"

-struct nd_blk_device {
-        struct request_queue *queue;
-        struct gendisk *disk;
-        struct nd_namespace_blk *nsblk;
-        struct nd_blk_region *ndbr;
-        size_t disk_size;
-        u32 sector_size;
-        u32 internal_lbasize;
-};
-
-static u32 nd_blk_meta_size(struct nd_blk_device *blk_dev)
+static u32 nsblk_meta_size(struct nd_namespace_blk *nsblk)
 {
-        return blk_dev->nsblk->lbasize - blk_dev->sector_size;
+        return nsblk->lbasize - ((nsblk->lbasize >= 4096) ? 4096 : 512);
+}
+
+static u32 nsblk_internal_lbasize(struct nd_namespace_blk *nsblk)
+{
+        return roundup(nsblk->lbasize, INT_LBASIZE_ALIGNMENT);
+}
+
+static u32 nsblk_sector_size(struct nd_namespace_blk *nsblk)
+{
+        return nsblk->lbasize - nsblk_meta_size(nsblk);
 }

 static resource_size_t to_dev_offset(struct nd_namespace_blk *nsblk,
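These helpers split the namespace's advertised label size (lbasize) into a data sector size plus a per-sector metadata tail, and round the on-media stride up for the hardware. A small user-space sketch of the same arithmetic (assuming INT_LBASIZE_ALIGNMENT is 64, as defined in nd.h):

        #include <stdint.h>
        #include <stdio.h>

        #define INT_LBASIZE_ALIGNMENT 64        /* assumed value from nd.h */

        static uint32_t roundup_u32(uint32_t n, uint32_t a)
        {
                return ((n + a - 1) / a) * a;
        }

        int main(void)
        {
                uint32_t lbasize = 4104;        /* e.g. 4096 data + 8 meta */
                uint32_t sector = (lbasize >= 4096) ? 4096 : 512;
                uint32_t meta = lbasize - sector;
                uint32_t stride = roundup_u32(lbasize, INT_LBASIZE_ALIGNMENT);

                /* prints: sector=4096 meta=8 stride=4160 */
                printf("sector=%u meta=%u stride=%u\n", sector, meta, stride);
                return 0;
        }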
@@ -57,20 +57,29 @@ static resource_size_t to_dev_offset(struct nd_namespace_blk *nsblk,
         return SIZE_MAX;
 }

+static struct nd_blk_region *to_ndbr(struct nd_namespace_blk *nsblk)
+{
+        struct nd_region *nd_region;
+        struct device *parent;
+
+        parent = nsblk->common.dev.parent;
+        nd_region = container_of(parent, struct nd_region, dev);
+        return container_of(nd_region, struct nd_blk_region, nd_region);
+}
+
 #ifdef CONFIG_BLK_DEV_INTEGRITY
-static int nd_blk_rw_integrity(struct nd_blk_device *blk_dev,
-                struct bio_integrity_payload *bip, u64 lba,
-                int rw)
+static int nd_blk_rw_integrity(struct nd_namespace_blk *nsblk,
+                struct bio_integrity_payload *bip, u64 lba, int rw)
 {
-        unsigned int len = nd_blk_meta_size(blk_dev);
+        struct nd_blk_region *ndbr = to_ndbr(nsblk);
+        unsigned int len = nsblk_meta_size(nsblk);
         resource_size_t dev_offset, ns_offset;
-        struct nd_namespace_blk *nsblk;
-        struct nd_blk_region *ndbr;
+        u32 internal_lbasize, sector_size;
         int err = 0;

-        nsblk = blk_dev->nsblk;
-        ndbr = blk_dev->ndbr;
-        ns_offset = lba * blk_dev->internal_lbasize + blk_dev->sector_size;
+        internal_lbasize = nsblk_internal_lbasize(nsblk);
+        sector_size = nsblk_sector_size(nsblk);
+        ns_offset = lba * internal_lbasize + sector_size;
         dev_offset = to_dev_offset(nsblk, ns_offset, len);
         if (dev_offset == SIZE_MAX)
                 return -EIO;
@@ -104,25 +113,26 @@ static int nd_blk_rw_integrity(struct nd_blk_device *blk_dev,
 }

 #else /* CONFIG_BLK_DEV_INTEGRITY */
-static int nd_blk_rw_integrity(struct nd_blk_device *blk_dev,
-                struct bio_integrity_payload *bip, u64 lba,
-                int rw)
+static int nd_blk_rw_integrity(struct nd_namespace_blk *nsblk,
+                struct bio_integrity_payload *bip, u64 lba, int rw)
 {
         return 0;
 }
 #endif

-static int nd_blk_do_bvec(struct nd_blk_device *blk_dev,
+static int nsblk_do_bvec(struct nd_namespace_blk *nsblk,
                 struct bio_integrity_payload *bip, struct page *page,
-                unsigned int len, unsigned int off, int rw,
-                sector_t sector)
+                unsigned int len, unsigned int off, int rw, sector_t sector)
 {
-        struct nd_blk_region *ndbr = blk_dev->ndbr;
+        struct nd_blk_region *ndbr = to_ndbr(nsblk);
         resource_size_t dev_offset, ns_offset;
+        u32 internal_lbasize, sector_size;
         int err = 0;
         void *iobuf;
         u64 lba;

+        internal_lbasize = nsblk_internal_lbasize(nsblk);
+        sector_size = nsblk_sector_size(nsblk);
         while (len) {
                 unsigned int cur_len;

@@ -132,11 +142,11 @@ static int nd_blk_do_bvec(struct nd_blk_device *blk_dev,
                  * Block Window setup/move steps. the do_io routine is capable
                  * of handling len <= PAGE_SIZE.
                  */
-                cur_len = bip ? min(len, blk_dev->sector_size) : len;
+                cur_len = bip ? min(len, sector_size) : len;

-                lba = div_u64(sector << SECTOR_SHIFT, blk_dev->sector_size);
-                ns_offset = lba * blk_dev->internal_lbasize;
-                dev_offset = to_dev_offset(blk_dev->nsblk, ns_offset, cur_len);
+                lba = div_u64(sector << SECTOR_SHIFT, sector_size);
+                ns_offset = lba * internal_lbasize;
+                dev_offset = to_dev_offset(nsblk, ns_offset, cur_len);
                 if (dev_offset == SIZE_MAX)
                         return -EIO;

@@ -147,13 +157,13 @@ static int nd_blk_do_bvec(struct nd_blk_device *blk_dev,
                         return err;

                 if (bip) {
-                        err = nd_blk_rw_integrity(blk_dev, bip, lba, rw);
+                        err = nd_blk_rw_integrity(nsblk, bip, lba, rw);
                         if (err)
                                 return err;
                 }
                 len -= cur_len;
                 off += cur_len;
-                sector += blk_dev->sector_size >> SECTOR_SHIFT;
+                sector += sector_size >> SECTOR_SHIFT;
         }

         return err;
@@ -161,10 +171,8 @@ static int nd_blk_do_bvec(struct nd_blk_device *blk_dev,

 static blk_qc_t nd_blk_make_request(struct request_queue *q, struct bio *bio)
 {
-        struct block_device *bdev = bio->bi_bdev;
-        struct gendisk *disk = bdev->bd_disk;
         struct bio_integrity_payload *bip;
-        struct nd_blk_device *blk_dev;
+        struct nd_namespace_blk *nsblk;
         struct bvec_iter iter;
         unsigned long start;
         struct bio_vec bvec;
@@ -183,17 +191,17 @@ static blk_qc_t nd_blk_make_request(struct request_queue *q, struct bio *bio)
         }

         bip = bio_integrity(bio);
-        blk_dev = disk->private_data;
+        nsblk = q->queuedata;
         rw = bio_data_dir(bio);
         do_acct = nd_iostat_start(bio, &start);
         bio_for_each_segment(bvec, bio, iter) {
                 unsigned int len = bvec.bv_len;

                 BUG_ON(len > PAGE_SIZE);
-                err = nd_blk_do_bvec(blk_dev, bip, bvec.bv_page, len,
+                err = nsblk_do_bvec(nsblk, bip, bvec.bv_page, len,
                                 bvec.bv_offset, rw, iter.bi_sector);
                 if (err) {
-                        dev_info(&blk_dev->nsblk->common.dev,
+                        dev_dbg(&nsblk->common.dev,
                                         "io error in %s sector %lld, len %d,\n",
                                         (rw == READ) ? "READ" : "WRITE",
                                         (unsigned long long) iter.bi_sector, len);
@@ -209,17 +217,16 @@ static blk_qc_t nd_blk_make_request(struct request_queue *q, struct bio *bio)
         return BLK_QC_T_NONE;
 }

-static int nd_blk_rw_bytes(struct nd_namespace_common *ndns,
+static int nsblk_rw_bytes(struct nd_namespace_common *ndns,
                 resource_size_t offset, void *iobuf, size_t n, int rw)
 {
-        struct nd_blk_device *blk_dev = dev_get_drvdata(ndns->claim);
-        struct nd_namespace_blk *nsblk = blk_dev->nsblk;
-        struct nd_blk_region *ndbr = blk_dev->ndbr;
+        struct nd_namespace_blk *nsblk = to_nd_namespace_blk(&ndns->dev);
+        struct nd_blk_region *ndbr = to_ndbr(nsblk);
         resource_size_t dev_offset;

         dev_offset = to_dev_offset(nsblk, offset, n);

-        if (unlikely(offset + n > blk_dev->disk_size)) {
+        if (unlikely(offset + n > nsblk->size)) {
                 dev_WARN_ONCE(&ndns->dev, 1, "request out of range\n");
                 return -EFAULT;
         }
@@ -235,52 +242,66 @@ static const struct block_device_operations nd_blk_fops = {
         .revalidate_disk = nvdimm_revalidate_disk,
 };

-static int nd_blk_attach_disk(struct nd_namespace_common *ndns,
-                struct nd_blk_device *blk_dev)
+static void nd_blk_release_queue(void *q)
+{
+        blk_cleanup_queue(q);
+}
+
+static void nd_blk_release_disk(void *disk)
+{
+        del_gendisk(disk);
+        put_disk(disk);
+}
+
+static int nsblk_attach_disk(struct nd_namespace_blk *nsblk)
 {
+        struct device *dev = &nsblk->common.dev;
         resource_size_t available_disk_size;
+        struct request_queue *q;
         struct gendisk *disk;
         u64 internal_nlba;

-        internal_nlba = div_u64(blk_dev->disk_size, blk_dev->internal_lbasize);
-        available_disk_size = internal_nlba * blk_dev->sector_size;
+        internal_nlba = div_u64(nsblk->size, nsblk_internal_lbasize(nsblk));
+        available_disk_size = internal_nlba * nsblk_sector_size(nsblk);

-        blk_dev->queue = blk_alloc_queue(GFP_KERNEL);
-        if (!blk_dev->queue)
+        q = blk_alloc_queue(GFP_KERNEL);
+        if (!q)
                 return -ENOMEM;
-
-        blk_queue_make_request(blk_dev->queue, nd_blk_make_request);
-        blk_queue_max_hw_sectors(blk_dev->queue, UINT_MAX);
-        blk_queue_bounce_limit(blk_dev->queue, BLK_BOUNCE_ANY);
-        blk_queue_logical_block_size(blk_dev->queue, blk_dev->sector_size);
-        queue_flag_set_unlocked(QUEUE_FLAG_NONROT, blk_dev->queue);
-
-        disk = blk_dev->disk = alloc_disk(0);
-        if (!disk) {
-                blk_cleanup_queue(blk_dev->queue);
+        if (devm_add_action(dev, nd_blk_release_queue, q)) {
+                blk_cleanup_queue(q);
                 return -ENOMEM;
         }

-        disk->driverfs_dev = &ndns->dev;
+        blk_queue_make_request(q, nd_blk_make_request);
+        blk_queue_max_hw_sectors(q, UINT_MAX);
+        blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);
+        blk_queue_logical_block_size(q, nsblk_sector_size(nsblk));
+        queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
+        q->queuedata = nsblk;
+
+        disk = alloc_disk(0);
+        if (!disk)
+                return -ENOMEM;
+        if (devm_add_action(dev, nd_blk_release_disk, disk)) {
+                put_disk(disk);
+                return -ENOMEM;
+        }
+
+        disk->driverfs_dev = dev;
         disk->first_minor = 0;
         disk->fops = &nd_blk_fops;
-        disk->private_data = blk_dev;
-        disk->queue = blk_dev->queue;
+        disk->queue = q;
         disk->flags = GENHD_FL_EXT_DEVT;
-        nvdimm_namespace_disk_name(ndns, disk->disk_name);
+        nvdimm_namespace_disk_name(&nsblk->common, disk->disk_name);
         set_capacity(disk, 0);
         add_disk(disk);

-        if (nd_blk_meta_size(blk_dev)) {
-                int rc = nd_integrity_init(disk, nd_blk_meta_size(blk_dev));
+        if (nsblk_meta_size(nsblk)) {
+                int rc = nd_integrity_init(disk, nsblk_meta_size(nsblk));

-                if (rc) {
-                        del_gendisk(disk);
-                        put_disk(disk);
-                        blk_cleanup_queue(blk_dev->queue);
+                if (rc)
                         return rc;
-                }
         }

         set_capacity(disk, available_disk_size >> SECTOR_SHIFT);
         revalidate_disk(disk);
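The conversions above lean on the devm pattern: instead of the remove path unwinding the queue and disk by hand, each resource registers its own release action at allocation time, and the driver core runs the actions in reverse order when the device is unbound. A minimal sketch of the idiom in an arbitrary driver of this era (the foo_* names are hypothetical):

        /* devm-managed teardown: release actions run LIFO on unbind */
        static void foo_release_queue(void *q)
        {
                blk_cleanup_queue(q);
        }

        static int foo_probe(struct device *dev)
        {
                struct request_queue *q = blk_alloc_queue(GFP_KERNEL);

                if (!q)
                        return -ENOMEM;
                /* from here on, no explicit error-path cleanup is needed */
                if (devm_add_action(dev, foo_release_queue, q)) {
                        blk_cleanup_queue(q);   /* action registration failed */
                        return -ENOMEM;
                }
                return 0;
        }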
@@ -291,56 +312,29 @@ static int nd_blk_probe(struct device *dev)
 {
         struct nd_namespace_common *ndns;
         struct nd_namespace_blk *nsblk;
-        struct nd_blk_device *blk_dev;
-        int rc;

         ndns = nvdimm_namespace_common_probe(dev);
         if (IS_ERR(ndns))
                 return PTR_ERR(ndns);

-        blk_dev = kzalloc(sizeof(*blk_dev), GFP_KERNEL);
-        if (!blk_dev)
-                return -ENOMEM;
-
         nsblk = to_nd_namespace_blk(&ndns->dev);
-        blk_dev->disk_size = nvdimm_namespace_capacity(ndns);
-        blk_dev->ndbr = to_nd_blk_region(dev->parent);
-        blk_dev->nsblk = to_nd_namespace_blk(&ndns->dev);
-        blk_dev->internal_lbasize = roundup(nsblk->lbasize,
-                        INT_LBASIZE_ALIGNMENT);
-        blk_dev->sector_size = ((nsblk->lbasize >= 4096) ? 4096 : 512);
-        dev_set_drvdata(dev, blk_dev);
+        nsblk->size = nvdimm_namespace_capacity(ndns);
+        dev_set_drvdata(dev, nsblk);

-        ndns->rw_bytes = nd_blk_rw_bytes;
+        ndns->rw_bytes = nsblk_rw_bytes;
         if (is_nd_btt(dev))
-                rc = nvdimm_namespace_attach_btt(ndns);
-        else if (nd_btt_probe(ndns, blk_dev) == 0) {
+                return nvdimm_namespace_attach_btt(ndns);
+        else if (nd_btt_probe(dev, ndns) == 0) {
                 /* we'll come back as btt-blk */
-                rc = -ENXIO;
+                return -ENXIO;
         } else
-                rc = nd_blk_attach_disk(ndns, blk_dev);
-        if (rc)
-                kfree(blk_dev);
-        return rc;
-}
-
-static void nd_blk_detach_disk(struct nd_blk_device *blk_dev)
-{
-        del_gendisk(blk_dev->disk);
-        put_disk(blk_dev->disk);
-        blk_cleanup_queue(blk_dev->queue);
+                return nsblk_attach_disk(nsblk);
 }

 static int nd_blk_remove(struct device *dev)
 {
-        struct nd_blk_device *blk_dev = dev_get_drvdata(dev);
-
         if (is_nd_btt(dev))
-                nvdimm_namespace_detach_btt(to_nd_btt(dev)->ndns);
-        else
-                nd_blk_detach_disk(blk_dev);
-        kfree(blk_dev);
-
+                nvdimm_namespace_detach_btt(to_nd_btt(dev));
         return 0;
 }
drivers/nvdimm/btt.c
@@ -1306,7 +1306,7 @@ static struct btt *btt_init(struct nd_btt *nd_btt, unsigned long long rawsize,
         struct btt *btt;
         struct device *dev = &nd_btt->dev;

-        btt = kzalloc(sizeof(struct btt), GFP_KERNEL);
+        btt = devm_kzalloc(dev, sizeof(struct btt), GFP_KERNEL);
         if (!btt)
                 return NULL;

@@ -1321,13 +1321,13 @@ static struct btt *btt_init(struct nd_btt *nd_btt, unsigned long long rawsize,
         ret = discover_arenas(btt);
         if (ret) {
                 dev_err(dev, "init: error in arena_discover: %d\n", ret);
-                goto out_free;
+                return NULL;
         }

         if (btt->init_state != INIT_READY && nd_region->ro) {
                 dev_info(dev, "%s is read-only, unable to init btt metadata\n",
                                 dev_name(&nd_region->dev));
-                goto out_free;
+                return NULL;
         } else if (btt->init_state != INIT_READY) {
                 btt->num_arenas = (rawsize / ARENA_MAX_SIZE) +
                         ((rawsize % ARENA_MAX_SIZE) ? 1 : 0);
@@ -1337,29 +1337,25 @@ static struct btt *btt_init(struct nd_btt *nd_btt, unsigned long long rawsize,
                 ret = create_arenas(btt);
                 if (ret) {
                         dev_info(dev, "init: create_arenas: %d\n", ret);
-                        goto out_free;
+                        return NULL;
                 }

                 ret = btt_meta_init(btt);
                 if (ret) {
                         dev_err(dev, "init: error in meta_init: %d\n", ret);
-                        goto out_free;
+                        return NULL;
                 }
         }

         ret = btt_blk_init(btt);
         if (ret) {
                 dev_err(dev, "init: error in blk_init: %d\n", ret);
-                goto out_free;
+                return NULL;
         }

         btt_debugfs_init(btt);

         return btt;
-
- out_free:
-        kfree(btt);
-        return NULL;
 }

 /**
@@ -1377,7 +1373,6 @@ static void btt_fini(struct btt *btt)
                 btt_blk_cleanup(btt);
                 free_arenas(btt);
                 debugfs_remove_recursive(btt->debugfs_dir);
-                kfree(btt);
         }
 }
@@ -1406,9 +1401,8 @@ int nvdimm_namespace_attach_btt(struct nd_namespace_common *ndns)
 }
 EXPORT_SYMBOL(nvdimm_namespace_attach_btt);

-int nvdimm_namespace_detach_btt(struct nd_namespace_common *ndns)
+int nvdimm_namespace_detach_btt(struct nd_btt *nd_btt)
 {
-        struct nd_btt *nd_btt = to_nd_btt(ndns->claim);
         struct btt *btt = nd_btt->btt;

         btt_fini(btt);
drivers/nvdimm/btt_devs.c
@@ -273,10 +273,10 @@ static int __nd_btt_probe(struct nd_btt *nd_btt,
         return 0;
 }

-int nd_btt_probe(struct nd_namespace_common *ndns, void *drvdata)
+int nd_btt_probe(struct device *dev, struct nd_namespace_common *ndns)
 {
         int rc;
-        struct device *dev;
+        struct device *btt_dev;
         struct btt_sb *btt_sb;
         struct nd_region *nd_region = to_nd_region(ndns->dev.parent);

@@ -284,21 +284,19 @@ int nd_btt_probe(struct nd_namespace_common *ndns, void *drvdata)
                 return -ENODEV;

         nvdimm_bus_lock(&ndns->dev);
-        dev = __nd_btt_create(nd_region, 0, NULL, ndns);
+        btt_dev = __nd_btt_create(nd_region, 0, NULL, ndns);
         nvdimm_bus_unlock(&ndns->dev);
-        if (!dev)
+        if (!btt_dev)
                 return -ENOMEM;
-        dev_set_drvdata(dev, drvdata);
-        btt_sb = kzalloc(sizeof(*btt_sb), GFP_KERNEL);
-        rc = __nd_btt_probe(to_nd_btt(dev), ndns, btt_sb);
-        kfree(btt_sb);
-        dev_dbg(&ndns->dev, "%s: btt: %s\n", __func__,
-                        rc == 0 ? dev_name(dev) : "<none>");
+        btt_sb = devm_kzalloc(dev, sizeof(*btt_sb), GFP_KERNEL);
+        rc = __nd_btt_probe(to_nd_btt(btt_dev), ndns, btt_sb);
+        dev_dbg(dev, "%s: btt: %s\n", __func__,
+                        rc == 0 ? dev_name(btt_dev) : "<none>");
         if (rc < 0) {
-                struct nd_btt *nd_btt = to_nd_btt(dev);
+                struct nd_btt *nd_btt = to_nd_btt(btt_dev);

-                __nd_detach_ndns(dev, &nd_btt->ndns);
-                put_device(dev);
+                __nd_detach_ndns(btt_dev, &nd_btt->ndns);
+                put_device(btt_dev);
         }

         return rc;
drivers/nvdimm/bus.c
@@ -40,6 +40,8 @@ static int to_nd_device_type(struct device *dev)
                 return ND_DEVICE_REGION_PMEM;
         else if (is_nd_blk(dev))
                 return ND_DEVICE_REGION_BLK;
+        else if (is_nd_dax(dev))
+                return ND_DEVICE_DAX_PMEM;
         else if (is_nd_pmem(dev->parent) || is_nd_blk(dev->parent))
                 return nd_region_to_nstype(to_nd_region(dev->parent));

@@ -246,6 +248,8 @@ static void nd_async_device_unregister(void *d, async_cookie_t cookie)

 void __nd_device_register(struct device *dev)
 {
+        if (!dev)
+                return;
         dev->bus = &nvdimm_bus_type;
         get_device(dev);
         async_schedule_domain(nd_async_device_register, dev,
drivers/nvdimm/claim.c
@@ -12,6 +12,7 @@
  */
 #include <linux/device.h>
 #include <linux/sizes.h>
+#include <linux/pmem.h>
 #include "nd-core.h"
 #include "pfn.h"
 #include "btt.h"
@@ -84,6 +85,8 @@ static bool is_idle(struct device *dev, struct nd_namespace_common *ndns)
                 seed = nd_region->btt_seed;
         else if (is_nd_pfn(dev))
                 seed = nd_region->pfn_seed;
+        else if (is_nd_dax(dev))
+                seed = nd_region->dax_seed;

         if (seed == dev || ndns || dev->driver)
                 return false;
@@ -199,3 +202,63 @@ u64 nd_sb_checksum(struct nd_gen_sb *nd_gen_sb)
         return sum;
 }
 EXPORT_SYMBOL(nd_sb_checksum);
+
+static int nsio_rw_bytes(struct nd_namespace_common *ndns,
+                resource_size_t offset, void *buf, size_t size, int rw)
+{
+        struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
+
+        if (unlikely(offset + size > nsio->size)) {
+                dev_WARN_ONCE(&ndns->dev, 1, "request out of range\n");
+                return -EFAULT;
+        }
+
+        if (rw == READ) {
+                unsigned int sz_align = ALIGN(size + (offset & (512 - 1)), 512);
+
+                if (unlikely(is_bad_pmem(&nsio->bb, offset / 512, sz_align)))
+                        return -EIO;
+                return memcpy_from_pmem(buf, nsio->addr + offset, size);
+        } else {
+                memcpy_to_pmem(nsio->addr + offset, buf, size);
+                wmb_pmem();
+        }
+
+        return 0;
+}
+
+int devm_nsio_enable(struct device *dev, struct nd_namespace_io *nsio)
+{
+        struct resource *res = &nsio->res;
+        struct nd_namespace_common *ndns = &nsio->common;
+
+        nsio->size = resource_size(res);
+        if (!devm_request_mem_region(dev, res->start, resource_size(res),
+                                dev_name(dev))) {
+                dev_warn(dev, "could not reserve region %pR\n", res);
+                return -EBUSY;
+        }
+
+        ndns->rw_bytes = nsio_rw_bytes;
+        if (devm_init_badblocks(dev, &nsio->bb))
+                return -ENOMEM;
+        nvdimm_badblocks_populate(to_nd_region(ndns->dev.parent), &nsio->bb,
+                        &nsio->res);
+
+        nsio->addr = devm_memremap(dev, res->start, resource_size(res),
+                        ARCH_MEMREMAP_PMEM);
+        if (IS_ERR(nsio->addr))
+                return PTR_ERR(nsio->addr);
+        return 0;
+}
+EXPORT_SYMBOL_GPL(devm_nsio_enable);
+
+void devm_nsio_disable(struct device *dev, struct nd_namespace_io *nsio)
+{
+        struct resource *res = &nsio->res;
+
+        devm_memunmap(dev, nsio->addr);
+        devm_exit_badblocks(dev, &nsio->bb);
+        devm_release_mem_region(dev, res->start, resource_size(res));
+}
+EXPORT_SYMBOL_GPL(devm_nsio_disable);
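The read path above rejects I/O that overlaps known media errors: badblocks entries are tracked in 512-byte sectors, so the byte request is first widened to whole sectors before the is_bad_pmem() lookup. A user-space sketch of that alignment step (ALIGN here mirrors the kernel macro):

        #include <stdio.h>

        #define ALIGN(x, a)     (((x) + (a) - 1) & ~((a) - 1))

        int main(void)
        {
                unsigned long offset = 700, size = 100;
                /* widen [offset, offset + size) to whole 512-byte sectors */
                unsigned long sz_align = ALIGN(size + (offset & (512 - 1)), 512);

                /* check starts at sector offset/512 = 1 and spans 512 bytes */
                printf("first sector=%lu span=%lu\n", offset / 512, sz_align);
                return 0;
        }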
drivers/nvdimm/dax_devs.c (new file)
@@ -0,0 +1,99 @@
+/*
+ * Copyright(c) 2013-2016 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ */
+#include <linux/device.h>
+#include <linux/sizes.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include "nd-core.h"
+#include "nd.h"
+
+static void nd_dax_release(struct device *dev)
+{
+        struct nd_region *nd_region = to_nd_region(dev->parent);
+        struct nd_dax *nd_dax = to_nd_dax(dev);
+        struct nd_pfn *nd_pfn = &nd_dax->nd_pfn;
+
+        dev_dbg(dev, "%s\n", __func__);
+        nd_detach_ndns(dev, &nd_pfn->ndns);
+        ida_simple_remove(&nd_region->dax_ida, nd_pfn->id);
+        kfree(nd_pfn->uuid);
+        kfree(nd_dax);
+}
+
+static struct device_type nd_dax_device_type = {
+        .name = "nd_dax",
+        .release = nd_dax_release,
+};
+
+bool is_nd_dax(struct device *dev)
+{
+        return dev ? dev->type == &nd_dax_device_type : false;
+}
+EXPORT_SYMBOL(is_nd_dax);
+
+struct nd_dax *to_nd_dax(struct device *dev)
+{
+        struct nd_dax *nd_dax = container_of(dev, struct nd_dax, nd_pfn.dev);
+
+        WARN_ON(!is_nd_dax(dev));
+        return nd_dax;
+}
+EXPORT_SYMBOL(to_nd_dax);
+
+static const struct attribute_group *nd_dax_attribute_groups[] = {
+        &nd_pfn_attribute_group,
+        &nd_device_attribute_group,
+        &nd_numa_attribute_group,
+        NULL,
+};
+
+static struct nd_dax *nd_dax_alloc(struct nd_region *nd_region)
+{
+        struct nd_pfn *nd_pfn;
+        struct nd_dax *nd_dax;
+        struct device *dev;
+
+        nd_dax = kzalloc(sizeof(*nd_dax), GFP_KERNEL);
+        if (!nd_dax)
+                return NULL;
+
+        nd_pfn = &nd_dax->nd_pfn;
+        nd_pfn->id = ida_simple_get(&nd_region->dax_ida, 0, 0, GFP_KERNEL);
+        if (nd_pfn->id < 0) {
+                kfree(nd_dax);
+                return NULL;
+        }
+
+        dev = &nd_pfn->dev;
+        dev_set_name(dev, "dax%d.%d", nd_region->id, nd_pfn->id);
+        dev->groups = nd_dax_attribute_groups;
+        dev->type = &nd_dax_device_type;
+        dev->parent = &nd_region->dev;
+
+        return nd_dax;
+}
+
+struct device *nd_dax_create(struct nd_region *nd_region)
+{
+        struct device *dev = NULL;
+        struct nd_dax *nd_dax;
+
+        if (!is_nd_pmem(&nd_region->dev))
+                return NULL;
+
+        nd_dax = nd_dax_alloc(nd_region);
+        if (nd_dax)
+                dev = nd_pfn_devinit(&nd_dax->nd_pfn, NULL);
+        __nd_device_register(dev);
+        return dev;
+}
drivers/nvdimm/namespace_devs.c
@@ -1288,6 +1288,8 @@ static ssize_t mode_show(struct device *dev,
                 mode = "safe";
         else if (claim && is_nd_pfn(claim))
                 mode = "memory";
+        else if (claim && is_nd_dax(claim))
+                mode = "dax";
         else if (!claim && pmem_should_map_pages(dev))
                 mode = "memory";
         else
@@ -1379,21 +1381,19 @@ struct nd_namespace_common *nvdimm_namespace_common_probe(struct device *dev)
 {
         struct nd_btt *nd_btt = is_nd_btt(dev) ? to_nd_btt(dev) : NULL;
         struct nd_pfn *nd_pfn = is_nd_pfn(dev) ? to_nd_pfn(dev) : NULL;
-        struct nd_namespace_common *ndns;
+        struct nd_dax *nd_dax = is_nd_dax(dev) ? to_nd_dax(dev) : NULL;
+        struct nd_namespace_common *ndns = NULL;
         resource_size_t size;

-        if (nd_btt || nd_pfn) {
-                struct device *host = NULL;
-
-                if (nd_btt) {
-                        host = &nd_btt->dev;
+        if (nd_btt || nd_pfn || nd_dax) {
+                if (nd_btt)
                         ndns = nd_btt->ndns;
-                } else if (nd_pfn) {
-                        host = &nd_pfn->dev;
+                else if (nd_pfn)
                         ndns = nd_pfn->ndns;
-                }
+                else if (nd_dax)
+                        ndns = nd_dax->nd_pfn.ndns;

-                if (!ndns || !host)
+                if (!ndns)
                         return ERR_PTR(-ENODEV);

                 /*
@@ -1404,12 +1404,12 @@ struct nd_namespace_common *nvdimm_namespace_common_probe(struct device *dev)
                 device_unlock(&ndns->dev);
                 if (ndns->dev.driver) {
                         dev_dbg(&ndns->dev, "is active, can't bind %s\n",
-                                        dev_name(host));
+                                        dev_name(dev));
                         return ERR_PTR(-EBUSY);
                 }
-                if (dev_WARN_ONCE(&ndns->dev, ndns->claim != host,
+                if (dev_WARN_ONCE(&ndns->dev, ndns->claim != dev,
                                         "host (%s) vs claim (%s) mismatch\n",
-                                        dev_name(host),
+                                        dev_name(dev),
                                         dev_name(ndns->claim)))
                         return ERR_PTR(-ENXIO);
         } else {
@@ -1784,6 +1784,18 @@ void nd_region_create_blk_seed(struct nd_region *nd_region)
         nd_device_register(nd_region->ns_seed);
 }

+void nd_region_create_dax_seed(struct nd_region *nd_region)
+{
+        WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));
+        nd_region->dax_seed = nd_dax_create(nd_region);
+        /*
+         * Seed creation failures are not fatal, provisioning is simply
+         * disabled until memory becomes available
+         */
+        if (!nd_region->dax_seed)
+                dev_err(&nd_region->dev, "failed to create dax namespace\n");
+}
+
 void nd_region_create_pfn_seed(struct nd_region *nd_region)
 {
         WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));
drivers/nvdimm/nd-core.h
@@ -54,6 +54,7 @@ struct nd_region;
 void nd_region_create_blk_seed(struct nd_region *nd_region);
 void nd_region_create_btt_seed(struct nd_region *nd_region);
 void nd_region_create_pfn_seed(struct nd_region *nd_region);
+void nd_region_create_dax_seed(struct nd_region *nd_region);
 void nd_region_disable(struct nvdimm_bus *nvdimm_bus, struct device *dev);
 int nvdimm_bus_create_ndctl(struct nvdimm_bus *nvdimm_bus);
 void nvdimm_bus_destroy_ndctl(struct nvdimm_bus *nvdimm_bus);
drivers/nvdimm/nd.h
@@ -13,6 +13,7 @@
 #ifndef __ND_H__
 #define __ND_H__
 #include <linux/libnvdimm.h>
+#include <linux/badblocks.h>
 #include <linux/blkdev.h>
 #include <linux/device.h>
 #include <linux/mutex.h>
@@ -100,10 +101,12 @@ struct nd_region {
         struct ida ns_ida;
         struct ida btt_ida;
         struct ida pfn_ida;
+        struct ida dax_ida;
         unsigned long flags;
         struct device *ns_seed;
         struct device *btt_seed;
         struct device *pfn_seed;
+        struct device *dax_seed;
         u16 ndr_mappings;
         u64 ndr_size;
         u64 ndr_start;
@@ -160,6 +163,10 @@ struct nd_pfn {
         struct nd_namespace_common *ndns;
 };

+struct nd_dax {
+        struct nd_pfn nd_pfn;
+};
+
 enum nd_async_mode {
         ND_SYNC,
         ND_ASYNC,
@@ -197,11 +204,12 @@ struct nd_gen_sb {

 u64 nd_sb_checksum(struct nd_gen_sb *sb);
 #if IS_ENABLED(CONFIG_BTT)
-int nd_btt_probe(struct nd_namespace_common *ndns, void *drvdata);
+int nd_btt_probe(struct device *dev, struct nd_namespace_common *ndns);
 bool is_nd_btt(struct device *dev);
 struct device *nd_btt_create(struct nd_region *nd_region);
 #else
-static inline int nd_btt_probe(struct nd_namespace_common *ndns, void *drvdata)
+static inline int nd_btt_probe(struct device *dev,
+                struct nd_namespace_common *ndns)
 {
         return -ENODEV;
 }
@@ -219,12 +227,16 @@ static inline struct device *nd_btt_create(struct nd_region *nd_region)

 struct nd_pfn *to_nd_pfn(struct device *dev);
 #if IS_ENABLED(CONFIG_NVDIMM_PFN)
-int nd_pfn_probe(struct nd_namespace_common *ndns, void *drvdata);
+int nd_pfn_probe(struct device *dev, struct nd_namespace_common *ndns);
 bool is_nd_pfn(struct device *dev);
 struct device *nd_pfn_create(struct nd_region *nd_region);
+struct device *nd_pfn_devinit(struct nd_pfn *nd_pfn,
+                struct nd_namespace_common *ndns);
 int nd_pfn_validate(struct nd_pfn *nd_pfn);
+extern struct attribute_group nd_pfn_attribute_group;
 #else
-static inline int nd_pfn_probe(struct nd_namespace_common *ndns, void *drvdata)
+static inline int nd_pfn_probe(struct device *dev,
+                struct nd_namespace_common *ndns)
 {
         return -ENODEV;
 }
@@ -245,6 +257,22 @@ static inline int nd_pfn_validate(struct nd_pfn *nd_pfn)
 }
 #endif

+struct nd_dax *to_nd_dax(struct device *dev);
+#if IS_ENABLED(CONFIG_NVDIMM_DAX)
+bool is_nd_dax(struct device *dev);
+struct device *nd_dax_create(struct nd_region *nd_region);
+#else
+static inline bool is_nd_dax(struct device *dev)
+{
+        return false;
+}
+
+static inline struct device *nd_dax_create(struct nd_region *nd_region)
+{
+        return NULL;
+}
+#endif
+
 struct nd_region *to_nd_region(struct device *dev);
 int nd_region_to_nstype(struct nd_region *nd_region);
 int nd_region_register_namespaces(struct nd_region *nd_region, int *err);
@@ -263,11 +291,32 @@ struct resource *nvdimm_allocate_dpa(struct nvdimm_drvdata *ndd,
 resource_size_t nvdimm_namespace_capacity(struct nd_namespace_common *ndns);
 struct nd_namespace_common *nvdimm_namespace_common_probe(struct device *dev);
 int nvdimm_namespace_attach_btt(struct nd_namespace_common *ndns);
-int nvdimm_namespace_detach_btt(struct nd_namespace_common *ndns);
+int nvdimm_namespace_detach_btt(struct nd_btt *nd_btt);
 const char *nvdimm_namespace_disk_name(struct nd_namespace_common *ndns,
                 char *name);
 void nvdimm_badblocks_populate(struct nd_region *nd_region,
                 struct badblocks *bb, const struct resource *res);
+#if IS_ENABLED(CONFIG_ND_CLAIM)
+struct vmem_altmap *nvdimm_setup_pfn(struct nd_pfn *nd_pfn,
+                struct resource *res, struct vmem_altmap *altmap);
+int devm_nsio_enable(struct device *dev, struct nd_namespace_io *nsio);
+void devm_nsio_disable(struct device *dev, struct nd_namespace_io *nsio);
+#else
+static inline struct vmem_altmap *nvdimm_setup_pfn(struct nd_pfn *nd_pfn,
+                struct resource *res, struct vmem_altmap *altmap)
+{
+        return ERR_PTR(-ENXIO);
+}
+static inline int devm_nsio_enable(struct device *dev,
+                struct nd_namespace_io *nsio)
+{
+        return -ENXIO;
+}
+static inline void devm_nsio_disable(struct device *dev,
+                struct nd_namespace_io *nsio)
+{
+}
+#endif
 int nd_blk_region_init(struct nd_region *nd_region);
 void __nd_iostat_start(struct bio *bio, unsigned long *start);
 static inline bool nd_iostat_start(struct bio *bio, unsigned long *start)
@@ -281,6 +330,19 @@ static inline bool nd_iostat_start(struct bio *bio, unsigned long *start)
         return true;
 }
 void nd_iostat_end(struct bio *bio, unsigned long start);
+static inline bool is_bad_pmem(struct badblocks *bb, sector_t sector,
+                unsigned int len)
+{
+        if (bb->count) {
+                sector_t first_bad;
+                int num_bad;
+
+                return !!badblocks_check(bb, sector, len / 512, &first_bad,
+                                &num_bad);
+        }
+
+        return false;
+}
 resource_size_t nd_namespace_blk_validate(struct nd_namespace_blk *nsblk);
 const u8 *nd_dev_to_uuid(struct device *dev);
 bool pmem_should_map_pages(struct device *dev);
drivers/nvdimm/pfn.h
@@ -33,7 +33,9 @@ struct nd_pfn_sb {
         /* minor-version-1 additions for section alignment */
         __le32 start_pad;
         __le32 end_trunc;
-        u8 padding[4004];
+        /* minor-version-2 record the base alignment of the mapping */
+        __le32 align;
+        u8 padding[4000];
         __le64 checksum;
 };

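The new align field is carved out of the existing padding (4004 bytes shrink to 4000 plus a 4-byte __le32), so the info block stays the same size and the checksum keeps its offset; older info blocks remain parseable, and the validate path zeroes align when version_minor < 2. A stand-alone sketch of the layout invariant (abbreviated stand-ins for the struct's tail, packed to make the byte counts explicit):

        #include <assert.h>
        #include <stdint.h>

        struct old_tail {       /* minor version 1 */
                uint32_t start_pad, end_trunc;
                uint8_t padding[4004];
                uint64_t checksum;
        } __attribute__((packed));

        struct new_tail {       /* minor version 2 adds 'align' */
                uint32_t start_pad, end_trunc, align;
                uint8_t padding[4000];
                uint64_t checksum;
        } __attribute__((packed));

        /* splitting 4 bytes out of the padding must not move anything */
        static_assert(sizeof(struct old_tail) == sizeof(struct new_tail),
                        "info block layout changed");

        int main(void) { return 0; }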
drivers/nvdimm/pfn_devs.c
@@ -1,5 +1,5 @@
 /*
- * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013-2016 Intel Corporation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -10,6 +10,7 @@
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  * General Public License for more details.
  */
+#include <linux/memremap.h>
 #include <linux/blkdev.h>
 #include <linux/device.h>
 #include <linux/genhd.h>
@@ -53,10 +54,29 @@ struct nd_pfn *to_nd_pfn(struct device *dev)
 }
 EXPORT_SYMBOL(to_nd_pfn);

+static struct nd_pfn *to_nd_pfn_safe(struct device *dev)
+{
+        /*
+         * pfn device attributes are re-used by dax device instances, so we
+         * need to be careful to correct device-to-nd_pfn conversion.
+         */
+        if (is_nd_pfn(dev))
+                return to_nd_pfn(dev);
+
+        if (is_nd_dax(dev)) {
+                struct nd_dax *nd_dax = to_nd_dax(dev);
+
+                return &nd_dax->nd_pfn;
+        }
+
+        WARN_ON(1);
+        return NULL;
+}
+
 static ssize_t mode_show(struct device *dev,
                 struct device_attribute *attr, char *buf)
 {
-        struct nd_pfn *nd_pfn = to_nd_pfn(dev);
+        struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);

         switch (nd_pfn->mode) {
         case PFN_MODE_RAM:
@@ -71,7 +91,7 @@ static ssize_t mode_show(struct device *dev,
 static ssize_t mode_store(struct device *dev,
                 struct device_attribute *attr, const char *buf, size_t len)
 {
-        struct nd_pfn *nd_pfn = to_nd_pfn(dev);
+        struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
         ssize_t rc = 0;

         device_lock(dev);
@@ -105,7 +125,7 @@ static DEVICE_ATTR_RW(mode);
 static ssize_t align_show(struct device *dev,
                 struct device_attribute *attr, char *buf)
 {
-        struct nd_pfn *nd_pfn = to_nd_pfn(dev);
+        struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);

         return sprintf(buf, "%lx\n", nd_pfn->align);
 }
@@ -133,7 +153,7 @@ static ssize_t __align_store(struct nd_pfn *nd_pfn, const char *buf)
 static ssize_t align_store(struct device *dev,
                 struct device_attribute *attr, const char *buf, size_t len)
 {
-        struct nd_pfn *nd_pfn = to_nd_pfn(dev);
+        struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
         ssize_t rc;

         device_lock(dev);
@@ -151,7 +171,7 @@ static DEVICE_ATTR_RW(align);
 static ssize_t uuid_show(struct device *dev,
                 struct device_attribute *attr, char *buf)
 {
-        struct nd_pfn *nd_pfn = to_nd_pfn(dev);
+        struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);

         if (nd_pfn->uuid)
                 return sprintf(buf, "%pUb\n", nd_pfn->uuid);
@@ -161,7 +181,7 @@ static ssize_t uuid_show(struct device *dev,
 static ssize_t uuid_store(struct device *dev,
                 struct device_attribute *attr, const char *buf, size_t len)
 {
-        struct nd_pfn *nd_pfn = to_nd_pfn(dev);
+        struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
         ssize_t rc;

         device_lock(dev);
@@ -177,7 +197,7 @@ static DEVICE_ATTR_RW(uuid);
 static ssize_t namespace_show(struct device *dev,
                 struct device_attribute *attr, char *buf)
 {
-        struct nd_pfn *nd_pfn = to_nd_pfn(dev);
+        struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
         ssize_t rc;

         nvdimm_bus_lock(dev);
@@ -190,7 +210,7 @@ static ssize_t namespace_show(struct device *dev,
 static ssize_t namespace_store(struct device *dev,
                 struct device_attribute *attr, const char *buf, size_t len)
 {
-        struct nd_pfn *nd_pfn = to_nd_pfn(dev);
+        struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
         ssize_t rc;

         device_lock(dev);
@@ -208,7 +228,7 @@ static DEVICE_ATTR_RW(namespace);
 static ssize_t resource_show(struct device *dev,
                 struct device_attribute *attr, char *buf)
 {
-        struct nd_pfn *nd_pfn = to_nd_pfn(dev);
+        struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
         ssize_t rc;

         device_lock(dev);
@@ -234,7 +254,7 @@ static DEVICE_ATTR_RO(resource);
 static ssize_t size_show(struct device *dev,
                 struct device_attribute *attr, char *buf)
 {
-        struct nd_pfn *nd_pfn = to_nd_pfn(dev);
+        struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
         ssize_t rc;

         device_lock(dev);
@@ -269,7 +289,7 @@ static struct attribute *nd_pfn_attributes[] = {
         NULL,
 };

-static struct attribute_group nd_pfn_attribute_group = {
+struct attribute_group nd_pfn_attribute_group = {
         .attrs = nd_pfn_attributes,
 };

@@ -280,16 +300,32 @@ static const struct attribute_group *nd_pfn_attribute_groups[] = {
         NULL,
 };

-static struct device *__nd_pfn_create(struct nd_region *nd_region,
+struct device *nd_pfn_devinit(struct nd_pfn *nd_pfn,
                 struct nd_namespace_common *ndns)
 {
+        struct device *dev = &nd_pfn->dev;
+
+        if (!nd_pfn)
+                return NULL;
+
+        nd_pfn->mode = PFN_MODE_NONE;
+        nd_pfn->align = HPAGE_SIZE;
+        dev = &nd_pfn->dev;
+        device_initialize(&nd_pfn->dev);
+        if (ndns && !__nd_attach_ndns(&nd_pfn->dev, ndns, &nd_pfn->ndns)) {
+                dev_dbg(&ndns->dev, "%s failed, already claimed by %s\n",
+                                __func__, dev_name(ndns->claim));
+                put_device(dev);
+                return NULL;
+        }
+        return dev;
+}
+
+static struct nd_pfn *nd_pfn_alloc(struct nd_region *nd_region)
+{
         struct nd_pfn *nd_pfn;
         struct device *dev;

-        /* we can only create pages for contiguous ranged of pmem */
-        if (!is_nd_pmem(&nd_region->dev))
-                return NULL;
-
         nd_pfn = kzalloc(sizeof(*nd_pfn), GFP_KERNEL);
         if (!nd_pfn)
                 return NULL;
@@ -300,28 +336,26 @@ static struct device *__nd_pfn_create(struct nd_region *nd_region,
                 return NULL;
         }

-        nd_pfn->mode = PFN_MODE_NONE;
-        nd_pfn->align = HPAGE_SIZE;
         dev = &nd_pfn->dev;
         dev_set_name(dev, "pfn%d.%d", nd_region->id, nd_pfn->id);
-        dev->parent = &nd_region->dev;
-        dev->type = &nd_pfn_device_type;
         dev->groups = nd_pfn_attribute_groups;
-        device_initialize(&nd_pfn->dev);
-        if (ndns && !__nd_attach_ndns(&nd_pfn->dev, ndns, &nd_pfn->ndns)) {
-                dev_dbg(&ndns->dev, "%s failed, already claimed by %s\n",
-                                __func__, dev_name(ndns->claim));
-                put_device(dev);
-                return NULL;
-        }
-        return dev;
+        dev->type = &nd_pfn_device_type;
+        dev->parent = &nd_region->dev;
+
+        return nd_pfn;
 }

 struct device *nd_pfn_create(struct nd_region *nd_region)
 {
-        struct device *dev = __nd_pfn_create(nd_region, NULL);
+        struct nd_pfn *nd_pfn;
+        struct device *dev;
+
+        if (!is_nd_pmem(&nd_region->dev))
+                return NULL;
+
+        nd_pfn = nd_pfn_alloc(nd_region);
+        dev = nd_pfn_devinit(nd_pfn, NULL);

         if (dev)
                 __nd_device_register(dev);
         return dev;
 }
@@ -360,6 +394,9 @@ int nd_pfn_validate(struct nd_pfn *nd_pfn)
                 pfn_sb->end_trunc = 0;
         }

+        if (__le16_to_cpu(pfn_sb->version_minor) < 2)
+                pfn_sb->align = 0;
+
         switch (le32_to_cpu(pfn_sb->mode)) {
         case PFN_MODE_RAM:
         case PFN_MODE_PMEM:
@@ -399,7 +436,7 @@ int nd_pfn_validate(struct nd_pfn *nd_pfn)
                 return -EBUSY;
         }

-        nd_pfn->align = 1UL << ilog2(offset);
+        nd_pfn->align = le32_to_cpu(pfn_sb->align);
         if (!is_power_of_2(offset) || offset < PAGE_SIZE) {
                 dev_err(&nd_pfn->dev, "bad offset: %#llx dax disabled\n",
                                 offset);
@@ -410,11 +447,11 @@ int nd_pfn_validate(struct nd_pfn *nd_pfn)
 }
 EXPORT_SYMBOL(nd_pfn_validate);

-int nd_pfn_probe(struct nd_namespace_common *ndns, void *drvdata)
+int nd_pfn_probe(struct device *dev, struct nd_namespace_common *ndns)
 {
         int rc;
-        struct device *dev;
         struct nd_pfn *nd_pfn;
+        struct device *pfn_dev;
         struct nd_pfn_sb *pfn_sb;
         struct nd_region *nd_region = to_nd_region(ndns->dev.parent);

@@ -422,25 +459,213 @@ int nd_pfn_probe(struct nd_namespace_common *ndns, void *drvdata)
                 return -ENODEV;

         nvdimm_bus_lock(&ndns->dev);
-        dev = __nd_pfn_create(nd_region, ndns);
+        nd_pfn = nd_pfn_alloc(nd_region);
+        pfn_dev = nd_pfn_devinit(nd_pfn, ndns);
         nvdimm_bus_unlock(&ndns->dev);
-        if (!dev)
+        if (!pfn_dev)
                 return -ENOMEM;
-        dev_set_drvdata(dev, drvdata);
-        pfn_sb = kzalloc(sizeof(*pfn_sb), GFP_KERNEL);
-        nd_pfn = to_nd_pfn(dev);
+        pfn_sb = devm_kzalloc(dev, sizeof(*pfn_sb), GFP_KERNEL);
+        nd_pfn = to_nd_pfn(pfn_dev);
         nd_pfn->pfn_sb = pfn_sb;
         rc = nd_pfn_validate(nd_pfn);
-        nd_pfn->pfn_sb = NULL;
-        kfree(pfn_sb);
-        dev_dbg(&ndns->dev, "%s: pfn: %s\n", __func__,
-                        rc == 0 ? dev_name(dev) : "<none>");
+        dev_dbg(dev, "%s: pfn: %s\n", __func__,
+                        rc == 0 ? dev_name(pfn_dev) : "<none>");
         if (rc < 0) {
-                __nd_detach_ndns(dev, &nd_pfn->ndns);
-                put_device(dev);
+                __nd_detach_ndns(pfn_dev, &nd_pfn->ndns);
+                put_device(pfn_dev);
         } else
-                __nd_device_register(&nd_pfn->dev);
+                __nd_device_register(pfn_dev);

         return rc;
 }
 EXPORT_SYMBOL(nd_pfn_probe);
+
+/*
+ * We hotplug memory at section granularity, pad the reserved area from
+ * the previous section base to the namespace base address.
+ */
+static unsigned long init_altmap_base(resource_size_t base)
+{
+        unsigned long base_pfn = PHYS_PFN(base);
+
+        return PFN_SECTION_ALIGN_DOWN(base_pfn);
+}
+
+static unsigned long init_altmap_reserve(resource_size_t base)
+{
+        unsigned long reserve = PHYS_PFN(SZ_8K);
+        unsigned long base_pfn = PHYS_PFN(base);
+
+        reserve += base_pfn - PFN_SECTION_ALIGN_DOWN(base_pfn);
+        return reserve;
+}
+
+static struct vmem_altmap *__nvdimm_setup_pfn(struct nd_pfn *nd_pfn,
+                struct resource *res, struct vmem_altmap *altmap)
+{
+        struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;
+        u64 offset = le64_to_cpu(pfn_sb->dataoff);
+        u32 start_pad = __le32_to_cpu(pfn_sb->start_pad);
+        u32 end_trunc = __le32_to_cpu(pfn_sb->end_trunc);
+        struct nd_namespace_common *ndns = nd_pfn->ndns;
+        struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
+        resource_size_t base = nsio->res.start + start_pad;
+        struct vmem_altmap __altmap = {
+                .base_pfn = init_altmap_base(base),
+                .reserve = init_altmap_reserve(base),
+        };
+
+        memcpy(res, &nsio->res, sizeof(*res));
+        res->start += start_pad;
+        res->end -= end_trunc;
+
+        nd_pfn->mode = le32_to_cpu(nd_pfn->pfn_sb->mode);
+        if (nd_pfn->mode == PFN_MODE_RAM) {
+                if (offset < SZ_8K)
+                        return ERR_PTR(-EINVAL);
+                nd_pfn->npfns = le64_to_cpu(pfn_sb->npfns);
+                altmap = NULL;
+        } else if (nd_pfn->mode == PFN_MODE_PMEM) {
+                nd_pfn->npfns = (resource_size(res) - offset) / PAGE_SIZE;
+                if (le64_to_cpu(nd_pfn->pfn_sb->npfns) > nd_pfn->npfns)
+                        dev_info(&nd_pfn->dev,
+                                        "number of pfns truncated from %lld to %ld\n",
+                                        le64_to_cpu(nd_pfn->pfn_sb->npfns),
+                                        nd_pfn->npfns);
+                memcpy(altmap, &__altmap, sizeof(*altmap));
+                altmap->free = PHYS_PFN(offset - SZ_8K);
+                altmap->alloc = 0;
+        } else
+                return ERR_PTR(-ENXIO);
+
+        return altmap;
+}
+
+static int nd_pfn_init(struct nd_pfn *nd_pfn)
+{
+        u32 dax_label_reserve = is_nd_dax(&nd_pfn->dev) ? SZ_128K : 0;
+        struct nd_namespace_common *ndns = nd_pfn->ndns;
+        u32 start_pad = 0, end_trunc = 0;
+        resource_size_t start, size;
+        struct nd_namespace_io *nsio;
+        struct nd_region *nd_region;
+        struct nd_pfn_sb *pfn_sb;
+        unsigned long npfns;
+        phys_addr_t offset;
+        u64 checksum;
+        int rc;
+
+        pfn_sb = devm_kzalloc(&nd_pfn->dev, sizeof(*pfn_sb), GFP_KERNEL);
+        if (!pfn_sb)
+                return -ENOMEM;
+
+        nd_pfn->pfn_sb = pfn_sb;
+        rc = nd_pfn_validate(nd_pfn);
+        if (rc != -ENODEV)
+                return rc;
+
+        /* no info block, do init */;
+        nd_region = to_nd_region(nd_pfn->dev.parent);
+        if (nd_region->ro) {
+                dev_info(&nd_pfn->dev,
+                                "%s is read-only, unable to init metadata\n",
+                                dev_name(&nd_region->dev));
+                return -ENXIO;
+        }
+
+        memset(pfn_sb, 0, sizeof(*pfn_sb));
+
+        /*
+         * Check if pmem collides with 'System RAM' when section aligned and
+         * trim it accordingly
+         */
+        nsio = to_nd_namespace_io(&ndns->dev);
+        start = PHYS_SECTION_ALIGN_DOWN(nsio->res.start);
+        size = resource_size(&nsio->res);
+        if (region_intersects(start, size, IORESOURCE_SYSTEM_RAM,
+                                IORES_DESC_NONE) == REGION_MIXED) {
+                start = nsio->res.start;
+                start_pad = PHYS_SECTION_ALIGN_UP(start) - start;
+        }
+
+        start = nsio->res.start;
+        size = PHYS_SECTION_ALIGN_UP(start + size) - start;
+        if (region_intersects(start, size, IORESOURCE_SYSTEM_RAM,
+                                IORES_DESC_NONE) == REGION_MIXED) {
+                size = resource_size(&nsio->res);
+                end_trunc = start + size - PHYS_SECTION_ALIGN_DOWN(start + size);
+        }
+
+        if (start_pad + end_trunc)
+                dev_info(&nd_pfn->dev, "%s section collision, truncate %d bytes\n",
+                                dev_name(&ndns->dev), start_pad + end_trunc);
+
+        /*
+         * Note, we use 64 here for the standard size of struct page,
+         * debugging options may cause it to be larger in which case the
+         * implementation will limit the pfns advertised through
+         * ->direct_access() to those that are included in the memmap.
+         */
+        start += start_pad;
+        size = resource_size(&nsio->res);
+        npfns = (size - start_pad - end_trunc - SZ_8K) / SZ_4K;
+        if (nd_pfn->mode == PFN_MODE_PMEM) {
+                unsigned long memmap_size;
+
+                /*
+                 * vmemmap_populate_hugepages() allocates the memmap array in
+                 * HPAGE_SIZE chunks.
+                 */
+                memmap_size = ALIGN(64 * npfns, HPAGE_SIZE);
+                offset = ALIGN(start + SZ_8K + memmap_size + dax_label_reserve,
+                                nd_pfn->align) - start;
+        } else if (nd_pfn->mode == PFN_MODE_RAM)
+                offset = ALIGN(start + SZ_8K + dax_label_reserve,
+                                nd_pfn->align) - start;
+        else
+                return -ENXIO;
+
+        if (offset + start_pad + end_trunc >= size) {
+                dev_err(&nd_pfn->dev, "%s unable to satisfy requested alignment\n",
+                                dev_name(&ndns->dev));
+                return -ENXIO;
+        }
+
+        npfns = (size - offset - start_pad - end_trunc) / SZ_4K;
+        pfn_sb->mode = cpu_to_le32(nd_pfn->mode);
+        pfn_sb->dataoff = cpu_to_le64(offset);
+        pfn_sb->npfns = cpu_to_le64(npfns);
+        memcpy(pfn_sb->signature, PFN_SIG, PFN_SIG_LEN);
+        memcpy(pfn_sb->uuid, nd_pfn->uuid, 16);
+        memcpy(pfn_sb->parent_uuid, nd_dev_to_uuid(&ndns->dev), 16);
+        pfn_sb->version_major = cpu_to_le16(1);
+        pfn_sb->version_minor = cpu_to_le16(2);
+        pfn_sb->start_pad = cpu_to_le32(start_pad);
+        pfn_sb->end_trunc = cpu_to_le32(end_trunc);
+        pfn_sb->align = cpu_to_le32(nd_pfn->align);
+        checksum = nd_sb_checksum((struct nd_gen_sb *) pfn_sb);
+        pfn_sb->checksum = cpu_to_le64(checksum);
+
+        return nvdimm_write_bytes(ndns, SZ_4K, pfn_sb, sizeof(*pfn_sb));
+}
+
+/*
+ * Determine the effective resource range and vmem_altmap from an nd_pfn
+ * instance.
+ */
+struct vmem_altmap *nvdimm_setup_pfn(struct nd_pfn *nd_pfn,
+                struct resource *res, struct vmem_altmap *altmap)
+{
+        int rc;
+
+        if (!nd_pfn->uuid || !nd_pfn->ndns)
+                return ERR_PTR(-ENODEV);
+
+        rc = nd_pfn_init(nd_pfn);
+        if (rc)
+                return ERR_PTR(rc);
+
+        /* we need a valid pfn_sb before we can init a vmem_altmap */
+        return __nvdimm_setup_pfn(nd_pfn, res, altmap);
+}
+EXPORT_SYMBOL_GPL(nvdimm_setup_pfn);
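init_altmap_reserve() captures the geometry: the altmap steals pages from the namespace itself to hold the memmap, reserving the 8K info-block area plus the gap between the section-aligned base and the namespace start. A user-space sketch of the arithmetic (the 128MB section size, i.e. a section shift of 27 as on x86_64, and the base address are assumptions for illustration):

        #include <stdio.h>

        #define PAGE_SHIFT      12
        #define SECTION_SHIFT   27      /* assumed: 128MB x86_64 sections */
        #define PFNS_PER_SECTION (1UL << (SECTION_SHIFT - PAGE_SHIFT))

        int main(void)
        {
                unsigned long long base = 0x148200000ULL;       /* hypothetical */
                unsigned long base_pfn = base >> PAGE_SHIFT;
                unsigned long aligned = base_pfn & ~(PFNS_PER_SECTION - 1);
                /* 8K info block (2 pages) + pad back to the section base */
                unsigned long reserve = 2 + (base_pfn - aligned);

                printf("base_pfn=%lx section_base=%lx reserve=%lu pfns\n",
                                base_pfn, aligned, reserve);
                return 0;
        }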
drivers/nvdimm/pmem.c
@@ -33,10 +33,6 @@
 #include "nd.h"

 struct pmem_device {
-        struct request_queue *pmem_queue;
-        struct gendisk *pmem_disk;
-        struct nd_namespace_common *ndns;
-
         /* One contiguous memory region per device */
         phys_addr_t phys_addr;
         /* when non-zero this device is hosting a 'pfn' instance */
@@ -50,23 +46,10 @@ struct pmem_device {
         struct badblocks bb;
 };

-static bool is_bad_pmem(struct badblocks *bb, sector_t sector, unsigned int len)
-{
-        if (bb->count) {
-                sector_t first_bad;
-                int num_bad;
-
-                return !!badblocks_check(bb, sector, len / 512, &first_bad,
-                                &num_bad);
-        }
-
-        return false;
-}
-
 static void pmem_clear_poison(struct pmem_device *pmem, phys_addr_t offset,
                 unsigned int len)
 {
-        struct device *dev = disk_to_dev(pmem->pmem_disk);
+        struct device *dev = pmem->bb.dev;
         sector_t sector;
         long cleared;

@@ -136,8 +119,7 @@ static blk_qc_t pmem_make_request(struct request_queue *q, struct bio *bio)
         unsigned long start;
         struct bio_vec bvec;
         struct bvec_iter iter;
-        struct block_device *bdev = bio->bi_bdev;
-        struct pmem_device *pmem = bdev->bd_disk->private_data;
+        struct pmem_device *pmem = q->queuedata;

         do_acct = nd_iostat_start(bio, &start);
         bio_for_each_segment(bvec, bio, iter) {
@@ -162,7 +144,7 @@ static blk_qc_t pmem_make_request(struct request_queue *q, struct bio *bio)
 static int pmem_rw_page(struct block_device *bdev, sector_t sector,
                 struct page *page, int rw)
 {
-        struct pmem_device *pmem = bdev->bd_disk->private_data;
+        struct pmem_device *pmem = bdev->bd_queue->queuedata;
         int rc;

         rc = pmem_do_bvec(pmem, page, PAGE_SIZE, 0, rw, sector);
@@ -184,7 +166,7 @@ static int pmem_rw_page(struct block_device *bdev, sector_t sector,
 static long pmem_direct_access(struct block_device *bdev, sector_t sector,
                 void __pmem **kaddr, pfn_t *pfn)
 {
-        struct pmem_device *pmem = bdev->bd_disk->private_data;
+        struct pmem_device *pmem = bdev->bd_queue->queuedata;
         resource_size_t offset = sector * 512 + pmem->data_offset;

         *kaddr = pmem->virt_addr + offset;
@@ -200,104 +182,119 @@ static const struct block_device_operations pmem_fops = {
         .revalidate_disk = nvdimm_revalidate_disk,
 };

-static struct pmem_device *pmem_alloc(struct device *dev,
-                struct resource *res, int id)
+static void pmem_release_queue(void *q)
+{
+        blk_cleanup_queue(q);
+}
+
+void pmem_release_disk(void *disk)
+{
+        del_gendisk(disk);
+        put_disk(disk);
+}
+
+static int pmem_attach_disk(struct device *dev,
+                struct nd_namespace_common *ndns)
 {
+        struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
+        struct vmem_altmap __altmap, *altmap = NULL;
+        struct resource *res = &nsio->res;
+        struct nd_pfn *nd_pfn = NULL;
+        int nid = dev_to_node(dev);
+        struct nd_pfn_sb *pfn_sb;
         struct pmem_device *pmem;
+        struct resource pfn_res;
         struct request_queue *q;
+        struct gendisk *disk;
+        void *addr;
+
+        /* while nsio_rw_bytes is active, parse a pfn info block if present */
+        if (is_nd_pfn(dev)) {
+                nd_pfn = to_nd_pfn(dev);
+                altmap = nvdimm_setup_pfn(nd_pfn, &pfn_res, &__altmap);
+                if (IS_ERR(altmap))
+                        return PTR_ERR(altmap);
+        }
+
+        /* we're attaching a block device, disable raw namespace access */
+        devm_nsio_disable(dev, nsio);

         pmem = devm_kzalloc(dev, sizeof(*pmem), GFP_KERNEL);
         if (!pmem)
-                return ERR_PTR(-ENOMEM);
+                return -ENOMEM;

+        dev_set_drvdata(dev, pmem);
         pmem->phys_addr = res->start;
         pmem->size = resource_size(res);
         if (!arch_has_wmb_pmem())
                 dev_warn(dev, "unable to guarantee persistence of writes\n");

-        if (!devm_request_mem_region(dev, pmem->phys_addr, pmem->size,
+        if (!devm_request_mem_region(dev, res->start, resource_size(res),
                                 dev_name(dev))) {
-                dev_warn(dev, "could not reserve region [0x%pa:0x%zx]\n",
-                                &pmem->phys_addr, pmem->size);
-                return ERR_PTR(-EBUSY);
+                dev_warn(dev, "could not reserve region %pR\n", res);
+                return -EBUSY;
         }

         q = blk_alloc_queue_node(GFP_KERNEL, dev_to_node(dev));
         if (!q)
-                return ERR_PTR(-ENOMEM);
+                return -ENOMEM;

         pmem->pfn_flags = PFN_DEV;
-        if (pmem_should_map_pages(dev)) {
-                pmem->virt_addr = (void __pmem *) devm_memremap_pages(dev, res,
+        if (is_nd_pfn(dev)) {
+                addr = devm_memremap_pages(dev, &pfn_res, &q->q_usage_counter,
+                                altmap);
+                pfn_sb = nd_pfn->pfn_sb;
+                pmem->data_offset = le64_to_cpu(pfn_sb->dataoff);
+                pmem->pfn_pad = resource_size(res) - resource_size(&pfn_res);
+                pmem->pfn_flags |= PFN_MAP;
+                res = &pfn_res; /* for badblocks populate */
+                res->start += pmem->data_offset;
+        } else if (pmem_should_map_pages(dev)) {
+                addr = devm_memremap_pages(dev, &nsio->res,
                                 &q->q_usage_counter, NULL);
                 pmem->pfn_flags |= PFN_MAP;
         } else
-                pmem->virt_addr = (void __pmem *) devm_memremap(dev,
-                                pmem->phys_addr, pmem->size,
-                                ARCH_MEMREMAP_PMEM);
+                addr = devm_memremap(dev, pmem->phys_addr,
+                                pmem->size, ARCH_MEMREMAP_PMEM);

-        if (IS_ERR(pmem->virt_addr)) {
+        /*
+         * At release time the queue must be dead before
+         * devm_memremap_pages is unwound
+         */
+        if (devm_add_action(dev, pmem_release_queue, q)) {
                 blk_cleanup_queue(q);
-                return (void __force *) pmem->virt_addr;
+                return -ENOMEM;
         }

-        pmem->pmem_queue = q;
-        return pmem;
-}
+        if (IS_ERR(addr))
+                return PTR_ERR(addr);
+        pmem->virt_addr = (void __pmem *) addr;

-static void pmem_detach_disk(struct pmem_device *pmem)
-{
-        if (!pmem->pmem_disk)
-                return;
-
-        del_gendisk(pmem->pmem_disk);
-        put_disk(pmem->pmem_disk);
-        blk_cleanup_queue(pmem->pmem_queue);
-}
-
-static int pmem_attach_disk(struct device *dev,
-                struct nd_namespace_common *ndns, struct pmem_device *pmem)
-{
-        struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
-        int nid = dev_to_node(dev);
-        struct resource bb_res;
-        struct gendisk *disk;
-
-        blk_queue_make_request(pmem->pmem_queue, pmem_make_request);
-        blk_queue_physical_block_size(pmem->pmem_queue, PAGE_SIZE);
-        blk_queue_max_hw_sectors(pmem->pmem_queue, UINT_MAX);
-        blk_queue_bounce_limit(pmem->pmem_queue, BLK_BOUNCE_ANY);
-        queue_flag_set_unlocked(QUEUE_FLAG_NONROT, pmem->pmem_queue);
+        blk_queue_make_request(q, pmem_make_request);
+        blk_queue_physical_block_size(q, PAGE_SIZE);
+        blk_queue_max_hw_sectors(q, UINT_MAX);
+        blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);
+        queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
+        q->queuedata = pmem;

         disk = alloc_disk_node(0, nid);
-        if (!disk) {
-                blk_cleanup_queue(pmem->pmem_queue);
+        if (!disk)
                 return -ENOMEM;
+        if (devm_add_action(dev, pmem_release_disk, disk)) {
+                put_disk(disk);
+                return -ENOMEM;
         }

         disk->fops = &pmem_fops;
-        disk->private_data = pmem;
-        disk->queue = pmem->pmem_queue;
+        disk->queue = q;
         disk->flags = GENHD_FL_EXT_DEVT;
         nvdimm_namespace_disk_name(ndns, disk->disk_name);
         disk->driverfs_dev = dev;
         set_capacity(disk, (pmem->size - pmem->pfn_pad - pmem->data_offset)
                         / 512);
-        pmem->pmem_disk = disk;
-        devm_exit_badblocks(dev, &pmem->bb);
         if (devm_init_badblocks(dev, &pmem->bb))
                 return -ENOMEM;
-        bb_res.start = nsio->res.start + pmem->data_offset;
-        bb_res.end = nsio->res.end;
-        if (is_nd_pfn(dev)) {
-                struct nd_pfn *nd_pfn = to_nd_pfn(dev);
-                struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;
-
-                bb_res.start += __le32_to_cpu(pfn_sb->start_pad);
-                bb_res.end -= __le32_to_cpu(pfn_sb->end_trunc);
-        }
-        nvdimm_badblocks_populate(to_nd_region(dev->parent), &pmem->bb,
-                        &bb_res);
+        nvdimm_badblocks_populate(to_nd_region(dev->parent), &pmem->bb, res);
         disk->bb = &pmem->bb;
         add_disk(disk);
         revalidate_disk(disk);
@ -305,346 +302,67 @@ static int pmem_attach_disk(struct device *dev,
|
||||
return 0;
|
||||
}
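The badblocks hookup above deserves a gloss: the disk's bad-block list is populated over the user-visible data range only, that is, the namespace resource shifted up by data_offset (plus start_pad when a pfn info block is present) and shortened by end_trunc. A minimal userspace sketch of that windowing, with made-up numbers and assuming the kernel's inclusive resource bounds:

#include <stdio.h>
#include <stdint.h>

struct resource { uint64_t start, end; }; /* inclusive bounds, as in the kernel */

int main(void)
{
	struct resource ns = { 0x100000000ULL, 0x13fffffffULL }; /* 1GB namespace */
	uint64_t data_offset = 2 << 20;            /* hypothetical memmap + info block */
	uint32_t start_pad = 4 << 20, end_trunc = 4 << 20; /* hypothetical trims */

	/* mirror of the bb_res computation: skip metadata and trimmed ends */
	struct resource bb = {
		.start = ns.start + data_offset + start_pad,
		.end = ns.end - end_trunc,
	};

	printf("scan badblocks in [%#llx-%#llx], %llu MB\n",
			(unsigned long long)bb.start, (unsigned long long)bb.end,
			(unsigned long long)(bb.end - bb.start + 1) >> 20);
	return 0;
}

With these numbers a 1GB namespace yields a 1014MB scan window; poison that lands in the memmap reservation or the trimmed head and tail never surfaces as a disk bad block.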

static int pmem_rw_bytes(struct nd_namespace_common *ndns,
		resource_size_t offset, void *buf, size_t size, int rw)
{
	struct pmem_device *pmem = dev_get_drvdata(ndns->claim);

	if (unlikely(offset + size > pmem->size)) {
		dev_WARN_ONCE(&ndns->dev, 1, "request out of range\n");
		return -EFAULT;
	}

	if (rw == READ) {
		unsigned int sz_align = ALIGN(size + (offset & (512 - 1)), 512);

		if (unlikely(is_bad_pmem(&pmem->bb, offset / 512, sz_align)))
			return -EIO;
		return memcpy_from_pmem(buf, pmem->virt_addr + offset, size);
	} else {
		memcpy_to_pmem(pmem->virt_addr + offset, buf, size);
		wmb_pmem();
	}

	return 0;
}
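Note how the read path above widens the request before consulting the badblocks list: poison is tracked in 512-byte sectors, so a byte-granularity request is rounded out to every sector it touches. A standalone sketch of that arithmetic (is_bad_pmem() is modeled by a toy one-sector lookup here, not the kernel's implementation):

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

#define ALIGN(x, a) (((x) + (a) - 1) & ~((uint64_t)(a) - 1))

/* toy stand-in for the kernel's badblocks lookup: one poisoned sector */
static bool is_bad_sector(uint64_t sector)
{
	return sector == 8; /* assume sector 8 (byte 4096) is poisoned */
}

/* mirror of the driver's check: widen [offset, offset+size) to whole
 * 512-byte sectors, then scan the covered sectors */
static bool request_hits_poison(uint64_t offset, uint64_t size)
{
	uint64_t sz_align = ALIGN(size + (offset & (512 - 1)), 512);
	uint64_t first = offset / 512;
	uint64_t nsect = sz_align / 512;

	for (uint64_t s = first; s < first + nsect; s++)
		if (is_bad_sector(s))
			return true;
	return false;
}

int main(void)
{
	/* a 100-byte read at byte 4000 spills into sector 8 */
	printf("%d\n", request_hits_poison(4000, 100)); /* prints 1 */
	printf("%d\n", request_hits_poison(0, 512));    /* prints 0 */
	return 0;
}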

static int nd_pfn_init(struct nd_pfn *nd_pfn)
{
	struct nd_pfn_sb *pfn_sb = kzalloc(sizeof(*pfn_sb), GFP_KERNEL);
	struct pmem_device *pmem = dev_get_drvdata(&nd_pfn->dev);
	struct nd_namespace_common *ndns = nd_pfn->ndns;
	u32 start_pad = 0, end_trunc = 0;
	resource_size_t start, size;
	struct nd_namespace_io *nsio;
	struct nd_region *nd_region;
	unsigned long npfns;
	phys_addr_t offset;
	u64 checksum;
	int rc;

	if (!pfn_sb)
		return -ENOMEM;

	nd_pfn->pfn_sb = pfn_sb;
	rc = nd_pfn_validate(nd_pfn);
	if (rc == -ENODEV)
		/* no info block, do init */;
	else
		return rc;

	nd_region = to_nd_region(nd_pfn->dev.parent);
	if (nd_region->ro) {
		dev_info(&nd_pfn->dev,
				"%s is read-only, unable to init metadata\n",
				dev_name(&nd_region->dev));
		goto err;
	}

	memset(pfn_sb, 0, sizeof(*pfn_sb));

	/*
	 * Check if pmem collides with 'System RAM' when section aligned and
	 * trim it accordingly
	 */
	nsio = to_nd_namespace_io(&ndns->dev);
	start = PHYS_SECTION_ALIGN_DOWN(nsio->res.start);
	size = resource_size(&nsio->res);
	if (region_intersects(start, size, IORESOURCE_SYSTEM_RAM,
				IORES_DESC_NONE) == REGION_MIXED) {
		start = nsio->res.start;
		start_pad = PHYS_SECTION_ALIGN_UP(start) - start;
	}

	start = nsio->res.start;
	size = PHYS_SECTION_ALIGN_UP(start + size) - start;
	if (region_intersects(start, size, IORESOURCE_SYSTEM_RAM,
				IORES_DESC_NONE) == REGION_MIXED) {
		size = resource_size(&nsio->res);
		end_trunc = start + size - PHYS_SECTION_ALIGN_DOWN(start + size);
	}

	if (start_pad + end_trunc)
		dev_info(&nd_pfn->dev, "%s section collision, truncate %d bytes\n",
				dev_name(&ndns->dev), start_pad + end_trunc);

	/*
	 * Note, we use 64 here for the standard size of struct page,
	 * debugging options may cause it to be larger in which case the
	 * implementation will limit the pfns advertised through
	 * ->direct_access() to those that are included in the memmap.
	 */
	start += start_pad;
	npfns = (pmem->size - start_pad - end_trunc - SZ_8K) / SZ_4K;
	if (nd_pfn->mode == PFN_MODE_PMEM) {
		unsigned long memmap_size;

		/*
		 * vmemmap_populate_hugepages() allocates the memmap array in
		 * PMD_SIZE chunks.
		 */
		memmap_size = ALIGN(64 * npfns, PMD_SIZE);
		offset = ALIGN(start + SZ_8K + memmap_size, nd_pfn->align)
			- start;
	} else if (nd_pfn->mode == PFN_MODE_RAM)
		offset = ALIGN(start + SZ_8K, nd_pfn->align) - start;
	else
		goto err;

	if (offset + start_pad + end_trunc >= pmem->size) {
		dev_err(&nd_pfn->dev, "%s unable to satisfy requested alignment\n",
				dev_name(&ndns->dev));
		goto err;
	}

	npfns = (pmem->size - offset - start_pad - end_trunc) / SZ_4K;
	pfn_sb->mode = cpu_to_le32(nd_pfn->mode);
	pfn_sb->dataoff = cpu_to_le64(offset);
	pfn_sb->npfns = cpu_to_le64(npfns);
	memcpy(pfn_sb->signature, PFN_SIG, PFN_SIG_LEN);
	memcpy(pfn_sb->uuid, nd_pfn->uuid, 16);
	memcpy(pfn_sb->parent_uuid, nd_dev_to_uuid(&ndns->dev), 16);
	pfn_sb->version_major = cpu_to_le16(1);
	pfn_sb->version_minor = cpu_to_le16(1);
	pfn_sb->start_pad = cpu_to_le32(start_pad);
	pfn_sb->end_trunc = cpu_to_le32(end_trunc);
	checksum = nd_sb_checksum((struct nd_gen_sb *) pfn_sb);
	pfn_sb->checksum = cpu_to_le64(checksum);

	rc = nvdimm_write_bytes(ndns, SZ_4K, pfn_sb, sizeof(*pfn_sb));
	if (rc)
		goto err;

	return 0;
err:
	nd_pfn->pfn_sb = NULL;
	kfree(pfn_sb);
	return -ENXIO;
}
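The section-collision logic in nd_pfn_init() is easy to misread: start_pad rounds the namespace start up to the next memory-section boundary when the leading section is shared with System RAM, and end_trunc rounds the end down likewise. A hedged userspace model of that arithmetic, assuming x86_64's 128MB sections and a collision on both ends (the real predicate is region_intersects() returning REGION_MIXED):

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

#define SECTION_SIZE (128ULL << 20) /* assume x86_64's 128MB memory sections */

#define SECTION_ALIGN_DOWN(x) ((x) & ~(SECTION_SIZE - 1))
#define SECTION_ALIGN_UP(x)   SECTION_ALIGN_DOWN((x) + SECTION_SIZE - 1)

/* stand-in for region_intersects(...) == REGION_MIXED */
static bool collides_with_ram(uint64_t start, uint64_t size)
{
	(void)start; (void)size;
	return true; /* pretend both end sections are shared with System RAM */
}

int main(void)
{
	uint64_t start = (2ULL << 30) + (4 << 20); /* hypothetical start: 2GB + 4MB */
	uint64_t size  = 1ULL << 30;               /* hypothetical 1GB namespace */
	uint32_t start_pad = 0, end_trunc = 0;

	/* pad the start up to the next section if the first section is mixed */
	if (collides_with_ram(SECTION_ALIGN_DOWN(start), size))
		start_pad = SECTION_ALIGN_UP(start) - start;

	/* truncate the end down to a section boundary if the last is mixed */
	if (collides_with_ram(start, SECTION_ALIGN_UP(start + size) - start))
		end_trunc = start + size - SECTION_ALIGN_DOWN(start + size);

	printf("start_pad=%u end_trunc=%u usable=%llu MB\n", start_pad, end_trunc,
			(unsigned long long)(size - start_pad - end_trunc) >> 20);
	return 0;
}

With a 2GB+4MB start, start_pad comes out to 124MB and end_trunc to 4MB, which is exactly the kind of loss the "section collision, truncate %d bytes" message above reports.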

static int nvdimm_namespace_detach_pfn(struct nd_namespace_common *ndns)
{
	struct nd_pfn *nd_pfn = to_nd_pfn(ndns->claim);
	struct pmem_device *pmem;

	/* free pmem disk */
	pmem = dev_get_drvdata(&nd_pfn->dev);
	pmem_detach_disk(pmem);

	/* release nd_pfn resources */
	kfree(nd_pfn->pfn_sb);
	nd_pfn->pfn_sb = NULL;

	return 0;
}

/*
 * We hotplug memory at section granularity, pad the reserved area from
 * the previous section base to the namespace base address.
 */
static unsigned long init_altmap_base(resource_size_t base)
{
	unsigned long base_pfn = PHYS_PFN(base);

	return PFN_SECTION_ALIGN_DOWN(base_pfn);
}

static unsigned long init_altmap_reserve(resource_size_t base)
{
	unsigned long reserve = PHYS_PFN(SZ_8K);
	unsigned long base_pfn = PHYS_PFN(base);

	reserve += base_pfn - PFN_SECTION_ALIGN_DOWN(base_pfn);
	return reserve;
}
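Put differently, the altmap's reserve covers everything from the section-aligned base up to the end of the 8K info block, so struct pages are never handed out for the padded head of the device. A small userspace sketch of the same two helpers, assuming 4K pages and 128MB sections (32768 pfns per section):

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12
#define PHYS_PFN(x) ((unsigned long)((x) >> PAGE_SHIFT))
#define SZ_8K 8192ULL
/* assume 128MB sections -> 32768 4K pfns per section */
#define PFN_SECTION_ALIGN_DOWN(pfn) ((pfn) & ~(32768UL - 1))

static unsigned long init_altmap_base(uint64_t base)
{
	return PFN_SECTION_ALIGN_DOWN(PHYS_PFN(base));
}

static unsigned long init_altmap_reserve(uint64_t base)
{
	unsigned long reserve = PHYS_PFN(SZ_8K); /* pages for the info block */
	unsigned long base_pfn = PHYS_PFN(base);

	/* plus the gap from the section base up to the namespace base */
	reserve += base_pfn - PFN_SECTION_ALIGN_DOWN(base_pfn);
	return reserve;
}

int main(void)
{
	uint64_t base = (2ULL << 30) + (4 << 20); /* hypothetical namespace base */

	printf("base_pfn=%lu reserve=%lu pfns\n",
			init_altmap_base(base), init_altmap_reserve(base));
	return 0;
}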

static int __nvdimm_namespace_attach_pfn(struct nd_pfn *nd_pfn)
{
	int rc;
	struct resource res;
	struct request_queue *q;
	struct pmem_device *pmem;
	struct vmem_altmap *altmap;
	struct device *dev = &nd_pfn->dev;
	struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;
	struct nd_namespace_common *ndns = nd_pfn->ndns;
	u32 start_pad = __le32_to_cpu(pfn_sb->start_pad);
	u32 end_trunc = __le32_to_cpu(pfn_sb->end_trunc);
	struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
	resource_size_t base = nsio->res.start + start_pad;
	struct vmem_altmap __altmap = {
		.base_pfn = init_altmap_base(base),
		.reserve = init_altmap_reserve(base),
	};

	pmem = dev_get_drvdata(dev);
	pmem->data_offset = le64_to_cpu(pfn_sb->dataoff);
	pmem->pfn_pad = start_pad + end_trunc;
	nd_pfn->mode = le32_to_cpu(nd_pfn->pfn_sb->mode);
	if (nd_pfn->mode == PFN_MODE_RAM) {
		if (pmem->data_offset < SZ_8K)
			return -EINVAL;
		nd_pfn->npfns = le64_to_cpu(pfn_sb->npfns);
		altmap = NULL;
	} else if (nd_pfn->mode == PFN_MODE_PMEM) {
		nd_pfn->npfns = (pmem->size - pmem->pfn_pad - pmem->data_offset)
			/ PAGE_SIZE;
		if (le64_to_cpu(nd_pfn->pfn_sb->npfns) > nd_pfn->npfns)
			dev_info(&nd_pfn->dev,
					"number of pfns truncated from %lld to %ld\n",
					le64_to_cpu(nd_pfn->pfn_sb->npfns),
					nd_pfn->npfns);
		altmap = &__altmap;
		altmap->free = PHYS_PFN(pmem->data_offset - SZ_8K);
		altmap->alloc = 0;
	} else {
		rc = -ENXIO;
		goto err;
	}

	/* establish pfn range for lookup, and switch to direct map */
	q = pmem->pmem_queue;
	memcpy(&res, &nsio->res, sizeof(res));
	res.start += start_pad;
	res.end -= end_trunc;
	devm_memunmap(dev, (void __force *) pmem->virt_addr);
	pmem->virt_addr = (void __pmem *) devm_memremap_pages(dev, &res,
			&q->q_usage_counter, altmap);
	pmem->pfn_flags |= PFN_MAP;
	if (IS_ERR(pmem->virt_addr)) {
		rc = PTR_ERR(pmem->virt_addr);
		goto err;
	}

	/* attach pmem disk in "pfn-mode" */
	rc = pmem_attach_disk(dev, ndns, pmem);
	if (rc)
		goto err;

	return rc;
err:
	nvdimm_namespace_detach_pfn(ndns);
	return rc;
}

static int nvdimm_namespace_attach_pfn(struct nd_namespace_common *ndns)
{
	struct nd_pfn *nd_pfn = to_nd_pfn(ndns->claim);
	int rc;

	if (!nd_pfn->uuid || !nd_pfn->ndns)
		return -ENODEV;

	rc = nd_pfn_init(nd_pfn);
	if (rc)
		return rc;
	/* we need a valid pfn_sb before we can init a vmem_altmap */
	return __nvdimm_namespace_attach_pfn(nd_pfn);
}

static int nd_pmem_probe(struct device *dev)
{
	struct nd_region *nd_region = to_nd_region(dev->parent);
	struct nd_namespace_common *ndns;
	struct nd_namespace_io *nsio;
	struct pmem_device *pmem;

	ndns = nvdimm_namespace_common_probe(dev);
	if (IS_ERR(ndns))
		return PTR_ERR(ndns);

	nsio = to_nd_namespace_io(&ndns->dev);
	pmem = pmem_alloc(dev, &nsio->res, nd_region->id);
	if (IS_ERR(pmem))
		return PTR_ERR(pmem);
	if (devm_nsio_enable(dev, to_nd_namespace_io(&ndns->dev)))
		return -ENXIO;

	pmem->ndns = ndns;
	dev_set_drvdata(dev, pmem);
	ndns->rw_bytes = pmem_rw_bytes;
	if (devm_init_badblocks(dev, &pmem->bb))
		return -ENOMEM;
	nvdimm_badblocks_populate(nd_region, &pmem->bb, &nsio->res);

	if (is_nd_btt(dev)) {
		/* btt allocates its own request_queue */
		blk_cleanup_queue(pmem->pmem_queue);
		pmem->pmem_queue = NULL;
	if (is_nd_btt(dev))
		return nvdimm_namespace_attach_btt(ndns);
	}

	if (is_nd_pfn(dev))
		return nvdimm_namespace_attach_pfn(ndns);
	return pmem_attach_disk(dev, ndns);

	if (nd_btt_probe(ndns, pmem) == 0 || nd_pfn_probe(ndns, pmem) == 0) {
		/*
		 * We'll come back as either btt-pmem, or pfn-pmem, so
		 * drop the queue allocation for now.
		 */
		blk_cleanup_queue(pmem->pmem_queue);
	/* if we find a valid info-block we'll come back as that personality */
	if (nd_btt_probe(dev, ndns) == 0 || nd_pfn_probe(dev, ndns) == 0)
		return -ENXIO;
	}

	return pmem_attach_disk(dev, ndns, pmem);
	/* ...otherwise we're just a raw pmem device */
	return pmem_attach_disk(dev, ndns);
}
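The probe flow above is the personality dispatch for a pmem namespace: a device already claimed by btt or pfn attaches in that mode; an unclaimed device that carries a valid info block deliberately fails its probe with -ENXIO so it can be re-probed with the right personality bound on top; otherwise it attaches as a raw disk. A toy model of that control flow (the enum and return codes are illustrative stand-ins, not the driver's types):

#include <stdio.h>

/* illustrative stand-ins; the real driver keys off the claiming device type */
enum personality { RAW, BTT, PFN };

static int probe_model(enum personality claimed, int info_block_found)
{
	if (claimed == BTT) {
		printf("attach btt\n");
		return 0;
	}
	if (claimed == PFN) {
		printf("attach pfn\n");
		return 0;
	}
	if (info_block_found) {
		/* a btt/pfn info block exists: fail this probe so the device
		 * comes back claimed by that personality */
		printf("-ENXIO, reprobe as btt/pfn\n");
		return -6;
	}
	printf("attach raw pmem disk\n");
	return 0;
}

int main(void)
{
	probe_model(RAW, 1); /* first pass over a pfn-formatted namespace */
	probe_model(PFN, 0); /* second pass: claimed by pfn, attach in pfn-mode */
	return 0;
}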

static int nd_pmem_remove(struct device *dev)
{
	struct pmem_device *pmem = dev_get_drvdata(dev);

	if (is_nd_btt(dev))
		nvdimm_namespace_detach_btt(pmem->ndns);
	else if (is_nd_pfn(dev))
		nvdimm_namespace_detach_pfn(pmem->ndns);
	else
		pmem_detach_disk(pmem);

	nvdimm_namespace_detach_btt(to_nd_btt(dev));
	return 0;
}

static void nd_pmem_notify(struct device *dev, enum nvdimm_event event)
{
	struct pmem_device *pmem = dev_get_drvdata(dev);
	struct nd_namespace_common *ndns = pmem->ndns;
	struct nd_region *nd_region = to_nd_region(dev->parent);
	struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
	struct resource res = {
		.start = nsio->res.start + pmem->data_offset,
		.end = nsio->res.end,
	};
	struct pmem_device *pmem = dev_get_drvdata(dev);
	resource_size_t offset = 0, end_trunc = 0;
	struct nd_namespace_common *ndns;
	struct nd_namespace_io *nsio;
	struct resource res;

	if (event != NVDIMM_REVALIDATE_POISON)
		return;

	if (is_nd_pfn(dev)) {
	if (is_nd_btt(dev)) {
		struct nd_btt *nd_btt = to_nd_btt(dev);

		ndns = nd_btt->ndns;
	} else if (is_nd_pfn(dev)) {
		struct nd_pfn *nd_pfn = to_nd_pfn(dev);
		struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;

		res.start += __le32_to_cpu(pfn_sb->start_pad);
		res.end -= __le32_to_cpu(pfn_sb->end_trunc);
	}
		ndns = nd_pfn->ndns;
		offset = pmem->data_offset + __le32_to_cpu(pfn_sb->start_pad);
		end_trunc = __le32_to_cpu(pfn_sb->end_trunc);
	} else
		ndns = to_ndns(dev);

	nsio = to_nd_namespace_io(&ndns->dev);
	res.start = nsio->res.start + offset;
	res.end = nsio->res.end - end_trunc;
	nvdimm_badblocks_populate(nd_region, &pmem->bb, &res);
}

@@ -54,6 +54,7 @@ static int nd_region_probe(struct device *dev)

	nd_region->btt_seed = nd_btt_create(nd_region);
	nd_region->pfn_seed = nd_pfn_create(nd_region);
	nd_region->dax_seed = nd_dax_create(nd_region);
	if (err == 0)
		return 0;

@@ -86,6 +87,7 @@ static int nd_region_remove(struct device *dev)
	nd_region->ns_seed = NULL;
	nd_region->btt_seed = NULL;
	nd_region->pfn_seed = NULL;
	nd_region->dax_seed = NULL;
	dev_set_drvdata(dev, NULL);
	nvdimm_bus_unlock(dev);

@@ -306,6 +306,23 @@ static ssize_t pfn_seed_show(struct device *dev,
}
static DEVICE_ATTR_RO(pfn_seed);

static ssize_t dax_seed_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	ssize_t rc;

	nvdimm_bus_lock(dev);
	if (nd_region->dax_seed)
		rc = sprintf(buf, "%s\n", dev_name(nd_region->dax_seed));
	else
		rc = sprintf(buf, "\n");
	nvdimm_bus_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RO(dax_seed);

static ssize_t read_only_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{

@@ -335,6 +352,7 @@ static struct attribute *nd_region_attributes[] = {
	&dev_attr_mappings.attr,
	&dev_attr_btt_seed.attr,
	&dev_attr_pfn_seed.attr,
	&dev_attr_dax_seed.attr,
	&dev_attr_read_only.attr,
	&dev_attr_set_cookie.attr,
	&dev_attr_available_size.attr,

@@ -353,6 +371,9 @@ static umode_t region_visible(struct kobject *kobj, struct attribute *a, int n)
	if (!is_nd_pmem(dev) && a == &dev_attr_pfn_seed.attr)
		return 0;

	if (!is_nd_pmem(dev) && a == &dev_attr_dax_seed.attr)
		return 0;

	if (a != &dev_attr_set_cookie.attr
			&& a != &dev_attr_available_size.attr)
		return a->mode;

@@ -441,6 +462,13 @@ static void nd_region_notify_driver_action(struct nvdimm_bus *nvdimm_bus,
			nd_region_create_pfn_seed(nd_region);
		nvdimm_bus_unlock(dev);
	}
	if (is_nd_dax(dev) && probe) {
		nd_region = to_nd_region(dev->parent);
		nvdimm_bus_lock(dev);
		if (nd_region->dax_seed == dev)
			nd_region_create_dax_seed(nd_region);
		nvdimm_bus_unlock(dev);
	}
}

void nd_region_probe_success(struct nvdimm_bus *nvdimm_bus, struct device *dev)

@@ -718,6 +746,7 @@ static struct nd_region *nd_region_create(struct nvdimm_bus *nvdimm_bus,
	ida_init(&nd_region->ns_ida);
	ida_init(&nd_region->btt_ida);
	ida_init(&nd_region->pfn_ida);
	ida_init(&nd_region->dax_ida);
	dev = &nd_region->dev;
	dev_set_name(dev, "region%d", nd_region->id);
	dev->parent = &nvdimm_bus->dev;

@@ -15,6 +15,7 @@
#include <linux/fs.h>
#include <linux/ndctl.h>
#include <linux/device.h>
#include <linux/badblocks.h>

enum nvdimm_event {
	NVDIMM_REVALIDATE_POISON,

@@ -55,13 +56,19 @@ static inline struct nd_namespace_common *to_ndns(struct device *dev)
}

/**
 * struct nd_namespace_io - infrastructure for loading an nd_pmem instance
 * struct nd_namespace_io - device representation of a persistent memory range
 * @dev: namespace device created by the nd region driver
 * @res: struct resource conversion of a NFIT SPA table
 * @size: cached resource_size(@res) for fast path size checks
 * @addr: virtual address to access the namespace range
 * @bb: badblocks list for the namespace range
 */
struct nd_namespace_io {
	struct nd_namespace_common common;
	struct resource res;
	resource_size_t size;
	void __pmem *addr;
	struct badblocks bb;
};

/**

@@ -82,6 +89,7 @@ struct nd_namespace_pmem {
 * @uuid: namespace name supplied in the dimm label
 * @id: ida allocated id
 * @lbasize: blk namespaces have a native sector size when btt not present
 * @size: sum of all the resource ranges allocated to this namespace
 * @num_resources: number of dpa extents to claim
 * @res: discontiguous dpa extents for given dimm
 */

@@ -91,6 +99,7 @@ struct nd_namespace_blk {
	u8 *uuid;
	int id;
	unsigned long lbasize;
	resource_size_t size;
	int num_resources;
	struct resource **res;
};

@@ -206,6 +206,7 @@ static inline const char *nvdimm_cmd_name(unsigned cmd)
#define ND_DEVICE_NAMESPACE_IO 4 /* legacy persistent memory */
#define ND_DEVICE_NAMESPACE_PMEM 5 /* PMEM namespace (may alias with BLK) */
#define ND_DEVICE_NAMESPACE_BLK 6 /* BLK namespace (may alias with PMEM) */
#define ND_DEVICE_DAX_PMEM 7 /* Device DAX interface to pmem */

enum nd_driver_flags {
	ND_DRIVER_DIMM = 1 << ND_DEVICE_DIMM,

@@ -214,6 +215,7 @@ enum nd_driver_flags {
	ND_DRIVER_NAMESPACE_IO = 1 << ND_DEVICE_NAMESPACE_IO,
	ND_DRIVER_NAMESPACE_PMEM = 1 << ND_DEVICE_NAMESPACE_PMEM,
	ND_DRIVER_NAMESPACE_BLK = 1 << ND_DEVICE_NAMESPACE_BLK,
	ND_DRIVER_DAX_PMEM = 1 << ND_DEVICE_DAX_PMEM,
};

enum {

@@ -7,6 +7,7 @@ ldflags-y += --wrap=ioremap_nocache
ldflags-y += --wrap=iounmap
ldflags-y += --wrap=memunmap
ldflags-y += --wrap=__devm_request_region
ldflags-y += --wrap=__devm_release_region
ldflags-y += --wrap=__request_region
ldflags-y += --wrap=__release_region
ldflags-y += --wrap=devm_memremap_pages

@@ -49,6 +50,7 @@ libnvdimm-y += $(NVDIMM_SRC)/label.o
libnvdimm-$(CONFIG_ND_CLAIM) += $(NVDIMM_SRC)/claim.o
libnvdimm-$(CONFIG_BTT) += $(NVDIMM_SRC)/btt_devs.o
libnvdimm-$(CONFIG_NVDIMM_PFN) += $(NVDIMM_SRC)/pfn_devs.o
libnvdimm-$(CONFIG_NVDIMM_DAX) += $(NVDIMM_SRC)/dax_devs.o
libnvdimm-y += config_check.o

obj-m += test/

@@ -239,13 +239,11 @@ struct resource *__wrap___devm_request_region(struct device *dev,
}
EXPORT_SYMBOL(__wrap___devm_request_region);

void __wrap___release_region(struct resource *parent, resource_size_t start,
		resource_size_t n)
static bool nfit_test_release_region(struct resource *parent,
		resource_size_t start, resource_size_t n)
{
	struct nfit_test_resource *nfit_res;

	if (parent == &iomem_resource) {
		nfit_res = get_nfit_res(start);
		struct nfit_test_resource *nfit_res = get_nfit_res(start);

		if (nfit_res) {
			struct resource *res = nfit_res->res + 1;

@@ -254,11 +252,26 @@ void __wrap___release_region(struct resource *parent, resource_size_t start,
					__func__, start, n, res);
			else
				memset(res, 0, sizeof(*res));
			return;
			return true;
		}
	}
	return false;
}

void __wrap___release_region(struct resource *parent, resource_size_t start,
		resource_size_t n)
{
	if (!nfit_test_release_region(parent, start, n))
		__release_region(parent, start, n);
}
EXPORT_SYMBOL(__wrap___release_region);

void __wrap___devm_release_region(struct device *dev, struct resource *parent,
		resource_size_t start, resource_size_t n)
{
	if (!nfit_test_release_region(parent, start, n))
		__devm_release_region(dev, parent, start, n);
}
EXPORT_SYMBOL(__wrap___devm_release_region);

MODULE_LICENSE("GPL v2");
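The __wrap_* functions above rely on the linker's symbol interposition: the tools/testing/nvdimm Makefile passes --wrap=sym for each symbol, so every call to sym is routed to __wrap_sym, which may fall through to the genuine implementation via __real_sym. The same GNU ld mechanism works in userspace; a self-contained demonstration (wrapping malloc here is just an example target):

/* wrap_demo.c - build with:
 *   gcc -Wl,--wrap=malloc wrap_demo.c -o wrap_demo
 * every malloc() call is redirected to __wrap_malloc(), which can
 * delegate to the real allocator through __real_malloc() */
#include <stdio.h>
#include <stdlib.h>

void *__real_malloc(size_t size); /* resolved by the linker to the real malloc */

void *__wrap_malloc(size_t size)
{
	printf("intercepted malloc(%zu)\n", size);
	return __real_malloc(size);
}

int main(void)
{
	free(malloc(32)); /* prints: intercepted malloc(32) */
	return 0;
}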