Merge branch 'libnvdimm-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/nvdimm/nvdimm
Pull libnvdimm fixes from Dan Williams:
 "nvdimm fixes for v4.8, two of them are tagged for -stable:

  - Fix devm_memremap_pages() to use track_pfn_insert(). Otherwise, DAX
    pmd mappings end up with an uncached pgprot, and unusable
    performance for the device-dax interface. The device-dax interface
    appeared in 4.7 so this is tagged for -stable.

  - Fix a couple VM_BUG_ON() checks in the show_smaps() path to
    understand DAX pmd entries. This fix is tagged for -stable.

  - Fix a mis-merge of the nfit machine-check handler to flip the
    polarity of an if() to match the final version of the patch that
    Vishal sent for 4.8-rc1. Without this the nfit machine check
    handler never detects / inserts new 'badblocks' entries which
    applications use to identify lost portions of files.

  - For test purposes, fix the nvdimm_clear_poison() path to operate on
    legacy / simulated nvdimm memory ranges. Without this fix a test
    can set badblocks, but never clear them on these ranges.

  - Fix the range checking done by dax_dev_pmd_fault(). This is not
    tagged for -stable since this problem is mitigated by specifying
    aligned resources at device-dax setup time.

  These patches have appeared in a next release over the past week.
  The recent rebase you can see in the timestamps was to drop an
  invalid fix as identified by the updated device-dax unit tests [1].
  The -mm touches have an ack from Andrew"

[1]: "[ndctl PATCH 0/3] device-dax test for recent kernel bugs"
     https://lists.01.org/pipermail/linux-nvdimm/2016-September/006855.html

* 'libnvdimm-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/nvdimm/nvdimm:
  libnvdimm: allow legacy (e820) pmem region to clear bad blocks
  nfit, mce: Fix SPA matching logic in MCE handler
  mm: fix cache mode of dax pmd mappings
  mm: fix show_smap() for zone_device-pmd ranges
  dax: fix mapping size check
commit 98ac9a608d
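As context for the "flip the polarity" fix in the drivers/acpi/nfit/mce.c hunk below: the machine-check handler must skip SPA ranges that are not persistent memory and then match the one SPA whose address range covers the machine-check address. The following is a minimal, self-contained sketch of that matching logic only, not the in-tree code; the struct and function names are simplified stand-ins for the NFIT types.

#include <stdbool.h>

/* Hypothetical, simplified stand-in for struct acpi_nfit_system_address. */
struct spa_range {
	unsigned long long address;	/* start of the system physical address range */
	unsigned long long length;	/* size of the range in bytes */
	bool is_pmem;			/* stands in for nfit_spa_type(spa) == NFIT_SPA_PM */
};

/* Return true if this SPA should handle an MCE at mce_addr. */
static bool spa_covers_mce(const struct spa_range *spa, unsigned long long mce_addr)
{
	if (!spa->is_pmem)
		return false;	/* the fix: skip (continue past) non-PM ranges */
	if (spa->address > mce_addr)
		return false;	/* range starts beyond the error address */
	if (mce_addr >= spa->address + spa->length)
		return false;	/* error address lies past the end of the range */
	return true;
}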
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -927,9 +927,10 @@ int track_pfn_copy(struct vm_area_struct *vma)
 }
 
 /*
- * prot is passed in as a parameter for the new mapping. If the vma has a
- * linear pfn mapping for the entire range reserve the entire vma range with
- * single reserve_pfn_range call.
+ * prot is passed in as a parameter for the new mapping. If the vma has
+ * a linear pfn mapping for the entire range, or no vma is provided,
+ * reserve the entire pfn + size range with single reserve_pfn_range
+ * call.
  */
 int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot,
 		    unsigned long pfn, unsigned long addr, unsigned long size)
@@ -938,11 +939,12 @@ int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot,
 	enum page_cache_mode pcm;
 
 	/* reserve the whole chunk starting from paddr */
-	if (addr == vma->vm_start && size == (vma->vm_end - vma->vm_start)) {
+	if (!vma || (addr == vma->vm_start
+				&& size == (vma->vm_end - vma->vm_start))) {
 		int ret;
 
 		ret = reserve_pfn_range(paddr, size, prot, 0);
-		if (!ret)
+		if (ret == 0 && vma)
 			vma->vm_flags |= VM_PAT;
 		return ret;
 	}
@@ -997,7 +999,7 @@ void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn,
 	resource_size_t paddr;
 	unsigned long prot;
 
-	if (!(vma->vm_flags & VM_PAT))
+	if (vma && !(vma->vm_flags & VM_PAT))
 		return;
 
 	/* free the chunk starting from pfn or the whole chunk */
@@ -1011,7 +1013,8 @@ void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn,
 		size = vma->vm_end - vma->vm_start;
 	}
 	free_pfn_range(paddr, size);
-	vma->vm_flags &= ~VM_PAT;
+	if (vma)
+		vma->vm_flags &= ~VM_PAT;
 }
 
 /*
--- a/drivers/acpi/nfit/mce.c
+++ b/drivers/acpi/nfit/mce.c
@@ -42,7 +42,7 @@ static int nfit_handle_mce(struct notifier_block *nb, unsigned long val,
 	list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
 		struct acpi_nfit_system_address *spa = nfit_spa->spa;
 
-		if (nfit_spa_type(spa) == NFIT_SPA_PM)
+		if (nfit_spa_type(spa) != NFIT_SPA_PM)
 			continue;
 		/* find the spa that covers the mce addr */
 		if (spa->address > mce->addr)
--- a/drivers/dax/dax.c
+++ b/drivers/dax/dax.c
@@ -459,7 +459,7 @@ static int __dax_dev_pmd_fault(struct dax_dev *dax_dev,
 	}
 
 	pgoff = linear_page_index(vma, pmd_addr);
-	phys = pgoff_to_phys(dax_dev, pgoff, PAGE_SIZE);
+	phys = pgoff_to_phys(dax_dev, pgoff, PMD_SIZE);
 	if (phys == -1) {
 		dev_dbg(dev, "%s: phys_to_pgoff(%#lx) failed\n", __func__,
 				pgoff);
--- a/drivers/nvdimm/bus.c
+++ b/drivers/nvdimm/bus.c
@@ -185,8 +185,12 @@ long nvdimm_clear_poison(struct device *dev, phys_addr_t phys,
 		return -ENXIO;
 
 	nd_desc = nvdimm_bus->nd_desc;
+	/*
+	 * if ndctl does not exist, it's PMEM_LEGACY and
+	 * we want to just pretend everything is handled.
+	 */
 	if (!nd_desc->ndctl)
-		return -ENXIO;
+		return len;
 
 	memset(&ars_cap, 0, sizeof(ars_cap));
 	ars_cap.address = phys;
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -581,6 +581,8 @@ static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
 		mss->anonymous_thp += HPAGE_PMD_SIZE;
 	else if (PageSwapBacked(page))
 		mss->shmem_thp += HPAGE_PMD_SIZE;
+	else if (is_zone_device_page(page))
+		/* pass */;
 	else
 		VM_BUG_ON_PAGE(1, page);
 	smaps_account(mss, page, true, pmd_young(*pmd), pmd_dirty(*pmd));
--- a/kernel/memremap.c
+++ b/kernel/memremap.c
@@ -247,6 +247,7 @@ static void devm_memremap_pages_release(struct device *dev, void *data)
 	align_start = res->start & ~(SECTION_SIZE - 1);
 	align_size = ALIGN(resource_size(res), SECTION_SIZE);
 	arch_remove_memory(align_start, align_size);
+	untrack_pfn(NULL, PHYS_PFN(align_start), align_size);
 	pgmap_radix_release(res);
 	dev_WARN_ONCE(dev, pgmap->altmap && pgmap->altmap->alloc,
 			"%s: failed to free all reserved pages\n", __func__);
@@ -282,6 +283,7 @@ void *devm_memremap_pages(struct device *dev, struct resource *res,
 		struct percpu_ref *ref, struct vmem_altmap *altmap)
 {
 	resource_size_t key, align_start, align_size, align_end;
+	pgprot_t pgprot = PAGE_KERNEL;
 	struct dev_pagemap *pgmap;
 	struct page_map *page_map;
 	int error, nid, is_ram;
@@ -351,6 +353,11 @@ void *devm_memremap_pages(struct device *dev, struct resource *res,
 	if (nid < 0)
 		nid = numa_mem_id();
 
+	error = track_pfn_remap(NULL, &pgprot, PHYS_PFN(align_start), 0,
+			align_size);
+	if (error)
+		goto err_pfn_remap;
+
 	error = arch_add_memory(nid, align_start, align_size, true);
 	if (error)
 		goto err_add_memory;
@@ -371,6 +378,8 @@ void *devm_memremap_pages(struct device *dev, struct resource *res,
 	return __va(res->start);
 
  err_add_memory:
+	untrack_pfn(NULL, PHYS_PFN(align_start), align_size);
+ err_pfn_remap:
  err_radix:
 	pgmap_radix_release(res);
 	devres_free(page_map);
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1078,7 +1078,7 @@ struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
 		goto out;
 
 	page = pmd_page(*pmd);
-	VM_BUG_ON_PAGE(!PageHead(page), page);
+	VM_BUG_ON_PAGE(!PageHead(page) && !is_zone_device_page(page), page);
 	if (flags & FOLL_TOUCH)
 		touch_pmd(vma, addr, pmd);
 	if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) {
@@ -1116,7 +1116,7 @@ struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
 	}
 skip_mlock:
 	page += (addr & ~HPAGE_PMD_MASK) >> PAGE_SHIFT;
-	VM_BUG_ON_PAGE(!PageCompound(page), page);
+	VM_BUG_ON_PAGE(!PageCompound(page) && !is_zone_device_page(page), page);
 	if (flags & FOLL_GET)
 		get_page(page);
 