powerpc/mm: Remove dcache flush from memory remove.

We added dcache flush on memory add/remove in commit
fb5924fddf ("powerpc/mm: Flush cache on memory hot(un)plug") to
handle crashes on GPU hotplug. Instead of adding dcache flush in
generic memory add/remove routine which is used even for regular
memory, we should handle these devices specific flush in the device
driver code.

memtrace did handle this in the driver and that was removed by commit
7fd6641de2 ("powerpc/powernv/memtrace: Let the arch hotunplug code
flush cache"). This patch reverts that commit.

The dcache flush in memory add was removed by commit
ea458effa8 ("powerpc: Don't flush caches when adding memory") which
I don't think is correct. The reason why we require dcache flush in
memtrace is to make sure we don't have a dirty cache when we remap a
pfn to cache inhibited. We should do that when the memtrace module
removes the memory and makes the pfn available for HTM traces to map it
as cache inhibited.

The other device mentioned in commit fb5924fddf ("powerpc/mm: Flush
cache on memory hot(un)plug") is nvlink device with coherent memory.
The support for that was removed in commit
7eb3cf7619 ("powerpc/powernv: remove unused NPU DMA code") and
commit 25b2995a35 ("mm: remove MEMORY_DEVICE_PUBLIC support").

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20210203045812.234439-3-aneesh.kumar@linux.ibm.com
This commit is contained in:
Aneesh Kumar K.V 2021-02-03 10:28:12 +05:30 committed by Michael Ellerman
parent ec94b9b23d
commit 2ac02e5ece
2 changed files with 29 additions and 22 deletions

View File

@ -91,27 +91,6 @@ int __weak remove_section_mapping(unsigned long start, unsigned long end)
return -ENODEV; return -ENODEV;
} }
#define FLUSH_CHUNK_SIZE SZ_1G
/**
 * flush_dcache_range_chunked(): Write any modified data cache blocks out to
 * memory and invalidate them, in chunks of up to FLUSH_CHUNK_SIZE
 * Does not invalidate the corresponding instruction cache blocks.
 *
 * @start: the start address
 * @stop: the stop address (exclusive)
 * @chunk: the max size of the chunks
 */
static void flush_dcache_range_chunked(unsigned long start, unsigned long stop,
				       unsigned long chunk)
{
	unsigned long addr = start;

	/*
	 * Walk the range one chunk at a time, rescheduling between chunks
	 * so that flushing a huge range does not hog the CPU.
	 */
	while (addr < stop) {
		unsigned long end = min(stop, addr + chunk);

		flush_dcache_range(addr, end);
		cond_resched();
		addr = end;
	}
}
int __ref arch_create_linear_mapping(int nid, u64 start, u64 size, int __ref arch_create_linear_mapping(int nid, u64 start, u64 size,
struct mhp_params *params) struct mhp_params *params)
{ {
@ -136,7 +115,6 @@ void __ref arch_remove_linear_mapping(u64 start, u64 size)
/* Remove htab bolted mappings for this section of memory */ /* Remove htab bolted mappings for this section of memory */
start = (unsigned long)__va(start); start = (unsigned long)__va(start);
flush_dcache_range_chunked(start, start + size, FLUSH_CHUNK_SIZE);
mutex_lock(&linear_mapping_mutex); mutex_lock(&linear_mapping_mutex);
ret = remove_section_mapping(start, start + size); ret = remove_section_mapping(start, start + size);

View File

@ -19,6 +19,7 @@
#include <linux/numa.h> #include <linux/numa.h>
#include <asm/machdep.h> #include <asm/machdep.h>
#include <asm/debugfs.h> #include <asm/debugfs.h>
#include <asm/cacheflush.h>
/* This enables us to keep track of the memory removed from each node. */ /* This enables us to keep track of the memory removed from each node. */
struct memtrace_entry { struct memtrace_entry {
@ -51,6 +52,27 @@ static const struct file_operations memtrace_fops = {
.open = simple_open, .open = simple_open,
}; };
#define FLUSH_CHUNK_SIZE SZ_1G
/**
 * flush_dcache_range_chunked(): Write any modified data cache blocks out to
 * memory and invalidate them, in chunks of up to FLUSH_CHUNK_SIZE
 * Does not invalidate the corresponding instruction cache blocks.
 *
 * @start: the start address
 * @stop: the stop address (exclusive)
 * @chunk: the max size of the chunks
 *
 * Calls cond_resched() between chunks so that flushing a large range
 * does not monopolize the CPU.
 */
static void flush_dcache_range_chunked(unsigned long start, unsigned long stop,
unsigned long chunk)
{
unsigned long i;
/* Flush at most @chunk bytes per iteration; clamp the last chunk to @stop. */
for (i = start; i < stop; i += chunk) {
flush_dcache_range(i, min(stop, i + chunk));
cond_resched();
}
}
static void memtrace_clear_range(unsigned long start_pfn, static void memtrace_clear_range(unsigned long start_pfn,
unsigned long nr_pages) unsigned long nr_pages)
{ {
@ -62,6 +84,13 @@ static void memtrace_clear_range(unsigned long start_pfn,
cond_resched(); cond_resched();
clear_page(__va(PFN_PHYS(pfn))); clear_page(__va(PFN_PHYS(pfn)));
} }
/*
* Before we go ahead and use this range as cache inhibited range
* flush the cache.
*/
flush_dcache_range_chunked(PFN_PHYS(start_pfn),
PFN_PHYS(start_pfn + nr_pages),
FLUSH_CHUNK_SIZE);
} }
static u64 memtrace_alloc_node(u32 nid, u64 size) static u64 memtrace_alloc_node(u32 nid, u64 size)