forked from Minki/linux
s390/vmemmap: take the vmem_mutex when populating/freeing
Let's synchronize all accesses to the 1:1 and vmemmap mappings. This will be especially relevant when wanting to cleanup empty page tables that could be shared by both. Avoid races when removing tables that might be just about to get reused. Cc: Vasily Gorbik <gor@linux.ibm.com> Cc: Christian Borntraeger <borntraeger@de.ibm.com> Cc: Gerald Schaefer <gerald.schaefer@de.ibm.com> Signed-off-by: David Hildenbrand <david@redhat.com> Message-Id: <20200722094558.9828-6-david@redhat.com> Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
This commit is contained in:
parent
c00f05a924
commit
aa18e0e658
@@ -334,17 +334,21 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
|
||||
{
|
||||
int ret;
|
||||
|
||||
mutex_lock(&vmem_mutex);
|
||||
/* We don't care about the node, just use NUMA_NO_NODE on allocations */
|
||||
ret = add_pagetable(start, end, false);
|
||||
if (ret)
|
||||
remove_pagetable(start, end, false);
|
||||
mutex_unlock(&vmem_mutex);
|
||||
return ret;
|
||||
}
|
||||
|
||||
void vmemmap_free(unsigned long start, unsigned long end,
|
||||
struct vmem_altmap *altmap)
|
||||
{
|
||||
mutex_lock(&vmem_mutex);
|
||||
remove_pagetable(start, end, false);
|
||||
mutex_unlock(&vmem_mutex);
|
||||
}
|
||||
|
||||
void vmem_remove_mapping(unsigned long start, unsigned long size)
|
||||
|
Loading…
Reference in New Issue
Block a user