arm64 fix for 5.14

- Fix dma_map_resource() by reverting back to old pfn_valid() code

Merge tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux

Pull arm64 fix from Will Deacon:
 "We received a report this week that the generic version of
  pfn_valid(), which we switched to this merge window in 16c9afc776
  ("arm64/mm: drop HAVE_ARCH_PFN_VALID"), interacts badly with
  dma_map_resource() due to the following check:

        /* Don't allow RAM to be mapped */
        if (WARN_ON_ONCE(pfn_valid(PHYS_PFN(phys_addr))))
                return DMA_MAPPING_ERROR;

  Since the ongoing saga to determine the semantics of pfn_valid() is
  unlikely to be resolved this week (does it indicate valid memory, or
  just the presence of a struct page, or whether that struct page has
  been initialised?), just revert back to our old version of pfn_valid()
  for 5.14.

  Summary:

   - Fix dma_map_resource() by reverting back to old pfn_valid() code"

* tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux:
  Partially revert "arm64/mm: drop HAVE_ARCH_PFN_VALID"
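
For context on the failure mode (an editorial sketch, not part of the commit): dma_map_resource() hands a device a DMA address for an MMIO region, such as another device's register window, and must never be fed RAM, which is why it rejects anything pfn_valid() claims is memory. A hypothetical caller looks like this; the function name and error handling are illustrative, not from any in-tree driver:

        #include <linux/dma-mapping.h>

        /*
         * Hypothetical driver snippet: map a peripheral's MMIO window
         * for DMA.  With the generic pfn_valid(), pfns inside a
         * sparsemem section that also contains early RAM reported a
         * valid struct page, so the WARN_ON_ONCE() quoted above fired
         * and legitimate MMIO mappings failed with DMA_MAPPING_ERROR.
         */
        static int example_map_mmio(struct device *dev, phys_addr_t mmio_phys,
                                    size_t size, dma_addr_t *dma_out)
        {
                dma_addr_t dma;

                dma = dma_map_resource(dev, mmio_phys, size,
                                       DMA_BIDIRECTIONAL, 0);
                if (dma_mapping_error(dev, dma))
                        return -EIO;

                *dma_out = dma;
                return 0;
        }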
Linus Torvalds, 2021-08-26 11:26:00 -07:00
commit 1a6d80ff24
3 changed files with 39 additions and 0 deletions

diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig

@@ -156,6 +156,7 @@ config ARM64
 	select HAVE_ARCH_KGDB
 	select HAVE_ARCH_MMAP_RND_BITS
 	select HAVE_ARCH_MMAP_RND_COMPAT_BITS if COMPAT
+	select HAVE_ARCH_PFN_VALID
 	select HAVE_ARCH_PREL32_RELOCATIONS
 	select HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET
 	select HAVE_ARCH_SECCOMP_FILTER
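
(Editorial aside: re-selecting HAVE_ARCH_PFN_VALID compiles out the generic sparsemem pfn_valid() in include/linux/mmzone.h, so callers resolve to the arch version restored below. A paraphrased sketch of that generic gating, from memory rather than quoted verbatim:)

        #ifndef CONFIG_HAVE_ARCH_PFN_VALID
        /* Paraphrased sketch of the v5.14 generic helper; details may differ. */
        static inline int pfn_valid(unsigned long pfn)
        {
                struct mem_section *ms;

                /* Reject pfns whose upper bits fall outside the range. */
                if (PHYS_PFN(PFN_PHYS(pfn)) != pfn)
                        return 0;
                if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
                        return 0;
                ms = __nr_to_section(pfn_to_section_nr(pfn));
                if (!valid_section(ms))
                        return 0;
                /*
                 * Early sections count as valid across their whole span,
                 * even where the underlying pfns are not RAM.
                 */
                return early_section(ms) || pfn_section_valid(ms, pfn);
        }
        #endif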

diff --git a/arch/arm64/include/asm/page.h b/arch/arm64/include/asm/page.h

@@ -41,6 +41,7 @@ void tag_clear_highpage(struct page *to);
 
 typedef struct page *pgtable_t;
 
+int pfn_valid(unsigned long pfn);
 int pfn_is_map_memory(unsigned long pfn);
 
 #include <asm/memory.h>

diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c

@@ -219,6 +219,43 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max)
 	free_area_init(max_zone_pfns);
 }
 
+int pfn_valid(unsigned long pfn)
+{
+	phys_addr_t addr = PFN_PHYS(pfn);
+	struct mem_section *ms;
+
+	/*
+	 * Ensure the upper PAGE_SHIFT bits are clear in the
+	 * pfn. Else it might lead to false positives when
+	 * some of the upper bits are set, but the lower bits
+	 * match a valid pfn.
+	 */
+	if (PHYS_PFN(addr) != pfn)
+		return 0;
+
+	if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
+		return 0;
+
+	ms = __pfn_to_section(pfn);
+	if (!valid_section(ms))
+		return 0;
+
+	/*
+	 * ZONE_DEVICE memory does not have the memblock entries.
+	 * memblock_is_map_memory() check for ZONE_DEVICE based
+	 * addresses will always fail. Even the normal hotplugged
+	 * memory will never have MEMBLOCK_NOMAP flag set in their
+	 * memblock entries. Skip memblock search for all non early
+	 * memory sections covering all of hotplug memory including
+	 * both normal and ZONE_DEVICE based.
+	 */
+	if (!early_section(ms))
+		return pfn_section_valid(ms, pfn);
+
+	return memblock_is_memory(addr);
+}
+EXPORT_SYMBOL(pfn_valid);
+
 int pfn_is_map_memory(unsigned long pfn)
 {
 	phys_addr_t addr = PFN_PHYS(pfn);
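
To summarise the split this partial revert preserves (an illustrative, hypothetical helper, not part of the diff): pfn_valid() answers whether a pfn is backed by a struct page on actual memory, while pfn_is_map_memory() answers whether that memory sits in the kernel linear map, which MEMBLOCK_NOMAP firmware regions and ZONE_DEVICE pages do not:

        #include <linux/mm.h>
        #include <linux/printk.h>

        /* Hypothetical helper, for illustration only. */
        static void example_classify_pfn(unsigned long pfn)
        {
                /*
                 * Backed by a struct page: normal RAM, and also
                 * hotplugged or ZONE_DEVICE memory.
                 */
                if (pfn_valid(pfn))
                        pr_info("pfn %#lx: has a struct page\n", pfn);

                /*
                 * Stricter: RAM present in the linear map, so this
                 * excludes MEMBLOCK_NOMAP and ZONE_DEVICE regions.
                 */
                if (pfn_is_map_memory(pfn))
                        pr_info("pfn %#lx: normal mapped RAM\n", pfn);
        }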