
Merge tag 'hardening-v5.19-rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux

Pull hardening fixes from Kees Cook:

 - Correctly handle vm_map areas in hardened usercopy (Matthew Wilcox)

 - Adjust CFI RCU usage to avoid boot splats with cpuidle (Sami Tolvanen)

* tag 'hardening-v5.19-rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux:
  usercopy: Make usercopy resilient against ridiculously large copies
  usercopy: Cast pointer to an integer once
  usercopy: Handle vm_map_ram() areas
  cfi: Fix __cfi_slowpath_diag RCU usage with cpuidle
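
A pattern worth noting across the usercopy fixes below: bounds checks of the
form "offset + n > size" are rewritten as "n > size - offset", because with
unsigned arithmetic a ridiculously large n can wrap offset + n past zero and
sneak under the limit. A minimal userspace illustration, not kernel code,
with all names invented for the demo:

#include <stdio.h>
#include <limits.h>

int main(void)
{
	unsigned long size = 4096;		/* stand-in for PAGE_SIZE */
	unsigned long offset = 512;		/* copy starts here */
	unsigned long n = ULONG_MAX - 256;	/* "ridiculously large" length */

	/* Old form: offset + n wraps around zero and passes the check. */
	if (offset + n > size)
		printf("old check: caught\n");
	else
		printf("old check: missed, offset + n wrapped to %lu\n",
		       offset + n);

	/* New form: size - offset cannot wrap, since offset <= size here. */
	if (n > size - offset)
		printf("new check: caught\n");

	return 0;
}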
Linus Torvalds 2022-06-15 14:20:26 -07:00
commit 30306f6194
4 changed files with 30 additions and 21 deletions

include/linux/vmalloc.h

@@ -215,6 +215,7 @@ extern struct vm_struct *__get_vm_area_caller(unsigned long size,
 void free_vm_area(struct vm_struct *area);
 extern struct vm_struct *remove_vm_area(const void *addr);
 extern struct vm_struct *find_vm_area(const void *addr);
+struct vmap_area *find_vmap_area(unsigned long addr);
 
 static inline bool is_vm_area_hugepages(const void *addr)
 {

kernel/cfi.c

@@ -281,6 +281,8 @@ static inline cfi_check_fn find_module_check_fn(unsigned long ptr)
 static inline cfi_check_fn find_check_fn(unsigned long ptr)
 {
 	cfi_check_fn fn = NULL;
+	unsigned long flags;
+	bool rcu_idle;
 
 	if (is_kernel_text(ptr))
 		return __cfi_check;
@@ -290,13 +292,21 @@ static inline cfi_check_fn find_check_fn(unsigned long ptr)
 	 * the shadow and __module_address use RCU, so we need to wake it
 	 * up if necessary.
 	 */
-	RCU_NONIDLE({
-		if (IS_ENABLED(CONFIG_CFI_CLANG_SHADOW))
-			fn = find_shadow_check_fn(ptr);
+	rcu_idle = !rcu_is_watching();
+	if (rcu_idle) {
+		local_irq_save(flags);
+		rcu_irq_enter();
+	}
 
-		if (!fn)
-			fn = find_module_check_fn(ptr);
-	});
+	if (IS_ENABLED(CONFIG_CFI_CLANG_SHADOW))
+		fn = find_shadow_check_fn(ptr);
+	if (!fn)
+		fn = find_module_check_fn(ptr);
+
+	if (rcu_idle) {
+		rcu_irq_exit();
+		local_irq_restore(flags);
+	}
 
 	return fn;
 }
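
The shape of that fix, restated as a sketch (kernel context assumed; the
helper name is invented, not the kernel's code as-is): instead of
RCU_NONIDLE(), which produced the boot splats when this ran on the cpuidle
path, the code asks whether RCU is watching and wakes it only for the
duration of the lookup.

static cfi_check_fn lookup_with_rcu_watching(unsigned long ptr)
{
	cfi_check_fn fn = NULL;
	unsigned long flags;
	bool rcu_idle = !rcu_is_watching();	/* called from the idle loop? */

	if (rcu_idle) {
		local_irq_save(flags);		/* rcu_irq_enter() needs IRQs off */
		rcu_irq_enter();		/* make RCU watch this CPU */
	}

	/* ... RCU-protected lookups (shadow, __module_address) ... */

	if (rcu_idle) {
		rcu_irq_exit();			/* return RCU to its idle state */
		local_irq_restore(flags);
	}
	return fn;
}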

mm/usercopy.c

@@ -161,29 +161,27 @@ static inline void check_bogus_address(const unsigned long ptr, unsigned long n,
 static inline void check_heap_object(const void *ptr, unsigned long n,
 				     bool to_user)
 {
+	uintptr_t addr = (uintptr_t)ptr;
+	unsigned long offset;
 	struct folio *folio;
 
 	if (is_kmap_addr(ptr)) {
-		unsigned long page_end = (unsigned long)ptr | (PAGE_SIZE - 1);
-
-		if ((unsigned long)ptr + n - 1 > page_end)
-			usercopy_abort("kmap", NULL, to_user,
-				       offset_in_page(ptr), n);
+		offset = offset_in_page(ptr);
+		if (n > PAGE_SIZE - offset)
+			usercopy_abort("kmap", NULL, to_user, offset, n);
 		return;
 	}
 
 	if (is_vmalloc_addr(ptr)) {
-		struct vm_struct *area = find_vm_area(ptr);
-		unsigned long offset;
+		struct vmap_area *area = find_vmap_area(addr);
 
-		if (!area) {
+		if (!area)
 			usercopy_abort("vmalloc", "no area", to_user, 0, n);
-			return;
-		}
 
-		offset = ptr - area->addr;
-		if (offset + n > get_vm_area_size(area))
+		if (n > area->va_end - addr) {
+			offset = addr - area->va_start;
 			usercopy_abort("vmalloc", NULL, to_user, offset, n);
+		}
 		return;
 	}
@@ -196,8 +194,8 @@ static inline void check_heap_object(const void *ptr, unsigned long n,
 		/* Check slab allocator for flags and size. */
 		__check_heap_object(ptr, n, folio_slab(folio), to_user);
 	} else if (folio_test_large(folio)) {
-		unsigned long offset = ptr - folio_address(folio);
-		if (offset + n > folio_size(folio))
+		offset = ptr - folio_address(folio);
+		if (n > folio_size(folio) - offset)
 			usercopy_abort("page alloc", NULL, to_user, offset, n);
 	}
 }
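
Why the lookup switch above: vm_map_ram() builds a mapping that has a
vmap_area but no vm_struct behind it, so find_vm_area() returned NULL for
such buffers and hardened usercopy falsely aborted with "vmalloc: no area";
find_vmap_area() sees every vmalloc-range mapping. A kernel-context sketch
of the difference (not standalone-runnable; variables invented):

	/* Sketch: contrasting the two lookups for a vm_map_ram() buffer. */
	void *buf = vm_map_ram(pages, nr_pages, NUMA_NO_NODE);

	struct vm_struct *vm = find_vm_area(buf);
	/* vm == NULL: vm_map_ram() areas carry no vm_struct. */

	struct vmap_area *va = find_vmap_area((unsigned long)buf);
	/* va != NULL: bounds available as va->va_start .. va->va_end,
	 * which is what the new check above uses. */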

mm/vmalloc.c

@@ -1798,7 +1798,7 @@ static void free_unmap_vmap_area(struct vmap_area *va)
 	free_vmap_area_noflush(va);
 }
 
-static struct vmap_area *find_vmap_area(unsigned long addr)
+struct vmap_area *find_vmap_area(unsigned long addr)
 {
 	struct vmap_area *va;