From b9663a6ff8289a095d56d9a3a3f9c185a7b7b0d7 Mon Sep 17 00:00:00 2001
From: "Matthew Wilcox (Oracle)"
Date: Fri, 22 Apr 2022 13:20:21 -0400
Subject: [PATCH 1/2] tools: Add kmem_cache_alloc_lru()

Turn kmem_cache_alloc() into a wrapper around kmem_cache_alloc_lru().

Fixes: 9bbdc0f32409 ("xarray: use kmem_cache_alloc_lru to allocate xa_node")
Signed-off-by: Matthew Wilcox (Oracle)
Reported-by: Liam R. Howlett
Reported-by: Li Wang
---
 tools/include/linux/slab.h       | 8 +++++++-
 tools/testing/radix-tree/linux.c | 3 ++-
 2 files changed, 9 insertions(+), 2 deletions(-)

diff --git a/tools/include/linux/slab.h b/tools/include/linux/slab.h
index f41d8a0eb1a4..0616409513eb 100644
--- a/tools/include/linux/slab.h
+++ b/tools/include/linux/slab.h
@@ -28,7 +28,13 @@ static inline void *kzalloc(size_t size, gfp_t gfp)
 	return kmalloc(size, gfp | __GFP_ZERO);
 }
 
-void *kmem_cache_alloc(struct kmem_cache *cachep, int flags);
+struct list_lru;
+
+void *kmem_cache_alloc_lru(struct kmem_cache *cachep, struct list_lru *, int flags);
+static inline void *kmem_cache_alloc(struct kmem_cache *cachep, int flags)
+{
+	return kmem_cache_alloc_lru(cachep, NULL, flags);
+}
 void kmem_cache_free(struct kmem_cache *cachep, void *objp);
 
 struct kmem_cache *kmem_cache_create(const char *name, unsigned int size,
diff --git a/tools/testing/radix-tree/linux.c b/tools/testing/radix-tree/linux.c
index 81539f543954..d5c1bcba86fe 100644
--- a/tools/testing/radix-tree/linux.c
+++ b/tools/testing/radix-tree/linux.c
@@ -25,7 +25,8 @@ struct kmem_cache {
 	void (*ctor)(void *);
 };
 
-void *kmem_cache_alloc(struct kmem_cache *cachep, int gfp)
+void *kmem_cache_alloc_lru(struct kmem_cache *cachep, struct list_lru *lru,
+		int gfp)
 {
 	void *p;
 

From 63b1898fffcd8bd81905b95104ecc52b45a97e21 Mon Sep 17 00:00:00 2001
From: "Matthew Wilcox (Oracle)"
Date: Fri, 22 Apr 2022 13:23:12 -0400
Subject: [PATCH 2/2] XArray: Disallow sibling entries of nodes

There is a race between xas_split() and xas_load() which can result in
the wrong page being returned, and thus data corruption.  Fortunately,
it's hard to hit (syzbot took three months to find it) and often guarded
with VM_BUG_ON().

The anatomy of this race is:

thread A                        thread B
order-9 page is stored at index 0x200
                                lookup of page at index 0x274
page split starts
                                load of sibling entry at offset 9
stores nodes at offsets 8-15
                                load of entry at offset 8

The entry at offset 8 turns out to be a node, and so we descend into it,
and load the page at index 0x234 instead of 0x274.  This is hard to fix
on the split side; we could replace the entire node that contains the
order-9 page instead of replacing the eight entries.  Fixing it on the
lookup side is easier; just disallow sibling entries that point to
nodes.  This cannot ever be a useful thing as the descent would not
know the correct offset to use within the new node.

The test suite continues to pass, but I have not added a new test for
this bug.
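
A test would look something like the sketch below.  This is only an
untested sketch, not part of this patch: it assumes the
tools/testing/radix-tree harness (which links lib/xarray.c, liburcu
and pthreads), it elides error handling and teardown, and the
splitter/loader names are illustrative.

#include <assert.h>
#include <pthread.h>
#include <urcu.h>
#include <linux/xarray.h>

static DEFINE_XARRAY(xa);

/* Repeatedly store an order-9 entry at index 0x200 and split it */
static void *splitter(void *unused)
{
	unsigned long i;

	rcu_register_thread();
	for (;;) {
		XA_STATE_ORDER(xas, &xa, 0x200, 9);

		/* One order-9 entry covers indices 0x200-0x3ff */
		do {
			xas_lock(&xas);
			xas_store(&xas, xa_mk_value(0x200));
			xas_unlock(&xas);
		} while (xas_nomem(&xas, GFP_KERNEL));

		/* Split it, following the __split_huge_page() pattern */
		xas_set_order(&xas, 0x200, 0);
		xas_split_alloc(&xas, xa_mk_value(0x200), 9, GFP_KERNEL);
		xas_lock(&xas);
		xas_split(&xas, xa_mk_value(0x200), 9);
		xas_unlock(&xas);

		/* Make each index distinguishable after the split */
		for (i = 0x200; i < 0x400; i++)
			xa_store(&xa, i, xa_mk_value(i), GFP_KERNEL);
	}
	return NULL;
}

/* Concurrently look up 0x274; another index's entry means we hit the bug */
static void *loader(void *unused)
{
	rcu_register_thread();
	for (;;) {
		void *entry = xa_load(&xa, 0x274);

		assert(!entry || entry == xa_mk_value(0x200) ||
				entry == xa_mk_value(0x274));
	}
	return NULL;
}

int main(void)
{
	pthread_t thread;

	pthread_create(&thread, NULL, splitter, NULL);
	loader(NULL);	/* never returns; asserts if corruption is seen */
	return 0;
}

With the fix applied, a lookup which races with the split sees
XA_RETRY_ENTRY and restarts instead of descending through the stale
sibling entry.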
Reported-by: syzbot+cf4cf13056f85dec2c40@syzkaller.appspotmail.com
Tested-by: syzbot+cf4cf13056f85dec2c40@syzkaller.appspotmail.com
Fixes: 6b24ca4a1a8d ("mm: Use multi-index entries in the page cache")
Signed-off-by: Matthew Wilcox (Oracle)
---
 lib/xarray.c | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/lib/xarray.c b/lib/xarray.c
index 4acc88ea7c21..54e646e8e6ee 100644
--- a/lib/xarray.c
+++ b/lib/xarray.c
@@ -207,6 +207,8 @@ static void *xas_descend(struct xa_state *xas, struct xa_node *node)
 	if (xa_is_sibling(entry)) {
 		offset = xa_to_sibling(entry);
 		entry = xa_entry(xas->xa, node, offset);
+		if (node->shift && xa_is_node(entry))
+			entry = XA_RETRY_ENTRY;
 	}
 
 	xas->xa_offset = offset;
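
A note on why substituting XA_RETRY_ENTRY is sufficient: lockless
readers already treat a retry entry as "restart the walk from the
head", so a lookup which observes the transient state of a sibling
entry pointing at a node simply walks the tree again and finds the
fully split subtree.  For context, this is the retry loop in xa_load()
(quoted from lib/xarray.c with one comment added; the function itself
is unchanged by this patch):

void *xa_load(struct xarray *xa, unsigned long index)
{
	XA_STATE(xas, xa, index);
	void *entry;

	rcu_read_lock();
	do {
		entry = xas_load(&xas);
		if (xa_is_zero(entry))
			entry = NULL;
	} while (xas_retry(&xas, entry));	/* restarts on XA_RETRY_ENTRY */
	rcu_read_unlock();

	return entry;
}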