Merge branch 'akpm' (patches from Andrew)

Merge misc fixes from Andrew Morton:
 "13 patches.

  Subsystems affected by this patch series: mm (kasan, pagealloc, rmap,
  hmm, and hugetlb), and hfs"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
  mm/hugetlb: fix refs calculation from unaligned @vaddr
  hfs: add lock nesting notation to hfs_find_init
  hfs: fix high memory mapping in hfs_bnode_read
  hfs: add missing clean-up in hfs_fill_super
  lib/test_hmm: remove set but unused page variable
  mm: fix the try_to_unmap prototype for !CONFIG_MMU
  mm/page_alloc: further fix __alloc_pages_bulk() return value
  mm/page_alloc: correct return value when failing at preparing
  mm/page_alloc: avoid page allocator recursion with pagesets.lock held
  Revert "mm/page_alloc: make should_fail_alloc_page() static"
  kasan: fix build by including kernel.h
  kasan: add memzero init for unaligned size at DEBUG
  mm: move helper to check slub_debug_enabled
Committed by Linus Torvalds on 2021-07-15 12:17:05 -07:00
12 changed files with 96 additions and 41 deletions

fs/hfs/bfind.c

@@ -25,7 +25,19 @@ int hfs_find_init(struct hfs_btree *tree, struct hfs_find_data *fd)
 	fd->key = ptr + tree->max_key_len + 2;
 	hfs_dbg(BNODE_REFS, "find_init: %d (%p)\n",
 		tree->cnid, __builtin_return_address(0));
-	mutex_lock(&tree->tree_lock);
+	switch (tree->cnid) {
+	case HFS_CAT_CNID:
+		mutex_lock_nested(&tree->tree_lock, CATALOG_BTREE_MUTEX);
+		break;
+	case HFS_EXT_CNID:
+		mutex_lock_nested(&tree->tree_lock, EXTENTS_BTREE_MUTEX);
+		break;
+	case HFS_ATTR_CNID:
+		mutex_lock_nested(&tree->tree_lock, ATTR_BTREE_MUTEX);
+		break;
+	default:
+		return -EINVAL;
+	}
 	return 0;
 }
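
The switch above replaces a plain mutex_lock() so that lockdep can tell the three B-tree locks apart when one is taken while another is held. Below is a minimal kernel-context sketch of the same lockdep-subclass technique; struct demo_tree and both helper functions are invented for illustration and are not part of the patch.

#include <linux/mutex.h>

struct demo_tree {
	struct mutex tree_lock;
};

/* All instances are initialised at one call site, so lockdep places their
 * tree_locks in a single lock class; holding two of them at once would
 * otherwise be reported as a possible recursive deadlock. */
static void demo_tree_init(struct demo_tree *tree)
{
	mutex_init(&tree->tree_lock);
}

static void demo_lock_both(struct demo_tree *catalog, struct demo_tree *extents)
{
	/* Distinct subclasses tell lockdep this catalog -> extents nesting is
	 * intentional, mirroring CATALOG_BTREE_MUTEX/EXTENTS_BTREE_MUTEX. */
	mutex_lock_nested(&catalog->tree_lock, 0);
	mutex_lock_nested(&extents->tree_lock, 1);

	/* ... work that needs both B-trees ... */

	mutex_unlock(&extents->tree_lock);
	mutex_unlock(&catalog->tree_lock);
}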

fs/hfs/bnode.c

@@ -15,16 +15,31 @@
 
 #include "btree.h"
 
-void hfs_bnode_read(struct hfs_bnode *node, void *buf,
-		int off, int len)
+void hfs_bnode_read(struct hfs_bnode *node, void *buf, int off, int len)
 {
 	struct page *page;
+	int pagenum;
+	int bytes_read;
+	int bytes_to_read;
+	void *vaddr;
 
 	off += node->page_offset;
-	page = node->page[0];
+	pagenum = off >> PAGE_SHIFT;
+	off &= ~PAGE_MASK; /* compute page offset for the first page */
 
-	memcpy(buf, kmap(page) + off, len);
-	kunmap(page);
+	for (bytes_read = 0; bytes_read < len; bytes_read += bytes_to_read) {
+		if (pagenum >= node->tree->pages_per_bnode)
+			break;
+		page = node->page[pagenum];
+		bytes_to_read = min_t(int, len - bytes_read, PAGE_SIZE - off);
+
+		vaddr = kmap_atomic(page);
+		memcpy(buf + bytes_read, vaddr + off, bytes_to_read);
+		kunmap_atomic(vaddr);
+
+		pagenum++;
+		off = 0; /* page offset only applies to the first page */
+	}
 }
 
 u16 hfs_bnode_read_u16(struct hfs_bnode *node, int off)
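
The old code mapped only node->page[0] with a single kmap()/memcpy(), so a read whose off + len crossed a page boundary ran past the one page that was actually mapped. The stand-alone sketch below reproduces just the chunking arithmetic of the new loop; the PAGE_* constants, the MIN macro, and the example offsets are local assumptions for the illustration, not kernel definitions.

#include <stdio.h>

#define PAGE_SHIFT 12                 /* assume 4 KiB pages for the example */
#define PAGE_SIZE  (1 << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))
#define MIN(a, b)  ((a) < (b) ? (a) : (b))

int main(void)
{
	int off = 4000, len = 200;        /* a read that crosses a page boundary */
	int pagenum = off >> PAGE_SHIFT;  /* first node page touched */
	int bytes_read, bytes_to_read;

	off &= ~PAGE_MASK;                /* offset within that first page */
	for (bytes_read = 0; bytes_read < len; bytes_read += bytes_to_read) {
		bytes_to_read = MIN(len - bytes_read, PAGE_SIZE - off);
		printf("page %d: copy %d bytes starting at offset %d\n",
		       pagenum, bytes_to_read, off);
		pagenum++;
		off = 0;                  /* only the first page has an offset */
	}
	return 0;                         /* here the read splits as 96 + 104 bytes */
}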

fs/hfs/btree.h

@@ -13,6 +13,13 @@ typedef int (*btree_keycmp)(const btree_key *, const btree_key *);
 
 #define NODE_HASH_SIZE	256
 
+/* B-tree mutex nested subclasses */
+enum hfs_btree_mutex_classes {
+	CATALOG_BTREE_MUTEX,
+	EXTENTS_BTREE_MUTEX,
+	ATTR_BTREE_MUTEX,
+};
+
 /* A HFS BTree held in memory */
 struct hfs_btree {
 	struct super_block *sb;
fs/hfs/super.c

@@ -420,14 +420,12 @@ static int hfs_fill_super(struct super_block *sb, void *data, int silent)
 	if (!res) {
 		if (fd.entrylength > sizeof(rec) || fd.entrylength < 0) {
 			res = -EIO;
-			goto bail;
+			goto bail_hfs_find;
 		}
 		hfs_bnode_read(fd.bnode, &rec, fd.entryoffset, fd.entrylength);
 	}
-	if (res) {
-		hfs_find_exit(&fd);
-		goto bail_no_root;
-	}
+	if (res)
+		goto bail_hfs_find;
 	res = -EINVAL;
 	root_inode = hfs_iget(sb, &fd.search_key->cat, &rec);
 	hfs_find_exit(&fd);
@@ -443,6 +441,8 @@ static int hfs_fill_super(struct super_block *sb, void *data, int silent)
 	/* everything's okay */
 	return 0;
 
+bail_hfs_find:
+	hfs_find_exit(&fd);
 bail_no_root:
 	pr_err("get root inode failed\n");
 bail:
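
The new bail_hfs_find label sits just above bail_no_root, so every failure after a successful hfs_find_init() now passes through hfs_find_exit() exactly once. Below is a stand-alone sketch of that label-ordering pattern; the helper names are invented for illustration and only print so the control flow is visible.

#include <stdio.h>
#include <stdlib.h>

/* Invented helpers standing in for hfs_find_init()/hfs_find_exit() and the
 * root-inode lookup in hfs_fill_super(). */
static int  find_init(void) { puts("find_init"); return 0; }
static void find_exit(void) { puts("find_exit"); }
static int  load_root(void) { puts("load root"); return -1; /* simulate failure */ }

static int fill_super_sketch(void)
{
	int err;

	err = find_init();
	if (err)
		goto bail;              /* nothing to undo yet */

	err = load_root();
	if (err)
		goto bail_find;         /* analogous to the new bail_hfs_find label */

	find_exit();
	return 0;

bail_find:
	find_exit();                    /* the clean-up the old error path skipped */
bail:
	return err;
}

int main(void)
{
	return fill_super_sketch() ? EXIT_FAILURE : EXIT_SUCCESS;
}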