mirror of
https://github.com/torvalds/linux.git
synced 2024-11-26 14:12:06 +00:00
Merge branch 'akpm' (patches from Andrew)
Merge misc fixes from Andrew Morton:
 "Three fixes"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
  mm/ksm.c: don't WARN if page is still mapped in remove_stable_node()
  mm/memory_hotplug: don't access uninitialized memmaps in shrink_zone_span()
  Revert "fs: ocfs2: fix possible null-pointer dereferences in ocfs2_xa_prepare_entry()"
This commit is contained in:
commit
cc079039c9
fs/ocfs2/xattr.c

@@ -1490,6 +1490,18 @@ static int ocfs2_xa_check_space(struct ocfs2_xa_loc *loc,
         return loc->xl_ops->xlo_check_space(loc, xi);
 }
 
+static void ocfs2_xa_add_entry(struct ocfs2_xa_loc *loc, u32 name_hash)
+{
+        loc->xl_ops->xlo_add_entry(loc, name_hash);
+        loc->xl_entry->xe_name_hash = cpu_to_le32(name_hash);
+        /*
+         * We can't leave the new entry's xe_name_offset at zero or
+         * add_namevalue() will go nuts. We set it to the size of our
+         * storage so that it can never be less than any other entry.
+         */
+        loc->xl_entry->xe_name_offset = cpu_to_le16(loc->xl_size);
+}
+
 static void ocfs2_xa_add_namevalue(struct ocfs2_xa_loc *loc,
                                    struct ocfs2_xattr_info *xi)
 {
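The comment in the re-added ocfs2_xa_add_entry() explains why a fresh entry's xe_name_offset is seeded with the full storage size instead of being left at zero. Below is a minimal userspace sketch of that idea (hypothetical struct and function names, not the ocfs2 layout code): name/value data packs downward from the end of the storage area, so the smallest recorded offset marks where free space ends, and a placeholder offset equal to the storage size keeps a not-yet-filled entry from distorting that calculation.

#include <stdint.h>
#include <stdio.h>

struct model_entry {
        uint16_t name_offset;   /* offset of this entry's name/value blob */
};

static uint16_t lowest_used_offset(const struct model_entry *entries,
                                   int count, uint16_t storage_size)
{
        uint16_t low = storage_size;    /* nothing stored yet */

        for (int i = 0; i < count; i++)
                if (entries[i].name_offset < low)
                        low = entries[i].name_offset;
        return low;
}

int main(void)
{
        uint16_t storage_size = 4096;
        struct model_entry entries[3] = {
                { .name_offset = 3800 },                /* existing entry */
                { .name_offset = 3500 },                /* existing entry */
                { .name_offset = storage_size },        /* new entry, placeholder */
        };

        /* The placeholder entry never lowers the "end of free space" value. */
        printf("free space ends at offset %u\n",
               (unsigned int)lowest_used_offset(entries, 3, storage_size));
        return 0;
}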
@@ -2121,31 +2133,29 @@ static int ocfs2_xa_prepare_entry(struct ocfs2_xa_loc *loc,
         if (rc)
                 goto out;
 
-        if (!loc->xl_entry) {
-                rc = -EINVAL;
-                goto out;
-        }
-
-        if (ocfs2_xa_can_reuse_entry(loc, xi)) {
-                orig_value_size = loc->xl_entry->xe_value_size;
-                rc = ocfs2_xa_reuse_entry(loc, xi, ctxt);
-                if (rc)
-                        goto out;
-                goto alloc_value;
-        }
+        if (loc->xl_entry) {
+                if (ocfs2_xa_can_reuse_entry(loc, xi)) {
+                        orig_value_size = loc->xl_entry->xe_value_size;
+                        rc = ocfs2_xa_reuse_entry(loc, xi, ctxt);
+                        if (rc)
+                                goto out;
+                        goto alloc_value;
+                }
 
-        if (!ocfs2_xattr_is_local(loc->xl_entry)) {
-                orig_clusters = ocfs2_xa_value_clusters(loc);
-                rc = ocfs2_xa_value_truncate(loc, 0, ctxt);
-                if (rc) {
-                        mlog_errno(rc);
-                        ocfs2_xa_cleanup_value_truncate(loc,
-                                                        "overwriting",
-                                                        orig_clusters);
-                        goto out;
+                if (!ocfs2_xattr_is_local(loc->xl_entry)) {
+                        orig_clusters = ocfs2_xa_value_clusters(loc);
+                        rc = ocfs2_xa_value_truncate(loc, 0, ctxt);
+                        if (rc) {
+                                mlog_errno(rc);
+                                ocfs2_xa_cleanup_value_truncate(loc,
+                                                                "overwriting",
+                                                                orig_clusters);
+                                goto out;
+                        }
                 }
-        }
-        ocfs2_xa_wipe_namevalue(loc);
+                ocfs2_xa_wipe_namevalue(loc);
+        } else
+                ocfs2_xa_add_entry(loc, name_hash);
 
         /*
          * If we get here, we have a blank entry.  Fill it.  We grow our
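The restored branch treats a missing loc->xl_entry as the normal "create a new entry" case rather than an error; the reverted patch returned -EINVAL there, which made it impossible to add a brand-new extended attribute. A toy model of the two control flows (plain C with invented toy_* names, not the kernel code):

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct toy_loc {
        bool has_entry;         /* models loc->xl_entry != NULL */
};

/* Restored behaviour: reuse an existing slot, or add a fresh one. */
static int prepare_entry_restored(struct toy_loc *loc)
{
        if (loc->has_entry) {
                /* reuse/truncate the old value, then overwrite in place */
                return 0;
        }
        /* no entry yet: add one (stands in for ocfs2_xa_add_entry()) */
        loc->has_entry = true;
        return 0;
}

/* Behaviour of the reverted patch: a missing entry became a hard error. */
static int prepare_entry_patched(struct toy_loc *loc)
{
        if (!loc->has_entry)
                return -EINVAL; /* a brand-new xattr could never be created */
        return 0;
}

int main(void)
{
        struct toy_loc fresh = { .has_entry = false };

        printf("restored flow: %d\n", prepare_entry_restored(&fresh));
        fresh.has_entry = false;
        printf("patched flow:  %d\n", prepare_entry_patched(&fresh));
        return 0;
}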
mm/ksm.c (14 changed lines)
@@ -885,13 +885,13 @@ static int remove_stable_node(struct stable_node *stable_node)
                 return 0;
         }
 
-        if (WARN_ON_ONCE(page_mapped(page))) {
-                /*
-                 * This should not happen: but if it does, just refuse to let
-                 * merge_across_nodes be switched - there is no need to panic.
-                 */
-                err = -EBUSY;
-        } else {
+        /*
+         * Page could be still mapped if this races with __mmput() running in
+         * between ksm_exit() and exit_mmap(). Just refuse to let
+         * merge_across_nodes/max_page_sharing be switched.
+         */
+        err = -EBUSY;
+        if (!page_mapped(page)) {
                 /*
                  * The stable node did not yet appear stale to get_ksm_page(),
                  * since that allows for an unmapped ksm page to be recognized
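The ksm fix drops the WARN_ON_ONCE() because a stable node's page can legitimately still be mapped when remove_stable_node() races with a task exiting between ksm_exit() and exit_mmap(); refusing with -EBUSY is the correct response, not a warning. A standalone sketch of the new shape of the check (hypothetical toy_* types, not the kernel implementation):

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct toy_page {
        int mapcount;           /* stands in for page_mapped() state */
};

static bool toy_page_mapped(const struct toy_page *page)
{
        return page->mapcount > 0;
}

/* New shape of the check: no warning, just refuse and let the caller retry. */
static int toy_remove_stable_node(struct toy_page *page)
{
        int err = -EBUSY;       /* assume busy until proven unmapped */

        if (!toy_page_mapped(page)) {
                /* safe to detach the stable node and drop the page */
                err = 0;
        }
        return err;
}

int main(void)
{
        struct toy_page racing = { .mapcount = 1 };     /* exit_mmap() not done yet */
        struct toy_page idle   = { .mapcount = 0 };

        printf("still mapped:  %d\n", toy_remove_stable_node(&racing));
        printf("unmapped page: %d\n", toy_remove_stable_node(&idle));
        return 0;
}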
mm/memory_hotplug.c

@@ -331,7 +331,7 @@ static unsigned long find_smallest_section_pfn(int nid, struct zone *zone,
                                              unsigned long end_pfn)
 {
         for (; start_pfn < end_pfn; start_pfn += PAGES_PER_SUBSECTION) {
-                if (unlikely(!pfn_valid(start_pfn)))
+                if (unlikely(!pfn_to_online_page(start_pfn)))
                         continue;
 
                 if (unlikely(pfn_to_nid(start_pfn) != nid))
@@ -356,7 +356,7 @@ static unsigned long find_biggest_section_pfn(int nid, struct zone *zone,
         /* pfn is the end pfn of a memory section. */
         pfn = end_pfn - 1;
         for (; pfn >= start_pfn; pfn -= PAGES_PER_SUBSECTION) {
-                if (unlikely(!pfn_valid(pfn)))
+                if (unlikely(!pfn_to_online_page(pfn)))
                         continue;
 
                 if (unlikely(pfn_to_nid(pfn) != nid))
@@ -415,7 +415,7 @@ static void shrink_zone_span(struct zone *zone, unsigned long start_pfn,
          */
         pfn = zone_start_pfn;
         for (; pfn < zone_end_pfn; pfn += PAGES_PER_SUBSECTION) {
-                if (unlikely(!pfn_valid(pfn)))
+                if (unlikely(!pfn_to_online_page(pfn)))
                         continue;
 
                 if (page_zone(pfn_to_page(pfn)) != zone)
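In the three loops above, pfn_valid() is replaced by pfn_to_online_page(). pfn_valid() only guarantees that a memmap entry exists for the pfn; for memory that has been added but never onlined, that memmap may still be uninitialized, so reading its node or zone is unsafe. pfn_to_online_page() additionally requires the backing section to be online and returns NULL otherwise, which the loops skip with continue. A userspace model of the distinction (invented toy_* types, not the kernel helpers):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct toy_page {
        int nid;                /* meaningful only once the section is online */
};

struct toy_section {
        bool has_memmap;        /* models pfn_valid() */
        bool online;            /* models the section's online state */
        struct toy_page page;
};

static struct toy_page *toy_pfn_to_online_page(struct toy_section *sec)
{
        if (!sec->has_memmap || !sec->online)
                return NULL;    /* never expose an uninitialized memmap */
        return &sec->page;
}

int main(void)
{
        struct toy_section added_not_onlined = { .has_memmap = true, .online = false };
        struct toy_section onlined = { .has_memmap = true, .online = true,
                                       .page = { .nid = 0 } };

        /* The loops in shrink_zone_span() would "continue" on NULL here. */
        printf("added-but-offline: %p\n",
               (void *)toy_pfn_to_online_page(&added_not_onlined));
        printf("onlined:           %p\n",
               (void *)toy_pfn_to_online_page(&onlined));
        return 0;
}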
@@ -471,6 +471,16 @@ static void __remove_zone(struct zone *zone, unsigned long start_pfn,
         struct pglist_data *pgdat = zone->zone_pgdat;
         unsigned long flags;
 
+#ifdef CONFIG_ZONE_DEVICE
+        /*
+         * Zone shrinking code cannot properly deal with ZONE_DEVICE. So
+         * we will not try to shrink the zones - which is okay as
+         * set_zone_contiguous() cannot deal with ZONE_DEVICE either way.
+         */
+        if (zone_idx(zone) == ZONE_DEVICE)
+                return;
+#endif
+
         pgdat_resize_lock(zone->zone_pgdat, &flags);
         shrink_zone_span(zone, start_pfn, start_pfn + nr_pages);
         update_pgdat_span(pgdat);
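The last hunk guards __remove_zone() against ZONE_DEVICE: as the added comment says, the span-shrinking code cannot deal with device memory, so device zones are skipped before any span accounting is touched. A minimal sketch of that early-return pattern (userspace C with invented toy_* names, not the kernel code):

#include <stdio.h>

enum toy_zone_type { TOY_ZONE_NORMAL, TOY_ZONE_MOVABLE, TOY_ZONE_DEVICE };

struct toy_zone {
        enum toy_zone_type idx;         /* models zone_idx(zone) */
        unsigned long start_pfn;
        unsigned long spanned_pages;
};

static void toy_remove_zone(struct toy_zone *zone, unsigned long nr_pages)
{
        if (zone->idx == TOY_ZONE_DEVICE)
                return;                 /* never shrink device zones */

        /* crude stand-in for shrink_zone_span() + update_pgdat_span() */
        if (nr_pages > zone->spanned_pages)
                nr_pages = zone->spanned_pages;
        zone->spanned_pages -= nr_pages;
}

int main(void)
{
        struct toy_zone normal = { TOY_ZONE_NORMAL, 0, 1024 };
        struct toy_zone device = { TOY_ZONE_DEVICE, 4096, 1024 };

        toy_remove_zone(&normal, 256);
        toy_remove_zone(&device, 256);
        printf("normal spans %lu pages, device still spans %lu pages\n",
               normal.spanned_pages, device.spanned_pages);
        return 0;
}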