forked from Minki/linux
mm/page_alloc: fix tracepoint mm_page_alloc_zone_locked()
Currently, the tracepoint mm_page_alloc_zone_locked() doesn't show correct
information.
First, when alloc_flags has ALLOC_HARDER/ALLOC_CMA, a page can be allocated
from MIGRATE_HIGHATOMIC/MIGRATE_CMA. Nevertheless, the tracepoint uses the
requested migration type, not MIGRATE_HIGHATOMIC or MIGRATE_CMA.
Second, after commit 44042b4498
("mm/page_alloc: allow high-order pages
to be stored on the per-cpu lists") the percpu list can store high-order
pages. But the tracepoint determines whether it is a refill of the percpu
list by comparing the requested order with 0.
To handle these problems, make mm_page_alloc_zone_locked() only be called
by __rmqueue_smallest() with the correct migration type. With a new
argument called percpu_refill, it can show roughly whether it is a refill
of the percpu list.
Link: https://lkml.kernel.org/r/20220512025307.57924-1-vvghjk1234@gmail.com
Signed-off-by: Wonhyuk Yang <vvghjk1234@gmail.com>
Acked-by: Mel Gorman <mgorman@suse.de>
Cc: Baik Song An <bsahn@etri.re.kr>
Cc: Hong Yeon Kim <kimhy@etri.re.kr>
Cc: Taeung Song <taeung@reallinux.co.kr>
Cc: <linuxgeek@linuxgeek.io>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Ingo Molnar <mingo@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
This commit is contained in:
parent
3645b5ec0a
commit
10e0f75302
@ -229,20 +229,23 @@ TRACE_EVENT(mm_page_alloc,
|
|||||||
|
|
||||||
DECLARE_EVENT_CLASS(mm_page,
|
DECLARE_EVENT_CLASS(mm_page,
|
||||||
|
|
||||||
TP_PROTO(struct page *page, unsigned int order, int migratetype),
|
TP_PROTO(struct page *page, unsigned int order, int migratetype,
|
||||||
|
int percpu_refill),
|
||||||
|
|
||||||
TP_ARGS(page, order, migratetype),
|
TP_ARGS(page, order, migratetype, percpu_refill),
|
||||||
|
|
||||||
TP_STRUCT__entry(
|
TP_STRUCT__entry(
|
||||||
__field( unsigned long, pfn )
|
__field( unsigned long, pfn )
|
||||||
__field( unsigned int, order )
|
__field( unsigned int, order )
|
||||||
__field( int, migratetype )
|
__field( int, migratetype )
|
||||||
|
__field( int, percpu_refill )
|
||||||
),
|
),
|
||||||
|
|
||||||
TP_fast_assign(
|
TP_fast_assign(
|
||||||
__entry->pfn = page ? page_to_pfn(page) : -1UL;
|
__entry->pfn = page ? page_to_pfn(page) : -1UL;
|
||||||
__entry->order = order;
|
__entry->order = order;
|
||||||
__entry->migratetype = migratetype;
|
__entry->migratetype = migratetype;
|
||||||
|
__entry->percpu_refill = percpu_refill;
|
||||||
),
|
),
|
||||||
|
|
||||||
TP_printk("page=%p pfn=0x%lx order=%u migratetype=%d percpu_refill=%d",
|
TP_printk("page=%p pfn=0x%lx order=%u migratetype=%d percpu_refill=%d",
|
||||||
@ -250,14 +253,15 @@ DECLARE_EVENT_CLASS(mm_page,
|
|||||||
__entry->pfn != -1UL ? __entry->pfn : 0,
|
__entry->pfn != -1UL ? __entry->pfn : 0,
|
||||||
__entry->order,
|
__entry->order,
|
||||||
__entry->migratetype,
|
__entry->migratetype,
|
||||||
__entry->order == 0)
|
__entry->percpu_refill)
|
||||||
);
|
);
|
||||||
|
|
||||||
DEFINE_EVENT(mm_page, mm_page_alloc_zone_locked,
|
DEFINE_EVENT(mm_page, mm_page_alloc_zone_locked,
|
||||||
|
|
||||||
TP_PROTO(struct page *page, unsigned int order, int migratetype),
|
TP_PROTO(struct page *page, unsigned int order, int migratetype,
|
||||||
|
int percpu_refill),
|
||||||
|
|
||||||
TP_ARGS(page, order, migratetype)
|
TP_ARGS(page, order, migratetype, percpu_refill)
|
||||||
);
|
);
|
||||||
|
|
||||||
TRACE_EVENT(mm_page_pcpu_drain,
|
TRACE_EVENT(mm_page_pcpu_drain,
|
||||||
|
@ -2466,6 +2466,9 @@ struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
|
|||||||
del_page_from_free_list(page, zone, current_order);
|
del_page_from_free_list(page, zone, current_order);
|
||||||
expand(zone, page, order, current_order, migratetype);
|
expand(zone, page, order, current_order, migratetype);
|
||||||
set_pcppage_migratetype(page, migratetype);
|
set_pcppage_migratetype(page, migratetype);
|
||||||
|
trace_mm_page_alloc_zone_locked(page, order, migratetype,
|
||||||
|
pcp_allowed_order(order) &&
|
||||||
|
migratetype < MIGRATE_PCPTYPES);
|
||||||
return page;
|
return page;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -2989,7 +2992,7 @@ __rmqueue(struct zone *zone, unsigned int order, int migratetype,
|
|||||||
zone_page_state(zone, NR_FREE_PAGES) / 2) {
|
zone_page_state(zone, NR_FREE_PAGES) / 2) {
|
||||||
page = __rmqueue_cma_fallback(zone, order);
|
page = __rmqueue_cma_fallback(zone, order);
|
||||||
if (page)
|
if (page)
|
||||||
goto out;
|
return page;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
retry:
|
retry:
|
||||||
@ -3002,9 +3005,6 @@ retry:
|
|||||||
alloc_flags))
|
alloc_flags))
|
||||||
goto retry;
|
goto retry;
|
||||||
}
|
}
|
||||||
out:
|
|
||||||
if (page)
|
|
||||||
trace_mm_page_alloc_zone_locked(page, order, migratetype);
|
|
||||||
return page;
|
return page;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -3723,11 +3723,8 @@ struct page *rmqueue(struct zone *preferred_zone,
|
|||||||
* reserved for high-order atomic allocation, so order-0
|
* reserved for high-order atomic allocation, so order-0
|
||||||
* request should skip it.
|
* request should skip it.
|
||||||
*/
|
*/
|
||||||
if (order > 0 && alloc_flags & ALLOC_HARDER) {
|
if (order > 0 && alloc_flags & ALLOC_HARDER)
|
||||||
page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC);
|
page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC);
|
||||||
if (page)
|
|
||||||
trace_mm_page_alloc_zone_locked(page, order, migratetype);
|
|
||||||
}
|
|
||||||
if (!page) {
|
if (!page) {
|
||||||
page = __rmqueue(zone, order, migratetype, alloc_flags);
|
page = __rmqueue(zone, order, migratetype, alloc_flags);
|
||||||
if (!page)
|
if (!page)
|
||||||
|
Loading…
Reference in New Issue
Block a user