mm/vmstat: Use preempt_[dis|en]able_nested()

Replace the open-coded CONFIG_PREEMPT_RT conditional
preempt_disable()/preempt_enable() pairs with the new helper functions,
which hide the underlying implementation details.
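
For reference, the helpers introduced earlier in this series in
include/linux/preempt.h look roughly like this (a sketch of the pattern,
not necessarily the verbatim upstream definitions):

static __always_inline void preempt_disable_nested(void)
{
	/* On PREEMPT_RT the per-cpu RMW needs real preemption protection */
	if (IS_ENABLED(CONFIG_PREEMPT_RT))
		preempt_disable();
	else
		barrier();
}

static __always_inline void preempt_enable_nested(void)
{
	if (IS_ENABLED(CONFIG_PREEMPT_RT))
		preempt_enable();
	else
		barrier();
}

On !PREEMPT_RT kernels both helpers reduce to a compiler barrier, so the
generated code in the vmstat fast paths is unchanged there; on PREEMPT_RT
they disable and re-enable preemption around the per-cpu
read-modify-write sequences below.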

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Michal Hocko <mhocko@suse.com>
Link: https://lore.kernel.org/r/20220825164131.402717-4-bigeasy@linutronix.de
commit 7a025e91ab
parent 93f6d4e189
Author: Thomas Gleixner <tglx@linutronix.de>
Date:   2022-08-25 18:41:26 +02:00

diff --git a/mm/vmstat.c b/mm/vmstat.c
--- a/mm/vmstat.c
+++ b/mm/vmstat.c

@@ -355,8 +355,7 @@ void __mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
 	 * CPU migrations and preemption potentially corrupts a counter so
 	 * disable preemption.
 	 */
-	if (IS_ENABLED(CONFIG_PREEMPT_RT))
-		preempt_disable();
+	preempt_disable_nested();
 
 	x = delta + __this_cpu_read(*p);
 
@@ -368,8 +367,7 @@ void __mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
 	}
 	__this_cpu_write(*p, x);
 
-	if (IS_ENABLED(CONFIG_PREEMPT_RT))
-		preempt_enable();
+	preempt_enable_nested();
 }
 EXPORT_SYMBOL(__mod_zone_page_state);
 
@@ -393,8 +391,7 @@ void __mod_node_page_state(struct pglist_data *pgdat, enum node_stat_item item,
 	}
 
 	/* See __mod_zone_page_state */
-	if (IS_ENABLED(CONFIG_PREEMPT_RT))
-		preempt_disable();
+	preempt_disable_nested();
 
 	x = delta + __this_cpu_read(*p);
 
@@ -406,8 +403,7 @@ void __mod_node_page_state(struct pglist_data *pgdat, enum node_stat_item item,
 	}
 	__this_cpu_write(*p, x);
 
-	if (IS_ENABLED(CONFIG_PREEMPT_RT))
-		preempt_enable();
+	preempt_enable_nested();
 }
 EXPORT_SYMBOL(__mod_node_page_state);
 
@@ -441,8 +437,7 @@ void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
 	s8 v, t;
 
 	/* See __mod_node_page_state */
-	if (IS_ENABLED(CONFIG_PREEMPT_RT))
-		preempt_disable();
+	preempt_disable_nested();
 
 	v = __this_cpu_inc_return(*p);
 	t = __this_cpu_read(pcp->stat_threshold);
@@ -453,8 +448,7 @@ void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
 		__this_cpu_write(*p, -overstep);
 	}
 
-	if (IS_ENABLED(CONFIG_PREEMPT_RT))
-		preempt_enable();
+	preempt_enable_nested();
 }
 
 void __inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
@@ -466,8 +460,7 @@ void __inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
 	VM_WARN_ON_ONCE(vmstat_item_in_bytes(item));
 
 	/* See __mod_node_page_state */
-	if (IS_ENABLED(CONFIG_PREEMPT_RT))
-		preempt_disable();
+	preempt_disable_nested();
 
 	v = __this_cpu_inc_return(*p);
 	t = __this_cpu_read(pcp->stat_threshold);
@@ -478,8 +471,7 @@ void __inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
 		__this_cpu_write(*p, -overstep);
 	}
 
-	if (IS_ENABLED(CONFIG_PREEMPT_RT))
-		preempt_enable();
+	preempt_enable_nested();
 }
 
 void __inc_zone_page_state(struct page *page, enum zone_stat_item item)
@@ -501,8 +493,7 @@ void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
 	s8 v, t;
 
 	/* See __mod_node_page_state */
-	if (IS_ENABLED(CONFIG_PREEMPT_RT))
-		preempt_disable();
+	preempt_disable_nested();
 
 	v = __this_cpu_dec_return(*p);
 	t = __this_cpu_read(pcp->stat_threshold);
@@ -513,8 +504,7 @@ void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
 		__this_cpu_write(*p, overstep);
 	}
 
-	if (IS_ENABLED(CONFIG_PREEMPT_RT))
-		preempt_enable();
+	preempt_enable_nested();
 }
 
 void __dec_node_state(struct pglist_data *pgdat, enum node_stat_item item)
@@ -526,8 +516,7 @@ void __dec_node_state(struct pglist_data *pgdat, enum node_stat_item item)
 	VM_WARN_ON_ONCE(vmstat_item_in_bytes(item));
 
 	/* See __mod_node_page_state */
-	if (IS_ENABLED(CONFIG_PREEMPT_RT))
-		preempt_disable();
+	preempt_disable_nested();
 
 	v = __this_cpu_dec_return(*p);
 	t = __this_cpu_read(pcp->stat_threshold);
@@ -538,8 +527,7 @@ void __dec_node_state(struct pglist_data *pgdat, enum node_stat_item item)
 		__this_cpu_write(*p, overstep);
 	}
 
-	if (IS_ENABLED(CONFIG_PREEMPT_RT))
-		preempt_enable();
+	preempt_enable_nested();
 }
 
 void __dec_zone_page_state(struct page *page, enum zone_stat_item item)
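
For illustration, __inc_zone_state() after this patch reads as below
(reassembled from the hunks above; the declarations come from the
surrounding kernel source and may differ slightly between versions):

void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
	struct per_cpu_zonestat __percpu *pcp = zone->per_cpu_zonestats;
	s8 __percpu *p = pcp->vm_stat_diff + item;
	s8 v, t;

	/* See __mod_node_page_state */
	preempt_disable_nested();

	v = __this_cpu_inc_return(*p);
	t = __this_cpu_read(pcp->stat_threshold);
	if (unlikely(v > t)) {
		s8 overstep = t >> 1;

		zone_page_state_add(v + overstep, zone, item);
		__this_cpu_write(*p, -overstep);
	}

	preempt_enable_nested();
}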