forked from Minki/linux
Revert "printk: add functions to prefer direct printing"
This reverts commit 2bb2b7b57f.
The testing of 5.19 release candidates revealed missing synchronization
between early and regular console functionality.
It would be possible to start the console kthreads later as a workaround.
But it is clear that the console lock serialized console drivers against
each other. Removing that serialization opens a big area of possible
problems that were not considered by the people involved in the
development and review.
printk() is crucial for debugging kernel issues, and console output is a
very important part of it. The number of console drivers is huge, and a
proper review would take some time. As a result, this work needs to be
reverted for 5.19.
Link: https://lore.kernel.org/r/YrBdjVwBOVgLfHyb@alley
Signed-off-by: Petr Mladek <pmladek@suse.com>
Link: https://lore.kernel.org/r/20220623145157.21938-7-pmladek@suse.com
This commit is contained in:
parent
5831788afb
commit
07a22b6194
@ -578,7 +578,6 @@ void __handle_sysrq(int key, bool check_mask)
|
||||
|
||||
rcu_sysrq_start();
|
||||
rcu_read_lock();
|
||||
printk_prefer_direct_enter();
|
||||
/*
|
||||
* Raise the apparent loglevel to maximum so that the sysrq header
|
||||
* is shown to provide the user with positive feedback. We do not
|
||||
@ -620,7 +619,6 @@ void __handle_sysrq(int key, bool check_mask)
|
||||
pr_cont("\n");
|
||||
console_loglevel = orig_log_level;
|
||||
}
|
||||
printk_prefer_direct_exit();
|
||||
rcu_read_unlock();
|
||||
rcu_sysrq_end();
|
||||
|
||||
|
@ -170,9 +170,6 @@ extern void __printk_safe_exit(void);
|
||||
#define printk_deferred_enter __printk_safe_enter
|
||||
#define printk_deferred_exit __printk_safe_exit
|
||||
|
||||
extern void printk_prefer_direct_enter(void);
|
||||
extern void printk_prefer_direct_exit(void);
|
||||
|
||||
extern bool pr_flush(int timeout_ms, bool reset_on_progress);
|
||||
|
||||
/*
|
||||
@ -225,14 +222,6 @@ static inline void printk_deferred_exit(void)
|
||||
{
|
||||
}
|
||||
|
||||
static inline void printk_prefer_direct_enter(void)
|
||||
{
|
||||
}
|
||||
|
||||
static inline void printk_prefer_direct_exit(void)
|
||||
{
|
||||
}
|
||||
|
||||
static inline bool pr_flush(int timeout_ms, bool reset_on_progress)
|
||||
{
|
||||
return true;
|
||||
|
@ -127,8 +127,6 @@ static void check_hung_task(struct task_struct *t, unsigned long timeout)
|
||||
* complain:
|
||||
*/
|
||||
if (sysctl_hung_task_warnings) {
|
||||
printk_prefer_direct_enter();
|
||||
|
||||
if (sysctl_hung_task_warnings > 0)
|
||||
sysctl_hung_task_warnings--;
|
||||
pr_err("INFO: task %s:%d blocked for more than %ld seconds.\n",
|
||||
@ -144,8 +142,6 @@ static void check_hung_task(struct task_struct *t, unsigned long timeout)
|
||||
|
||||
if (sysctl_hung_task_all_cpu_backtrace)
|
||||
hung_task_show_all_bt = true;
|
||||
|
||||
printk_prefer_direct_exit();
|
||||
}
|
||||
|
||||
touch_nmi_watchdog();
|
||||
@ -208,17 +204,12 @@ static void check_hung_uninterruptible_tasks(unsigned long timeout)
|
||||
}
|
||||
unlock:
|
||||
rcu_read_unlock();
|
||||
if (hung_task_show_lock) {
|
||||
printk_prefer_direct_enter();
|
||||
if (hung_task_show_lock)
|
||||
debug_show_all_locks();
|
||||
printk_prefer_direct_exit();
|
||||
}
|
||||
|
||||
if (hung_task_show_all_bt) {
|
||||
hung_task_show_all_bt = false;
|
||||
printk_prefer_direct_enter();
|
||||
trigger_all_cpu_backtrace();
|
||||
printk_prefer_direct_exit();
|
||||
}
|
||||
|
||||
if (hung_task_call_panic)
|
||||
|
@ -579,8 +579,6 @@ void __warn(const char *file, int line, void *caller, unsigned taint,
|
||||
{
|
||||
disable_trace_on_warning();
|
||||
|
||||
printk_prefer_direct_enter();
|
||||
|
||||
if (file)
|
||||
pr_warn("WARNING: CPU: %d PID: %d at %s:%d %pS\n",
|
||||
raw_smp_processor_id(), current->pid, file, line,
|
||||
@ -610,8 +608,6 @@ void __warn(const char *file, int line, void *caller, unsigned taint,
|
||||
|
||||
/* Just a warning, don't kill lockdep. */
|
||||
add_taint(taint, LOCKDEP_STILL_OK);
|
||||
|
||||
printk_prefer_direct_exit();
|
||||
}
|
||||
|
||||
#ifndef __WARN_FLAGS
|
||||
|
@ -362,34 +362,6 @@ static int console_msg_format = MSG_FORMAT_DEFAULT;
|
||||
static DEFINE_MUTEX(syslog_lock);
|
||||
|
||||
#ifdef CONFIG_PRINTK
|
||||
static atomic_t printk_prefer_direct = ATOMIC_INIT(0);
|
||||
|
||||
/**
|
||||
* printk_prefer_direct_enter - cause printk() calls to attempt direct
|
||||
* printing to all enabled consoles
|
||||
*
|
||||
* Since it is not possible to call into the console printing code from any
|
||||
* context, there is no guarantee that direct printing will occur.
|
||||
*
|
||||
* This globally effects all printk() callers.
|
||||
*
|
||||
* Context: Any context.
|
||||
*/
|
||||
void printk_prefer_direct_enter(void)
|
||||
{
|
||||
atomic_inc(&printk_prefer_direct);
|
||||
}
|
||||
|
||||
/**
|
||||
* printk_prefer_direct_exit - restore printk() behavior
|
||||
*
|
||||
* Context: Any context.
|
||||
*/
|
||||
void printk_prefer_direct_exit(void)
|
||||
{
|
||||
WARN_ON(atomic_dec_if_positive(&printk_prefer_direct) < 0);
|
||||
}
|
||||
|
||||
DECLARE_WAIT_QUEUE_HEAD(log_wait);
|
||||
/* All 3 protected by @syslog_lock. */
|
||||
/* the next printk record to read by syslog(READ) or /proc/kmsg */
|
||||
|
@ -647,7 +647,6 @@ static void print_cpu_stall(unsigned long gps)
|
||||
* See Documentation/RCU/stallwarn.rst for info on how to debug
|
||||
* RCU CPU stall warnings.
|
||||
*/
|
||||
printk_prefer_direct_enter();
|
||||
trace_rcu_stall_warning(rcu_state.name, TPS("SelfDetected"));
|
||||
pr_err("INFO: %s self-detected stall on CPU\n", rcu_state.name);
|
||||
raw_spin_lock_irqsave_rcu_node(rdp->mynode, flags);
|
||||
@ -685,7 +684,6 @@ static void print_cpu_stall(unsigned long gps)
|
||||
*/
|
||||
set_tsk_need_resched(current);
|
||||
set_preempt_need_resched();
|
||||
printk_prefer_direct_exit();
|
||||
}
|
||||
|
||||
static void check_cpu_stall(struct rcu_data *rdp)
|
||||
|
@ -447,11 +447,9 @@ static int __orderly_reboot(void)
|
||||
ret = run_cmd(reboot_cmd);
|
||||
|
||||
if (ret) {
|
||||
printk_prefer_direct_enter();
|
||||
pr_warn("Failed to start orderly reboot: forcing the issue\n");
|
||||
emergency_sync();
|
||||
kernel_restart(NULL);
|
||||
printk_prefer_direct_exit();
|
||||
}
|
||||
|
||||
return ret;
|
||||
@ -464,7 +462,6 @@ static int __orderly_poweroff(bool force)
|
||||
ret = run_cmd(poweroff_cmd);
|
||||
|
||||
if (ret && force) {
|
||||
printk_prefer_direct_enter();
|
||||
pr_warn("Failed to start orderly shutdown: forcing the issue\n");
|
||||
|
||||
/*
|
||||
@ -474,7 +471,6 @@ static int __orderly_poweroff(bool force)
|
||||
*/
|
||||
emergency_sync();
|
||||
kernel_power_off();
|
||||
printk_prefer_direct_exit();
|
||||
}
|
||||
|
||||
return ret;
|
||||
@ -532,8 +528,6 @@ EXPORT_SYMBOL_GPL(orderly_reboot);
|
||||
*/
|
||||
static void hw_failure_emergency_poweroff_func(struct work_struct *work)
|
||||
{
|
||||
printk_prefer_direct_enter();
|
||||
|
||||
/*
|
||||
* We have reached here after the emergency shutdown waiting period has
|
||||
* expired. This means orderly_poweroff has not been able to shut off
|
||||
@ -550,8 +544,6 @@ static void hw_failure_emergency_poweroff_func(struct work_struct *work)
|
||||
*/
|
||||
pr_emerg("Hardware protection shutdown failed. Trying emergency restart\n");
|
||||
emergency_restart();
|
||||
|
||||
printk_prefer_direct_exit();
|
||||
}
|
||||
|
||||
static DECLARE_DELAYED_WORK(hw_failure_emergency_poweroff_work,
|
||||
@ -590,13 +582,11 @@ void hw_protection_shutdown(const char *reason, int ms_until_forced)
|
||||
{
|
||||
static atomic_t allow_proceed = ATOMIC_INIT(1);
|
||||
|
||||
printk_prefer_direct_enter();
|
||||
|
||||
pr_emerg("HARDWARE PROTECTION shutdown (%s)\n", reason);
|
||||
|
||||
/* Shutdown should be initiated only once. */
|
||||
if (!atomic_dec_and_test(&allow_proceed))
|
||||
goto out;
|
||||
return;
|
||||
|
||||
/*
|
||||
* Queue a backup emergency shutdown in the event of
|
||||
@ -604,8 +594,6 @@ void hw_protection_shutdown(const char *reason, int ms_until_forced)
|
||||
*/
|
||||
hw_failure_emergency_poweroff(ms_until_forced);
|
||||
orderly_poweroff(true);
|
||||
out:
|
||||
printk_prefer_direct_exit();
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(hw_protection_shutdown);
|
||||
|
||||
|
@ -424,8 +424,6 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
|
||||
/* Start period for the next softlockup warning. */
|
||||
update_report_ts();
|
||||
|
||||
printk_prefer_direct_enter();
|
||||
|
||||
pr_emerg("BUG: soft lockup - CPU#%d stuck for %us! [%s:%d]\n",
|
||||
smp_processor_id(), duration,
|
||||
current->comm, task_pid_nr(current));
|
||||
@ -444,8 +442,6 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
|
||||
add_taint(TAINT_SOFTLOCKUP, LOCKDEP_STILL_OK);
|
||||
if (softlockup_panic)
|
||||
panic("softlockup: hung tasks");
|
||||
|
||||
printk_prefer_direct_exit();
|
||||
}
|
||||
|
||||
return HRTIMER_RESTART;
|
||||
|
@ -135,8 +135,6 @@ static void watchdog_overflow_callback(struct perf_event *event,
|
||||
if (__this_cpu_read(hard_watchdog_warn) == true)
|
||||
return;
|
||||
|
||||
printk_prefer_direct_enter();
|
||||
|
||||
pr_emerg("Watchdog detected hard LOCKUP on cpu %d\n",
|
||||
this_cpu);
|
||||
print_modules();
|
||||
@ -157,8 +155,6 @@ static void watchdog_overflow_callback(struct perf_event *event,
|
||||
if (hardlockup_panic)
|
||||
nmi_panic(regs, "Hard LOCKUP");
|
||||
|
||||
printk_prefer_direct_exit();
|
||||
|
||||
__this_cpu_write(hard_watchdog_warn, true);
|
||||
return;
|
||||
}
|
||||
|
Loading…
Reference in New Issue
Block a user