/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * internal.h - printk internal definitions
 */
#include <linux/console.h>
#include <linux/percpu.h>
#include <linux/types.h>

#if defined(CONFIG_PRINTK) && defined(CONFIG_SYSCTL)
struct ctl_table;
void __init printk_sysctl_init(void);
int devkmsg_sysctl_set_loglvl(const struct ctl_table *table, int write,
			      void *buffer, size_t *lenp, loff_t *ppos);
#else
#define printk_sysctl_init() do { } while (0)
#endif

#define con_printk(lvl, con, fmt, ...)			\
	printk(lvl pr_fmt("%s%sconsole [%s%d] " fmt),	\
	       (con->flags & CON_NBCON) ? "" : "legacy ",	\
	       (con->flags & CON_BOOT) ? "boot" : "",		\
	       con->name, con->index, ##__VA_ARGS__)
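
/*
 * Illustrative use (not part of this header): console registration code can
 * report a console with a single call, e.g.
 *
 *	con_printk(KERN_INFO, newcon, "enabled\n");
 *
 * which expands to a printk() line such as "legacy bootconsole [ttyS0]
 * enabled" or "console [ttyS0] enabled", depending on the CON_NBCON and
 * CON_BOOT bits in the flags. ("newcon" is a hypothetical struct console
 * pointer used only for this sketch.)
 */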

/*
 * Identify if legacy printing is forced in a dedicated kthread. If
 * true, all printing via console lock occurs within a dedicated
 * legacy printer thread. The only exception is on panic, after the
 * nbcon consoles have had their chance to print the panic messages
 * first.
 */
#ifdef CONFIG_PREEMPT_RT
# define force_legacy_kthread()	(true)
#else
# define force_legacy_kthread()	(false)
#endif

#ifdef CONFIG_PRINTK

#ifdef CONFIG_PRINTK_CALLER
#define PRINTK_PREFIX_MAX 48
#else
#define PRINTK_PREFIX_MAX 32
#endif

/*
 * the maximum size of a formatted record (i.e. with prefix added
 * per line and dropped messages or in extended message format)
 */
#define PRINTK_MESSAGE_MAX 2048

/* the maximum size allowed to be reserved for a record */
#define PRINTKRB_RECORD_MAX 1024

/* Flags for a single printk record. */
enum printk_info_flags {
	LOG_NEWLINE	= 2,	/* text ended with a newline */
	LOG_CONT	= 8,	/* text is a fragment of a continuation line */
};

struct printk_ringbuffer;
struct dev_printk_info;

extern struct printk_ringbuffer *prb;
extern bool printk_kthreads_running;

__printf(4, 0)
int vprintk_store(int facility, int level,
		  const struct dev_printk_info *dev_info,
		  const char *fmt, va_list args);
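
/*
 * Illustrative call chain (a sketch, not taken from this file): printk()
 * reaches vprintk_store() via vprintk_emit(), e.g.
 *
 *	vprintk_emit(0, LOGLEVEL_DEFAULT, NULL, fmt, args);
 *		-> vprintk_store(0, LOGLEVEL_DEFAULT, NULL, fmt, args);
 *
 * vprintk_store() only commits the record to the ringbuffer; flushing the
 * consoles remains the caller's responsibility.
 */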

__printf(1, 0) int vprintk_default(const char *fmt, va_list args);
__printf(1, 0) int vprintk_deferred(const char *fmt, va_list args);

void __printk_safe_enter(void);
void __printk_safe_exit(void);

bool printk_percpu_data_ready(void);

#define printk_safe_enter_irqsave(flags)	\
	do {					\
		local_irq_save(flags);		\
		__printk_safe_enter();		\
	} while (0)

#define printk_safe_exit_irqrestore(flags)	\
	do {					\
		__printk_safe_exit();		\
		local_irq_restore(flags);	\
	} while (0)
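
/*
 * Usage sketch (illustrative only): callers bracket a region in which
 * printk() must not recurse into the console/semaphore code, e.g.
 *
 *	unsigned long flags;
 *
 *	printk_safe_enter_irqsave(flags);
 *	...	// printk() in here is handled via the deferred/safe path
 *	printk_safe_exit_irqrestore(flags);
 *
 * The enter/exit calls must be balanced, and the region should stay short
 * because interrupts remain disabled throughout.
 */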

void defer_console_output(void);
bool is_printk_legacy_deferred(void);

u16 printk_parse_prefix(const char *text, int *level,
			enum printk_info_flags *flags);
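
/*
 * Illustrative use (assumed from the prototype above): callers strip a
 * leading "<N>"-style level prefix before storing the message text, e.g.
 *
 *	enum printk_info_flags flags = 0;
 *	int level = LOGLEVEL_DEFAULT;
 *	u16 prefix_len = printk_parse_prefix(text, &level, &flags);
 *
 *	text += prefix_len;
 *	text_len -= prefix_len;
 *
 * Here "text" and "text_len" are hypothetical local variables.
 */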
void console_lock_spinning_enable(void);
int console_lock_spinning_disable_and_check(int cookie);

u64 nbcon_seq_read(struct console *con);
void nbcon_seq_force(struct console *con, u64 seq);
bool nbcon_alloc(struct console *con);
void nbcon_free(struct console *con);
enum nbcon_prio nbcon_get_default_prio(void);
void nbcon_atomic_flush_pending(void);
bool nbcon_legacy_emit_next_record(struct console *con, bool *handover,
				   int cookie, bool use_atomic);
bool nbcon_kthread_create(struct console *con);
void nbcon_kthread_stop(struct console *con);
void nbcon_kthreads_wake(void);

/*
 * Check if the given console is currently capable and allowed to print
 * records. Note that this function does not consider the current context,
 * which can also play a role in deciding if @con can be used to print
 * records.
 */
static inline bool console_is_usable(struct console *con, short flags, bool use_atomic)
{
	if (!(flags & CON_ENABLED))
		return false;

	if ((flags & CON_SUSPENDED))
		return false;

	if (flags & CON_NBCON) {
		/* The write_atomic() callback is optional. */
		if (use_atomic && !con->write_atomic)
			return false;

		/*
		 * For the !use_atomic case, @printk_kthreads_running is not
		 * checked because the write_thread() callback is also used
		 * via the legacy loop when the printer threads are not
		 * available.
		 */
	} else {
		if (!con->write)
			return false;
	}

	/*
	 * Console drivers may assume that per-cpu resources have been
	 * allocated. So unless they're explicitly marked as being able to
	 * cope (CON_ANYTIME) don't call them until this CPU is officially up.
	 */
	if (!cpu_online(raw_smp_processor_id()) && !(flags & CON_ANYTIME))
		return false;

	return true;
}
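
/*
 * Illustrative caller sketch (not part of this header): a print loop is
 * expected to test usability per console under the console-list SRCU read
 * lock, roughly
 *
 *	int cookie = console_srcu_read_lock();
 *
 *	for_each_console_srcu(con) {
 *		short flags = console_srcu_read_flags(con);
 *
 *		if (!console_is_usable(con, flags, false))
 *			continue;
 *		// ... emit the next record on this console ...
 *	}
 *	console_srcu_read_unlock(cookie);
 */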

/**
 * nbcon_kthread_wake - Wake up a console printing thread
 * @con:	Console to operate on
 */
static inline void nbcon_kthread_wake(struct console *con)
{
	/*
	 * Guarantee any new records can be seen by tasks preparing to wait
	 * before this context checks if the rcuwait is empty.
	 *
	 * The full memory barrier in rcuwait_wake_up() pairs with the full
	 * memory barrier within set_current_state() of
	 * ___rcuwait_wait_event(), which is called after prepare_to_rcuwait()
	 * adds the waiter but before it has checked the wait condition.
	 *
	 * This pairs with nbcon_kthread_func:A.
	 */
	rcuwait_wake_up(&con->rcuwait); /* LMM(nbcon_kthread_wake:A) */
}

#else

#define PRINTK_PREFIX_MAX 0
#define PRINTK_MESSAGE_MAX 0
#define PRINTKRB_RECORD_MAX 0

#define printk_kthreads_running (false)

/*
 * In !PRINTK builds we still export console_sem
 * semaphore and some of the console functions (console_unlock()/etc.), so
 * printk-safe must preserve the existing local IRQ guarantees.
 */
#define printk_safe_enter_irqsave(flags) local_irq_save(flags)
#define printk_safe_exit_irqrestore(flags) local_irq_restore(flags)

static inline bool printk_percpu_data_ready(void) { return false; }
static inline void defer_console_output(void) { }
static inline bool is_printk_legacy_deferred(void) { return false; }
static inline u64 nbcon_seq_read(struct console *con) { return 0; }
static inline void nbcon_seq_force(struct console *con, u64 seq) { }
static inline bool nbcon_alloc(struct console *con) { return false; }
static inline void nbcon_free(struct console *con) { }
static inline enum nbcon_prio nbcon_get_default_prio(void) { return NBCON_PRIO_NONE; }
static inline void nbcon_atomic_flush_pending(void) { }
static inline bool nbcon_legacy_emit_next_record(struct console *con, bool *handover,
						 int cookie, bool use_atomic) { return false; }
|
printk: nbcon: Introduce printer kthreads
Provide the main implementation for running a printer kthread
per nbcon console that is takeover/handover aware. This
includes:
- new mandatory write_thread() callback
- kthread creation
- kthread main printing loop
- kthread wakeup mechanism
- kthread shutdown
kthread creation is a bit tricky because consoles may register
before kthreads can be created. In such cases, registration
will succeed, even though no kthread exists. Once kthreads can
be created, an early_initcall will set @printk_kthreads_ready.
If there are no registered boot consoles, the early_initcall
creates the kthreads for all registered nbcon consoles. If
kthread creation fails, the related console is unregistered.
If there are registered boot consoles when
@printk_kthreads_ready is set, no kthreads are created until
the final boot console unregisters.
Once kthread creation finally occurs, @printk_kthreads_running
is set so that the system knows kthreads are available for all
registered nbcon consoles.
If @printk_kthreads_running is already set when the console
is registering, the kthread is created during registration. If
kthread creation fails, the registration will fail.
Until @printk_kthreads_running is set, console printing occurs
directly via the console_lock.
kthread shutdown on system shutdown/reboot is necessary to
ensure the printer kthreads finish their printing so that the
system can cleanly transition back to direct printing via the
console_lock in order to reliably push out the final
shutdown/reboot messages. @printk_kthreads_running is cleared
before shutting down the individual kthreads.
The kthread uses a new mandatory write_thread() callback that
is called with both device_lock() and the console context
acquired.
The console ownership handling is necessary for synchronization
against write_atomic() which is synchronized only via the
console context ownership.
The device_lock() serializes acquiring the console context with
NBCON_PRIO_NORMAL. It is needed in case the device_lock() does
not disable preemption. It prevents the following race:
CPU0                                    CPU1

[ task A ]

nbcon_context_try_acquire()
  # success with NORMAL prio
  # .unsafe == false;  // safe for takeover

[ schedule: task A -> B ]

                                        WARN_ON()
                                          nbcon_atomic_flush_pending()
                                            nbcon_context_try_acquire()
                                              # success with EMERGENCY prio
                                              # flushing
                                              nbcon_context_release()

                                              # HERE: con->nbcon_state is free
                                              #       to take by anyone !!!

nbcon_context_try_acquire()
  # success with NORMAL prio [ task B ]

[ schedule: task B -> A ]

nbcon_enter_unsafe()
  nbcon_context_can_proceed()

BUG: nbcon_context_can_proceed() returns "true" because
     the console is owned by a context on CPU0 with
     NBCON_PRIO_NORMAL.

     But it should return "false". The console is owned
     by a context from task B and we do the check
     in a context from task A.
Note that with these changes, the printer kthreads do not yet
take over full responsibility for nbcon printing during normal
operation. These changes only focus on the lifecycle of the
kthreads.
Co-developed-by: John Ogness <john.ogness@linutronix.de>
Signed-off-by: John Ogness <john.ogness@linutronix.de>
Signed-off-by: Thomas Gleixner (Intel) <tglx@linutronix.de>
Reviewed-by: Petr Mladek <pmladek@suse.com>
Link: https://lore.kernel.org/r/20240904120536.115780-7-john.ogness@linutronix.de
Signed-off-by: Petr Mladek <pmladek@suse.com>
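A condensed sketch of the per-record locking order described above.
nbcon_emit_one() stands in for the acquire/write_thread()/release step and is
not necessarily the in-tree helper; the device_lock()/device_unlock()
callbacks are the ones referenced in the commit text:
static bool nbcon_kthread_emit_one(struct console *con)
{
	unsigned long flags;
	bool emitted;

	/*
	 * The device_lock() serializes NBCON_PRIO_NORMAL context
	 * acquisition on this console, closing the task A/task B race
	 * shown above even when the lock does not disable preemption.
	 */
	con->device_lock(con, &flags);

	/*
	 * Stand-in for: acquire the console context with NORMAL priority,
	 * call con->write_thread() and release the context again. A
	 * higher-priority owner can still take over at any time.
	 */
	emitted = nbcon_emit_one(con);

	con->device_unlock(con, flags);

	return emitted;
}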
2024-09-04 12:05:25 +00:00
|
|
|
static inline void nbcon_kthread_wake(struct console *con) { }
|
2024-09-04 12:05:28 +00:00
|
|
|
static inline void nbcon_kthreads_wake(void) { }
|
2023-09-16 19:20:00 +00:00
|
|
|
|
2024-09-04 12:05:23 +00:00
|
|
|
static inline bool console_is_usable(struct console *con, short flags,
|
|
|
|
bool use_atomic) { return false; }
|
2024-08-20 06:29:42 +00:00
|
|
|
|
printk: introduce per-cpu safe_print seq buffer
This patch extends the idea of NMI per-cpu buffers to regions
that may cause recursive printk() calls and possible deadlocks.
Namely, printk() can't handle printk calls from scheduler code
or printk() calls from lock debugging code (spin_dump() for instance),
because those may be called with `sem->lock' already taken or with other
'critical' locks held (p->pi_lock, etc.). An example of such a deadlock
is:
vprintk_emit()
 console_unlock()
  up()                          << raw_spin_lock_irqsave(&sem->lock, flags);
   wake_up_process()
    try_to_wake_up()
     ttwu_queue()
      ttwu_activate()
       activate_task()
        enqueue_task()
         enqueue_task_fair()
          cfs_rq_of()
           task_of()
            WARN_ON_ONCE(!entity_is_task(se))
             vprintk_emit()
              console_trylock()
               down_trylock()
                raw_spin_lock_irqsave(&sem->lock, flags)
                ^^^^ deadlock
and some other cases.
Just like in the NMI implementation, the solution uses a per-cpu
`printk_func' pointer to 'redirect' printk() calls to a 'safe'
callback that stores messages in a per-cpu buffer and flushes
them back to the logbuf buffer later.
Usage example:
  printk()
   printk_safe_enter_irqsave(flags)
   //
   // any printk() call from here will end up in vprintk_safe(),
   // that stores messages in a special per-CPU buffer.
   //
   printk_safe_exit_irqrestore(flags)
The 'redirection' mechanism, though, has been reworked, as suggested
by Petr Mladek. Instead of using a per-cpu @printk_func callback we now
keep a per-cpu printk-context variable and call either the default or the
nmi vprintk function depending on its value. printk_nmi_enter/exit and
printk_safe_enter/exit thus just set/clear the corresponding bits in the
printk-context variable.
The patch only adds printk_safe support; it is not used yet.
Link: http://lkml.kernel.org/r/20161227141611.940-4-sergey.senozhatsky@gmail.com
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Jan Kara <jack@suse.cz>
Cc: Tejun Heo <tj@kernel.org>
Cc: Calvin Owens <calvinowens@fb.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Peter Hurley <peter@hurleysoftware.com>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
Signed-off-by: Petr Mladek <pmladek@suse.com>
Reviewed-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
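The context-tracking half of the mechanism can be sketched as follows. Note
that this reflects the later, simplified form in which redirected calls are
deferred rather than stored in the since-removed per-CPU buffers;
vprintk_deferred()/vprintk_default() are assumed to be the usual internal
entry points:
/* One nesting counter per CPU; non-zero means "inside a safe section". */
static DEFINE_PER_CPU(int, printk_context);

void __printk_safe_enter(void)
{
	this_cpu_inc(printk_context);
}

void __printk_safe_exit(void)
{
	this_cpu_dec(printk_context);
}

asmlinkage int vprintk(const char *fmt, va_list args)
{
	/* Redirect to the deferred path while in a safe section or NMI. */
	if (this_cpu_read(printk_context) || in_nmi())
		return vprintk_deferred(fmt, args);

	return vprintk_default(fmt, args);
}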
2016-12-27 14:16:06 +00:00
|
|
|
#endif /* CONFIG_PRINTK */
|
2023-01-09 10:07:56 +00:00
|
|
|
|
2024-08-20 06:29:50 +00:00
|
|
|
extern bool have_boot_console;
|
2024-08-20 06:29:56 +00:00
|
|
|
extern bool have_nbcon_console;
|
|
|
|
extern bool have_legacy_console;
|
2024-08-20 06:29:55 +00:00
|
|
|
extern bool legacy_allow_panic_sync;
|
2024-08-20 06:29:50 +00:00
|
|
|
|
2024-08-20 06:29:56 +00:00
|
|
|
/**
|
|
|
|
* struct console_flush_type - Define available console flush methods
|
|
|
|
* @nbcon_atomic: Flush directly using nbcon_atomic() callback
|
2024-09-04 12:05:28 +00:00
|
|
|
* @nbcon_offload: Offload flush to printer thread
|
2024-08-20 06:29:56 +00:00
|
|
|
* @legacy_direct: Call the legacy loop in this context
|
2024-09-04 12:05:34 +00:00
|
|
|
* @legacy_offload: Offload the legacy loop into IRQ or legacy thread
|
2024-08-20 06:29:56 +00:00
|
|
|
*
|
|
|
|
* Note that the legacy loop also flushes the nbcon consoles.
|
|
|
|
*/
|
|
|
|
struct console_flush_type {
|
|
|
|
bool nbcon_atomic;
|
2024-09-04 12:05:28 +00:00
|
|
|
bool nbcon_offload;
|
2024-08-20 06:29:56 +00:00
|
|
|
bool legacy_direct;
|
|
|
|
bool legacy_offload;
|
|
|
|
};
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Identify which console flushing methods should be used in the context of
|
|
|
|
* the caller.
|
|
|
|
*/
|
|
|
|
static inline void printk_get_console_flush_type(struct console_flush_type *ft)
|
|
|
|
{
|
|
|
|
memset(ft, 0, sizeof(*ft));
|
|
|
|
|
|
|
|
switch (nbcon_get_default_prio()) {
|
|
|
|
case NBCON_PRIO_NORMAL:
|
2024-09-04 12:05:28 +00:00
|
|
|
if (have_nbcon_console && !have_boot_console) {
|
|
|
|
if (printk_kthreads_running)
|
|
|
|
ft->nbcon_offload = true;
|
|
|
|
else
|
|
|
|
ft->nbcon_atomic = true;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Legacy consoles are flushed directly when possible. */
|
|
|
|
if (have_legacy_console || have_boot_console) {
|
|
|
|
if (!is_printk_legacy_deferred())
|
|
|
|
ft->legacy_direct = true;
|
|
|
|
else
|
|
|
|
ft->legacy_offload = true;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
|
2024-08-20 06:29:57 +00:00
|
|
|
case NBCON_PRIO_EMERGENCY:
|
2024-08-20 06:29:56 +00:00
|
|
|
if (have_nbcon_console && !have_boot_console)
|
|
|
|
ft->nbcon_atomic = true;
|
|
|
|
|
|
|
|
/* Legacy consoles are flushed directly when possible. */
|
|
|
|
if (have_legacy_console || have_boot_console) {
|
|
|
|
if (!is_printk_legacy_deferred())
|
|
|
|
ft->legacy_direct = true;
|
|
|
|
else
|
|
|
|
ft->legacy_offload = true;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
|
|
|
|
case NBCON_PRIO_PANIC:
|
|
|
|
/*
|
|
|
|
* In panic, the nbcon consoles will directly print. But
|
|
|
|
* only allowed if there are no boot consoles.
|
|
|
|
*/
|
|
|
|
if (have_nbcon_console && !have_boot_console)
|
|
|
|
ft->nbcon_atomic = true;
|
|
|
|
|
|
|
|
if (have_legacy_console || have_boot_console) {
|
|
|
|
/*
|
|
|
|
* This is the same decision as NBCON_PRIO_NORMAL
|
|
|
|
* except that offloading never occurs in panic.
|
|
|
|
*
|
|
|
|
* Note that console_flush_on_panic() will flush
|
|
|
|
* legacy consoles anyway, even if unsafe.
|
|
|
|
*/
|
|
|
|
if (!is_printk_legacy_deferred())
|
|
|
|
ft->legacy_direct = true;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* In panic, if nbcon atomic printing occurs,
|
|
|
|
* the legacy consoles must remain silent until
|
|
|
|
* explicitly allowed.
|
|
|
|
*/
|
|
|
|
if (ft->nbcon_atomic && !legacy_allow_panic_sync)
|
|
|
|
ft->legacy_direct = false;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
|
|
|
|
default:
|
|
|
|
WARN_ON_ONCE(1);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
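An illustrative caller, loosely modeled on how vprintk_emit() consumes the
computed flush type (simplified; the real call sites differ in details such
as console-lock handover):
static void flush_after_emit(void)
{
	struct console_flush_type ft;

	printk_get_console_flush_type(&ft);

	if (ft.nbcon_atomic)
		nbcon_atomic_flush_pending();
	if (ft.nbcon_offload)
		nbcon_kthreads_wake();
	if (ft.legacy_direct) {
		/* Flush legacy (and boot) consoles under the console lock. */
		if (console_trylock())
			console_unlock();
	}
	if (ft.legacy_offload)
		defer_console_output();
}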
|
|
|
|
|
2023-09-16 19:20:02 +00:00
|
|
|
extern struct printk_buffers printk_shared_pbufs;
|
|
|
|
|
2023-01-09 10:07:56 +00:00
|
|
|
/**
|
|
|
|
* struct printk_buffers - Buffers to read/format/output printk messages.
|
|
|
|
* @outbuf: After formatting, contains text to output.
|
|
|
|
* @scratchbuf: Used as temporary ringbuffer reading and string-print space.
|
|
|
|
*/
|
|
|
|
struct printk_buffers {
|
printk: adjust string limit macros
The various internal size limit macros have names and/or values that
do not fit well to their current usage.
Rename the macros so that their purpose is clear and, if needed,
provide a more appropriate value. In general, the new macros and
values will lead to less memory usage. The new macros are...
PRINTK_MESSAGE_MAX:
This is the maximum size for a formatted message on a console,
devkmsg, or syslog. It does not matter which format the message has
(normal or extended). It replaces the use of CONSOLE_EXT_LOG_MAX for
console and devkmsg. It replaces the use of CONSOLE_LOG_MAX for
syslog.
Historically, normal messages have been allowed to print up to 1kB,
whereas extended messages have been allowed to print up to 8kB.
However, the difference in lengths of these message types is not
significant and in multi-line records, normal messages are probably
larger. Also, because 1kB is only slightly above the allowed record
size, multi-line normal messages could be easily truncated during
formatting.
This new macro should be significantly larger than the allowed
record size to allow sufficient space for extended or multi-line
prefix text. A value of 2kB should be plenty of space. For normal
messages this represents a doubling of the historically allowed
amount. For extended messages it reduces the excessive 8kB size,
thus reducing memory usage needed for message formatting.
PRINTK_PREFIX_MAX:
This is the maximum size allowed for a record prefix (used by
console and syslog). It replaces PREFIX_MAX. The value is left
unchanged.
PRINTKRB_RECORD_MAX:
This is the maximum size allowed to be reserved for a record in the
ringbuffer. It is used by all readers and writers with the printk
ringbuffer. It replaces LOG_LINE_MAX.
Previously this was set to "1kB - PREFIX_MAX", which makes some
sense if 1kB is the limit for normal message output and prefixes are
enabled. However, with the allowance of larger output and the
existence of multi-line records, the value is rather bizarre.
Round the value up to 1kB.
Signed-off-by: John Ogness <john.ogness@linutronix.de>
Reviewed-by: Petr Mladek <pmladek@suse.com>
Signed-off-by: Petr Mladek <pmladek@suse.com>
Link: https://lore.kernel.org/r/20230109100800.1085541-9-john.ogness@linutronix.de
2023-01-09 10:08:00 +00:00
|
|
|
char outbuf[PRINTK_MESSAGE_MAX];
|
|
|
|
char scratchbuf[PRINTKRB_RECORD_MAX];
|
2023-01-09 10:07:56 +00:00
|
|
|
};
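A small compile-time sanity check expressing the invariant implied by the
commit message above, assuming the values it describes (2kB formatted
message, 1kB record, plus prefix space); BUILD_BUG_ON is the standard kernel
compile-time assert:
static inline void printk_buffer_size_check(void)
{
	/* The formatted-output buffer must hold a full record plus prefix. */
	BUILD_BUG_ON(PRINTK_MESSAGE_MAX < PRINTKRB_RECORD_MAX + PRINTK_PREFIX_MAX);
}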
|
2023-01-09 10:07:57 +00:00
|
|
|
|
|
|
|
/**
|
|
|
|
* struct printk_message - Container for a prepared printk message.
|
|
|
|
* @pbufs: printk buffers used to prepare the message.
|
|
|
|
* @outbuf_len: The length of prepared text in @pbufs->outbuf to output. This
|
|
|
|
* does not count the terminator. A value of 0 means there is
|
|
|
|
* nothing to output and this record should be skipped.
|
|
|
|
* @seq: The sequence number of the record used for @pbufs->outbuf.
|
|
|
|
* @dropped: The number of dropped records from reading @seq.
|
|
|
|
*/
|
|
|
|
struct printk_message {
|
|
|
|
struct printk_buffers *pbufs;
|
|
|
|
unsigned int outbuf_len;
|
|
|
|
u64 seq;
|
|
|
|
unsigned long dropped;
|
|
|
|
};
|
2023-07-17 19:46:07 +00:00
|
|
|
|
|
|
|
bool other_cpu_in_panic(void);
|
2023-09-16 19:20:06 +00:00
|
|
|
bool printk_get_next_message(struct printk_message *pmsg, u64 seq,
|
|
|
|
bool is_extended, bool may_supress);
|
|
|
|
|
|
|
|
#ifdef CONFIG_PRINTK
|
|
|
|
void console_prepend_dropped(struct printk_message *pmsg, unsigned long dropped);
|
2024-09-04 12:05:30 +00:00
|
|
|
void console_prepend_replay(struct printk_message *pmsg);
|
2023-09-16 19:20:06 +00:00
|
|
|
#endif
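An illustrative consumer of the declarations above, loosely following the
legacy console emit path: format the next record into the shared buffers,
prepend a notice for any dropped records, then hand the text to the driver
(simplified; the in-tree caller also handles handover and extended output):
static void emit_next_record_sketch(struct console *con)
{
	struct printk_message pmsg = {
		.pbufs = &printk_shared_pbufs,
	};

	if (!printk_get_next_message(&pmsg, con->seq, false, true))
		return;

	if (pmsg.dropped)
		console_prepend_dropped(&pmsg, pmsg.dropped);

	/* outbuf_len == 0 means the record was suppressed; skip it. */
	if (pmsg.outbuf_len)
		con->write(con, pmsg.pbufs->outbuf, pmsg.outbuf_len);

	con->seq = pmsg.seq + 1;
}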
|