Besides checking whether the current console is an nbcon console, console->flags is also read to serve as an argument to console_is_usable(). But the CON_NBCON flag is special: it is set only once, when the console is registered, and never cleared. Therefore it is safe to read the flag locklessly while the console SRCU read lock is held (which is the case when iterating with for_each_console_srcu()).

Introduce a console_is_nbcon() helper for this lockless check. It makes it possible to remove the flags argument from console_is_usable() in the next patches.

Signed-off-by: Petr Mladek
Signed-off-by: Marcos Paulo de Souza
---
 include/linux/console.h   | 27 +++++++++++++++++++++++++++
 kernel/debug/kdb/kdb_io.c |  2 +-
 kernel/printk/nbcon.c     |  2 +-
 kernel/printk/printk.c    | 15 ++++++---------
 4 files changed, 35 insertions(+), 11 deletions(-)

diff --git a/include/linux/console.h b/include/linux/console.h
index 35c03fc4ed51..dd4ec7a5bff9 100644
--- a/include/linux/console.h
+++ b/include/linux/console.h
@@ -561,6 +561,33 @@ static inline void console_srcu_write_flags(struct console *con, short flags)
 	WRITE_ONCE(con->flags, flags);
 }
 
+/**
+ * console_is_nbcon - Locklessly check whether the console is nbcon
+ * @con: struct console pointer of console to check
+ *
+ * Requires console_srcu_read_lock to be held, which implies that @con might
+ * be a registered console. The purpose of holding console_srcu_read_lock is
+ * to guarantee that no exit/cleanup routines will run if the console
+ * is currently undergoing unregistration.
+ *
+ * If the caller is holding the console_list_lock or it is _certain_ that
+ * @con is not and will not become registered, the caller may read
+ * @con->flags directly instead.
+ *
+ * Context: Any context.
+ * Return: True when CON_NBCON flag is set.
+ */
+static inline bool console_is_nbcon(const struct console *con)
+{
+	WARN_ON_ONCE(!console_srcu_read_lock_is_held());
+
+	/*
+	 * The CON_NBCON flag is statically initialized and is never
+	 * set or cleared at runtime.
+	 */
+	return data_race(con->flags & CON_NBCON);
+}
+
 /* Variant of console_is_registered() when the console_list_lock is held. */
 static inline bool console_is_registered_locked(const struct console *con)
 {
diff --git a/kernel/debug/kdb/kdb_io.c b/kernel/debug/kdb/kdb_io.c
index 6ffb962392a4..d6de512b433a 100644
--- a/kernel/debug/kdb/kdb_io.c
+++ b/kernel/debug/kdb/kdb_io.c
@@ -596,7 +596,7 @@ static void kdb_msg_write(const char *msg, int msg_len)
 		if (c == dbg_io_ops->cons)
 			continue;
 
-		if (flags & CON_NBCON) {
+		if (console_is_nbcon(c)) {
 			struct nbcon_write_context wctxt = { };
 
 			/*
diff --git a/kernel/printk/nbcon.c b/kernel/printk/nbcon.c
index 13865ef85990..f0f42e212caa 100644
--- a/kernel/printk/nbcon.c
+++ b/kernel/printk/nbcon.c
@@ -1647,7 +1647,7 @@ static void __nbcon_atomic_flush_pending(u64 stop_seq)
 	for_each_console_srcu(con) {
 		short flags = console_srcu_read_flags(con);
 
-		if (!(flags & CON_NBCON))
+		if (!console_is_nbcon(con))
 			continue;
 
 		if (!console_is_usable(con, flags, NBCON_USE_ATOMIC))
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
index 5f4b84f9562e..bd0d574be3cf 100644
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -3200,7 +3200,7 @@ static bool console_flush_one_record(bool do_cond_resched, u64 *next_seq, bool *
 		 * nbcon consoles when the nbcon consoles cannot print via
 		 * their atomic or threaded flushing.
 		 */
-		if ((flags & CON_NBCON) && (ft.nbcon_atomic || ft.nbcon_offload))
+		if (console_is_nbcon(con) && (ft.nbcon_atomic || ft.nbcon_offload))
 			continue;
 
 		if (!console_is_usable(con, flags,
@@ -3209,7 +3209,7 @@ static bool console_flush_one_record(bool do_cond_resched, u64 *next_seq, bool *
 			continue;
 		any_usable = true;
 
-		if (flags & CON_NBCON) {
+		if (console_is_nbcon(con)) {
 			progress = nbcon_legacy_emit_next_record(con, handover, cookie,
 								 !do_cond_resched);
 			printk_seq = nbcon_seq_read(con);
@@ -3458,7 +3458,6 @@ void console_unblank(void)
 static void __console_rewind_all(void)
 {
 	struct console *c;
-	short flags;
 	int cookie;
 	u64 seq;
 
@@ -3466,9 +3465,7 @@ static void __console_rewind_all(void)
 
 	cookie = console_srcu_read_lock();
 	for_each_console_srcu(c) {
-		flags = console_srcu_read_flags(c);
-
-		if (flags & CON_NBCON) {
+		if (console_is_nbcon(c)) {
 			nbcon_seq_force(c, seq);
 		} else {
 			/*
@@ -3632,13 +3629,13 @@ static bool legacy_kthread_should_wakeup(void)
 		 * consoles when the nbcon consoles cannot print via their
 		 * atomic or threaded flushing.
 		 */
-		if ((flags & CON_NBCON) && (ft.nbcon_atomic || ft.nbcon_offload))
+		if (console_is_nbcon(con) && (ft.nbcon_atomic || ft.nbcon_offload))
 			continue;
 
 		if (!console_is_usable(con, flags, NBCON_USE_THREAD))
 			continue;
 
-		if (flags & CON_NBCON) {
+		if (console_is_nbcon(con)) {
 			printk_seq = nbcon_seq_read(con);
 		} else {
 			/*
@@ -4490,7 +4487,7 @@ static bool __pr_flush(struct console *con, int timeout_ms, bool reset_on_progre
 			if (!console_is_usable(c, flags, NBCON_USE_ANY))
 				continue;
 
-			if (flags & CON_NBCON) {
+			if (console_is_nbcon(c)) {
 				printk_seq = nbcon_seq_read(c);
 			} else {
 				printk_seq = c->seq;
-- 
2.52.0
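
For reference, a minimal sketch of the intended call pattern (illustrative only, not part of the patch; example_walk_consoles() is a made-up function, and the work done per console is only hinted at in comments):

/*
 * Illustrative sketch: walk the registered consoles under the console
 * SRCU read lock and branch on the console type with the new lockless
 * helper, as the call sites converted by this patch do.
 */
static void example_walk_consoles(void)
{
	struct console *con;
	int cookie;

	cookie = console_srcu_read_lock();
	for_each_console_srcu(con) {
		if (console_is_nbcon(con)) {
			/* nbcon console: e.g. read its state via nbcon_seq_read(con) */
		} else {
			/* legacy console: e.g. read con->seq directly */
		}
	}
	console_srcu_read_unlock(cookie);
}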