The atomic_long_cmpxchg() ensures that at most one watchpoint is
active at any time: ksw_watch_on() succeeds only when no watch is
armed (the watched address still holds the placeholder), and
ksw_watch_off() succeeds only when the caller supplies the currently
watched address.

For cross-CPU synchronization, updates are propagated by direct
modification on the local CPU and by asynchronous IPIs to the remote
CPUs.

Signed-off-by: Jinchao Wang
---
 mm/kstackwatch/kstackwatch.h |  2 +
 mm/kstackwatch/watch.c       | 73 +++++++++++++++++++++++++++++++++++-
 2 files changed, 74 insertions(+), 1 deletion(-)

diff --git a/mm/kstackwatch/kstackwatch.h b/mm/kstackwatch/kstackwatch.h
index 3ea191370970..0786fa961011 100644
--- a/mm/kstackwatch/kstackwatch.h
+++ b/mm/kstackwatch/kstackwatch.h
@@ -41,5 +41,7 @@ const struct ksw_config *ksw_get_config(void);
 /* watch management */
 int ksw_watch_init(void);
 void ksw_watch_exit(void);
+int ksw_watch_on(ulong watch_addr, u16 watch_len);
+int ksw_watch_off(ulong watch_addr, u16 watch_len);
 
 #endif /* _KSTACKWATCH_H */
diff --git a/mm/kstackwatch/watch.c b/mm/kstackwatch/watch.c
index d3399ac840b2..14549e02faf1 100644
--- a/mm/kstackwatch/watch.c
+++ b/mm/kstackwatch/watch.c
@@ -2,6 +2,7 @@
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
 #include <...>
+#include <linux/smp.h>
 #include <...>
 #include <...>
 
@@ -9,10 +10,16 @@
 
 static struct perf_event *__percpu *watch_events;
 
-static unsigned long watch_holder;
+static ulong watch_holder;
+static atomic_long_t watched_addr = ATOMIC_LONG_INIT((ulong)&watch_holder);
 static struct perf_event_attr watch_attr;
 
+static void ksw_watch_on_local_cpu(void *info);
+
+static DEFINE_PER_CPU(call_single_data_t,
+		      watch_csd) = CSD_INIT(ksw_watch_on_local_cpu, NULL);
+
 bool panic_on_catch;
 module_param(panic_on_catch, bool, 0644);
 MODULE_PARM_DESC(panic_on_catch, "panic immediately on corruption catch");
 
@@ -29,6 +36,70 @@ static void ksw_watch_handler(struct perf_event *bp,
 		panic("Stack corruption detected");
 }
 
+static void ksw_watch_on_local_cpu(void *data)
+{
+	struct perf_event *bp;
+	ulong flags;
+	int cpu;
+	int ret;
+
+	local_irq_save(flags);
+	cpu = raw_smp_processor_id();
+	bp = *per_cpu_ptr(watch_events, cpu);
+	if (!bp) {
+		local_irq_restore(flags);
+		return;
+	}
+
+	ret = modify_wide_hw_breakpoint_local(bp, &watch_attr);
+	local_irq_restore(flags);
+
+	if (ret) {
+		pr_err("failed to reinstall HWBP on CPU %d ret %d\n", cpu,
+		       ret);
+		return;
+	}
+}
+
+static void __ksw_watch_target(ulong addr, u16 len)
+{
+	int cpu;
+	call_single_data_t *csd;
+
+	watch_attr.bp_addr = addr;
+	watch_attr.bp_len = len;
+
+	/* ensure watchpoint update is visible to other CPUs before IPI */
+	smp_wmb();
+
+	for_each_online_cpu(cpu) {
+		if (cpu == raw_smp_processor_id()) {
+			ksw_watch_on_local_cpu(NULL);
+		} else {
+			csd = &per_cpu(watch_csd, cpu);
+			smp_call_function_single_async(cpu, csd);
+		}
+	}
+}
+
+static int ksw_watch_target(ulong old_addr, ulong new_addr, u16 watch_len)
+{
+	if (atomic_long_cmpxchg(&watched_addr, old_addr, new_addr) != old_addr)
+		return -EINVAL;
+	__ksw_watch_target(new_addr, watch_len);
+	return 0;
+}
+
+int ksw_watch_on(ulong watch_addr, u16 watch_len)
+{
+	return ksw_watch_target((ulong)&watch_holder, watch_addr, watch_len);
+}
+
+int ksw_watch_off(ulong watch_addr, u16 watch_len)
+{
+	return ksw_watch_target(watch_addr, (ulong)&watch_holder, watch_len);
+}
+
 int ksw_watch_init(void)
 {
 	int ret;
-- 
2.43.0
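
A minimal usage sketch for review context (not part of this patch):
ksw_watch_on()/ksw_watch_off() are the interfaces added above, while
the caller name, the error handling, and the HW_BREAKPOINT_LEN_8
length choice are illustrative assumptions.

	/*
	 * Hypothetical caller: arm the watch on a resolved stack slot,
	 * then disarm it. A second ksw_watch_on() without an intervening
	 * ksw_watch_off() loses the cmpxchg on watched_addr and gets
	 * -EINVAL, which is how the single-active-watch invariant
	 * surfaces to callers.
	 */
	static int example_arm_and_disarm(ulong slot_addr)
	{
		int ret;

		ret = ksw_watch_on(slot_addr, HW_BREAKPOINT_LEN_8);
		if (ret)
			return ret;	/* another watch is already active */

		/* watched slot is live; the handler fires on write access */

		return ksw_watch_off(slot_addr, HW_BREAKPOINT_LEN_8);
	}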