Add a new replaceable attribute to allow the coexistence of both atomic-replace and non-atomic-replace livepatches. If replaceable is set to 0, the livepatch will not be replaced by a subsequent atomic-replace operation. This is a preparatory patch for the following changes. Signed-off-by: Yafang Shao --- include/linux/livepatch.h | 2 ++ kernel/livepatch/core.c | 44 +++++++++++++++++++++++++++++++++++++++ 2 files changed, 46 insertions(+) diff --git a/include/linux/livepatch.h b/include/linux/livepatch.h index ba9e3988c07c..d88a6966e5f2 100644 --- a/include/linux/livepatch.h +++ b/include/linux/livepatch.h @@ -124,6 +124,7 @@ struct klp_state { * @objs: object entries for kernel objects to be patched * @states: system states that can get modified * @replace: replace all actively used patches + * @replaceable: whether this patch can be replaced or not * @list: list node for global list of actively used patches * @kobj: kobject for sysfs resources * @obj_list: dynamic list of the object entries @@ -138,6 +139,7 @@ struct klp_patch { struct klp_object *objs; struct klp_state *states; bool replace; + bool replaceable; /* internal */ struct list_head list; diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c index 28d15ba58a26..04f9e84f114f 100644 --- a/kernel/livepatch/core.c +++ b/kernel/livepatch/core.c @@ -351,6 +351,7 @@ int klp_apply_section_relocs(struct module *pmod, Elf_Shdr *sechdrs, * /sys/kernel/livepatch/<patch>/transition * /sys/kernel/livepatch/<patch>/force * /sys/kernel/livepatch/<patch>/replace + * /sys/kernel/livepatch/<patch>/replaceable * /sys/kernel/livepatch/<patch>/stack_order * /sys/kernel/livepatch/<patch>/<object> * /sys/kernel/livepatch/<patch>/<object>/patched @@ -478,17 +479,60 @@ static ssize_t stack_order_show(struct kobject *kobj, return sysfs_emit(buf, "%d\n", stack_order); } +static ssize_t replaceable_store(struct kobject *kobj, struct kobj_attribute *attr, + const char *buf, size_t count) +{ + struct klp_patch *patch; + bool replaceable; + int ret; + + ret = kstrtobool(buf, &replaceable); + 
if (ret) + return ret; + + patch = container_of(kobj, struct klp_patch, kobj); + + mutex_lock(&klp_mutex); + + if (patch->replaceable == replaceable) + goto out; + + if (patch == klp_transition_patch) { + ret = -EAGAIN; + goto out; + } + + patch->replaceable = replaceable; + +out: + mutex_unlock(&klp_mutex); + + if (ret) + return ret; + return count; +} +static ssize_t replaceable_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + struct klp_patch *patch; + + patch = container_of(kobj, struct klp_patch, kobj); + return sysfs_emit(buf, "%d\n", patch->replaceable); +} + static struct kobj_attribute enabled_kobj_attr = __ATTR_RW(enabled); static struct kobj_attribute transition_kobj_attr = __ATTR_RO(transition); static struct kobj_attribute force_kobj_attr = __ATTR_WO(force); static struct kobj_attribute replace_kobj_attr = __ATTR_RO(replace); static struct kobj_attribute stack_order_kobj_attr = __ATTR_RO(stack_order); +static struct kobj_attribute replaceable_kobj_attr = __ATTR_RW(replaceable); static struct attribute *klp_patch_attrs[] = { &enabled_kobj_attr.attr, &transition_kobj_attr.attr, &force_kobj_attr.attr, &replace_kobj_attr.attr, &stack_order_kobj_attr.attr, + &replaceable_kobj_attr.attr, NULL }; ATTRIBUTE_GROUPS(klp_patch); -- 2.47.3