Now that we embed the RESx bits in the register description, it becomes
easier to deal with registers that are simply not valid, as their
existence is not satisfied by the configuration (SCTLR2_ELx without
FEAT_SCTLR2, for example).

Such registers essentially become RES0 for any bit that wasn't already
advertised as RESx.

Signed-off-by: Marc Zyngier
---
 arch/arm64/kvm/config.c | 15 +++++++++------
 1 file changed, 9 insertions(+), 6 deletions(-)

diff --git a/arch/arm64/kvm/config.c b/arch/arm64/kvm/config.c
index 28e534f2850ea..0c037742215ac 100644
--- a/arch/arm64/kvm/config.c
+++ b/arch/arm64/kvm/config.c
@@ -1332,7 +1332,7 @@ struct resx compute_reg_resx_bits(struct kvm *kvm,
 				  const struct reg_feat_map_desc *r,
 				  unsigned long require, unsigned long exclude)
 {
-	struct resx resx, tmp;
+	struct resx resx;
 
 	resx = compute_resx_bits(kvm, r->bit_feat_map, r->bit_feat_map_sz,
 				 require, exclude);
@@ -1342,11 +1342,14 @@ struct resx compute_reg_resx_bits(struct kvm *kvm,
 		resx.res1 |= r->feat_map.masks->res1;
 	}
 
-	tmp = compute_resx_bits(kvm, &r->feat_map, 1, require, exclude);
-
-	resx.res0 |= tmp.res0;
-	resx.res0 |= ~reg_feat_map_bits(&r->feat_map);
-	resx.res1 |= tmp.res1;
+	/*
+	 * If the register itself was not valid, all the non-RESx bits are
+	 * now considered RES0 (this matches the behaviour of registers such
+	 * as SCTLR2 and TCR2). Weed out any potential (though unlikely)
+	 * overlap with RES1 bits coming from the previous computation.
+	 */
+	resx.res0 |= compute_resx_bits(kvm, &r->feat_map, 1, require, exclude).res0;
+	resx.res1 &= ~resx.res0;
 
 	return resx;
 }
-- 
2.47.3
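
As a side note for readers not steeped in the RESx handling: the effect of
the new hunk can be illustrated with a minimal standalone sketch in plain C.
The struct, values and the reg_valid flag below are made up for the example
and are not the kernel helpers; the point is only to show how an invalid
register collapses to RES0 for every bit not already advertised as RESx,
and how a RES1 bit overlapping the new RES0 set would be weeded out.

	/*
	 * Standalone illustration with hypothetical values, not the
	 * kernel code.
	 */
	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	struct resx {
		uint64_t res0;
		uint64_t res1;
	};

	int main(void)
	{
		/* RESx bits as computed from a per-bit feature map */
		struct resx resx = {
			.res0 = 0xf0000000ULL,	/* bits [31:28] already RES0 */
			.res1 = 0x00000001ULL,	/* bit 0 already RES1 */
		};
		/* Pretend the register's own feature requirement isn't met */
		bool reg_valid = false;

		if (!reg_valid) {
			/* Bits not already advertised as RESx become RES0 */
			resx.res0 |= ~(resx.res0 | resx.res1);
			/* Weed out any (unlikely) overlap with RES1 bits */
			resx.res1 &= ~resx.res0;
		}

		printf("res0=%016llx res1=%016llx\n",
		       (unsigned long long)resx.res0,
		       (unsigned long long)resx.res1);
		return 0;
	}

Running this prints res0 covering everything but bit 0, while the RES1 bit
survives because it doesn't overlap the newly computed RES0 set.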