Move reused shadow pages to the head of active_mmu_pages in
__kvm_mmu_get_shadow_page(). This moves us towards more of an LRU
approximation of an eviction strategy instead of just straight FIFO.

Signed-off-by: Hamza Mahfooz
---
 arch/x86/kvm/mmu/mmu.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 02c450686b4a..2fe04e01863d 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -2395,7 +2395,8 @@ static struct kvm_mmu_page *__kvm_mmu_get_shadow_page(struct kvm *kvm,
 	if (!sp) {
 		created = true;
 		sp = kvm_mmu_alloc_shadow_page(kvm, caches, gfn, sp_list, role);
-	}
+	} else if (!list_is_first(&sp->link, &kvm->arch.active_mmu_pages))
+		list_move(&sp->link, &kvm->arch.active_mmu_pages);
 
 	trace_kvm_mmu_get_page(sp, created);
 	return sp;
-- 
2.52.0
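
For context, below is a minimal standalone sketch of the move-to-front
scheme this patch applies. It assumes eviction walks active_mmu_pages
from the tail (as kvm_mmu_zap_oldest_mmu_pages() does), so moving a
reused page to the head keeps it furthest from eviction. The list
helpers are cut-down userspace reimplementations of the kernel's
<linux/list.h>; struct demo_page and the ordering demo are illustrative
stand-ins, not code from this patch. Note the guard in the patch is
only a micro-optimization: list_move() on the first element would be a
harmless delete-and-reinsert at the same position.

/*
 * Userspace demo: intrusive doubly-linked list plus move-to-front,
 * mimicking <linux/list.h> semantics.  Compile with: cc -o lru lru.c
 */
#include <stdio.h>
#include <stddef.h>

struct list_head {
	struct list_head *prev, *next;
};

static void INIT_LIST_HEAD(struct list_head *head)
{
	head->prev = head->next = head;
}

/* Insert @new right after @head, i.e. at the front of the list. */
static void list_add(struct list_head *new, struct list_head *head)
{
	new->next = head->next;
	new->prev = head;
	head->next->prev = new;
	head->next = new;
}

static void list_del(struct list_head *entry)
{
	entry->prev->next = entry->next;
	entry->next->prev = entry->prev;
}

/* True if @entry is the first element of the list. */
static int list_is_first(const struct list_head *entry,
			 const struct list_head *head)
{
	return entry->prev == head;
}

/* Unlink @entry and re-insert it at the front. */
static void list_move(struct list_head *entry, struct list_head *head)
{
	list_del(entry);
	list_add(entry, head);
}

#define list_entry(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct demo_page {
	int id;
	struct list_head link;
};

int main(void)
{
	struct demo_page pages[4] = { {.id = 0}, {.id = 1}, {.id = 2}, {.id = 3} };
	struct list_head active, *pos;
	int i;

	INIT_LIST_HEAD(&active);

	/* Pages are allocated 0..3; each list_add() lands at the head,
	 * so head-to-tail order is 3,2,1,0 and the oldest page is last. */
	for (i = 0; i < 4; i++)
		list_add(&pages[i].link, &active);

	/* "Reuse" page 1: move it to the head unless it is already there. */
	if (!list_is_first(&pages[1].link, &active))
		list_move(&pages[1].link, &active);

	/* Prints "1 3 2 0": evicting from the tail removes page 0 first
	 * and the recently reused page 1 last.  Under pure FIFO, page 1
	 * would have been evicted before pages 2 and 3 despite being hot. */
	for (pos = active.next; pos != &active; pos = pos->next)
		printf("%d ", list_entry(pos, struct demo_page, link)->id);
	printf("\n");

	return 0;
}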