The lazy_mmu API now allows nested sections to be handled by arch code:
enter() can return a flag if called inside another lazy_mmu section, so
that the matching call to leave() leaves any optimisation enabled.

This patch implements that new logic for powerpc: if there is an active
batch, then enter() returns LAZY_MMU_NESTED and the matching leave()
leaves batch->active set. The preempt_{enable,disable} calls are left
untouched as they already handle nesting themselves.

TLB flushing is still done in leave() regardless of the nesting level,
as the caller may rely on it whether nesting is occurring or not.

Signed-off-by: Kevin Brodsky
---
 arch/powerpc/include/asm/book3s/64/tlbflush-hash.h | 15 ++++++++++++---
 1 file changed, 12 insertions(+), 3 deletions(-)

diff --git a/arch/powerpc/include/asm/book3s/64/tlbflush-hash.h b/arch/powerpc/include/asm/book3s/64/tlbflush-hash.h
index c9f1e819e567..001c474da1fe 100644
--- a/arch/powerpc/include/asm/book3s/64/tlbflush-hash.h
+++ b/arch/powerpc/include/asm/book3s/64/tlbflush-hash.h
@@ -30,6 +30,7 @@ typedef int lazy_mmu_state_t;
 static inline lazy_mmu_state_t arch_enter_lazy_mmu_mode(void)
 {
 	struct ppc64_tlb_batch *batch;
+	int lazy_mmu_nested;
 
 	if (radix_enabled())
 		return LAZY_MMU_DEFAULT;
@@ -39,9 +40,14 @@ static inline lazy_mmu_state_t arch_enter_lazy_mmu_mode(void)
 	 */
 	preempt_disable();
 	batch = this_cpu_ptr(&ppc64_tlb_batch);
-	batch->active = 1;
+	lazy_mmu_nested = batch->active;
 
-	return LAZY_MMU_DEFAULT;
+	if (!lazy_mmu_nested) {
+		batch->active = 1;
+		return LAZY_MMU_DEFAULT;
+	} else {
+		return LAZY_MMU_NESTED;
+	}
 }
 
 static inline void arch_leave_lazy_mmu_mode(lazy_mmu_state_t state)
@@ -54,7 +60,10 @@ static inline void arch_leave_lazy_mmu_mode(lazy_mmu_state_t state)
 
 	if (batch->index)
 		__flush_tlb_pending(batch);
-	batch->active = 0;
+
+	if (state != LAZY_MMU_NESTED)
+		batch->active = 0;
+
 	preempt_enable();
 }
-- 
2.47.0
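
For illustration, a minimal caller-side sketch of the nesting semantics
introduced above, assuming the hash MMU path (radix_enabled() false);
the call sites are hypothetical, but the hooks and return values are the
ones from this patch:

	lazy_mmu_state_t outer, inner;

	/* no batch active: sets batch->active, returns LAZY_MMU_DEFAULT */
	outer = arch_enter_lazy_mmu_mode();

	/* nested section, e.g. reached via a callee that also batches
	 * PTE updates: batch already active, returns LAZY_MMU_NESTED */
	inner = arch_enter_lazy_mmu_mode();

	/* flushes any pending entries, but keeps batch->active set */
	arch_leave_lazy_mmu_mode(inner);

	/* flushes pending entries and clears batch->active */
	arch_leave_lazy_mmu_mode(outer);

The preempt count nests alongside: each enter() calls preempt_disable()
and each leave() calls preempt_enable(), so preemption stays disabled
until the outermost leave().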