When memory pressure escalates to OOM, fully reclaim the objects on the
global list, even those that have not yet reached the release threshold.

Signed-off-by: Lei Liu
---
 include/linux/swapfile.h | 1 +
 mm/page_alloc.c          | 4 ++++
 mm/swapfile.c            | 2 +-
 3 files changed, 6 insertions(+), 1 deletion(-)

diff --git a/include/linux/swapfile.h b/include/linux/swapfile.h
index dc43464cd838..04c660aae7a0 100644
--- a/include/linux/swapfile.h
+++ b/include/linux/swapfile.h
@@ -5,6 +5,7 @@
 extern unsigned long generic_max_swapfile_size(void);
 unsigned long arch_max_swapfile_size(void);
 int add_to_swap_gather_cache(struct mm_struct *mm, swp_entry_t entry, int nr);
+void flush_cache_if_needed(bool check_cache_count);
 
 /* Maximum swapfile size supported for the arch (not inclusive). */
 extern unsigned long swapfile_maximum_size;

diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index d1d037f97c5f..7c5990c24df7 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -55,6 +55,7 @@
 #include
 #include
 #include
+#include <linux/swapfile.h>
 #include
 #include "internal.h"
 #include "shuffle.h"
@@ -3967,6 +3968,9 @@ __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
 
 	*did_some_progress = 0;
 
+	/* Flush the async swap cache pool. */
+	flush_cache_if_needed(false);
+
 	/*
 	 * Acquire the oom lock.  If that fails, somebody else is
 	 * making progress for us.

diff --git a/mm/swapfile.c b/mm/swapfile.c
index 7c69e726b075..26640ec34fc6 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -229,7 +229,7 @@ static void async_release_func(struct work_struct *work)
 	kfree(work);
 }
 
-static void flush_cache_if_needed(bool check_cache_count)
+void flush_cache_if_needed(bool check_cache_count)
 {
 	struct work_struct *release_work;
-- 
2.34.1
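
A note for reviewers: the hunks above only show the signature of
flush_cache_if_needed(), so here is a minimal sketch of how it is assumed
to behave, based on the check_cache_count parameter and the
async_release_func() worker visible in the diff. The cached_count and
CACHE_FLUSH_THRESHOLD names and the kmalloc/INIT_WORK details below are
illustrative placeholders, not the actual code from this series.

	/*
	 * Illustrative sketch only.  Assumption: with check_cache_count
	 * set, the flush is skipped until the cached object count reaches
	 * a threshold; the OOM path passes false, so the global list is
	 * drained unconditionally.
	 */
	void flush_cache_if_needed(bool check_cache_count)
	{
		struct work_struct *release_work;

		/* Hypothetical threshold gate; names are placeholders. */
		if (check_cache_count && cached_count < CACHE_FLUSH_THRESHOLD)
			return;

		release_work = kmalloc(sizeof(*release_work), GFP_ATOMIC);
		if (!release_work)
			return;

		/* async_release_func() releases the cached entries and then
		 * kfree()s the work item, as in the mm/swapfile.c hunk. */
		INIT_WORK(release_work, async_release_func);
		schedule_work(release_work);
	}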