Once all folios in the cleancache are used to store data from previously
evicted folios, no more data can be stored there. To avoid that situation,
we can drop older data to make room for new data. Add an LRU for cleancache
folios and reclaim the oldest folio when the cleancache is full and a new
folio needs to be stored.

Signed-off-by: Suren Baghdasaryan
Signed-off-by: Minchan Kim
---
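(Reviewer note, not for the commit log: below is a minimal userspace sketch
of the LRU discipline this patch adds, in case it helps review. A pthread
mutex stands in for lru_lock, a hand-rolled circular list stands in for
list_head, and every name in the sketch is illustrative rather than the
kernel API. It also omits the list_empty() checks the kernel code needs
because a folio can be detached concurrently.)

/*
 * Illustrative model only (hypothetical names, not the kernel API):
 * new entries enter at the head, a cache hit rotates the entry back
 * to the head, and reclaim evicts from the tail, i.e. the entry that
 * has gone longest without a hit.
 */
#include <pthread.h>
#include <stdio.h>

struct lru_entry {
	struct lru_entry *prev, *next;
	int id;
};

/* Circular list head, like LIST_HEAD(cleancache_lru) */
static struct lru_entry lru = { &lru, &lru, -1 };
static pthread_mutex_t lru_lock = PTHREAD_MUTEX_INITIALIZER;

static void list_del_entry(struct lru_entry *e)
{
	e->prev->next = e->next;
	e->next->prev = e->prev;
}

static void list_add_head(struct lru_entry *e)
{
	e->next = lru.next;
	e->prev = &lru;
	lru.next->prev = e;
	lru.next = e;
}

/* Like add_folio_to_lru(): a newly attached entry is most recent */
static void lru_add(struct lru_entry *e)
{
	pthread_mutex_lock(&lru_lock);
	list_add_head(e);
	pthread_mutex_unlock(&lru_lock);
}

/* Like rotate_lru_folio(): a hit moves the entry back to the head */
static void lru_rotate(struct lru_entry *e)
{
	pthread_mutex_lock(&lru_lock);
	list_del_entry(e);
	list_add_head(e);
	pthread_mutex_unlock(&lru_lock);
}

/* Like reclaim_folio_from_lru(): evict the least recent entry */
static struct lru_entry *lru_reclaim(void)
{
	struct lru_entry *victim = NULL;

	pthread_mutex_lock(&lru_lock);
	if (lru.prev != &lru) {
		victim = lru.prev;
		list_del_entry(victim);
		victim->prev = victim->next = victim;
	}
	pthread_mutex_unlock(&lru_lock);
	return victim;
}

int main(void)
{
	struct lru_entry a = { &a, &a, 0 }, b = { &b, &b, 1 };

	lru_add(&a);
	lru_add(&b);
	lru_rotate(&a);		/* "a" was just used, so "b" becomes oldest */
	printf("evicted %d\n", lru_reclaim()->id);	/* prints "evicted 1" */
	return 0;
}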
 mm/cleancache.c | 90 +++++++++++++++++++++++++++++++++++++++++++++++--
 1 file changed, 88 insertions(+), 2 deletions(-)

diff --git a/mm/cleancache.c b/mm/cleancache.c
index 0023962de024..73a8b2655def 100644
--- a/mm/cleancache.c
+++ b/mm/cleancache.c
@@ -19,6 +19,13 @@
  *
  * ccinode->folios.xa_lock
  *   pool->lock
+ *
+ * ccinode->folios.xa_lock
+ *   lru_lock
+ *
+ * ccinode->folios.xa_lock
+ *   lru_lock
+ *     pool->lock
  */
 
 #define INODE_HASH_BITS	6
@@ -60,6 +67,8 @@ static struct kmem_cache *slab_inode;	/* cleancache_inode slab */
 static struct cleancache_pool pools[CLEANCACHE_MAX_POOLS];
 static atomic_t nr_pools = ATOMIC_INIT(0);
 static DEFINE_SPINLOCK(pools_lock);	/* protects pools */
+static LIST_HEAD(cleancache_lru);
+static DEFINE_SPINLOCK(lru_lock);	/* protects cleancache_lru */
 
 /*
  * Folio attributes:
@@ -130,6 +139,7 @@ static inline bool is_folio_attached(struct folio *folio)
 /*
  * Folio pool helpers.
  * Only detached folios are stored in the pool->folio_list.
+ * Once a folio gets attached, it's placed on the cleancache LRU list.
  *
  * Locking:
  * pool->folio_list is accessed under pool->lock.
@@ -181,6 +191,32 @@ static struct folio *pick_folio_from_any_pool(void)
 	return folio;
 }
 
+/* Folio LRU helpers. Only attached folios are stored in the cleancache_lru. */
+static void add_folio_to_lru(struct folio *folio)
+{
+	VM_BUG_ON(!list_empty(&folio->lru));
+
+	spin_lock(&lru_lock);
+	list_add(&folio->lru, &cleancache_lru);
+	spin_unlock(&lru_lock);
+}
+
+static void rotate_lru_folio(struct folio *folio)
+{
+	spin_lock(&lru_lock);
+	if (!list_empty(&folio->lru))
+		list_move(&folio->lru, &cleancache_lru);
+	spin_unlock(&lru_lock);
+}
+
+static void delete_folio_from_lru(struct folio *folio)
+{
+	spin_lock(&lru_lock);
+	if (!list_empty(&folio->lru))
+		list_del_init(&folio->lru);
+	spin_unlock(&lru_lock);
+}
+
 /* FS helpers */
 static struct cleancache_fs *get_fs(int fs_id)
 {
@@ -316,6 +352,7 @@ static void erase_folio_from_inode(struct cleancache_inode *ccinode,
 
 	removed = __xa_erase(&ccinode->folios, offset);
 	VM_BUG_ON(!removed);
+	delete_folio_from_lru(folio);
 	remove_inode_if_empty(ccinode);
 }
 
@@ -413,6 +450,48 @@ static struct cleancache_inode *add_and_get_inode(struct cleancache_fs *fs,
 	return ccinode;
 }
 
+static struct folio *reclaim_folio_from_lru(void)
+{
+	struct cleancache_inode *ccinode;
+	unsigned long offset;
+	struct folio *folio;
+
+again:
+	spin_lock(&lru_lock);
+	if (list_empty(&cleancache_lru)) {
+		spin_unlock(&lru_lock);
+		return NULL;
+	}
+	ccinode = NULL;
+	/* Get the ccinode of the folio at the LRU tail */
+	list_for_each_entry_reverse(folio, &cleancache_lru, lru) {
+		struct cleancache_pool *pool = folio_pool(folio);
+
+		/* Find and get ccinode */
+		spin_lock(&pool->lock);
+		folio_attachment(folio, &ccinode, &offset);
+		if (ccinode && !get_inode(ccinode))
+			ccinode = NULL;
+		spin_unlock(&pool->lock);
+		if (ccinode)
+			break;
+	}
+	spin_unlock(&lru_lock);
+
+	if (!ccinode)
+		return NULL; /* No ccinode to reclaim */
+
+	if (!isolate_folio_from_inode(ccinode, offset, folio)) {
+		/* Retry if the folio got erased from the ccinode */
+		put_inode(ccinode);
+		goto again;
+	}
+
+	put_inode(ccinode);
+
+	return folio;
+}
+
 static void copy_folio_content(struct folio *from, struct folio *to)
 {
 	void *src = kmap_local_folio(from, 0);
@@ -468,14 +547,19 @@ static bool store_into_inode(struct cleancache_fs *fs,
 			move_folio_from_inode_to_pool(ccinode, offset, stored_folio);
 			goto out_unlock;
 		}
+		rotate_lru_folio(stored_folio);
 	} else {
 		if (!workingset)
 			goto out_unlock;
 
 		stored_folio = pick_folio_from_any_pool();
 		if (!stored_folio) {
-			/* No free folios, TODO: try reclaiming */
-			goto out_unlock;
+			/* No free folios, try reclaiming */
+			xa_unlock(&ccinode->folios);
+			stored_folio = reclaim_folio_from_lru();
+			xa_lock(&ccinode->folios);
+			if (!stored_folio)
+				goto out_unlock;
 		}
 
 		if (!store_folio_in_inode(ccinode, offset, stored_folio)) {
@@ -487,6 +571,7 @@ static bool store_into_inode(struct cleancache_fs *fs,
 			spin_unlock(&pool->lock);
 			goto out_unlock;
 		}
+		add_folio_to_lru(stored_folio);
 	}
 
 	copy_folio_content(folio, stored_folio);
@@ -516,6 +601,7 @@ static bool load_from_inode(struct cleancache_fs *fs,
 	xa_lock(&ccinode->folios);
 	stored_folio = xa_load(&ccinode->folios, offset);
 	if (stored_folio) {
+		rotate_lru_folio(stored_folio);
 		copy_folio_content(stored_folio, folio);
 		ret = true;
 	}
-- 
2.51.0.740.g6adb054d12-goog