Restore pages from the cleancache during the readahead operation.

Before read_pages() issues any I/O, walk the requested range and try to
copy each folio's contents from the cleancache. Folios restored this way
are marked uptodate and unlocked and the readahead window is shrunk
accordingly; if every folio in the batch is restored, the read is skipped
entirely.

Signed-off-by: Suren Baghdasaryan
---
 include/linux/cleancache.h | 13 +++++++++
 mm/cleancache.c            | 58 ++++++++++++++++++++++++++++++++++++++
 mm/readahead.c             | 54 +++++++++++++++++++++++++++++++++++
 3 files changed, 125 insertions(+)

diff --git a/include/linux/cleancache.h b/include/linux/cleancache.h
index 419faa183aba..75361d1cfe3f 100644
--- a/include/linux/cleancache.h
+++ b/include/linux/cleancache.h
@@ -11,6 +11,7 @@
 
 #define CLEANCACHE_KEY_MAX 6
 
+struct cleancache_inode;
 
 #ifdef CONFIG_CLEANCACHE
 
@@ -21,6 +22,11 @@ bool cleancache_store_folio(struct inode *inode, struct folio *folio);
 bool cleancache_restore_folio(struct inode *inode, struct folio *folio);
 bool cleancache_invalidate_folio(struct inode *inode, struct folio *folio);
 bool cleancache_invalidate_inode(struct inode *inode);
+struct cleancache_inode *
+cleancache_start_inode_walk(struct inode *inode, unsigned long count);
+void cleancache_end_inode_walk(struct cleancache_inode *ccinode);
+bool cleancache_restore_from_inode(struct cleancache_inode *ccinode,
+				   struct folio *folio);
 
 /*
  * Backend API
@@ -50,6 +56,13 @@ static inline bool cleancache_invalidate_folio(struct inode *inode,
 	{ return false; }
 static inline bool cleancache_invalidate_inode(struct inode *inode)
 	{ return false; }
+static inline struct cleancache_inode *
+cleancache_start_inode_walk(struct inode *inode, unsigned long count)
+	{ return NULL; }
+static inline void cleancache_end_inode_walk(struct cleancache_inode *ccinode) {}
+static inline bool cleancache_restore_from_inode(struct cleancache_inode *ccinode,
+						 struct folio *folio)
+	{ return false; }
 static inline int cleancache_backend_register_pool(const char *name)
 	{ return -EOPNOTSUPP; }
 static inline int cleancache_backend_get_folio(int pool_id, struct folio *folio)
diff --git a/mm/cleancache.c b/mm/cleancache.c
index 3acf46c0cdd1..6be86938c8fe 100644
--- a/mm/cleancache.c
+++ b/mm/cleancache.c
@@ -799,6 +799,64 @@ bool cleancache_invalidate_inode(struct inode *inode)
 	return count > 0;
 }
 
+struct cleancache_inode *
+cleancache_start_inode_walk(struct inode *inode, unsigned long count)
+{
+	struct cleancache_inode *ccinode;
+	struct cleancache_fs *fs;
+	int fs_id;
+
+	if (!inode)
+		return NULL;
+
+	fs_id = inode->i_sb->cleancache_id;
+	if (fs_id == CLEANCACHE_ID_INVALID)
+		return NULL;
+
+	fs = get_fs(fs_id);
+	if (!fs)
+		return NULL;
+
+	ccinode = find_and_get_inode(fs, inode);
+	if (!ccinode) {
+		put_fs(fs);
+		return NULL;
+	}
+
+	return ccinode;
+}
+
+void cleancache_end_inode_walk(struct cleancache_inode *ccinode)
+{
+	struct cleancache_fs *fs = ccinode->fs;
+
+	put_inode(ccinode);
+	put_fs(fs);
+}
+
+bool cleancache_restore_from_inode(struct cleancache_inode *ccinode,
+				   struct folio *folio)
+{
+	struct folio *stored_folio;
+	void *src, *dst;
+	bool ret = false;
+
+	xa_lock(&ccinode->folios);
+	stored_folio = xa_load(&ccinode->folios, folio->index);
+	if (stored_folio) {
+		rotate_lru_folio(stored_folio);
+		src = kmap_local_folio(stored_folio, 0);
+		dst = kmap_local_folio(folio, 0);
+		memcpy(dst, src, PAGE_SIZE);
+		kunmap_local(dst);
+		kunmap_local(src);
+		ret = true;
+	}
+	xa_unlock(&ccinode->folios);
+
+	return ret;
+}
+
 /* Backend API */
 /*
  * Register a new backend and add its folios for cleancache to use.
diff --git a/mm/readahead.c b/mm/readahead.c
index 3a4b5d58eeb6..878cc8dfa48e 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -128,6 +128,7 @@
 #include
 #include
 #include
+#include <linux/cleancache.h>
 
 #define CREATE_TRACE_POINTS
 #include
@@ -146,12 +147,65 @@ file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping)
 }
 EXPORT_SYMBOL_GPL(file_ra_state_init);
 
+static inline bool restore_from_cleancache(struct readahead_control *rac)
+{
+	XA_STATE(xas, &rac->mapping->i_pages, rac->_index);
+	struct cleancache_inode *ccinode;
+	struct folio *folio;
+	unsigned long end;
+	bool ret = true;
+
+	int count = readahead_count(rac);
+
+	/* Readahead should not have started yet. */
+	VM_BUG_ON(rac->_batch_count != 0);
+
+	if (!count)
+		return true;
+
+	ccinode = cleancache_start_inode_walk(rac->mapping->host, count);
+	if (!ccinode)
+		return false;
+
+	end = rac->_index + rac->_nr_pages - 1;
+	xas_for_each(&xas, folio, end) {
+		unsigned long nr;
+
+		if (xas_retry(&xas, folio)) {
+			ret = false;
+			break;
+		}
+
+		if (!cleancache_restore_from_inode(ccinode, folio)) {
+			ret = false;
+			break;
+		}
+
+		nr = folio_nr_pages(folio);
+		folio_mark_uptodate(folio);
+		folio_unlock(folio);
+		rac->_index += nr;
+		rac->_nr_pages -= nr;
+		rac->ra->size -= nr;
+		if (rac->ra->async_size >= nr)
+			rac->ra->async_size -= nr;
+	}
+
+	cleancache_end_inode_walk(ccinode);
+
+	return ret;
+}
+
 static void read_pages(struct readahead_control *rac)
 {
 	const struct address_space_operations *aops = rac->mapping->a_ops;
 	struct folio *folio;
 	struct blk_plug plug;
 
+	/* Try to read all pages from the cleancache */
+	if (restore_from_cleancache(rac))
+		return;
+
 	if (!readahead_count(rac))
 		return;
-- 
2.51.1.851.g4ebd6896fd-goog
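
For context, a minimal usage sketch of the new walk API, not part of the patch:
it shows the expected pairing of cleancache_start_inode_walk() and
cleancache_end_inode_walk() around any number of cleancache_restore_from_inode()
calls, as the readahead path above does. The helper name restore_locked_folios()
and the folios[] array are hypothetical; the sketch assumes the caller already
holds locked, not-yet-uptodate folios belonging to the given inode.

/*
 * Illustrative sketch only (hypothetical helper, not in this patch).
 * Pin the cleancache fs and inode once for the whole batch, try to copy
 * each folio's contents out of the cleancache, then drop the references.
 */
static bool restore_locked_folios(struct inode *inode, struct folio **folios,
				  unsigned long nr)
{
	struct cleancache_inode *ccinode;
	unsigned long i;
	bool all = true;

	ccinode = cleancache_start_inode_walk(inode, nr);
	if (!ccinode)
		return false;	/* inode has nothing in the cleancache */

	for (i = 0; i < nr; i++) {
		/* Folio is locked and !uptodate, as in the readahead path. */
		if (!cleancache_restore_from_inode(ccinode, folios[i])) {
			all = false;
			break;
		}
		folio_mark_uptodate(folios[i]);
	}

	cleancache_end_inode_walk(ccinode);
	return all;
}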