Restore pages from the cleancache during readahead.

Before read_pages() submits I/O, walk the folios that readahead has already
added to the page cache and try to populate them directly from the
cleancache. Restored folios are marked uptodate and unlocked and the
readahead window is trimmed accordingly; if every folio in the batch is
restored, the read is skipped entirely.

Signed-off-by: Suren Baghdasaryan
---
 include/linux/cleancache.h | 17 +++++++++++
 mm/cleancache.c            | 59 ++++++++++++++++++++++++++++++++++++++
 mm/readahead.c             | 55 +++++++++++++++++++++++++++++++++++
 3 files changed, 131 insertions(+)

diff --git a/include/linux/cleancache.h b/include/linux/cleancache.h
index 458a7a25a8af..28b6d7b25964 100644
--- a/include/linux/cleancache.h
+++ b/include/linux/cleancache.h
@@ -11,6 +11,7 @@
 
 #define CLEANCACHE_KEY_MAX 6
 
+struct cleancache_inode;
 #ifdef CONFIG_CLEANCACHE
 
@@ -24,6 +25,14 @@ bool cleancache_invalidate_folio(struct address_space *mapping,
 bool cleancache_invalidate_inode(struct address_space *mapping,
                                  struct inode *inode);
 
+struct cleancache_inode *
+cleancache_start_inode_walk(struct address_space *mapping,
+                            struct inode *inode,
+                            unsigned long count);
+void cleancache_end_inode_walk(struct cleancache_inode *ccinode);
+bool cleancache_restore_from_inode(struct cleancache_inode *ccinode,
+                                   struct folio *folio);
+
 /*
  * Backend API
  *
@@ -53,6 +62,14 @@ static inline bool cleancache_invalidate_folio(struct address_space *mapping,
 static inline bool cleancache_invalidate_inode(struct address_space *mapping,
                                                struct inode *inode)
         { return false; }
+static inline struct cleancache_inode *
+cleancache_start_inode_walk(struct address_space *mapping, struct inode *inode,
+                            unsigned long count)
+        { return NULL; }
+static inline void cleancache_end_inode_walk(struct cleancache_inode *ccinode) {}
+static inline bool cleancache_restore_from_inode(struct cleancache_inode *ccinode,
+                                                 struct folio *folio)
+        { return false; }
 static inline int cleancache_backend_register_pool(const char *name)
         { return -EOPNOTSUPP; }
 static inline int cleancache_backend_get_folio(int pool_id, struct folio *folio)
diff --git a/mm/cleancache.c b/mm/cleancache.c
index 73a8b2655def..59b8fd309619 100644
--- a/mm/cleancache.c
+++ b/mm/cleancache.c
@@ -813,6 +813,65 @@ bool cleancache_invalidate_inode(struct address_space *mapping,
 	return count > 0;
 }
 
+struct cleancache_inode *
+cleancache_start_inode_walk(struct address_space *mapping, struct inode *inode,
+                            unsigned long count)
+{
+        struct cleancache_inode *ccinode;
+        struct cleancache_fs *fs;
+        int fs_id;
+
+        if (!inode)
+                return ERR_PTR(-EINVAL);
+
+        fs_id = mapping->host->i_sb->cleancache_id;
+        if (fs_id == CLEANCACHE_ID_INVALID)
+                return ERR_PTR(-EINVAL);
+
+        fs = get_fs(fs_id);
+        if (!fs)
+                return NULL;
+
+        ccinode = find_and_get_inode(fs, inode);
+        if (!ccinode) {
+                put_fs(fs);
+                return NULL;
+        }
+
+        return ccinode;
+}
+
+void cleancache_end_inode_walk(struct cleancache_inode *ccinode)
+{
+        struct cleancache_fs *fs = ccinode->fs;
+
+        put_inode(ccinode);
+        put_fs(fs);
+}
+
+bool cleancache_restore_from_inode(struct cleancache_inode *ccinode,
+                                   struct folio *folio)
+{
+        struct folio *stored_folio;
+        void *src, *dst;
+        bool ret = false;
+
+        xa_lock(&ccinode->folios);
+        stored_folio = xa_load(&ccinode->folios, folio->index);
+        if (stored_folio) {
+                rotate_lru_folio(stored_folio);
+                src = kmap_local_folio(stored_folio, 0);
+                dst = kmap_local_folio(folio, 0);
+                memcpy(dst, src, PAGE_SIZE);
+                kunmap_local(dst);
+                kunmap_local(src);
+                ret = true;
+        }
+        xa_unlock(&ccinode->folios);
+
+        return ret;
+}
+
 /* Backend API */
 /*
  * Register a new backend and add its folios for cleancache to use.
diff --git a/mm/readahead.c b/mm/readahead.c
index 3a4b5d58eeb6..6f4986a5e14a 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -128,6 +128,7 @@
 #include <...>
 #include <...>
 #include <...>
+#include <linux/cleancache.h>
 
 #define CREATE_TRACE_POINTS
 #include <...>
@@ -146,12 +147,66 @@ file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping)
 }
 EXPORT_SYMBOL_GPL(file_ra_state_init);
 
+static inline bool restore_from_cleancache(struct readahead_control *rac)
+{
+        XA_STATE(xas, &rac->mapping->i_pages, rac->_index);
+        struct cleancache_inode *ccinode;
+        struct folio *folio;
+        unsigned long end;
+        bool ret = true;
+
+        int count = readahead_count(rac);
+
+        /* Readahead should not have started yet. */
+        VM_BUG_ON(rac->_batch_count != 0);
+
+        if (!count)
+                return true;
+
+        ccinode = cleancache_start_inode_walk(rac->mapping, rac->mapping->host,
+                                              count);
+        if (!ccinode)
+                return false;
+
+        end = rac->_index + rac->_nr_pages - 1;
+        xas_for_each(&xas, folio, end) {
+                unsigned long nr;
+
+                if (xas_retry(&xas, folio)) {
+                        ret = false;
+                        break;
+                }
+
+                if (!cleancache_restore_from_inode(ccinode, folio)) {
+                        ret = false;
+                        break;
+                }
+
+                nr = folio_nr_pages(folio);
+                folio_mark_uptodate(folio);
+                folio_unlock(folio);
+                rac->_index += nr;
+                rac->_nr_pages -= nr;
+                rac->ra->size -= nr;
+                if (rac->ra->async_size >= nr)
+                        rac->ra->async_size -= nr;
+        }
+
+        cleancache_end_inode_walk(ccinode);
+
+        return ret;
+}
+
 static void read_pages(struct readahead_control *rac)
 {
         const struct address_space_operations *aops = rac->mapping->a_ops;
         struct folio *folio;
         struct blk_plug plug;
 
+        /* Try to read all pages from the cleancache */
+        if (restore_from_cleancache(rac))
+                return;
+
         if (!readahead_count(rac))
                 return;
 
-- 
2.51.0.740.g6adb054d12-goog
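
For context, below is a minimal, self-contained sketch (not part of the patch) of how
a caller would use the new walk API on a batch of freshly inserted, locked,
not-yet-uptodate folios. The helper name restore_range_from_cleancache() and the
rcu_read_lock() around the i_pages walk are illustrative assumptions; only the three
cleancache_*() calls are defined by this patch.

/*
 * Illustrative only: restore a contiguous range of locked, not-yet-uptodate
 * folios from the cleancache, mirroring what restore_from_cleancache() does
 * in read_pages() above.
 */
#include <linux/cleancache.h>
#include <linux/pagemap.h>
#include <linux/xarray.h>

static bool restore_range_from_cleancache(struct address_space *mapping,
                                          pgoff_t start, unsigned long nr)
{
        XA_STATE(xas, &mapping->i_pages, start);
        struct cleancache_inode *ccinode;
        struct folio *folio;
        bool all_restored = true;

        /* Pin the cleancache inode for the duration of the walk. */
        ccinode = cleancache_start_inode_walk(mapping, mapping->host, nr);
        if (!ccinode)
                return false;

        rcu_read_lock();
        xas_for_each(&xas, folio, start + nr - 1) {
                if (xas_retry(&xas, folio))
                        continue;

                /* Copy the cached contents into the locked folio. */
                if (!cleancache_restore_from_inode(ccinode, folio)) {
                        /* Leave it locked so the caller reads it from disk. */
                        all_restored = false;
                        break;
                }

                folio_mark_uptodate(folio);
                folio_unlock(folio);
        }
        rcu_read_unlock();

        cleancache_end_inode_walk(ccinode);
        return all_restored;
}

In the patch itself this pattern runs inside read_pages() before any I/O is issued:
each restored folio advances rac->_index and shrinks rac->_nr_pages (and the
ra->size/async_size window), so if, say, only the first 10 folios of a 16-folio batch
hit in the cleancache, the subsequent ->readahead() call has just the remaining 6
folios to read; when every folio hits, read_pages() returns before reaching the block
layer.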