They provide useful information for debugging split huge page tests. Signed-off-by: Zi Yan Reviewed-by: Wei Yang Reviewed-by: Donet Tom Reviewed-by: wang lian Reviewed-by: Baolin Wang Reviewed-by: Barry Song Acked-by: David Hildenbrand --- mm/huge_memory.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/mm/huge_memory.c b/mm/huge_memory.c index aac5f0a2cb54..2a47cd3bb649 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -4320,8 +4320,8 @@ static int split_huge_pages_pid(int pid, unsigned long vaddr_start, goto out; } - pr_debug("Split huge pages in pid: %d, vaddr: [0x%lx - 0x%lx]\n", - pid, vaddr_start, vaddr_end); + pr_debug("Split huge pages in pid: %d, vaddr: [0x%lx - 0x%lx], new_order: %u, in_folio_offset: %ld\n", + pid, vaddr_start, vaddr_end, new_order, in_folio_offset); mmap_read_lock(mm); /* @@ -4431,8 +4431,8 @@ static int split_huge_pages_in_file(const char *file_path, pgoff_t off_start, if (IS_ERR(candidate)) goto out; - pr_debug("split file-backed THPs in file: %s, page offset: [0x%lx - 0x%lx]\n", - file_path, off_start, off_end); + pr_debug("split file-backed THPs in file: %s, page offset: [0x%lx - 0x%lx], new_order: %u, in_folio_offset: %ld\n", + file_path, off_start, off_end, new_order, in_folio_offset); mapping = candidate->f_mapping; min_order = mapping_min_folio_order(mapping); -- 2.50.1 All functions are only used within the file. Signed-off-by: Zi Yan Reviewed-by: Wei Yang Reviewed-by: wang lian Acked-by: David Hildenbrand --- .../selftests/mm/split_huge_page_test.c | 22 +++++++++---------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/tools/testing/selftests/mm/split_huge_page_test.c b/tools/testing/selftests/mm/split_huge_page_test.c index 54e86f00aabc..089e146efeab 100644 --- a/tools/testing/selftests/mm/split_huge_page_test.c +++ b/tools/testing/selftests/mm/split_huge_page_test.c @@ -37,7 +37,7 @@ uint64_t pmd_pagesize; #define PFN_MASK ((1UL<<55)-1) #define KPF_THP (1UL<<22) -int is_backed_by_thp(char *vaddr, int pagemap_file, int kpageflags_file) +static int is_backed_by_thp(char *vaddr, int pagemap_file, int kpageflags_file) { uint64_t paddr; uint64_t page_flags; @@ -135,7 +135,7 @@ static void verify_rss_anon_split_huge_page_all_zeroes(char *one_page, int nr_hp rss_anon_before, rss_anon_after); } -void split_pmd_zero_pages(void) +static void split_pmd_zero_pages(void) { char *one_page; int nr_hpages = 4; @@ -147,7 +147,7 @@ void split_pmd_zero_pages(void) free(one_page); } -void split_pmd_thp_to_order(int order) +static void split_pmd_thp_to_order(int order) { char *one_page; size_t len = 4 * pmd_pagesize; @@ -181,7 +181,7 @@ void split_pmd_thp_to_order(int order) free(one_page); } -void split_pte_mapped_thp(void) +static void split_pte_mapped_thp(void) { char *one_page, *pte_mapped, *pte_mapped2; size_t len = 4 * pmd_pagesize; @@ -264,7 +264,7 @@ void split_pte_mapped_thp(void) close(kpageflags_fd); } -void split_file_backed_thp(int order) +static void split_file_backed_thp(int order) { int status; int fd; @@ -364,7 +364,7 @@ void split_file_backed_thp(int order) ksft_exit_fail_msg("Error occurred\n"); } -bool prepare_thp_fs(const char *xfs_path, char *thp_fs_template, +static bool prepare_thp_fs(const char *xfs_path, char *thp_fs_template, const char **thp_fs_loc) { if (xfs_path) { @@ -380,7 +380,7 @@ bool prepare_thp_fs(const char *xfs_path, char *thp_fs_template, return true; } -void cleanup_thp_fs(const char *thp_fs_loc, bool created_tmp) +static void cleanup_thp_fs(const char *thp_fs_loc, bool created_tmp) { 
int status; @@ -393,8 +393,8 @@ void cleanup_thp_fs(const char *thp_fs_loc, bool created_tmp) strerror(errno)); } -int create_pagecache_thp_and_fd(const char *testfile, size_t fd_size, int *fd, - char **addr) +static int create_pagecache_thp_and_fd(const char *testfile, size_t fd_size, + int *fd, char **addr) { size_t i; unsigned char buf[1024]; @@ -460,8 +460,8 @@ int create_pagecache_thp_and_fd(const char *testfile, size_t fd_size, int *fd, return -1; } -void split_thp_in_pagecache_to_order_at(size_t fd_size, const char *fs_loc, - int order, int offset) +static void split_thp_in_pagecache_to_order_at(size_t fd_size, + const char *fs_loc, int order, int offset) { int fd; char *addr; -- 2.50.1 and rename it to is_backed_by_folio(). is_backed_by_folio() checks if the given vaddr is backed by a folio with a given order. It does so by: 1. getting the pfn of the vaddr; 2. checking kpageflags of the pfn; if order is greater than 0: 3. checking kpageflags of the head pfn; 4. checking kpageflags of all tail pfns. pmd_order is added to split_huge_page_test.c and replaces max_order. Signed-off-by: Zi Yan Reviewed-by: Wei Yang Reviewed-by: wang lian --- .../selftests/mm/split_huge_page_test.c | 88 ++++++++++++++----- tools/testing/selftests/mm/vm_util.c | 13 +++ tools/testing/selftests/mm/vm_util.h | 4 + 3 files changed, 81 insertions(+), 24 deletions(-) diff --git a/tools/testing/selftests/mm/split_huge_page_test.c b/tools/testing/selftests/mm/split_huge_page_test.c index 089e146efeab..56d1eaf9a860 100644 --- a/tools/testing/selftests/mm/split_huge_page_test.c +++ b/tools/testing/selftests/mm/split_huge_page_test.c @@ -25,6 +25,7 @@ uint64_t pagesize; unsigned int pageshift; uint64_t pmd_pagesize; +unsigned int pmd_order; #define SPLIT_DEBUGFS "/sys/kernel/debug/split_huge_pages" #define SMAP_PATH "/proc/self/smaps" @@ -34,26 +35,66 @@ uint64_t pmd_pagesize; #define PID_FMT_OFFSET "%d,0x%lx,0x%lx,%d,%d" #define PATH_FMT "%s,0x%lx,0x%lx,%d" -#define PFN_MASK ((1UL<<55)-1) -#define KPF_THP (1UL<<22) - -static int is_backed_by_thp(char *vaddr, int pagemap_file, int kpageflags_file) +static bool is_backed_by_folio(char *vaddr, int order, int pagemap_fd, + int kpageflags_fd) { - uint64_t paddr; - uint64_t page_flags; + unsigned long pfn_head; + uint64_t pfn_flags; + unsigned long pfn; + unsigned long i; - if (pagemap_file) { - pread(pagemap_file, &paddr, sizeof(paddr), - ((long)vaddr >> pageshift) * sizeof(paddr)); + pfn = pagemap_get_pfn(pagemap_fd, vaddr); - if (kpageflags_file) { - pread(kpageflags_file, &page_flags, sizeof(page_flags), - (paddr & PFN_MASK) * sizeof(page_flags)); + /* non present page */ + if (pfn == -1UL) + return false; - return !!(page_flags & KPF_THP); - } + if (pageflags_get(pfn, kpageflags_fd, &pfn_flags)) + goto fail; + + /* check for order-0 pages */ + if (!order) { + if (pfn_flags & (KPF_THP | KPF_COMPOUND_HEAD | KPF_COMPOUND_TAIL)) + return false; + return true; } - return 0; + + /* non THP folio */ + if (!(pfn_flags & KPF_THP)) + return false; + + pfn_head = pfn & ~((1 << order) - 1); + + if (pageflags_get(pfn_head, kpageflags_fd, &pfn_flags)) + goto fail; + + /* head PFN has no compound_head flag set */ + if (!(pfn_flags & (KPF_THP | KPF_COMPOUND_HEAD))) + return false; + + /* check all tail PFN flags */ + for (i = 1; i < 1UL << order; i++) { + if (pageflags_get(pfn_head + i, kpageflags_fd, &pfn_flags)) + goto fail; + if (!(pfn_flags & (KPF_THP | KPF_COMPOUND_TAIL))) + return false; + } + + /* + * check the PFN after this folio, but if its flags cannot be obtained, + * 
assume this folio has the expected order + */ + if (pageflags_get(pfn_head + (1UL << order), kpageflags_fd, &pfn_flags)) + return true; + + /* this folio is bigger than the given order */ + if (pfn_flags & (KPF_THP | KPF_COMPOUND_TAIL)) + return false; + + return true; +fail: + ksft_exit_fail_msg("Failed to get folio info\n"); + return false; } static void write_file(const char *path, const char *buf, size_t buflen) @@ -234,7 +275,7 @@ static void split_pte_mapped_thp(void) thp_size = 0; for (i = 0; i < pagesize * 4; i++) if (i % pagesize == 0 && - is_backed_by_thp(&pte_mapped[i], pagemap_fd, kpageflags_fd)) + is_backed_by_folio(&pte_mapped[i], pmd_order, pagemap_fd, kpageflags_fd)) thp_size++; if (thp_size != 4) @@ -251,7 +292,7 @@ static void split_pte_mapped_thp(void) ksft_exit_fail_msg("%ld byte corrupted\n", i); if (i % pagesize == 0 && - is_backed_by_thp(&pte_mapped[i], pagemap_fd, kpageflags_fd)) + !is_backed_by_folio(&pte_mapped[i], 0, pagemap_fd, kpageflags_fd)) thp_size++; } @@ -523,7 +564,6 @@ int main(int argc, char **argv) const char *fs_loc; bool created_tmp; int offset; - unsigned int max_order; unsigned int nr_pages; unsigned int tests; @@ -544,28 +584,28 @@ int main(int argc, char **argv) ksft_exit_fail_msg("Reading PMD pagesize failed\n"); nr_pages = pmd_pagesize / pagesize; - max_order = sz2ord(pmd_pagesize, pagesize); - tests = 2 + (max_order - 1) + (2 * max_order) + (max_order - 1) * 4 + 2; + pmd_order = sz2ord(pmd_pagesize, pagesize); + tests = 2 + (pmd_order - 1) + (2 * pmd_order) + (pmd_order - 1) * 4 + 2; ksft_set_plan(tests); fd_size = 2 * pmd_pagesize; split_pmd_zero_pages(); - for (i = 0; i < max_order; i++) + for (i = 0; i < pmd_order; i++) if (i != 1) split_pmd_thp_to_order(i); split_pte_mapped_thp(); - for (i = 0; i < max_order; i++) + for (i = 0; i < pmd_order; i++) split_file_backed_thp(i); created_tmp = prepare_thp_fs(optional_xfs_path, fs_loc_template, &fs_loc); - for (i = max_order - 1; i >= 0; i--) + for (i = pmd_order - 1; i >= 0; i--) split_thp_in_pagecache_to_order_at(fd_size, fs_loc, i, -1); - for (i = 0; i < max_order; i++) + for (i = 0; i < pmd_order; i++) for (offset = 0; offset < nr_pages; offset += MAX(nr_pages / 4, 1 << i)) diff --git a/tools/testing/selftests/mm/vm_util.c b/tools/testing/selftests/mm/vm_util.c index 6a239aa413e2..741fc129313d 100644 --- a/tools/testing/selftests/mm/vm_util.c +++ b/tools/testing/selftests/mm/vm_util.c @@ -338,6 +338,19 @@ int detect_hugetlb_page_sizes(size_t sizes[], int max) return count; } +int pageflags_get(unsigned long pfn, int kpageflags_fd, uint64_t *flags) +{ + size_t count; + + count = pread(kpageflags_fd, flags, sizeof(*flags), + pfn * sizeof(*flags)); + + if (count != sizeof(*flags)) + return -1; + + return 0; +} + /* If `ioctls' non-NULL, the allowed ioctls will be returned into the var */ int uffd_register_with_ioctls(int uffd, void *addr, uint64_t len, bool miss, bool wp, bool minor, uint64_t *ioctls) diff --git a/tools/testing/selftests/mm/vm_util.h b/tools/testing/selftests/mm/vm_util.h index 3da56feeb944..ab8722f482ae 100644 --- a/tools/testing/selftests/mm/vm_util.h +++ b/tools/testing/selftests/mm/vm_util.h @@ -18,6 +18,9 @@ #define PM_SWAP BIT_ULL(62) #define PM_PRESENT BIT_ULL(63) +#define KPF_COMPOUND_HEAD BIT_ULL(15) +#define KPF_COMPOUND_TAIL BIT_ULL(16) +#define KPF_THP BIT_ULL(22) /* * Ignore the checkpatch warning, we must read from x but don't want to do * anything with it in order to trigger a read page fault. 
We therefore must use @@ -85,6 +88,7 @@ bool check_huge_shmem(void *addr, int nr_hpages, uint64_t hpage_size); int64_t allocate_transhuge(void *ptr, int pagemap_fd); unsigned long default_huge_page_size(void); int detect_hugetlb_page_sizes(size_t sizes[], int max); +int pageflags_get(unsigned long pfn, int kpageflags_fd, uint64_t *flags); int uffd_register(int uffd, void *addr, uint64_t len, bool miss, bool wp, bool minor); -- 2.50.1 The helper gathers folio order statistics of the folios within a virtual address range and checks them against a given order list. It aims to provide a more precise folio order check instead of just checking the existence of PMD folios. The helper will be used in the upcoming commit. Signed-off-by: Zi Yan --- .../selftests/mm/split_huge_page_test.c | 152 ++++++++++++++++++ 1 file changed, 152 insertions(+) diff --git a/tools/testing/selftests/mm/split_huge_page_test.c b/tools/testing/selftests/mm/split_huge_page_test.c index 56d1eaf9a860..e24df02420ad 100644 --- a/tools/testing/selftests/mm/split_huge_page_test.c +++ b/tools/testing/selftests/mm/split_huge_page_test.c @@ -97,6 +97,158 @@ static bool is_backed_by_folio(char *vaddr, int order, int pagemap_fd, return false; } +static int vaddr_pageflags_get(char *vaddr, int pagemap_fd, int kpageflags_fd, + uint64_t *flags) +{ + unsigned long pfn; + + pfn = pagemap_get_pfn(pagemap_fd, vaddr); + + /* non-present PFN */ + if (pfn == -1UL) + return 1; + + if (pageflags_get(pfn, kpageflags_fd, flags)) + return -1; + + return 0; +} + +/* + * gather_after_split_folio_orders - scan through [vaddr_start, len) and record + * folio orders + * + * @vaddr_start: start vaddr + * @len: range length + * @pagemap_fd: file descriptor to /proc/<pid>/pagemap + * @kpageflags_fd: file descriptor to /proc/kpageflags + * @orders: output folio order array + * @nr_orders: folio order array size + * + * gather_after_split_folio_orders() scans through [vaddr_start, len), checks + * all folios within the range, and records their orders. All order-0 pages will + * be recorded. Non-present vaddr is skipped. + * + * NOTE: the function is used to check folio orders after a split is performed, + * so it assumes [vaddr_start, len) fully maps to after-split folios within that + * range. 
+ * + * Return: 0 - no error, -1 - unhandled cases + */ +static int gather_after_split_folio_orders(char *vaddr_start, size_t len, + int pagemap_fd, int kpageflags_fd, int orders[], int nr_orders) +{ + uint64_t page_flags = 0; + int cur_order = -1; + char *vaddr; + + if (pagemap_fd == -1 || kpageflags_fd == -1) + return -1; + if (!orders) + return -1; + if (nr_orders <= 0) + return -1; + + for (vaddr = vaddr_start; vaddr < vaddr_start + len;) { + char *next_folio_vaddr; + int status; + + status = vaddr_pageflags_get(vaddr, pagemap_fd, kpageflags_fd, + &page_flags); + if (status < 0) + return -1; + + /* skip non present vaddr */ + if (status == 1) { + vaddr += psize(); + continue; + } + + /* all order-0 pages with possible false positives (non folio) */ + if (!(page_flags & (KPF_COMPOUND_HEAD | KPF_COMPOUND_TAIL))) { + orders[0]++; + vaddr += psize(); + continue; + } + + /* skip non-THP compound pages */ + if (!(page_flags & KPF_THP)) { + vaddr += psize(); + continue; + } + + /* vpn points to part of a THP at this point */ + if (page_flags & KPF_COMPOUND_HEAD) + cur_order = 1; + else { + vaddr += psize(); + continue; + } + + next_folio_vaddr = vaddr + (1UL << (cur_order + pshift())); + + if (next_folio_vaddr >= vaddr_start + len) + break; + + while ((status = vaddr_pageflags_get(next_folio_vaddr, + pagemap_fd, kpageflags_fd, + &page_flags)) >= 0) { + /* + * non present vaddr, next compound head page, or + * order-0 page + */ + if (status == 1 || + (page_flags & KPF_COMPOUND_HEAD) || + !(page_flags & (KPF_COMPOUND_HEAD | KPF_COMPOUND_TAIL))) { + if (cur_order < nr_orders) { + orders[cur_order]++; + cur_order = -1; + vaddr = next_folio_vaddr; + } + break; + } + + cur_order++; + next_folio_vaddr = vaddr + (1UL << (cur_order + pshift())); + } + + if (status < 0) + return status; + } + if (cur_order > 0 && cur_order < nr_orders) + orders[cur_order]++; + return 0; +} + +static int check_after_split_folio_orders(char *vaddr_start, size_t len, + int pagemap_fd, int kpageflags_fd, int orders[], int nr_orders) +{ + int *vaddr_orders; + int status; + int i; + + vaddr_orders = (int *)malloc(sizeof(int) * nr_orders); + + if (!vaddr_orders) + ksft_exit_fail_msg("Cannot allocate memory for vaddr_orders"); + + memset(vaddr_orders, 0, sizeof(int) * nr_orders); + status = gather_after_split_folio_orders(vaddr_start, len, pagemap_fd, + kpageflags_fd, vaddr_orders, nr_orders); + if (status) + ksft_exit_fail_msg("gather folio info failed\n"); + + for (i = 0; i < nr_orders; i++) + if (vaddr_orders[i] != orders[i]) { + ksft_print_msg("order %d: expected: %d got %d\n", i, + orders[i], vaddr_orders[i]); + status = -1; + } + + free(vaddr_orders); + return status; +} + static void write_file(const char *path, const char *buf, size_t buflen) { int fd; -- 2.50.1 Instead of just checking the existence of PMD folios before and after folio split tests, use check_after_split_folio_orders() to check after-split folio orders (a worked example of the expected counts is sketched below). The split ranges in split_thp_in_pagecache_to_order_at() are changed to [addr, addr + pagesize) for every pmd_pagesize. This prevents folios within the range from being split multiple times, since the debugfs split function always performs splits with a pagesize step over a given range. The following tests are not changed: 1. split_pte_mapped_thp: the test already uses kpageflags to check; 2. split_file_backed_thp: no vaddr available. 
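For illustration, here is a minimal compile-and-run sketch of how the expected after-split counts are derived. The concrete values (4KB base pages, 2MB PMD THPs so pmd_order = 9, fd_size = 2 * pmd_pagesize, target order 2) are assumptions chosen only for this example; the arithmetic mirrors what split_thp_in_pagecache_to_order_at() computes below.

/*
 * Illustrative sketch only, not part of the patch.  Assumed values:
 * pagesize = 4KB, pmd_pagesize = 2MB (pmd_order = 9),
 * fd_size = 2 * pmd_pagesize, target split order = 2.
 */
#include <stdio.h>

int main(void)
{
	unsigned long pagesize = 4096;
	unsigned long pmd_pagesize = 2UL << 20;
	unsigned long fd_size = 2 * pmd_pagesize;
	int pmd_order = 9, order = 2, split_at_offset = 1;
	int times = fd_size / pmd_pagesize;	/* number of PMD THPs split */
	int expected_orders[10] = { 0 };	/* pmd_order + 1 entries */
	int i;

	if (!split_at_offset) {
		/* uniform split: every page ends up in an order-2 folio */
		expected_orders[order] = fd_size / (pagesize << order);
	} else {
		/*
		 * split at an in-folio offset: each PMD THP leaves one folio
		 * per order from pmd_order - 1 down to order + 1, plus two
		 * folios at the target order
		 */
		for (i = order + 1; i < pmd_order; i++)
			expected_orders[i] = times;
		expected_orders[order] = 2 * times;
	}

	for (i = 0; i <= pmd_order; i++)
		printf("order %d: %d folio(s)\n", i, expected_orders[i]);
	return 0;
}

With these assumed values, a uniform split yields 256 order-2 folios, while an offset split yields, per PMD THP, one folio of each order from 8 down to 3 plus two order-2 folios.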
Signed-off-by: Zi Yan --- .../selftests/mm/split_huge_page_test.c | 88 ++++++++++++++----- 1 file changed, 64 insertions(+), 24 deletions(-) diff --git a/tools/testing/selftests/mm/split_huge_page_test.c b/tools/testing/selftests/mm/split_huge_page_test.c index e24df02420ad..a66ecbdfe7c6 100644 --- a/tools/testing/selftests/mm/split_huge_page_test.c +++ b/tools/testing/selftests/mm/split_huge_page_test.c @@ -26,6 +26,7 @@ uint64_t pagesize; unsigned int pageshift; uint64_t pmd_pagesize; unsigned int pmd_order; +int *expected_orders; #define SPLIT_DEBUGFS "/sys/kernel/debug/split_huge_pages" #define SMAP_PATH "/proc/self/smaps" @@ -35,6 +36,11 @@ unsigned int pmd_order; #define PID_FMT_OFFSET "%d,0x%lx,0x%lx,%d,%d" #define PATH_FMT "%s,0x%lx,0x%lx,%d" +const char *pagemap_proc = "/proc/self/pagemap"; +const char *kpageflags_proc = "/proc/kpageflags"; +int pagemap_fd; +int kpageflags_fd; + static bool is_backed_by_folio(char *vaddr, int order, int pagemap_fd, int kpageflags_fd) { @@ -366,6 +372,13 @@ static void split_pmd_thp_to_order(int order) if (one_page[i] != (char)i) ksft_exit_fail_msg("%ld byte corrupted\n", i); + memset(expected_orders, 0, sizeof(int) * (pmd_order + 1)); + expected_orders[order] = 4 << (pmd_order - order); + + if (check_after_split_folio_orders(one_page, len, pagemap_fd, + kpageflags_fd, expected_orders, + (pmd_order + 1))) + ksft_exit_fail_msg("Unexpected THP split\n"); if (!check_huge_anon(one_page, 0, pmd_pagesize)) ksft_exit_fail_msg("Still AnonHugePages not split\n"); @@ -380,22 +393,6 @@ static void split_pte_mapped_thp(void) size_t len = 4 * pmd_pagesize; uint64_t thp_size; size_t i; - const char *pagemap_template = "/proc/%d/pagemap"; - const char *kpageflags_proc = "/proc/kpageflags"; - char pagemap_proc[255]; - int pagemap_fd; - int kpageflags_fd; - - if (snprintf(pagemap_proc, 255, pagemap_template, getpid()) < 0) - ksft_exit_fail_msg("get pagemap proc error: %s\n", strerror(errno)); - - pagemap_fd = open(pagemap_proc, O_RDONLY); - if (pagemap_fd == -1) - ksft_exit_fail_msg("read pagemap: %s\n", strerror(errno)); - - kpageflags_fd = open(kpageflags_proc, O_RDONLY); - if (kpageflags_fd == -1) - ksft_exit_fail_msg("read kpageflags: %s\n", strerror(errno)); one_page = mmap((void *)(1UL << 30), len, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0); @@ -453,8 +450,6 @@ static void split_pte_mapped_thp(void) ksft_test_result_pass("Split PTE-mapped huge pages successful\n"); munmap(one_page, len); - close(pagemap_fd); - close(kpageflags_fd); } static void split_file_backed_thp(int order) @@ -657,6 +652,7 @@ static void split_thp_in_pagecache_to_order_at(size_t fd_size, const char *fs_loc, int order, int offset) { int fd; + char *split_addr; char *addr; size_t i; char testfile[INPUT_MAX]; @@ -670,14 +666,33 @@ static void split_thp_in_pagecache_to_order_at(size_t fd_size, err = create_pagecache_thp_and_fd(testfile, fd_size, &fd, &addr); if (err) return; + err = 0; - if (offset == -1) - write_debugfs(PID_FMT, getpid(), (uint64_t)addr, - (uint64_t)addr + fd_size, order); - else - write_debugfs(PID_FMT_OFFSET, getpid(), (uint64_t)addr, - (uint64_t)addr + fd_size, order, offset); + memset(expected_orders, 0, sizeof(int) * (pmd_order + 1)); + /* + * use [split_addr, split_addr + pagesize) range to split THPs, since + * the debugfs function always splits a range with a pagesize step and + * providing a full [addr, addr + fd_size) range can trigger multiple + * splits, complicating after-split result checking. 
+ */ + if (offset == -1) { + for (split_addr = addr; split_addr < addr + fd_size; split_addr += pmd_pagesize) + write_debugfs(PID_FMT, getpid(), (uint64_t)split_addr, + (uint64_t)split_addr + pagesize, order); + + expected_orders[order] = fd_size / (pagesize << order); + } else { + int times = fd_size / pmd_pagesize; + + for (split_addr = addr; split_addr < addr + fd_size; split_addr += pmd_pagesize) + write_debugfs(PID_FMT_OFFSET, getpid(), (uint64_t)split_addr, + (uint64_t)split_addr + pagesize, order, offset); + + for (i = order + 1; i < pmd_order; i++) + expected_orders[i] = times; + expected_orders[order] = 2 * times; + } for (i = 0; i < fd_size; i++) if (*(addr + i) != (char)i) { @@ -686,6 +701,14 @@ static void split_thp_in_pagecache_to_order_at(size_t fd_size, goto out; } + if (check_after_split_folio_orders(addr, fd_size, pagemap_fd, + kpageflags_fd, expected_orders, + (pmd_order + 1))) { + ksft_print_msg("Unexpected THP split\n"); + err = 1; + goto out; + } + if (!check_huge_file(addr, 0, pmd_pagesize)) { ksft_print_msg("Still FilePmdMapped not split\n"); err = EXIT_FAILURE; @@ -737,9 +760,22 @@ int main(int argc, char **argv) nr_pages = pmd_pagesize / pagesize; pmd_order = sz2ord(pmd_pagesize, pagesize); + + expected_orders = (int *)malloc(sizeof(int) * (pmd_order + 1)); + if (!expected_orders) + ksft_exit_fail_msg("Fail to allocate memory: %s\n", strerror(errno)); + tests = 2 + (pmd_order - 1) + (2 * pmd_order) + (pmd_order - 1) * 4 + 2; ksft_set_plan(tests); + pagemap_fd = open(pagemap_proc, O_RDONLY); + if (pagemap_fd == -1) + ksft_exit_fail_msg("read pagemap: %s\n", strerror(errno)); + + kpageflags_fd = open(kpageflags_proc, O_RDONLY); + if (kpageflags_fd == -1) + ksft_exit_fail_msg("read kpageflags: %s\n", strerror(errno)); + fd_size = 2 * pmd_pagesize; split_pmd_zero_pages(); @@ -764,6 +800,10 @@ int main(int argc, char **argv) split_thp_in_pagecache_to_order_at(fd_size, fs_loc, i, offset); cleanup_thp_fs(fs_loc, created_tmp); + close(pagemap_fd); + close(kpageflags_fd); + free(expected_orders); + ksft_finished(); return 0; -- 2.50.1
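As a closing reference, the debugfs interface exercised throughout the series can also be driven directly. The sketch below is only illustrative: the SPLIT_DEBUGFS path and the "pid,vaddr_start,vaddr_end,new_order" layout follow the PID_FMT definition in the selftest, while the helper name, the use of stdio, and the error handling are assumptions (the selftest's own write_debugfs() implementation is not part of this excerpt).

#include <stdio.h>
#include <sys/types.h>

#define SPLIT_DEBUGFS "/sys/kernel/debug/split_huge_pages"

/*
 * Illustrative sketch: request that the kernel split the THPs mapped in
 * [vaddr_start, vaddr_end) of process @pid down to @new_order by writing
 * "<pid>,<vaddr_start>,<vaddr_end>,<new_order>" (the PID_FMT layout used
 * by the selftest) to the debugfs file.  Returns 0 on success, -1 on error.
 */
int request_split(pid_t pid, unsigned long vaddr_start,
		  unsigned long vaddr_end, int new_order)
{
	FILE *f = fopen(SPLIT_DEBUGFS, "w");
	int ret;

	if (!f)
		return -1;
	ret = fprintf(f, "%d,0x%lx,0x%lx,%d",
		      (int)pid, vaddr_start, vaddr_end, new_order);
	if (fclose(f) != 0 || ret < 0)
		return -1;
	return 0;
}

A caller would pass its own pid (e.g. from getpid()) and the virtual address range of a mapped THP, much as the selftest does via write_debugfs(PID_FMT, ...).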