They provide useful information for debugging split huge page tests.

Signed-off-by: Zi Yan
Reviewed-by: Wei Yang
Reviewed-by: Donet Tom
Reviewed-by: wang lian
Reviewed-by: Baolin Wang
Reviewed-by: Barry Song
Acked-by: David Hildenbrand
---
 mm/huge_memory.c | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index b8bb078a1a34..22dc727d1d27 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -4319,8 +4319,8 @@ static int split_huge_pages_pid(int pid, unsigned long vaddr_start,
                 goto out;
         }
 
-        pr_debug("Split huge pages in pid: %d, vaddr: [0x%lx - 0x%lx]\n",
-                 pid, vaddr_start, vaddr_end);
+        pr_debug("Split huge pages in pid: %d, vaddr: [0x%lx - 0x%lx], new_order: %u, in_folio_offset: %ld\n",
+                 pid, vaddr_start, vaddr_end, new_order, in_folio_offset);
 
         mmap_read_lock(mm);
         /*
@@ -4430,8 +4430,8 @@ static int split_huge_pages_in_file(const char *file_path, pgoff_t off_start,
         if (IS_ERR(candidate))
                 goto out;
 
-        pr_debug("split file-backed THPs in file: %s, page offset: [0x%lx - 0x%lx]\n",
-                 file_path, off_start, off_end);
+        pr_debug("split file-backed THPs in file: %s, page offset: [0x%lx - 0x%lx], new_order: %u, in_folio_offset: %ld\n",
+                 file_path, off_start, off_end, new_order, in_folio_offset);
 
         mapping = candidate->f_mapping;
         min_order = mapping_min_folio_order(mapping);
-- 
2.47.2


The helper gathers folio order statistics for the folios within a virtual
address range and checks them against a given order list. It aims to provide
a more precise folio order check instead of just checking for the existence
of PMD folios.

Signed-off-by: Zi Yan
---
 .../selftests/mm/split_huge_page_test.c |   4 +-
 tools/testing/selftests/mm/vm_util.c    | 173 ++++++++++++++++++
 tools/testing/selftests/mm/vm_util.h    |   7 +
 3 files changed, 181 insertions(+), 3 deletions(-)

diff --git a/tools/testing/selftests/mm/split_huge_page_test.c b/tools/testing/selftests/mm/split_huge_page_test.c
index 5d07b0b89226..63ac82f0b9e0 100644
--- a/tools/testing/selftests/mm/split_huge_page_test.c
+++ b/tools/testing/selftests/mm/split_huge_page_test.c
@@ -34,8 +34,6 @@ uint64_t pmd_pagesize;
 #define PID_FMT_OFFSET "%d,0x%lx,0x%lx,%d,%d"
 #define PATH_FMT "%s,0x%lx,0x%lx,%d"
 
-#define PFN_MASK ((1UL<<55)-1)
-#define KPF_THP (1UL<<22)
 #define GET_ORDER(nr_pages) (31 - __builtin_clz(nr_pages))
 
 int is_backed_by_thp(char *vaddr, int pagemap_file, int kpageflags_file)
@@ -49,7 +47,7 @@ int is_backed_by_thp(char *vaddr, int pagemap_file, int kpageflags_file)
 
                 if (kpageflags_file) {
                         pread(kpageflags_file, &page_flags, sizeof(page_flags),
-                                (paddr & PFN_MASK) * sizeof(page_flags));
+                                PAGEMAP_PFN(paddr) * sizeof(page_flags));
 
                         return !!(page_flags & KPF_THP);
                 }
diff --git a/tools/testing/selftests/mm/vm_util.c b/tools/testing/selftests/mm/vm_util.c
index 6a239aa413e2..4d952d1bc96d 100644
--- a/tools/testing/selftests/mm/vm_util.c
+++ b/tools/testing/selftests/mm/vm_util.c
@@ -338,6 +338,179 @@ int detect_hugetlb_page_sizes(size_t sizes[], int max)
         return count;
 }
 
+static int get_pfn_flags(unsigned long pfn, int kpageflags_fd, uint64_t *flags)
+{
+        size_t count;
+
+        count = pread(kpageflags_fd, flags, sizeof(*flags),
+                      pfn * sizeof(*flags));
+
+        if (count != sizeof(*flags))
+                return -1;
+
+        return 0;
+}
+
+static int get_page_flags(char *vaddr, int pagemap_fd, int kpageflags_fd,
+                          uint64_t *flags)
+{
+        unsigned long pfn;
+
+        pfn = pagemap_get_pfn(pagemap_fd, vaddr);
+        /*
+         * Treat a non-present page as a page without any flag, so that
+         * gather_folio_orders() just records the current folio order.
+         */
+        if (pfn == -1UL) {
+                *flags = 0;
+                return 1;
+        }
+
+        if (get_pfn_flags(pfn, kpageflags_fd, flags))
+                return -1;
+
+        return 0;
+}
+
+/*
+ * gather_folio_orders - scan through [vaddr_start, len) and record folio orders
+ * @vaddr_start: start vaddr
+ * @len: range length
+ * @pagemap_fd: file descriptor to /proc/<pid>/pagemap
+ * @kpageflags_fd: file descriptor to /proc/kpageflags
+ * @orders: output folio order array
+ * @nr_orders: folio order array size
+ *
+ * gather_folio_orders() scans through [vaddr_start, vaddr_start + len),
+ * checks all folios within the range, and records their orders. All order-0
+ * pages are recorded. Non-present vaddrs are skipped.
+ *
+ * Return: 0 - no error, -1 - unhandled cases
+ */
+static int gather_folio_orders(char *vaddr_start, size_t len,
+                               int pagemap_fd, int kpageflags_fd,
+                               int orders[], int nr_orders)
+{
+        uint64_t page_flags = 0;
+        int cur_order = -1;
+        char *vaddr;
+
+        if (!pagemap_fd || !kpageflags_fd)
+                return -1;
+        if (nr_orders <= 0)
+                return -1;
+
+        for (vaddr = vaddr_start; vaddr < vaddr_start + len;) {
+                char *next_folio_vaddr;
+                int status;
+
+                status = get_page_flags(vaddr, pagemap_fd, kpageflags_fd,
+                                        &page_flags);
+                if (status < 0)
+                        return -1;
+
+                /* skip non-present vaddr */
+                if (status == 1) {
+                        vaddr += psize();
+                        continue;
+                }
+
+                /* all order-0 pages, with possible false positives (non folio) */
+                if (!(page_flags & (KPF_COMPOUND_HEAD | KPF_COMPOUND_TAIL))) {
+                        orders[0]++;
+                        vaddr += psize();
+                        continue;
+                }
+
+                /* skip non-THP compound pages */
+                if (!(page_flags & KPF_THP)) {
+                        vaddr += psize();
+                        continue;
+                }
+
+                /* vaddr points to part of a THP at this point */
+                if (page_flags & KPF_COMPOUND_HEAD)
+                        cur_order = 1;
+                else {
+                        /* not a head nor a tail in a THP? */
+                        if (!(page_flags & KPF_COMPOUND_TAIL))
+                                return -1;
+
+                        vaddr += psize();
+                        continue;
+                }
+
+                next_folio_vaddr = vaddr + (1UL << (cur_order + pshift()));
+
+                if (next_folio_vaddr >= vaddr_start + len)
+                        break;
+
+                while ((status = get_page_flags(next_folio_vaddr, pagemap_fd,
+                                                kpageflags_fd,
+                                                &page_flags)) >= 0) {
+                        /*
+                         * non-present vaddr, next compound head page, or
+                         * order-0 page
+                         */
+                        if (status == 1 ||
+                            (page_flags & KPF_COMPOUND_HEAD) ||
+                            !(page_flags & (KPF_COMPOUND_HEAD | KPF_COMPOUND_TAIL))) {
+                                if (cur_order < nr_orders) {
+                                        orders[cur_order]++;
+                                        cur_order = -1;
+                                        vaddr = next_folio_vaddr;
+                                }
+                                break;
+                        }
+
+                        /* not a head nor a tail in a THP? */
+                        if (!(page_flags & KPF_COMPOUND_TAIL))
+                                return -1;
+
+                        cur_order++;
+                        next_folio_vaddr = vaddr + (1UL << (cur_order + pshift()));
+                }
+
+                if (status < 0)
+                        return status;
+        }
+        if (cur_order > 0 && cur_order < nr_orders)
+                orders[cur_order]++;
+        return 0;
+}
+
+int check_folio_orders(char *vaddr_start, size_t len, int pagemap_fd,
+                       int kpageflags_fd, int orders[], int nr_orders)
+{
+        int *vaddr_orders;
+        int status;
+        int i;
+
+        vaddr_orders = (int *)malloc(sizeof(int) * nr_orders);
+
+        if (!vaddr_orders)
+                ksft_exit_fail_msg("Cannot allocate memory for vaddr_orders");
+
+        memset(vaddr_orders, 0, sizeof(int) * nr_orders);
+        status = gather_folio_orders(vaddr_start, len, pagemap_fd,
+                                     kpageflags_fd, vaddr_orders, nr_orders);
+        if (status)
+                goto out;
+
+        status = 0;
+        for (i = 0; i < nr_orders; i++)
+                if (vaddr_orders[i] != orders[i]) {
+                        ksft_print_msg("order %d: expected: %d got %d\n", i,
+                                       orders[i], vaddr_orders[i]);
+                        status = -1;
+                }
+
+out:
+        free(vaddr_orders);
+        return status;
+}
+
 /* If `ioctls' non-NULL, the allowed ioctls will be returned into the var */
 int uffd_register_with_ioctls(int uffd, void *addr, uint64_t len,
                               bool miss, bool wp, bool minor, uint64_t *ioctls)
diff --git a/tools/testing/selftests/mm/vm_util.h b/tools/testing/selftests/mm/vm_util.h
index 1843ad48d32b..02e3f1e7065b 100644
--- a/tools/testing/selftests/mm/vm_util.h
+++ b/tools/testing/selftests/mm/vm_util.h
@@ -18,6 +18,11 @@
 #define PM_SWAP    BIT_ULL(62)
 #define PM_PRESENT BIT_ULL(63)
 
+#define KPF_COMPOUND_HEAD BIT_ULL(15)
+#define KPF_COMPOUND_TAIL BIT_ULL(16)
+#define KPF_THP           BIT_ULL(22)
+
+
 /*
  * Ignore the checkpatch warning, we must read from x but don't want to do
  * anything with it in order to trigger a read page fault. We therefore must use
@@ -85,6 +90,8 @@ bool check_huge_shmem(void *addr, int nr_hpages, uint64_t hpage_size);
 int64_t allocate_transhuge(void *ptr, int pagemap_fd);
 unsigned long default_huge_page_size(void);
 int detect_hugetlb_page_sizes(size_t sizes[], int max);
+int check_folio_orders(char *vaddr_start, size_t len, int pagemap_file,
+                       int kpageflags_file, int orders[], int nr_orders);
 
 int uffd_register(int uffd, void *addr, uint64_t len, bool miss,
                   bool wp, bool minor);
-- 
2.47.2


and rename it to is_backed_by_folio(). is_backed_by_folio() checks whether
the given vaddr is backed by a folio of a given order. It does so by:

1. getting the pfn of the vaddr;
2. checking kpageflags of the pfn;

and, if order is greater than 0:

3. checking kpageflags of the head pfn;
4. checking kpageflags of all tail pfns.

pmd_order is added to split_huge_page_test.c and replaces max_order.
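
For orientation, the new check can be exercised from a test roughly as in the
sketch below. It is illustrative only and not part of the series: it assumes
it lives in split_huge_page_test.c, so that write_debugfs(), PID_FMT, pagesize,
pmd_order and the pagemap/kpageflags file descriptors opened by the test are
available, and it splits an already-THP-backed anonymous range down to order 0.

static void assert_split_to_base_pages(char *addr, size_t len,
                                       int pagemap_fd, int kpageflags_fd)
{
        size_t off;

        /* the range should start out backed by a PMD-order folio */
        if (!is_backed_by_folio(addr, pmd_order, pagemap_fd, kpageflags_fd))
                ksft_exit_fail_msg("range is not backed by a PMD folio\n");

        /* ask the kernel to split the range down to order 0 */
        write_debugfs(PID_FMT, getpid(), (uint64_t)addr,
                      (uint64_t)addr + len, 0);

        /* afterwards every page should be a plain, non-compound base page */
        for (off = 0; off < len; off += pagesize)
                if (!is_backed_by_folio(addr + off, 0, pagemap_fd, kpageflags_fd))
                        ksft_exit_fail_msg("page at offset %zu is not order-0\n", off);
}
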
Signed-off-by: Zi Yan
---
 .../selftests/mm/split_huge_page_test.c | 67 +++++++++++++------
 tools/testing/selftests/mm/vm_util.c    |  2 +-
 tools/testing/selftests/mm/vm_util.h    |  1 +
 3 files changed, 48 insertions(+), 22 deletions(-)

diff --git a/tools/testing/selftests/mm/split_huge_page_test.c b/tools/testing/selftests/mm/split_huge_page_test.c
index 63ac82f0b9e0..3aaf783f339f 100644
--- a/tools/testing/selftests/mm/split_huge_page_test.c
+++ b/tools/testing/selftests/mm/split_huge_page_test.c
@@ -25,6 +25,7 @@
 uint64_t pagesize;
 unsigned int pageshift;
 uint64_t pmd_pagesize;
+unsigned int pmd_order;
 
 #define SPLIT_DEBUGFS "/sys/kernel/debug/split_huge_pages"
 #define SMAP_PATH "/proc/self/smaps"
@@ -36,23 +37,48 @@ uint64_t pmd_pagesize;
 
 #define GET_ORDER(nr_pages) (31 - __builtin_clz(nr_pages))
 
-int is_backed_by_thp(char *vaddr, int pagemap_file, int kpageflags_file)
+int is_backed_by_folio(char *vaddr, int order, int pagemap_fd, int kpageflags_fd)
 {
-        uint64_t paddr;
-        uint64_t page_flags;
+        unsigned long pfn_head;
+        uint64_t pfn_flags;
+        unsigned long pfn;
+        unsigned long i;
 
-        if (pagemap_file) {
-                pread(pagemap_file, &paddr, sizeof(paddr),
-                      ((long)vaddr >> pageshift) * sizeof(paddr));
+        if (!pagemap_fd || !kpageflags_fd)
+                return 0;
 
-                if (kpageflags_file) {
-                        pread(kpageflags_file, &page_flags, sizeof(page_flags),
-                                PAGEMAP_PFN(paddr) * sizeof(page_flags));
+        pfn = pagemap_get_pfn(pagemap_fd, vaddr);
 
-                        return !!(page_flags & KPF_THP);
-                }
+        if (pfn == -1UL)
+                return 0;
+
+        if (get_pfn_flags(pfn, kpageflags_fd, &pfn_flags))
+                return 0;
+
+        if (!order) {
+                if (pfn_flags & (KPF_THP | KPF_COMPOUND_HEAD | KPF_COMPOUND_TAIL))
+                        return 0;
+                return 1;
         }
-        return 0;
+
+        if (!(pfn_flags & KPF_THP))
+                return 0;
+
+        pfn_head = pfn & ~((1 << order) - 1);
+
+        if (get_pfn_flags(pfn_head, kpageflags_fd, &pfn_flags))
+                return 0;
+
+        if (!(pfn_flags & (KPF_THP | KPF_COMPOUND_HEAD)))
+                return 0;
+
+        for (i = 1; i < (1UL << order) - 1; i++) {
+                if (get_pfn_flags(pfn_head + i, kpageflags_fd, &pfn_flags))
+                        return 0;
+                if (!(pfn_flags & (KPF_THP | KPF_COMPOUND_TAIL)))
+                        return 0;
+        }
+        return 1;
 }
 
 static void write_file(const char *path, const char *buf, size_t buflen)
@@ -233,7 +259,7 @@ void split_pte_mapped_thp(void)
         thp_size = 0;
         for (i = 0; i < pagesize * 4; i++)
                 if (i % pagesize == 0 &&
-                    is_backed_by_thp(&pte_mapped[i], pagemap_fd, kpageflags_fd))
+                    is_backed_by_folio(&pte_mapped[i], pmd_order, pagemap_fd, kpageflags_fd))
                         thp_size++;
 
         if (thp_size != 4)
@@ -250,7 +276,7 @@ void split_pte_mapped_thp(void)
                         ksft_exit_fail_msg("%ld byte corrupted\n", i);
 
                 if (i % pagesize == 0 &&
-                    is_backed_by_thp(&pte_mapped[i], pagemap_fd, kpageflags_fd))
+                    !is_backed_by_folio(&pte_mapped[i], 0, pagemap_fd, kpageflags_fd))
                         thp_size++;
         }
 
@@ -522,7 +548,6 @@ int main(int argc, char **argv)
         const char *fs_loc;
         bool created_tmp;
         int offset;
-        unsigned int max_order;
         unsigned int nr_pages;
         unsigned int tests;
 
@@ -543,28 +568,28 @@ int main(int argc, char **argv)
                 ksft_exit_fail_msg("Reading PMD pagesize failed\n");
 
         nr_pages = pmd_pagesize / pagesize;
-        max_order = GET_ORDER(nr_pages);
-        tests = 2 + (max_order - 1) + (2 * max_order) + (max_order - 1) * 4 + 2;
+        pmd_order = GET_ORDER(nr_pages);
+        tests = 2 + (pmd_order - 1) + (2 * pmd_order) + (pmd_order - 1) * 4 + 2;
         ksft_set_plan(tests);
 
         fd_size = 2 * pmd_pagesize;
 
         split_pmd_zero_pages();
 
-        for (i = 0; i < max_order; i++)
+        for (i = 0; i < pmd_order; i++)
                 if (i != 1)
                         split_pmd_thp_to_order(i);
 
         split_pte_mapped_thp();
-        for (i = 0; i < max_order; i++)
+        for (i = 0; i < pmd_order; i++)
                 split_file_backed_thp(i);
 
         created_tmp = prepare_thp_fs(optional_xfs_path, fs_loc_template,
                         &fs_loc);
-        for (i = max_order - 1; i >= 0; i--)
+        for (i = pmd_order - 1; i >= 0; i--)
                 split_thp_in_pagecache_to_order_at(fd_size, fs_loc, i, -1);
 
-        for (i = 0; i < max_order; i++)
+        for (i = 0; i < pmd_order; i++)
                 for (offset = 0;
                      offset < nr_pages;
                      offset += MAX(nr_pages / 4, 1 << i))
diff --git a/tools/testing/selftests/mm/vm_util.c b/tools/testing/selftests/mm/vm_util.c
index 4d952d1bc96d..193ba1a1a3cc 100644
--- a/tools/testing/selftests/mm/vm_util.c
+++ b/tools/testing/selftests/mm/vm_util.c
@@ -338,7 +338,7 @@ int detect_hugetlb_page_sizes(size_t sizes[], int max)
         return count;
 }
 
-static int get_pfn_flags(unsigned long pfn, int kpageflags_fd, uint64_t *flags)
+int get_pfn_flags(unsigned long pfn, int kpageflags_fd, uint64_t *flags)
 {
         size_t count;
 
diff --git a/tools/testing/selftests/mm/vm_util.h b/tools/testing/selftests/mm/vm_util.h
index 02e3f1e7065b..148b792cff0f 100644
--- a/tools/testing/selftests/mm/vm_util.h
+++ b/tools/testing/selftests/mm/vm_util.h
@@ -92,6 +92,7 @@ unsigned long default_huge_page_size(void);
 int detect_hugetlb_page_sizes(size_t sizes[], int max);
 int check_folio_orders(char *vaddr_start, size_t len, int pagemap_file,
                        int kpageflags_file, int orders[], int nr_orders);
+int get_pfn_flags(unsigned long pfn, int kpageflags_fd, uint64_t *flags);
 
 int uffd_register(int uffd, void *addr, uint64_t len, bool miss,
                   bool wp, bool minor);
-- 
2.47.2


Instead of just checking for the existence of PMD folios before and after
the folio split tests, use check_folio_orders() to check the after-split
folio orders.

The following tests are not changed:

1. split_pte_mapped_thp: the test already uses kpageflags to check;
2. split_file_backed_thp: no vaddr is available.
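
To make the expected-order bookkeeping in the patch below easier to follow,
here is a sketch of how check_folio_orders() verifies a uniform split. It is
not part of the series and assumes the globals from split_huge_page_test.c
(pagesize, pmd_pagesize, pmd_order), the usual <stdlib.h>/vm_util.h/kselftest.h
includes, and a buffer that was backed by a single PMD THP before the split.

static void verify_uniform_split(char *buf, int order, int pagemap_fd,
                                 int kpageflags_fd)
{
        int nr_orders = pmd_order + 1;
        int *expected = calloc(nr_orders, sizeof(int));

        if (!expected)
                ksft_exit_fail_msg("calloc failed\n");

        /* one PMD THP split to "order" yields this many folios of that order */
        expected[order] = pmd_pagesize / (pagesize << order);

        /* every other order must stay at a count of 0 */
        if (check_folio_orders(buf, pmd_pagesize, pagemap_fd, kpageflags_fd,
                               expected, nr_orders))
                ksft_test_result_fail("unexpected folio orders after split\n");
        else
                ksft_test_result_pass("folio orders match\n");

        free(expected);
}
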
Signed-off-by: Zi Yan
---
 .../selftests/mm/split_huge_page_test.c | 85 +++++++++++++------
 1 file changed, 61 insertions(+), 24 deletions(-)

diff --git a/tools/testing/selftests/mm/split_huge_page_test.c b/tools/testing/selftests/mm/split_huge_page_test.c
index 3aaf783f339f..1ea2c7f22962 100644
--- a/tools/testing/selftests/mm/split_huge_page_test.c
+++ b/tools/testing/selftests/mm/split_huge_page_test.c
@@ -26,6 +26,7 @@ uint64_t pagesize;
 unsigned int pageshift;
 uint64_t pmd_pagesize;
 unsigned int pmd_order;
+int *expected_orders;
 
 #define SPLIT_DEBUGFS "/sys/kernel/debug/split_huge_pages"
 #define SMAP_PATH "/proc/self/smaps"
@@ -37,6 +38,11 @@ unsigned int pmd_order;
 
 #define GET_ORDER(nr_pages) (31 - __builtin_clz(nr_pages))
 
+const char *pagemap_proc = "/proc/self/pagemap";
+const char *kpageflags_proc = "/proc/kpageflags";
+int pagemap_fd;
+int kpageflags_fd;
+
 int is_backed_by_folio(char *vaddr, int order, int pagemap_fd, int kpageflags_fd)
 {
         unsigned long pfn_head;
@@ -49,18 +55,21 @@ int is_backed_by_folio(char *vaddr, int order, int pagemap_fd, int kpageflags_fd
 
         pfn = pagemap_get_pfn(pagemap_fd, vaddr);
 
+        /* non present page */
         if (pfn == -1UL)
                 return 0;
 
         if (get_pfn_flags(pfn, kpageflags_fd, &pfn_flags))
                 return 0;
 
+        /* check for order-0 pages */
         if (!order) {
                 if (pfn_flags & (KPF_THP | KPF_COMPOUND_HEAD | KPF_COMPOUND_TAIL))
                         return 0;
                 return 1;
         }
 
+        /* non THP folio */
         if (!(pfn_flags & KPF_THP))
                 return 0;
 
@@ -69,9 +78,11 @@ int is_backed_by_folio(char *vaddr, int order, int pagemap_fd, int kpageflags_fd
         if (get_pfn_flags(pfn_head, kpageflags_fd, &pfn_flags))
                 return 0;
 
+        /* head PFN has no compound_head flag set */
         if (!(pfn_flags & (KPF_THP | KPF_COMPOUND_HEAD)))
                 return 0;
 
+        /* check all tail PFN flags */
         for (i = 1; i < (1UL << order) - 1; i++) {
                 if (get_pfn_flags(pfn_head + i, kpageflags_fd, &pfn_flags))
                         return 0;
@@ -198,6 +209,12 @@ void split_pmd_thp_to_order(int order)
                 if (one_page[i] != (char)i)
                         ksft_exit_fail_msg("%ld byte corrupted\n", i);
 
+        memset(expected_orders, 0, sizeof(int) * (pmd_order + 1));
+        expected_orders[order] = 4 << (pmd_order - order);
+
+        if (check_folio_orders(one_page, len, pagemap_fd, kpageflags_fd,
+                               expected_orders, (pmd_order + 1)))
+                ksft_exit_fail_msg("Unexpected THP split\n");
         if (!check_huge_anon(one_page, 0, pmd_pagesize))
                 ksft_exit_fail_msg("Still AnonHugePages not split\n");
 
@@ -212,22 +229,6 @@ void split_pte_mapped_thp(void)
         size_t len = 4 * pmd_pagesize;
         uint64_t thp_size;
         size_t i;
-        const char *pagemap_template = "/proc/%d/pagemap";
-        const char *kpageflags_proc = "/proc/kpageflags";
-        char pagemap_proc[255];
-        int pagemap_fd;
-        int kpageflags_fd;
-
-        if (snprintf(pagemap_proc, 255, pagemap_template, getpid()) < 0)
-                ksft_exit_fail_msg("get pagemap proc error: %s\n", strerror(errno));
-
-        pagemap_fd = open(pagemap_proc, O_RDONLY);
-        if (pagemap_fd == -1)
-                ksft_exit_fail_msg("read pagemap: %s\n", strerror(errno));
-
-        kpageflags_fd = open(kpageflags_proc, O_RDONLY);
-        if (kpageflags_fd == -1)
-                ksft_exit_fail_msg("read kpageflags: %s\n", strerror(errno));
 
         one_page = mmap((void *)(1UL << 30), len, PROT_READ | PROT_WRITE,
                         MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
@@ -285,8 +286,6 @@ void split_pte_mapped_thp(void)
 
         ksft_test_result_pass("Split PTE-mapped huge pages successful\n");
         munmap(one_page, len);
-        close(pagemap_fd);
-        close(kpageflags_fd);
 }
 
 void split_file_backed_thp(int order)
@@ -489,6 +488,7 @@ void split_thp_in_pagecache_to_order_at(size_t fd_size, const char *fs_loc,
                                        int order, int offset)
 {
         int fd;
+        char *split_addr;
         char *addr;
         size_t i;
         char testfile[INPUT_MAX];
@@ -502,14 +502,27 @@ void split_thp_in_pagecache_to_order_at(size_t fd_size, const char *fs_loc,
         err = create_pagecache_thp_and_fd(testfile, fd_size, &fd, &addr);
         if (err)
                 return;
+
         err = 0;
 
-        if (offset == -1)
-                write_debugfs(PID_FMT, getpid(), (uint64_t)addr,
-                              (uint64_t)addr + fd_size, order);
-        else
-                write_debugfs(PID_FMT_OFFSET, getpid(), (uint64_t)addr,
-                              (uint64_t)addr + fd_size, order, offset);
+        memset(expected_orders, 0, sizeof(int) * (pmd_order + 1));
+        if (offset == -1) {
+                for (split_addr = addr; split_addr < addr + fd_size; split_addr += pmd_pagesize)
+                        write_debugfs(PID_FMT, getpid(), (uint64_t)split_addr,
+                                      (uint64_t)split_addr + pagesize, order);
+
+                expected_orders[order] = fd_size / (pagesize << order);
+        } else {
+                int times = fd_size / pmd_pagesize;
+
+                for (split_addr = addr; split_addr < addr + fd_size; split_addr += pmd_pagesize)
+                        write_debugfs(PID_FMT_OFFSET, getpid(), (uint64_t)split_addr,
+                                      (uint64_t)split_addr + pagesize, order, offset);
+
+                for (i = order + 1; i < pmd_order; i++)
+                        expected_orders[i] = times;
+                expected_orders[order] = 2 * times;
+        }
 
         for (i = 0; i < fd_size; i++)
                 if (*(addr + i) != (char)i) {
@@ -518,6 +531,13 @@ void split_thp_in_pagecache_to_order_at(size_t fd_size, const char *fs_loc,
                         goto out;
                 }
 
+        if (check_folio_orders(addr, fd_size, pagemap_fd, kpageflags_fd,
+                               expected_orders, (pmd_order + 1))) {
+                ksft_print_msg("Unexpected THP split\n");
+                err = 1;
+                goto out;
+        }
+
         if (!check_huge_file(addr, 0, pmd_pagesize)) {
                 ksft_print_msg("Still FilePmdMapped not split\n");
                 err = EXIT_FAILURE;
@@ -569,9 +589,22 @@ int main(int argc, char **argv)
 
         nr_pages = pmd_pagesize / pagesize;
         pmd_order = GET_ORDER(nr_pages);
+
+        expected_orders = (int *)malloc(sizeof(int) * (pmd_order + 1));
+        if (!expected_orders)
+                ksft_exit_fail_msg("Fail to allocate memory: %s\n", strerror(errno));
+
         tests = 2 + (pmd_order - 1) + (2 * pmd_order) + (pmd_order - 1) * 4 + 2;
         ksft_set_plan(tests);
 
+        pagemap_fd = open(pagemap_proc, O_RDONLY);
+        if (pagemap_fd == -1)
+                ksft_exit_fail_msg("read pagemap: %s\n", strerror(errno));
+
+        kpageflags_fd = open(kpageflags_proc, O_RDONLY);
+        if (kpageflags_fd == -1)
+                ksft_exit_fail_msg("read kpageflags: %s\n", strerror(errno));
+
         fd_size = 2 * pmd_pagesize;
 
         split_pmd_zero_pages();
@@ -596,6 +629,10 @@ int main(int argc, char **argv)
                         split_thp_in_pagecache_to_order_at(fd_size, fs_loc, i, offset);
 
         cleanup_thp_fs(fs_loc, created_tmp);
+        close(pagemap_fd);
+        close(kpageflags_fd);
+        free(expected_orders);
+
         ksft_finished();
 
         return 0;
-- 
2.47.2
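
As a closing sanity check on the bookkeeping used in
split_thp_in_pagecache_to_order_at() above: splitting one PMD folio at an
in-folio offset down to "order" is expected to leave one folio of each order
between order + 1 and pmd_order - 1 plus two folios of "order", and those
folios must still add up to a whole PMD folio. The following small check of
that arithmetic is illustrative only and not part of the series.

#include <assert.h>

/* pages covered by the expected post-split folio mix for one PMD folio */
static void check_at_offset_accounting(unsigned int pmd_order, unsigned int order)
{
        unsigned long pages = 2UL << order;     /* two folios of "order" */
        unsigned int i;

        for (i = order + 1; i < pmd_order; i++)
                pages += 1UL << i;              /* one folio of each larger order */

        /* e.g. pmd_order = 9, order = 0: 2 + (2 + 4 + ... + 256) = 512 */
        assert(pages == 1UL << pmd_order);
}

Because this holds for every order below pmd_order, expected_orders[] in the
patch only needs "times" entries for each larger order and "2 * times" for the
target order, where "times" is the number of PMD folios in the file range.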