// SPDX-License-Identifier: GPL-2.0
/*
 * HugeTLB Vmemmap Optimization (HVO)
 *
 * Copyright (c) 2020, ByteDance. All rights reserved.
 *
 * Author: Muchun Song <songmuchun@bytedance.com>
 */
#ifndef _LINUX_HUGETLB_VMEMMAP_H
#define _LINUX_HUGETLB_VMEMMAP_H
#include <linux/hugetlb.h>
#include <linux/io.h>
#include <linux/memblock.h>

/*
 * Reserve one vmemmap page; all vmemmap addresses are mapped to it. See
 * Documentation/mm/vmemmap_dedup.rst.
 */
#define HUGETLB_VMEMMAP_RESERVE_SIZE	PAGE_SIZE
#define HUGETLB_VMEMMAP_RESERVE_PAGES	(HUGETLB_VMEMMAP_RESERVE_SIZE / sizeof(struct page))
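/*
 * Worked example (illustrative only; sizes vary by architecture and
 * config): with a 4 KiB PAGE_SIZE and a 64-byte struct page,
 * HUGETLB_VMEMMAP_RESERVE_PAGES is 4096 / 64 = 64, i.e. the single
 * reserved vmemmap page holds the struct pages for 64 base pages.
 */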

#ifdef CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP
int hugetlb_vmemmap_restore_folio(const struct hstate *h, struct folio *folio);
long hugetlb_vmemmap_restore_folios(const struct hstate *h,
			struct list_head *folio_list,
			struct list_head *non_hvo_folios);
void hugetlb_vmemmap_optimize_folio(const struct hstate *h, struct folio *folio);
void hugetlb_vmemmap_optimize_folios(struct hstate *h, struct list_head *folio_list);
void hugetlb_vmemmap_optimize_bootmem_folios(struct hstate *h, struct list_head *folio_list);
#ifdef CONFIG_SPARSEMEM_VMEMMAP_PREINIT
void hugetlb_vmemmap_init_early(int nid);
void hugetlb_vmemmap_init_late(int nid);
#endif

static inline unsigned int hugetlb_vmemmap_size(const struct hstate *h)
{
	return pages_per_huge_page(h) * sizeof(struct page);
}
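/*
 * Example (assuming a common x86_64 layout with 4 KiB base pages and a
 * 64-byte struct page; both are config-dependent): a 2 MiB HugeTLB page
 * spans 512 base pages, so hugetlb_vmemmap_size() is 512 * 64 = 32 KiB,
 * i.e. 8 vmemmap pages; a 1 GiB page needs 262144 * 64 = 16 MiB of
 * vmemmap.
 */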

/*
 * Return how much vmemmap memory associated with a HugeTLB page can be
 * optimized away and freed back to the buddy allocator.
 */
static inline unsigned int hugetlb_vmemmap_optimizable_size(const struct hstate *h)
{
	int size = hugetlb_vmemmap_size(h) - HUGETLB_VMEMMAP_RESERVE_SIZE;

	/* The remapping scheme only works if struct page is a power-of-2 size. */
	if (!is_power_of_2(sizeof(struct page)))
		return 0;
	return size > 0 ? size : 0;
}
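/*
 * Continuing the example above (4 KiB pages, 64-byte struct page): a
 * 2 MiB HugeTLB page has 32 KiB of vmemmap, of which 32 KiB - 4 KiB =
 * 28 KiB (7 of the 8 vmemmap pages) is optimizable; for a 1 GiB page,
 * 16 MiB - 4 KiB can be freed back to the buddy allocator.
 */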
#else
static inline int hugetlb_vmemmap_restore_folio(const struct hstate *h, struct folio *folio)
{
	return 0;
}

static inline long hugetlb_vmemmap_restore_folios(const struct hstate *h,
			struct list_head *folio_list,
			struct list_head *non_hvo_folios)
{
	list_splice_init(folio_list, non_hvo_folios);
	return 0;
}

static inline void hugetlb_vmemmap_optimize_folio(const struct hstate *h, struct folio *folio)
{
}

static inline void hugetlb_vmemmap_optimize_folios(struct hstate *h, struct list_head *folio_list)
{
}

static inline void hugetlb_vmemmap_optimize_bootmem_folios(struct hstate *h,
			struct list_head *folio_list)
{
}

static inline void hugetlb_vmemmap_init_early(int nid)
{
}

static inline void hugetlb_vmemmap_init_late(int nid)
{
}

static inline unsigned int hugetlb_vmemmap_optimizable_size(const struct hstate *h)
{
	return 0;
}
#endif /* CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP */

static inline bool hugetlb_vmemmap_optimizable(const struct hstate *h)
{
	return hugetlb_vmemmap_optimizable_size(h) != 0;
}
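/*
 * Usage sketch (illustrative only, not part of this header): callers
 * would typically gate HVO work on this predicate, e.g.
 *
 *	if (hugetlb_vmemmap_optimizable(h))
 *		hugetlb_vmemmap_optimize_folio(h, folio);
 */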
#endif /* _LINUX_HUGETLB_VMEMMAP_H */