/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_PAGE_COUNTER_H
#define _LINUX_PAGE_COUNTER_H

#include <linux/atomic.h>
#include <linux/cache.h>
#include <linux/limits.h>
#include <asm/page.h>

struct page_counter {
	/*
	 * Make sure 'usage' does not share a cacheline with any other field
	 * in v2; memcg->memory.usage is a hot member of struct mem_cgroup.
	 */
	atomic_long_t usage;
	unsigned long failcnt; /* v1-only field */

	CACHELINE_PADDING(_pad1_);

	/* effective memory.min and memory.min usage tracking */
	unsigned long emin;
	atomic_long_t min_usage;
	atomic_long_t children_min_usage;

	/* effective memory.low and memory.low usage tracking */
	unsigned long elow;
	atomic_long_t low_usage;
	atomic_long_t children_low_usage;

	unsigned long watermark;
	/* Highest usage since the most recent cgroup2 peak reset */
	unsigned long local_watermark;

	/* Keep all the read-mostly fields in a separate cacheline. */
	CACHELINE_PADDING(_pad2_);

	bool protection_support;
	bool track_failcnt;
	unsigned long min;
	unsigned long low;
	unsigned long high;
	unsigned long max;
	struct page_counter *parent;
} ____cacheline_internodealigned_in_smp;
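
/*
 * Limits and usage are tracked in pages. On 64-bit, capping the limit
 * at LONG_MAX / PAGE_SIZE presumably keeps the byte value reported to
 * userspace (max * PAGE_SIZE) from overflowing a signed long; on
 * 32-bit a byte count can overflow a long regardless, so the full
 * LONG_MAX page count serves as "unlimited".
 */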
#if BITS_PER_LONG == 32
#define PAGE_COUNTER_MAX LONG_MAX
#else
#define PAGE_COUNTER_MAX (LONG_MAX / PAGE_SIZE)
#endif

/*
 * Protection is supported only for the first counter (with id 0).
 */
static inline void page_counter_init(struct page_counter *counter,
				     struct page_counter *parent,
				     bool protection_support)
{
	counter->usage = (atomic_long_t)ATOMIC_LONG_INIT(0);
	counter->max = PAGE_COUNTER_MAX;
	counter->parent = parent;
	counter->protection_support = protection_support;
	counter->track_failcnt = false;
}
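
/*
 * Illustrative sketch (hypothetical 'parent'/'child' variables, not
 * taken from kernel code): counters mirror the cgroup hierarchy, with
 * the root initialized against a NULL parent:
 *
 *	struct page_counter parent, child;
 *
 *	page_counter_init(&parent, NULL, true);
 *	page_counter_init(&child, &parent, true);
 */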

static inline unsigned long page_counter_read(struct page_counter *counter)
{
	return atomic_long_read(&counter->usage);
}

void page_counter_cancel(struct page_counter *counter, unsigned long nr_pages);
void page_counter_charge(struct page_counter *counter, unsigned long nr_pages);
bool page_counter_try_charge(struct page_counter *counter,
			     unsigned long nr_pages,
			     struct page_counter **fail);
void page_counter_uncharge(struct page_counter *counter, unsigned long nr_pages);
void page_counter_set_min(struct page_counter *counter, unsigned long nr_pages);
void page_counter_set_low(struct page_counter *counter, unsigned long nr_pages);
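
/*
 * page_counter_charge() charges unconditionally, walking the parent
 * chain and updating each level's watermarks; page_counter_try_charge()
 * instead fails once any ancestor would exceed its max, and points
 * *fail at the counter that hit the limit. page_counter_cancel()
 * unwinds a charge at a single level. A sketch of the usual
 * charge/rollback pattern (hypothetical 'counter' variable, not a
 * kernel code excerpt):
 *
 *	struct page_counter *fail;
 *
 *	if (!page_counter_try_charge(counter, nr_pages, &fail))
 *		return -ENOMEM;
 *	...
 *	page_counter_uncharge(counter, nr_pages);
 */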
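
/*
 * 'high' is read locklessly on the charge path, so the write side is
 * annotated with WRITE_ONCE(); readers are expected to pair it with
 * READ_ONCE().
 */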
static inline void page_counter_set_high(struct page_counter *counter,
					 unsigned long nr_pages)
{
	WRITE_ONCE(counter->high, nr_pages);
}

int page_counter_set_max(struct page_counter *counter, unsigned long nr_pages);
int page_counter_memparse(const char *buf, const char *max,
			  unsigned long *nr_pages);
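
/*
 * page_counter_memparse() accepts either a byte value with suffixes
 * understood by memparse() (e.g. "128M"), or the literal string passed
 * in 'max', which maps to PAGE_COUNTER_MAX.
 */

/*
 * Reset both high-water marks to the current usage so that future
 * peaks are tracked from this point on.
 */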
static inline void page_counter_reset_watermark(struct page_counter *counter)
{
	unsigned long usage = page_counter_read(counter);

	/*
	 * Update local_watermark first, so it's always <= watermark
	 * (modulo CPU/compiler re-ordering).
	 */
	counter->local_watermark = usage;
	counter->watermark = usage;
}
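
/*
 * Effective protection (a summary; see mm/page_counter.c for the
 * exact rules): a child's memory.min/memory.low holds only up to what
 * its parent can effectively protect, distributed among siblings in
 * proportion to their protected usage; the results are cached in the
 * emin/elow fields above. With recursive_protection (cgroup2's
 * memory_recursiveprot), a parent's unused protection is also
 * distributed to children without an explicit setting.
 */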
#if IS_ENABLED(CONFIG_MEMCG) || IS_ENABLED(CONFIG_CGROUP_DMEM)
void page_counter_calculate_protection(struct page_counter *root,
				       struct page_counter *counter,
				       bool recursive_protection);
#else
static inline void page_counter_calculate_protection(struct page_counter *root,
						     struct page_counter *counter,
						     bool recursive_protection) {}
#endif

#endif /* _LINUX_PAGE_COUNTER_H */