1/* SPDX-License-Identifier: GPL-2.0 */
2#ifndef _LINUX_ZSWAP_H
3#define _LINUX_ZSWAP_H
4
5#include <linux/types.h>
6#include <linux/mm_types.h>
7
8struct lruvec;
9
10extern atomic_long_t zswap_stored_pages;
11
12#ifdef CONFIG_ZSWAP
13
/* Per-lruvec zswap state (CONFIG_ZSWAP=y); initialized by zswap_lruvec_state_init(). */
struct zswap_lruvec_state {
	/*
	 * Number of pages swapped in from disk, i.e. not found in the zswap pool.
	 *
	 * This is consumed and subtracted from the lru size in
	 * zswap_shrinker_count() to penalize past overshrinking that led to disk
	 * swapins. The idea is that had we considered this many more pages in the
	 * LRU active/protected and not written them back, we would not have had to
	 * swap them in.
	 */
	atomic_long_t nr_disk_swapins;
};
26
/*
 * CONFIG_ZSWAP=y interface. The !CONFIG_ZSWAP stubs below show the fallback
 * return values callers may rely on (store fails, load reports -ENOENT).
 */
unsigned long zswap_total_pages(void);
bool zswap_store(struct folio *folio);
int zswap_load(struct folio *folio);
/* Drop any zswap entry associated with @swp. */
void zswap_invalidate(swp_entry_t swp);
/* Per-swap-device setup/teardown hooks, keyed by swap @type. */
int zswap_swapon(int type, unsigned long nr_pages);
void zswap_swapoff(int type);
/* Called when @memcg goes offline; cleans up zswap's per-memcg state. */
void zswap_memcg_offline_cleanup(struct mem_cgroup *memcg);
void zswap_lruvec_state_init(struct lruvec *lruvec);
/* Account a disk swapin of @folio (feeds nr_disk_swapins above). */
void zswap_folio_swapin(struct folio *folio);
bool zswap_is_enabled(void);
bool zswap_never_enabled(void);
38#else
39
40struct zswap_lruvec_state {};
41
/* zswap compiled out: report that nothing was stored. */
static inline bool zswap_store(struct folio *folio)
{
	return false;
}
46
/* zswap compiled out: an entry can never be found in zswap. */
static inline int zswap_load(struct folio *folio)
{
	return -ENOENT;
}
51
/* Remaining !CONFIG_ZSWAP stubs: no state to maintain, so these are no-ops. */
static inline void zswap_invalidate(swp_entry_t swp) {}
/* Report success so enabling a swap device is not blocked by zswap's absence. */
static inline int zswap_swapon(int type, unsigned long nr_pages)
{
	return 0;
}
static inline void zswap_swapoff(int type) {}
static inline void zswap_memcg_offline_cleanup(struct mem_cgroup *memcg) {}
static inline void zswap_lruvec_state_init(struct lruvec *lruvec) {}
static inline void zswap_folio_swapin(struct folio *folio) {}
61
/* zswap compiled out: it is never enabled. */
static inline bool zswap_is_enabled(void)
{
	return false;
}
66
/* zswap compiled out: it can never have been enabled. */
static inline bool zswap_never_enabled(void)
{
	return true;
}
71
72#endif
73
74#endif /* _LINUX_ZSWAP_H */
75