1// SPDX-License-Identifier: MIT
2/*
3 * Copyright © 2024 Intel Corporation
4 */
5
6#include <drm/ttm/ttm_backup.h>
7
8#include <linux/export.h>
9#include <linux/page-flags.h>
10#include <linux/swap.h>
11
12/*
13 * Need to map shmem indices to handle since a handle value
14 * of 0 means error, following the swp_entry_t convention.
15 */
16static unsigned long ttm_backup_shmem_idx_to_handle(pgoff_t idx)
17{
18 return (unsigned long)idx + 1;
19}
20
21static pgoff_t ttm_backup_handle_to_shmem_idx(pgoff_t handle)
22{
23 return handle - 1;
24}
25
26/**
27 * ttm_backup_drop() - release memory associated with a handle
28 * @backup: The struct backup pointer used to obtain the handle
29 * @handle: The handle obtained from the @backup_page function.
30 */
31void ttm_backup_drop(struct file *backup, pgoff_t handle)
32{
33 loff_t start = ttm_backup_handle_to_shmem_idx(handle);
34
35 start <<= PAGE_SHIFT;
36 shmem_truncate_range(inode: file_inode(f: backup), start,
37 end: start + PAGE_SIZE - 1);
38}
39
40/**
41 * ttm_backup_copy_page() - Copy the contents of a previously backed
42 * up page
43 * @backup: The struct backup pointer used to back up the page.
44 * @dst: The struct page to copy into.
45 * @handle: The handle returned when the page was backed up.
46 * @intr: Try to perform waits interruptible or at least killable.
47 *
48 * Return: 0 on success, Negative error code on failure, notably
49 * -EINTR if @intr was set to true and a signal is pending.
50 */
51int ttm_backup_copy_page(struct file *backup, struct page *dst,
52 pgoff_t handle, bool intr)
53{
54 struct address_space *mapping = backup->f_mapping;
55 struct folio *from_folio;
56 pgoff_t idx = ttm_backup_handle_to_shmem_idx(handle);
57
58 from_folio = shmem_read_folio(mapping, index: idx);
59 if (IS_ERR(ptr: from_folio))
60 return PTR_ERR(ptr: from_folio);
61
62 copy_highpage(to: dst, from: folio_file_page(folio: from_folio, index: idx));
63 folio_put(folio: from_folio);
64
65 return 0;
66}
67
68/**
69 * ttm_backup_backup_page() - Backup a page
70 * @backup: The struct backup pointer to use.
71 * @page: The page to back up.
72 * @writeback: Whether to perform immediate writeback of the page.
73 * This may have performance implications.
74 * @idx: A unique integer for each page and each struct backup.
75 * This allows the backup implementation to avoid managing
76 * its address space separately.
77 * @page_gfp: The gfp value used when the page was allocated.
78 * This is used for accounting purposes.
79 * @alloc_gfp: The gfp to be used when allocating memory.
80 *
81 * Context: If called from reclaim context, the caller needs to
82 * assert that the shrinker gfp has __GFP_FS set, to avoid
83 * deadlocking on lock_page(). If @writeback is set to true and
84 * called from reclaim context, the caller also needs to assert
85 * that the shrinker gfp has __GFP_IO set, since without it,
86 * we're not allowed to start backup IO.
87 *
88 * Return: A handle on success. Negative error code on failure.
89 *
90 * Note: This function could be extended to back up a folio and
91 * implementations would then split the folio internally if needed.
92 * Drawback is that the caller would then have to keep track of
93 * the folio size- and usage.
94 */
95s64
96ttm_backup_backup_page(struct file *backup, struct page *page,
97 bool writeback, pgoff_t idx, gfp_t page_gfp,
98 gfp_t alloc_gfp)
99{
100 struct address_space *mapping = backup->f_mapping;
101 unsigned long handle = 0;
102 struct folio *to_folio;
103 int ret;
104
105 to_folio = shmem_read_folio_gfp(mapping, index: idx, gfp: alloc_gfp);
106 if (IS_ERR(ptr: to_folio))
107 return PTR_ERR(ptr: to_folio);
108
109 folio_mark_accessed(to_folio);
110 folio_lock(folio: to_folio);
111 folio_mark_dirty(folio: to_folio);
112 copy_highpage(to: folio_file_page(folio: to_folio, index: idx), from: page);
113 handle = ttm_backup_shmem_idx_to_handle(idx);
114
115 if (writeback && !folio_mapped(folio: to_folio) &&
116 folio_clear_dirty_for_io(folio: to_folio)) {
117 folio_set_reclaim(folio: to_folio);
118 ret = shmem_writeout(folio: to_folio, NULL, NULL);
119 if (!folio_test_writeback(folio: to_folio))
120 folio_clear_reclaim(folio: to_folio);
121 /*
122 * If writeout succeeds, it unlocks the folio. errors
123 * are otherwise dropped, since writeout is only best
124 * effort here.
125 */
126 if (ret)
127 folio_unlock(folio: to_folio);
128 } else {
129 folio_unlock(folio: to_folio);
130 }
131
132 folio_put(folio: to_folio);
133
134 return handle;
135}
136
/**
 * ttm_backup_fini() - Free the struct backup resources after last use.
 * @backup: Pointer to the struct backup whose resources to free.
 *
 * After a call to this function, it's illegal to use the @backup pointer.
 */
void ttm_backup_fini(struct file *backup)
{
	/* Drop our reference to the backing shmem file. */
	fput(backup);
}
147
148/**
149 * ttm_backup_bytes_avail() - Report the approximate number of bytes of backup space
150 * left for backup.
151 *
152 * This function is intended also for driver use to indicate whether a
153 * backup attempt is meaningful.
154 *
155 * Return: An approximate size of backup space available.
156 */
157u64 ttm_backup_bytes_avail(void)
158{
159 /*
160 * The idea behind backing up to shmem is that shmem objects may
161 * eventually be swapped out. So no point swapping out if there
162 * is no or low swap-space available. But the accuracy of this
163 * number also depends on shmem actually swapping out backed-up
164 * shmem objects without too much buffering.
165 */
166 return (u64)get_nr_swap_pages() << PAGE_SHIFT;
167}
168EXPORT_SYMBOL_GPL(ttm_backup_bytes_avail);
169
170/**
171 * ttm_backup_shmem_create() - Create a shmem-based struct backup.
172 * @size: The maximum size (in bytes) to back up.
173 *
174 * Create a backup utilizing shmem objects.
175 *
176 * Return: A pointer to a struct file on success,
177 * an error pointer on error.
178 */
179struct file *ttm_backup_shmem_create(loff_t size)
180{
181 return shmem_file_setup(name: "ttm shmem backup", size, flags: 0);
182}
183