// SPDX-License-Identifier: GPL-2.0
#include <linux/pagewalk.h>
#include <linux/mm_inline.h>
#include <linux/hugetlb.h>
#include <linux/huge_mm.h>
#include <linux/mount.h>
#include <linux/ksm.h>
#include <linux/seq_file.h>
#include <linux/highmem.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/sched/mm.h>
#include <linux/swapops.h>
#include <linux/mmu_notifier.h>
#include <linux/page_idle.h>
#include <linux/shmem_fs.h>
#include <linux/uaccess.h>
#include <linux/pkeys.h>
#include <linux/minmax.h>
#include <linux/overflow.h>
#include <linux/buildid.h>

#include <asm/elf.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include "internal.h"

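/*
 * Sentinel values stored in *ppos by the seq_file iterators below:
 * iteration has finished, or the gate VMA is the next entry to report.
 */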
#define SENTINEL_VMA_END	-1
#define SENTINEL_VMA_GATE	-2

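/*
 * Report the memory-usage counters (VmPeak, VmSize, VmRSS, ...) shown
 * in /proc/<pid>/status.
 */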
#define SEQ_PUT_DEC(str, val) \
		seq_put_decimal_ull_width(m, str, (val) << (PAGE_SHIFT-10), 8)
void task_mem(struct seq_file *m, struct mm_struct *mm)
{
	unsigned long text, lib, swap, anon, file, shmem;
	unsigned long hiwater_vm, total_vm, hiwater_rss, total_rss;

	anon = get_mm_counter_sum(mm, MM_ANONPAGES);
	file = get_mm_counter_sum(mm, MM_FILEPAGES);
	shmem = get_mm_counter_sum(mm, MM_SHMEMPAGES);

	/*
	 * Note: to minimize their overhead, mm maintains hiwater_vm and
	 * hiwater_rss only when about to *lower* total_vm or rss.  Any
	 * collector of these hiwater stats must therefore get total_vm
	 * and rss too, which will usually be the higher.  Barriers? not
	 * worth the effort, such snapshots can always be inconsistent.
	 */
	hiwater_vm = total_vm = mm->total_vm;
	if (hiwater_vm < mm->hiwater_vm)
		hiwater_vm = mm->hiwater_vm;
	hiwater_rss = total_rss = anon + file + shmem;
	if (hiwater_rss < mm->hiwater_rss)
		hiwater_rss = mm->hiwater_rss;

	/* split executable areas between text and lib */
	text = PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK);
	text = min(text, mm->exec_vm << PAGE_SHIFT);
	lib = (mm->exec_vm << PAGE_SHIFT) - text;

	swap = get_mm_counter_sum(mm, MM_SWAPENTS);
	SEQ_PUT_DEC("VmPeak:\t", hiwater_vm);
	SEQ_PUT_DEC(" kB\nVmSize:\t", total_vm);
	SEQ_PUT_DEC(" kB\nVmLck:\t", mm->locked_vm);
	SEQ_PUT_DEC(" kB\nVmPin:\t", atomic64_read(&mm->pinned_vm));
	SEQ_PUT_DEC(" kB\nVmHWM:\t", hiwater_rss);
	SEQ_PUT_DEC(" kB\nVmRSS:\t", total_rss);
	SEQ_PUT_DEC(" kB\nRssAnon:\t", anon);
	SEQ_PUT_DEC(" kB\nRssFile:\t", file);
	SEQ_PUT_DEC(" kB\nRssShmem:\t", shmem);
	SEQ_PUT_DEC(" kB\nVmData:\t", mm->data_vm);
	SEQ_PUT_DEC(" kB\nVmStk:\t", mm->stack_vm);
	seq_put_decimal_ull_width(m,
		    " kB\nVmExe:\t", text >> 10, 8);
	seq_put_decimal_ull_width(m,
		    " kB\nVmLib:\t", lib >> 10, 8);
	seq_put_decimal_ull_width(m,
		    " kB\nVmPTE:\t", mm_pgtables_bytes(mm) >> 10, 8);
	SEQ_PUT_DEC(" kB\nVmSwap:\t", swap);
	seq_puts(m, " kB\n");
	hugetlb_report_usage(m, mm);
}
#undef SEQ_PUT_DEC

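/* Virtual memory size in bytes: total_vm pages times the page size. */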
unsigned long task_vsize(struct mm_struct *mm)
{
	return PAGE_SIZE * mm->total_vm;
}

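/*
 * Fill in the page counts reported by /proc/<pid>/statm; the return
 * value is the total number of mapped pages.
 */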
unsigned long task_statm(struct mm_struct *mm,
			 unsigned long *shared, unsigned long *text,
			 unsigned long *data, unsigned long *resident)
{
	*shared = get_mm_counter_sum(mm, MM_FILEPAGES) +
			get_mm_counter_sum(mm, MM_SHMEMPAGES);
	*text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK))
								>> PAGE_SHIFT;
	*data = mm->data_vm + mm->stack_vm;
	*resident = *shared + get_mm_counter_sum(mm, MM_ANONPAGES);
	return mm->total_vm;
}

#ifdef CONFIG_NUMA
/*
 * Save get_task_policy() for show_numa_map().
 */
static void hold_task_mempolicy(struct proc_maps_private *priv)
{
	struct task_struct *task = priv->task;

	task_lock(task);
	priv->task_mempolicy = get_task_policy(task);
	mpol_get(priv->task_mempolicy);
	task_unlock(task);
}
static void release_task_mempolicy(struct proc_maps_private *priv)
{
	mpol_put(priv->task_mempolicy);
}
#else
static void hold_task_mempolicy(struct proc_maps_private *priv)
{
}
static void release_task_mempolicy(struct proc_maps_private *priv)
{
}
#endif

#ifdef CONFIG_PER_VMA_LOCK

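/* Start with no VMA locked and mmap_lock not taken. */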
static void reset_lock_ctx(struct proc_maps_locking_ctx *lock_ctx)
{
	lock_ctx->locked_vma = NULL;
	lock_ctx->mmap_locked = false;
}

static void unlock_ctx_vma(struct proc_maps_locking_ctx *lock_ctx)
{
	if (lock_ctx->locked_vma) {
		vma_end_read(lock_ctx->locked_vma);
		lock_ctx->locked_vma = NULL;
	}
}

static const struct seq_operations proc_pid_maps_op;

static inline bool lock_vma_range(struct seq_file *m,
				  struct proc_maps_locking_ctx *lock_ctx)
{
	/*
	 * smaps and numa_maps perform page table walk, therefore require
	 * mmap_lock but maps can be read with locking just the vma and
	 * walking the vma tree under rcu read protection.
	 */
	if (m->op != &proc_pid_maps_op) {
		if (mmap_read_lock_killable(lock_ctx->mm))
			return false;

		lock_ctx->mmap_locked = true;
	} else {
		rcu_read_lock();
		reset_lock_ctx(lock_ctx);
	}

	return true;
}

static inline void unlock_vma_range(struct proc_maps_locking_ctx *lock_ctx)
{
	if (lock_ctx->mmap_locked) {
		mmap_read_unlock(lock_ctx->mm);
	} else {
		unlock_ctx_vma(lock_ctx);
		rcu_read_unlock();
	}
}

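/*
 * Return the next VMA after @last_pos.  Under mmap_lock this is plain
 * vma_next(); otherwise drop the lock on the previously reported VMA
 * and lock the next one under RCU, which may fail with an ERR_PTR().
 */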
static struct vm_area_struct *get_next_vma(struct proc_maps_private *priv,
					   loff_t last_pos)
{
	struct proc_maps_locking_ctx *lock_ctx = &priv->lock_ctx;
	struct vm_area_struct *vma;

	if (lock_ctx->mmap_locked)
		return vma_next(&priv->iter);

	unlock_ctx_vma(lock_ctx);
	vma = lock_next_vma(lock_ctx->mm, &priv->iter, last_pos);
	if (!IS_ERR_OR_NULL(vma))
		lock_ctx->locked_vma = vma;

	return vma;
}

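/*
 * Called when lock_next_vma() failed with -EAGAIN: take mmap_lock and
 * reposition the iterator so the walk can be retried under it.
 */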
static inline bool fallback_to_mmap_lock(struct proc_maps_private *priv,
					 loff_t pos)
{
	struct proc_maps_locking_ctx *lock_ctx = &priv->lock_ctx;

	if (lock_ctx->mmap_locked)
		return false;

	rcu_read_unlock();
	mmap_read_lock(lock_ctx->mm);
	/* Reinitialize the iterator after taking mmap_lock */
	vma_iter_set(&priv->iter, pos);
	lock_ctx->mmap_locked = true;

	return true;
}

#else /* CONFIG_PER_VMA_LOCK */

static inline bool lock_vma_range(struct seq_file *m,
				  struct proc_maps_locking_ctx *lock_ctx)
{
	return mmap_read_lock_killable(lock_ctx->mm) == 0;
}

static inline void unlock_vma_range(struct proc_maps_locking_ctx *lock_ctx)
{
	mmap_read_unlock(lock_ctx->mm);
}

static struct vm_area_struct *get_next_vma(struct proc_maps_private *priv,
					   loff_t last_pos)
{
	return vma_next(&priv->iter);
}

static inline bool fallback_to_mmap_lock(struct proc_maps_private *priv,
					 loff_t pos)
{
	return false;
}

#endif /* CONFIG_PER_VMA_LOCK */

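/*
 * Return the VMA to report at *ppos and advance *ppos past it.  Once
 * the VMA tree is exhausted, report the gate VMA (if any) and mark
 * that state with SENTINEL_VMA_GATE.
 */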
static struct vm_area_struct *proc_get_vma(struct seq_file *m, loff_t *ppos)
{
	struct proc_maps_private *priv = m->private;
	struct vm_area_struct *vma;

retry:
	vma = get_next_vma(priv, *ppos);
	/* EINTR or EAGAIN is possible */
	if (IS_ERR(vma)) {
		if (PTR_ERR(vma) == -EAGAIN && fallback_to_mmap_lock(priv, *ppos))
			goto retry;

		return vma;
	}

	/* Store previous position to be able to restart if needed */
	priv->last_pos = *ppos;
	if (vma) {
		/*
		 * Track the end of the reported vma to ensure position changes
		 * even if previous vma was merged with the next vma and we
		 * found the extended vma with the same vm_start.
		 */
		*ppos = vma->vm_end;
	} else {
		*ppos = SENTINEL_VMA_GATE;
		vma = get_gate_vma(priv->lock_ctx.mm);
	}

	return vma;
}

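/*
 * seq_file ->start(): pin the task and mm, lock the VMA range and
 * resume iteration at *ppos (restoring the saved last_pos if needed).
 */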
static void *m_start(struct seq_file *m, loff_t *ppos)
{
	struct proc_maps_private *priv = m->private;
	struct proc_maps_locking_ctx *lock_ctx;
	loff_t last_addr = *ppos;
	struct mm_struct *mm;

	/* See m_next(). Zero at the start or after lseek. */
	if (last_addr == SENTINEL_VMA_END)
		return NULL;

	priv->task = get_proc_task(priv->inode);
	if (!priv->task)
		return ERR_PTR(-ESRCH);

	lock_ctx = &priv->lock_ctx;
	mm = lock_ctx->mm;
	if (!mm || !mmget_not_zero(mm)) {
		put_task_struct(priv->task);
		priv->task = NULL;
		return NULL;
	}

	if (!lock_vma_range(m, lock_ctx)) {
		mmput(mm);
		put_task_struct(priv->task);
		priv->task = NULL;
		return ERR_PTR(-EINTR);
	}

	/*
	 * Reset current position if last_addr was set before
	 * and it's not a sentinel.
	 */
	if (last_addr > 0)
		*ppos = last_addr = priv->last_pos;
	vma_iter_init(&priv->iter, mm, (unsigned long)last_addr);
	hold_task_mempolicy(priv);
	if (last_addr == SENTINEL_VMA_GATE)
		return get_gate_vma(mm);

	return proc_get_vma(m, ppos);
}

static void *m_next(struct seq_file *m, void *v, loff_t *ppos)
{
	if (*ppos == SENTINEL_VMA_GATE) {
		*ppos = SENTINEL_VMA_END;
		return NULL;
	}
	return proc_get_vma(m, ppos);
}

static void m_stop(struct seq_file *m, void *v)
{
	struct proc_maps_private *priv = m->private;
	struct mm_struct *mm = priv->lock_ctx.mm;

	if (!priv->task)
		return;

	release_task_mempolicy(priv);
	unlock_vma_range(&priv->lock_ctx);
	mmput(mm);
	put_task_struct(priv->task);
	priv->task = NULL;
}

static int proc_maps_open(struct inode *inode, struct file *file,
			  const struct seq_operations *ops, int psize)
{
	struct proc_maps_private *priv = __seq_open_private(file, ops, psize);

	if (!priv)
		return -ENOMEM;

	priv->inode = inode;
	priv->lock_ctx.mm = proc_mem_open(inode, PTRACE_MODE_READ);
	if (IS_ERR(priv->lock_ctx.mm)) {
		int err = PTR_ERR(priv->lock_ctx.mm);

		seq_release_private(inode, file);
		return err;
	}

	return 0;
}

static int proc_map_release(struct inode *inode, struct file *file)
{
	struct seq_file *seq = file->private_data;
	struct proc_maps_private *priv = seq->private;

	if (priv->lock_ctx.mm)
		mmdrop(priv->lock_ctx.mm);

	return seq_release_private(inode, file);
}

static int do_maps_open(struct inode *inode, struct file *file,
			const struct seq_operations *ops)
{
	return proc_maps_open(inode, file, ops,
				sizeof(struct proc_maps_private));
}

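/*
 * Work out what to show in the last column of a maps line: the backing
 * file's path, a vm_ops- or arch-provided name, [heap]/[stack], or a
 * name set via prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, ...).
 */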
static void get_vma_name(struct vm_area_struct *vma,
			 const struct path **path,
			 const char **name,
			 const char **name_fmt)
{
	struct anon_vma_name *anon_name = vma->vm_mm ? anon_vma_name(vma) : NULL;

	*name = NULL;
	*path = NULL;
	*name_fmt = NULL;

	/*
	 * Print the dentry name for named mappings, and a
	 * special [heap] marker for the heap:
	 */
	if (vma->vm_file) {
		/*
		 * If user named this anon shared memory via
		 * prctl(PR_SET_VMA ..., use the provided name.
		 */
		if (anon_name) {
			*name_fmt = "[anon_shmem:%s]";
			*name = anon_name->name;
		} else {
			*path = file_user_path(vma->vm_file);
		}
		return;
	}

	if (vma->vm_ops && vma->vm_ops->name) {
		*name = vma->vm_ops->name(vma);
		if (*name)
			return;
	}

	*name = arch_vma_name(vma);
	if (*name)
		return;

	if (!vma->vm_mm) {
		*name = "[vdso]";
		return;
	}

	if (vma_is_initial_heap(vma)) {
		*name = "[heap]";
		return;
	}

	if (vma_is_initial_stack(vma)) {
		*name = "[stack]";
		return;
	}

	if (anon_name) {
		*name_fmt = "[anon:%s]";
		*name = anon_name->name;
		return;
	}
}

static void show_vma_header_prefix(struct seq_file *m,
				   unsigned long start, unsigned long end,
				   vm_flags_t flags, unsigned long long pgoff,
				   dev_t dev, unsigned long ino)
{
	seq_setwidth(m, 25 + sizeof(void *) * 6 - 1);
	seq_put_hex_ll(m, NULL, start, 8);
	seq_put_hex_ll(m, "-", end, 8);
	seq_putc(m, ' ');
	seq_putc(m, flags & VM_READ ? 'r' : '-');
	seq_putc(m, flags & VM_WRITE ? 'w' : '-');
	seq_putc(m, flags & VM_EXEC ? 'x' : '-');
	seq_putc(m, flags & VM_MAYSHARE ? 's' : 'p');
	seq_put_hex_ll(m, " ", pgoff, 8);
	seq_put_hex_ll(m, " ", MAJOR(dev), 2);
	seq_put_hex_ll(m, ":", MINOR(dev), 2);
	seq_put_decimal_ull(m, " ", ino);
	seq_putc(m, ' ');
}

static void
show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
{
	const struct path *path;
	const char *name_fmt, *name;
	vm_flags_t flags = vma->vm_flags;
	unsigned long ino = 0;
	unsigned long long pgoff = 0;
	unsigned long start, end;
	dev_t dev = 0;

	if (vma->vm_file) {
		const struct inode *inode = file_user_inode(vma->vm_file);

		dev = inode->i_sb->s_dev;
		ino = inode->i_ino;
		pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
	}

	start = vma->vm_start;
	end = vma->vm_end;
	show_vma_header_prefix(m, start, end, flags, pgoff, dev, ino);

	get_vma_name(vma, &path, &name, &name_fmt);
	if (path) {
		seq_pad(m, ' ');
		seq_path(m, path, "\n");
	} else if (name_fmt) {
		seq_pad(m, ' ');
		seq_printf(m, name_fmt, name);
	} else if (name) {
		seq_pad(m, ' ');
		seq_puts(m, name);
	}
	seq_putc(m, '\n');
}

static int show_map(struct seq_file *m, void *v)
{
	show_map_vma(m, v);
	return 0;
}

static const struct seq_operations proc_pid_maps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_map
};

static int pid_maps_open(struct inode *inode, struct file *file)
{
	return do_maps_open(inode, file, &proc_pid_maps_op);
}

#define PROCMAP_QUERY_VMA_FLAGS (				\
		PROCMAP_QUERY_VMA_READABLE |			\
		PROCMAP_QUERY_VMA_WRITABLE |			\
		PROCMAP_QUERY_VMA_EXECUTABLE |			\
		PROCMAP_QUERY_VMA_SHARED			\
)

#define PROCMAP_QUERY_VALID_FLAGS_MASK (			\
		PROCMAP_QUERY_COVERING_OR_NEXT_VMA |		\
		PROCMAP_QUERY_FILE_BACKED_VMA |			\
		PROCMAP_QUERY_VMA_FLAGS				\
)

#ifdef CONFIG_PER_VMA_LOCK

static int query_vma_setup(struct proc_maps_locking_ctx *lock_ctx)
{
	reset_lock_ctx(lock_ctx);

	return 0;
}

static void query_vma_teardown(struct proc_maps_locking_ctx *lock_ctx)
{
	if (lock_ctx->mmap_locked) {
		mmap_read_unlock(lock_ctx->mm);
		lock_ctx->mmap_locked = false;
	} else {
		unlock_ctx_vma(lock_ctx);
	}
}

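/*
 * Find the VMA covering @addr, or the first one after it.  Per-VMA
 * locking is tried first; on vma->vm_refcnt overflow (-EAGAIN) the
 * lookup falls back to mmap_lock for the rest of the query.
 */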
static struct vm_area_struct *query_vma_find_by_addr(struct proc_maps_locking_ctx *lock_ctx,
						     unsigned long addr)
{
	struct mm_struct *mm = lock_ctx->mm;
	struct vm_area_struct *vma;
	struct vma_iterator vmi;

	if (lock_ctx->mmap_locked)
		return find_vma(mm, addr);

	/* Unlock previously locked VMA and find the next one under RCU */
	unlock_ctx_vma(lock_ctx);
	rcu_read_lock();
	vma_iter_init(&vmi, mm, addr);
	vma = lock_next_vma(mm, &vmi, addr);
	rcu_read_unlock();

	if (!vma)
		return NULL;

	if (!IS_ERR(vma)) {
		lock_ctx->locked_vma = vma;
		return vma;
	}

	if (PTR_ERR(vma) == -EAGAIN) {
		/* Fallback to mmap_lock on vma->vm_refcnt overflow */
		mmap_read_lock(mm);
		vma = find_vma(mm, addr);
		lock_ctx->mmap_locked = true;
	}

	return vma;
}

#else /* CONFIG_PER_VMA_LOCK */

static int query_vma_setup(struct proc_maps_locking_ctx *lock_ctx)
{
	return mmap_read_lock_killable(lock_ctx->mm);
}

static void query_vma_teardown(struct proc_maps_locking_ctx *lock_ctx)
{
	mmap_read_unlock(lock_ctx->mm);
}

static struct vm_area_struct *query_vma_find_by_addr(struct proc_maps_locking_ctx *lock_ctx,
						     unsigned long addr)
{
	return find_vma(lock_ctx->mm, addr);
}

#endif  /* CONFIG_PER_VMA_LOCK */

static struct vm_area_struct *query_matching_vma(struct proc_maps_locking_ctx *lock_ctx,
						 unsigned long addr, u32 flags)
{
	struct vm_area_struct *vma;

next_vma:
	vma = query_vma_find_by_addr(lock_ctx, addr);
	if (IS_ERR(vma))
		return vma;

	if (!vma)
		goto no_vma;

	/* user requested only file-backed VMA, keep iterating */
	if ((flags & PROCMAP_QUERY_FILE_BACKED_VMA) && !vma->vm_file)
		goto skip_vma;

	/* VMA permissions should satisfy query flags */
	if (flags & PROCMAP_QUERY_VMA_FLAGS) {
		u32 perm = 0;

		if (flags & PROCMAP_QUERY_VMA_READABLE)
			perm |= VM_READ;
		if (flags & PROCMAP_QUERY_VMA_WRITABLE)
			perm |= VM_WRITE;
		if (flags & PROCMAP_QUERY_VMA_EXECUTABLE)
			perm |= VM_EXEC;
		if (flags & PROCMAP_QUERY_VMA_SHARED)
			perm |= VM_MAYSHARE;

		if ((vma->vm_flags & perm) != perm)
			goto skip_vma;
	}

	/* found covering VMA or user is OK with the matching next VMA */
	if ((flags & PROCMAP_QUERY_COVERING_OR_NEXT_VMA) || vma->vm_start <= addr)
		return vma;

skip_vma:
	/*
	 * If the user needs closest matching VMA, keep iterating.
	 */
	addr = vma->vm_end;
	if (flags & PROCMAP_QUERY_COVERING_OR_NEXT_VMA)
		goto next_vma;

no_vma:
	return ERR_PTR(-ENOENT);
}

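/*
 * Handle the PROCMAP_QUERY ioctl: copy in the extensible struct
 * procmap_query, find the matching VMA, and copy the results (plus the
 * optional VMA name and build ID) back to userspace.
 */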
static int do_procmap_query(struct mm_struct *mm, void __user *uarg)
{
	struct proc_maps_locking_ctx lock_ctx = { .mm = mm };
	struct procmap_query karg;
	struct vm_area_struct *vma;
	const char *name = NULL;
	char build_id_buf[BUILD_ID_SIZE_MAX], *name_buf = NULL;
	__u64 usize;
	int err;

	if (copy_from_user(&usize, (void __user *)uarg, sizeof(usize)))
		return -EFAULT;
	/* argument struct can never be that large, reject abuse */
	if (usize > PAGE_SIZE)
		return -E2BIG;
	/* argument struct should have at least query_flags and query_addr fields */
	if (usize < offsetofend(struct procmap_query, query_addr))
		return -EINVAL;
	err = copy_struct_from_user(&karg, sizeof(karg), uarg, usize);
	if (err)
		return err;

	/* reject unknown flags */
	if (karg.query_flags & ~PROCMAP_QUERY_VALID_FLAGS_MASK)
		return -EINVAL;
	/* either both buffer address and size are set, or both should be zero */
	if (!!karg.vma_name_size != !!karg.vma_name_addr)
		return -EINVAL;
	if (!!karg.build_id_size != !!karg.build_id_addr)
		return -EINVAL;

	if (!mm || !mmget_not_zero(mm))
		return -ESRCH;

	err = query_vma_setup(&lock_ctx);
	if (err) {
		mmput(mm);
		return err;
	}

	vma = query_matching_vma(&lock_ctx, karg.query_addr, karg.query_flags);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		vma = NULL;
		goto out;
	}

	karg.vma_start = vma->vm_start;
	karg.vma_end = vma->vm_end;

	karg.vma_flags = 0;
	if (vma->vm_flags & VM_READ)
		karg.vma_flags |= PROCMAP_QUERY_VMA_READABLE;
	if (vma->vm_flags & VM_WRITE)
		karg.vma_flags |= PROCMAP_QUERY_VMA_WRITABLE;
	if (vma->vm_flags & VM_EXEC)
		karg.vma_flags |= PROCMAP_QUERY_VMA_EXECUTABLE;
	if (vma->vm_flags & VM_MAYSHARE)
		karg.vma_flags |= PROCMAP_QUERY_VMA_SHARED;

	karg.vma_page_size = vma_kernel_pagesize(vma);

	if (vma->vm_file) {
		const struct inode *inode = file_user_inode(vma->vm_file);

		karg.vma_offset = ((__u64)vma->vm_pgoff) << PAGE_SHIFT;
		karg.dev_major = MAJOR(inode->i_sb->s_dev);
		karg.dev_minor = MINOR(inode->i_sb->s_dev);
		karg.inode = inode->i_ino;
	} else {
		karg.vma_offset = 0;
		karg.dev_major = 0;
		karg.dev_minor = 0;
		karg.inode = 0;
	}

	if (karg.build_id_size) {
		__u32 build_id_sz;

		err = build_id_parse(vma, build_id_buf, &build_id_sz);
		if (err) {
			karg.build_id_size = 0;
		} else {
			if (karg.build_id_size < build_id_sz) {
				err = -ENAMETOOLONG;
				goto out;
			}
			karg.build_id_size = build_id_sz;
		}
	}

	if (karg.vma_name_size) {
		size_t name_buf_sz = min_t(size_t, PATH_MAX, karg.vma_name_size);
		const struct path *path;
		const char *name_fmt;
		size_t name_sz = 0;

		get_vma_name(vma, &path, &name, &name_fmt);

		if (path || name_fmt || name) {
			name_buf = kmalloc(name_buf_sz, GFP_KERNEL);
			if (!name_buf) {
				err = -ENOMEM;
				goto out;
			}
		}
		if (path) {
			name = d_path(path, name_buf, name_buf_sz);
			if (IS_ERR(name)) {
				err = PTR_ERR(name);
				goto out;
			}
			name_sz = name_buf + name_buf_sz - name;
		} else if (name || name_fmt) {
			name_sz = 1 + snprintf(name_buf, name_buf_sz, name_fmt ?: "%s", name);
			name = name_buf;
		}
		if (name_sz > name_buf_sz) {
			err = -ENAMETOOLONG;
			goto out;
		}
		karg.vma_name_size = name_sz;
	}

	/* unlock vma or mmap_lock, and put mm_struct before copying data to user */
	query_vma_teardown(&lock_ctx);
	mmput(mm);

	if (karg.vma_name_size && copy_to_user(u64_to_user_ptr(karg.vma_name_addr),
					       name, karg.vma_name_size)) {
		kfree(name_buf);
		return -EFAULT;
	}
	kfree(name_buf);

	if (karg.build_id_size && copy_to_user(u64_to_user_ptr(karg.build_id_addr),
					       build_id_buf, karg.build_id_size))
		return -EFAULT;

	if (copy_to_user(uarg, &karg, min_t(size_t, sizeof(karg), usize)))
		return -EFAULT;

	return 0;

out:
	query_vma_teardown(&lock_ctx);
	mmput(mm);
	kfree(name_buf);
	return err;
}

static long procfs_procmap_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct seq_file *seq = file->private_data;
	struct proc_maps_private *priv = seq->private;

	switch (cmd) {
	case PROCMAP_QUERY:
		/* priv->lock_ctx.mm is set during file open operation */
		return do_procmap_query(priv->lock_ctx.mm, (void __user *)arg);
	default:
		return -ENOIOCTLCMD;
	}
}

const struct file_operations proc_pid_maps_operations = {
	.open		= pid_maps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= proc_map_release,
	.unlocked_ioctl = procfs_procmap_ioctl,
	.compat_ioctl	= compat_ptr_ioctl,
};

/*
 * Proportional Set Size (PSS): my share of RSS.
 *
 * PSS of a process is the count of pages it has in memory, where each
 * page is divided by the number of processes sharing it.  So if a
 * process has 1000 pages all to itself, and 1000 shared with one other
 * process, its PSS will be 1500.
 *
 * To keep (accumulated) division errors low, we adopt a 64bit
 * fixed-point pss counter.  So (pss >> PSS_SHIFT) would be the real
 * byte count.
 *
 * A shift of 12 before division means (assuming 4K page size):
 * 	- 1M 3-user-pages add up to 8KB errors;
 * 	- supports mapcount up to 2^24, or 16M;
 * 	- supports PSS up to 2^52 bytes, or 4PB.
 */
#define PSS_SHIFT 12

#ifdef CONFIG_PROC_PAGE_MONITOR
struct mem_size_stats {
	unsigned long resident;
	unsigned long shared_clean;
	unsigned long shared_dirty;
	unsigned long private_clean;
	unsigned long private_dirty;
	unsigned long referenced;
	unsigned long anonymous;
	unsigned long lazyfree;
	unsigned long anonymous_thp;
	unsigned long shmem_thp;
	unsigned long file_thp;
	unsigned long swap;
	unsigned long shared_hugetlb;
	unsigned long private_hugetlb;
	unsigned long ksm;
	u64 pss;
	u64 pss_anon;
	u64 pss_file;
	u64 pss_shmem;
	u64 pss_dirty;
	u64 pss_locked;
	u64 swap_pss;
};

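/*
 * Fold one page's contribution of @size bytes and @pss (in PSS_SHIFT
 * fixed-point) into the PSS totals and the shared/private buckets.
 */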
static void smaps_page_accumulate(struct mem_size_stats *mss,
		struct folio *folio, unsigned long size, unsigned long pss,
		bool dirty, bool locked, bool private)
{
	mss->pss += pss;

	if (folio_test_anon(folio))
		mss->pss_anon += pss;
	else if (folio_test_swapbacked(folio))
		mss->pss_shmem += pss;
	else
		mss->pss_file += pss;

	if (locked)
		mss->pss_locked += pss;

	if (dirty || folio_test_dirty(folio)) {
		mss->pss_dirty += pss;
		if (private)
			mss->private_dirty += size;
		else
			mss->shared_dirty += size;
	} else {
		if (private)
			mss->private_clean += size;
		else
			mss->shared_clean += size;
	}
}

static void smaps_account(struct mem_size_stats *mss, struct page *page,
		bool compound, bool young, bool dirty, bool locked,
		bool present)
{
	struct folio *folio = page_folio(page);
	int i, nr = compound ? compound_nr(page) : 1;
	unsigned long size = nr * PAGE_SIZE;
	bool exclusive;
	int mapcount;

	/*
	 * First accumulate quantities that depend only on |size| and the type
	 * of the compound page.
	 */
	if (folio_test_anon(folio)) {
		mss->anonymous += size;
		if (!folio_test_swapbacked(folio) && !dirty &&
		    !folio_test_dirty(folio))
			mss->lazyfree += size;
	}

	if (folio_test_ksm(folio))
		mss->ksm += size;

	mss->resident += size;
	/* Accumulate the size in pages that have been accessed. */
	if (young || folio_test_young(folio) || folio_test_referenced(folio))
		mss->referenced += size;

	/*
	 * Then accumulate quantities that may depend on sharing, or that may
	 * differ page-by-page.
	 *
	 * refcount == 1 for present entries guarantees that the folio is mapped
	 * exactly once. For large folios this implies that exactly one
	 * PTE/PMD/... maps (a part of) this folio.
	 *
	 * Treat all non-present entries (where relying on the mapcount and
	 * refcount doesn't make sense) as "maybe shared, but not sure how
	 * often". We treat device private entries as being fake-present.
	 *
	 * Note that it would not be safe to read the mapcount especially for
	 * pages referenced by migration entries, even with the PTL held.
	 */
	if (folio_ref_count(folio) == 1 || !present) {
		smaps_page_accumulate(mss, folio, size, size << PSS_SHIFT,
				      dirty, locked, present);
		return;
	}

	if (IS_ENABLED(CONFIG_NO_PAGE_MAPCOUNT)) {
		mapcount = folio_average_page_mapcount(folio);
		exclusive = !folio_maybe_mapped_shared(folio);
	}

	/*
	 * We obtain a snapshot of the mapcount. Without holding the folio lock
	 * this snapshot can be slightly wrong as we cannot always read the
	 * mapcount atomically.
	 */
	for (i = 0; i < nr; i++, page++) {
		unsigned long pss = PAGE_SIZE << PSS_SHIFT;

		if (IS_ENABLED(CONFIG_PAGE_MAPCOUNT)) {
			mapcount = folio_precise_page_mapcount(folio, page);
			exclusive = mapcount < 2;
		}

		if (mapcount >= 2)
			pss /= mapcount;
		smaps_page_accumulate(mss, folio, PAGE_SIZE, pss,
				      dirty, locked, exclusive);
	}
}

#ifdef CONFIG_SHMEM
static int smaps_pte_hole(unsigned long addr, unsigned long end,
			  __always_unused int depth, struct mm_walk *walk)
{
	struct mem_size_stats *mss = walk->private;
	struct vm_area_struct *vma = walk->vma;

	mss->swap += shmem_partial_swap_usage(walk->vma->vm_file->f_mapping,
					      linear_page_index(vma, addr),
					      linear_page_index(vma, end));

	return 0;
}
#else
#define smaps_pte_hole		NULL
#endif /* CONFIG_SHMEM */

static void smaps_pte_hole_lookup(unsigned long addr, struct mm_walk *walk)
{
#ifdef CONFIG_SHMEM
	if (walk->ops->pte_hole) {
		/* depth is not used */
		smaps_pte_hole(addr, addr + PAGE_SIZE, 0, walk);
	}
#endif
}

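/*
 * Account a single PTE: a present page, a swap entry (counted towards
 * Swap and SwapPss), or a hole that may hold swapped-out shmem pages.
 */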
static void smaps_pte_entry(pte_t *pte, unsigned long addr,
		struct mm_walk *walk)
{
	struct mem_size_stats *mss = walk->private;
	struct vm_area_struct *vma = walk->vma;
	bool locked = !!(vma->vm_flags & VM_LOCKED);
	struct page *page = NULL;
	bool present = false, young = false, dirty = false;
	pte_t ptent = ptep_get(pte);

	if (pte_present(ptent)) {
		page = vm_normal_page(vma, addr, ptent);
		young = pte_young(ptent);
		dirty = pte_dirty(ptent);
		present = true;
	} else if (is_swap_pte(ptent)) {
		swp_entry_t swpent = pte_to_swp_entry(ptent);

		if (!non_swap_entry(swpent)) {
			int mapcount;

			mss->swap += PAGE_SIZE;
			mapcount = swp_swapcount(swpent);
			if (mapcount >= 2) {
				u64 pss_delta = (u64)PAGE_SIZE << PSS_SHIFT;

				do_div(pss_delta, mapcount);
				mss->swap_pss += pss_delta;
			} else {
				mss->swap_pss += (u64)PAGE_SIZE << PSS_SHIFT;
			}
		} else if (is_pfn_swap_entry(swpent)) {
			if (is_device_private_entry(swpent))
				present = true;
			page = pfn_swap_entry_to_page(swpent);
		}
	} else {
		smaps_pte_hole_lookup(addr, walk);
		return;
	}

	if (!page)
		return;

	smaps_account(mss, page, false, young, dirty, locked, present);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
		struct mm_walk *walk)
{
	struct mem_size_stats *mss = walk->private;
	struct vm_area_struct *vma = walk->vma;
	bool locked = !!(vma->vm_flags & VM_LOCKED);
	struct page *page = NULL;
	bool present = false;
	struct folio *folio;

	if (pmd_present(*pmd)) {
		page = vm_normal_page_pmd(vma, addr, *pmd);
		present = true;
	} else if (unlikely(thp_migration_supported() && is_swap_pmd(*pmd))) {
		swp_entry_t entry = pmd_to_swp_entry(*pmd);

		if (is_pfn_swap_entry(entry))
			page = pfn_swap_entry_to_page(entry);
	}
	if (IS_ERR_OR_NULL(page))
		return;
	folio = page_folio(page);
	if (folio_test_anon(folio))
		mss->anonymous_thp += HPAGE_PMD_SIZE;
	else if (folio_test_swapbacked(folio))
		mss->shmem_thp += HPAGE_PMD_SIZE;
	else if (folio_is_zone_device(folio))
		/* pass */;
	else
		mss->file_thp += HPAGE_PMD_SIZE;

	smaps_account(mss, page, true, pmd_young(*pmd), pmd_dirty(*pmd),
		      locked, present);
}
#else
static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
		struct mm_walk *walk)
{
}
#endif

static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
			   struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->vma;
	pte_t *pte;
	spinlock_t *ptl;

	ptl = pmd_trans_huge_lock(pmd, vma);
	if (ptl) {
		smaps_pmd_entry(pmd, addr, walk);
		spin_unlock(ptl);
		goto out;
	}

	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	if (!pte) {
		walk->action = ACTION_AGAIN;
		return 0;
	}
	for (; addr != end; pte++, addr += PAGE_SIZE)
		smaps_pte_entry(pte, addr, walk);
	pte_unmap_unlock(pte - 1, ptl);
out:
	cond_resched();
	return 0;
}

static void show_smap_vma_flags(struct seq_file *m, struct vm_area_struct *vma)
{
	/*
	 * Don't forget to update Documentation/ on changes.
	 *
	 * The length of the second argument of mnemonics[]
	 * needs to be 3 instead of previously set 2
	 * (i.e. from [BITS_PER_LONG][2] to [BITS_PER_LONG][3])
	 * to avoid spurious
	 * -Werror=unterminated-string-initialization warning
	 *  with GCC 15
	 */
	static const char mnemonics[BITS_PER_LONG][3] = {
		/*
		 * In case we meet a flag we don't know about.
		 */
		[0 ... (BITS_PER_LONG-1)] = "??",

		[ilog2(VM_READ)]	= "rd",
		[ilog2(VM_WRITE)]	= "wr",
		[ilog2(VM_EXEC)]	= "ex",
		[ilog2(VM_SHARED)]	= "sh",
		[ilog2(VM_MAYREAD)]	= "mr",
		[ilog2(VM_MAYWRITE)]	= "mw",
		[ilog2(VM_MAYEXEC)]	= "me",
		[ilog2(VM_MAYSHARE)]	= "ms",
		[ilog2(VM_GROWSDOWN)]	= "gd",
		[ilog2(VM_PFNMAP)]	= "pf",
		[ilog2(VM_LOCKED)]	= "lo",
		[ilog2(VM_IO)]		= "io",
		[ilog2(VM_SEQ_READ)]	= "sr",
		[ilog2(VM_RAND_READ)]	= "rr",
		[ilog2(VM_DONTCOPY)]	= "dc",
		[ilog2(VM_DONTEXPAND)]	= "de",
		[ilog2(VM_LOCKONFAULT)]	= "lf",
		[ilog2(VM_ACCOUNT)]	= "ac",
		[ilog2(VM_NORESERVE)]	= "nr",
		[ilog2(VM_HUGETLB)]	= "ht",
		[ilog2(VM_SYNC)]	= "sf",
		[ilog2(VM_ARCH_1)]	= "ar",
		[ilog2(VM_WIPEONFORK)]	= "wf",
		[ilog2(VM_DONTDUMP)]	= "dd",
#ifdef CONFIG_ARM64_BTI
		[ilog2(VM_ARM64_BTI)]	= "bt",
#endif
#ifdef CONFIG_MEM_SOFT_DIRTY
		[ilog2(VM_SOFTDIRTY)]	= "sd",
#endif
		[ilog2(VM_MIXEDMAP)]	= "mm",
		[ilog2(VM_HUGEPAGE)]	= "hg",
		[ilog2(VM_NOHUGEPAGE)]	= "nh",
		[ilog2(VM_MERGEABLE)]	= "mg",
		[ilog2(VM_UFFD_MISSING)]= "um",
		[ilog2(VM_UFFD_WP)]	= "uw",
#ifdef CONFIG_ARM64_MTE
		[ilog2(VM_MTE)]		= "mt",
		[ilog2(VM_MTE_ALLOWED)]	= "",
#endif
#ifdef CONFIG_ARCH_HAS_PKEYS
		/* These come out via ProtectionKey: */
		[ilog2(VM_PKEY_BIT0)]	= "",
		[ilog2(VM_PKEY_BIT1)]	= "",
		[ilog2(VM_PKEY_BIT2)]	= "",
#if VM_PKEY_BIT3
		[ilog2(VM_PKEY_BIT3)]	= "",
#endif
#if VM_PKEY_BIT4
		[ilog2(VM_PKEY_BIT4)]	= "",
#endif
#endif /* CONFIG_ARCH_HAS_PKEYS */
#ifdef CONFIG_HAVE_ARCH_USERFAULTFD_MINOR
		[ilog2(VM_UFFD_MINOR)]	= "ui",
#endif /* CONFIG_HAVE_ARCH_USERFAULTFD_MINOR */
#ifdef CONFIG_ARCH_HAS_USER_SHADOW_STACK
		[ilog2(VM_SHADOW_STACK)] = "ss",
#endif
#if defined(CONFIG_64BIT) || defined(CONFIG_PPC32)
		[ilog2(VM_DROPPABLE)] = "dp",
#endif
#ifdef CONFIG_64BIT
		[ilog2(VM_SEALED)] = "sl",
#endif
	};
	size_t i;

	seq_puts(m, "VmFlags: ");
	for (i = 0; i < BITS_PER_LONG; i++) {
		if (!mnemonics[i][0])
			continue;
		if (vma->vm_flags & (1UL << i))
			seq_printf(m, "%s ", mnemonics[i]);
	}
	seq_putc(m, '\n');
}

#ifdef CONFIG_HUGETLB_PAGE
static int smaps_hugetlb_range(pte_t *pte, unsigned long hmask,
			       unsigned long addr, unsigned long end,
			       struct mm_walk *walk)
{
	struct mem_size_stats *mss = walk->private;
	struct vm_area_struct *vma = walk->vma;
	struct folio *folio = NULL;
	bool present = false;
	spinlock_t *ptl;
	pte_t ptent;

	ptl = huge_pte_lock(hstate_vma(vma), walk->mm, pte);
	ptent = huge_ptep_get(walk->mm, addr, pte);
	if (pte_present(ptent)) {
		folio = page_folio(pte_page(ptent));
		present = true;
	} else if (is_swap_pte(ptent)) {
		swp_entry_t swpent = pte_to_swp_entry(ptent);

		if (is_pfn_swap_entry(swpent))
			folio = pfn_swap_entry_folio(swpent);
	}

	if (folio) {
		/* We treat non-present entries as "maybe shared". */
		if (!present || folio_maybe_mapped_shared(folio) ||
		    hugetlb_pmd_shared(pte))
			mss->shared_hugetlb += huge_page_size(hstate_vma(vma));
		else
			mss->private_hugetlb += huge_page_size(hstate_vma(vma));
	}
	spin_unlock(ptl);
	return 0;
}
#else
#define smaps_hugetlb_range	NULL
#endif /* HUGETLB_PAGE */

static const struct mm_walk_ops smaps_walk_ops = {
	.pmd_entry		= smaps_pte_range,
	.hugetlb_entry		= smaps_hugetlb_range,
	.walk_lock		= PGWALK_RDLOCK,
};

static const struct mm_walk_ops smaps_shmem_walk_ops = {
	.pmd_entry		= smaps_pte_range,
	.hugetlb_entry		= smaps_hugetlb_range,
	.pte_hole		= smaps_pte_hole,
	.walk_lock		= PGWALK_RDLOCK,
};

/*
 * Gather mem stats from @vma with the indicated beginning
 * address @start, and keep them in @mss.
 *
 * Use vm_start of @vma as the beginning address if @start is 0.
 */
static void smap_gather_stats(struct vm_area_struct *vma,
		struct mem_size_stats *mss, unsigned long start)
{
	const struct mm_walk_ops *ops = &smaps_walk_ops;

	/* Invalid start */
	if (start >= vma->vm_end)
		return;

	if (vma->vm_file && shmem_mapping(vma->vm_file->f_mapping)) {
		/*
		 * For shared or readonly shmem mappings we know that all
		 * swapped out pages belong to the shmem object, and we can
		 * obtain the swap value much more efficiently. For private
		 * writable mappings, we might have COW pages that are
		 * not affected by the parent swapped out pages of the shmem
		 * object, so we have to distinguish them during the page walk.
		 * Unless we know that the shmem object (or the part mapped by
		 * our VMA) has no swapped out pages at all.
		 */
		unsigned long shmem_swapped = shmem_swap_usage(vma);

		if (!start && (!shmem_swapped || (vma->vm_flags & VM_SHARED) ||
					!(vma->vm_flags & VM_WRITE))) {
			mss->swap += shmem_swapped;
		} else {
			ops = &smaps_shmem_walk_ops;
		}
	}

	/* mmap_lock is held in m_start */
	if (!start)
		walk_page_vma(vma, ops, mss);
	else
		walk_page_range(vma->vm_mm, start, vma->vm_end, ops, mss);
}

|---|
| 1311 | #define SEQ_PUT_DEC(str, val) \ | 
|---|
| 1312 | seq_put_decimal_ull_width(m, str, (val) >> 10, 8) | 
|---|
| 1313 |  | 
|---|
| 1314 | /* Show the contents common for smaps and smaps_rollup */ | 
|---|
| 1315 | static void __show_smap(struct seq_file *m, const struct mem_size_stats *mss, | 
|---|
| 1316 | bool rollup_mode) | 
|---|
| 1317 | { | 
|---|
| 1318 | SEQ_PUT_DEC( "Rss:            ", mss->resident); | 
|---|
| 1319 | SEQ_PUT_DEC( " kB\nPss:            ", mss->pss >> PSS_SHIFT); | 
|---|
| 1320 | SEQ_PUT_DEC( " kB\nPss_Dirty:      ", mss->pss_dirty >> PSS_SHIFT); | 
|---|
| 1321 | if (rollup_mode) { | 
|---|
| 1322 | /* | 
|---|
| 1323 | * These are meaningful only for smaps_rollup, otherwise two of | 
|---|
| 1324 | * them are zero, and the other one is the same as Pss. | 
|---|
| 1325 | */ | 
|---|
| 1326 | SEQ_PUT_DEC( " kB\nPss_Anon:       ", | 
|---|
| 1327 | mss->pss_anon >> PSS_SHIFT); | 
|---|
| 1328 | SEQ_PUT_DEC( " kB\nPss_File:       ", | 
|---|
| 1329 | mss->pss_file >> PSS_SHIFT); | 
|---|
| 1330 | SEQ_PUT_DEC( " kB\nPss_Shmem:      ", | 
|---|
| 1331 | mss->pss_shmem >> PSS_SHIFT); | 
|---|
| 1332 | } | 
|---|
| 1333 | SEQ_PUT_DEC( " kB\nShared_Clean:   ", mss->shared_clean); | 
|---|
| 1334 | SEQ_PUT_DEC( " kB\nShared_Dirty:   ", mss->shared_dirty); | 
|---|
| 1335 | SEQ_PUT_DEC( " kB\nPrivate_Clean:  ", mss->private_clean); | 
|---|
| 1336 | SEQ_PUT_DEC( " kB\nPrivate_Dirty:  ", mss->private_dirty); | 
|---|
| 1337 | SEQ_PUT_DEC( " kB\nReferenced:     ", mss->referenced); | 
|---|
| 1338 | SEQ_PUT_DEC( " kB\nAnonymous:      ", mss->anonymous); | 
|---|
| 1339 | SEQ_PUT_DEC( " kB\nKSM:            ", mss->ksm); | 
|---|
| 1340 | SEQ_PUT_DEC( " kB\nLazyFree:       ", mss->lazyfree); | 
|---|
| 1341 | SEQ_PUT_DEC( " kB\nAnonHugePages:  ", mss->anonymous_thp); | 
|---|
| 1342 | SEQ_PUT_DEC( " kB\nShmemPmdMapped: ", mss->shmem_thp); | 
|---|
| 1343 | SEQ_PUT_DEC( " kB\nFilePmdMapped:  ", mss->file_thp); | 
|---|
| 1344 | SEQ_PUT_DEC( " kB\nShared_Hugetlb: ", mss->shared_hugetlb); | 
|---|
| 1345 | seq_put_decimal_ull_width(m, " kB\nPrivate_Hugetlb: ", | 
|---|
| 1346 | mss->private_hugetlb >> 10, 7); | 
|---|
| 1347 | SEQ_PUT_DEC( " kB\nSwap:           ", mss->swap); | 
|---|
| 1348 | SEQ_PUT_DEC( " kB\nSwapPss:        ", | 
|---|
| 1349 | mss->swap_pss >> PSS_SHIFT); | 
|---|
| 1350 | SEQ_PUT_DEC( " kB\nLocked:         ", | 
|---|
| 1351 | mss->pss_locked >> PSS_SHIFT); | 
|---|
| 1352 | seq_puts(m, " kB\n"); | 
|---|
| 1353 | } | 
|---|
| 1354 |  | 
|---|
| 1355 | static int show_smap(struct seq_file *m, void *v) | 
|---|
| 1356 | { | 
|---|
| 1357 | struct vm_area_struct *vma = v; | 
|---|
| 1358 | struct mem_size_stats mss = {}; | 
|---|
| 1359 |  | 
|---|
| 1360 | smap_gather_stats(vma, &mss, 0); | 
|---|
| 1361 |  | 
|---|
| 1362 | show_map_vma(m, vma); | 
|---|
| 1363 |  | 
|---|
| 1364 | SEQ_PUT_DEC( "Size:           ", vma->vm_end - vma->vm_start); | 
|---|
| 1365 | SEQ_PUT_DEC( " kB\nKernelPageSize: ", vma_kernel_pagesize(vma)); | 
|---|
| 1366 | SEQ_PUT_DEC( " kB\nMMUPageSize:    ", vma_mmu_pagesize(vma)); | 
|---|
| 1367 | seq_puts(m, " kB\n"); | 
|---|
| 1368 |  | 
|---|
| 1369 | __show_smap(m, &mss, false); | 
|---|
| 1370 |  | 
|---|
| 1371 | seq_printf(m, "THPeligible:    %8u\n", | 
|---|
| 1372 | !!thp_vma_allowable_orders(vma, vma->vm_flags, TVA_SMAPS, | 
|---|
| 1373 | THP_ORDERS_ALL)); | 
|---|
| 1374 |  | 
|---|
| 1375 | if (arch_pkeys_enabled()) | 
|---|
| 1376 | seq_printf(m, "ProtectionKey:  %8u\n", vma_pkey(vma)); | 
|---|
| 1377 | show_smap_vma_flags(m, vma); | 
|---|
| 1378 |  | 
|---|
| 1379 | return 0; | 
|---|
| 1380 | } | 
|---|
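|  | /* | 
|---|
|  |  * Illustrative sketch (values and path made up) of one /proc/<pid>/smaps | 
|---|
|  |  * entry as produced by show_map_vma() plus the printing above: | 
|---|
|  |  * | 
|---|
|  |  *	7f60c0b52000-7f60c0d52000 rw-p 00000000 00:00 0          [heap] | 
|---|
|  |  *	Size:               2048 kB | 
|---|
|  |  *	Rss:                1024 kB | 
|---|
|  |  *	Pss:                1024 kB | 
|---|
|  |  *	... | 
|---|
|  |  *	THPeligible:           0 | 
|---|
|  |  *	VmFlags: rd wr mr mw me ac | 
|---|
|  |  */ | 
|---|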
| 1381 |  | 
|---|
| 1382 | static int show_smaps_rollup(struct seq_file *m, void *v) | 
|---|
| 1383 | { | 
|---|
| 1384 | struct proc_maps_private *priv = m->private; | 
|---|
| 1385 | struct mem_size_stats mss = {}; | 
|---|
| 1386 | struct mm_struct *mm = priv->lock_ctx.mm; | 
|---|
| 1387 | struct vm_area_struct *vma; | 
|---|
| 1388 | unsigned long vma_start = 0, last_vma_end = 0; | 
|---|
| 1389 | int ret = 0; | 
|---|
| 1390 | VMA_ITERATOR(vmi, mm, 0); | 
|---|
| 1391 |  | 
|---|
| 1392 | priv->task = get_proc_task(priv->inode); | 
|---|
| 1393 | if (!priv->task) | 
|---|
| 1394 | return -ESRCH; | 
|---|
| 1395 |  | 
|---|
| 1396 | if (!mm || !mmget_not_zero(mm)) { | 
|---|
| 1397 | ret = -ESRCH; | 
|---|
| 1398 | goto out_put_task; | 
|---|
| 1399 | } | 
|---|
| 1400 |  | 
|---|
| 1401 | ret = mmap_read_lock_killable(mm); | 
|---|
| 1402 | if (ret) | 
|---|
| 1403 | goto out_put_mm; | 
|---|
| 1404 |  | 
|---|
| 1405 | hold_task_mempolicy(priv); | 
|---|
| 1406 | vma = vma_next(&vmi); | 
|---|
| 1407 |  | 
|---|
| 1408 | if (unlikely(!vma)) | 
|---|
| 1409 | goto empty_set; | 
|---|
| 1410 |  | 
|---|
| 1411 | vma_start = vma->vm_start; | 
|---|
| 1412 | do { | 
|---|
| 1413 | smap_gather_stats(vma, &mss, 0); | 
|---|
| 1414 | last_vma_end = vma->vm_end; | 
|---|
| 1415 |  | 
|---|
| 1416 | /* | 
|---|
| 1417 | * Release mmap_lock temporarily if someone else wants to | 
|---|
| 1418 | * acquire it for a write request. | 
|---|
| 1419 | */ | 
|---|
| 1420 | if (mmap_lock_is_contended(mm)) { | 
|---|
| 1421 | vma_iter_invalidate(&vmi); | 
|---|
| 1422 | mmap_read_unlock(mm); | 
|---|
| 1423 | ret = mmap_read_lock_killable(mm); | 
|---|
| 1424 | if (ret) { | 
|---|
| 1425 | release_task_mempolicy(priv); | 
|---|
| 1426 | goto out_put_mm; | 
|---|
| 1427 | } | 
|---|
| 1428 |  | 
|---|
| 1429 | /* | 
|---|
| 1430 | * After dropping the lock, there are four cases to | 
|---|
| 1431 | * consider. See the following example for explanation. | 
|---|
| 1432 | * | 
|---|
| 1433 | *   +------+------+-----------+ | 
|---|
| 1434 | *   | VMA1 | VMA2 | VMA3      | | 
|---|
| 1435 | *   +------+------+-----------+ | 
|---|
| 1436 | *   |      |      |           | | 
|---|
| 1437 | *  4k     8k     16k         400k | 
|---|
| 1438 | * | 
|---|
| 1439 | * Suppose we drop the lock after reading VMA2 due to | 
|---|
| 1440 | * contention, then we get: | 
|---|
| 1441 | * | 
|---|
| 1442 | *	last_vma_end = 16k | 
|---|
| 1443 | * | 
|---|
| 1444 | * 1) VMA2 is freed, but VMA3 exists: | 
|---|
| 1445 | * | 
|---|
| 1446 | *    vma_next(vmi) will return VMA3. | 
|---|
| 1447 | *    In this case, just continue from VMA3. | 
|---|
| 1448 | * | 
|---|
| 1449 | * 2) VMA2 still exists: | 
|---|
| 1450 | * | 
|---|
| 1451 | *    vma_next(vmi) will return VMA3. | 
|---|
| 1452 | *    In this case, just continue from VMA3. | 
|---|
| 1453 | * | 
|---|
| 1454 | * 3) No more VMAs can be found: | 
|---|
| 1455 | * | 
|---|
| 1456 | *    vma_next(vmi) will return NULL. | 
|---|
| 1457 | *    Nothing more to do, just break. | 
|---|
| 1458 | * | 
|---|
| 1459 | * 4) (last_vma_end - 1) is the middle of a vma (VMA'): | 
|---|
| 1460 | * | 
|---|
| 1461 | *    vma_next(vmi) will return VMA' whose range | 
|---|
| 1462 | *    contains last_vma_end. | 
|---|
| 1463 | *    Iterate VMA' from last_vma_end. | 
|---|
| 1464 | */ | 
|---|
| 1465 | vma = vma_next(&vmi); | 
|---|
| 1466 | /* Case 3 above */ | 
|---|
| 1467 | if (!vma) | 
|---|
| 1468 | break; | 
|---|
| 1469 |  | 
|---|
| 1470 | /* Case 1 and 2 above */ | 
|---|
| 1471 | if (vma->vm_start >= last_vma_end) { | 
|---|
| 1472 | smap_gather_stats(vma, &mss, 0); | 
|---|
| 1473 | last_vma_end = vma->vm_end; | 
|---|
| 1474 | continue; | 
|---|
| 1475 | } | 
|---|
| 1476 |  | 
|---|
| 1477 | /* Case 4 above */ | 
|---|
| 1478 | if (vma->vm_end > last_vma_end) { | 
|---|
| 1479 | smap_gather_stats(vma, &mss, last_vma_end); | 
|---|
| 1480 | last_vma_end = vma->vm_end; | 
|---|
| 1481 | } | 
|---|
| 1482 | } | 
|---|
| 1483 | } for_each_vma(vmi, vma); | 
|---|
| 1484 |  | 
|---|
| 1485 | empty_set: | 
|---|
| 1486 | show_vma_header_prefix(m, vma_start, last_vma_end, 0, 0, 0, 0); | 
|---|
| 1487 | seq_pad(m, ' '); | 
|---|
| 1488 | seq_puts(m, "[rollup]\n"); | 
|---|
| 1489 |  | 
|---|
| 1490 | __show_smap(m, &mss, true); | 
|---|
| 1491 |  | 
|---|
| 1492 | release_task_mempolicy(priv); | 
|---|
| 1493 | mmap_read_unlock(mm); | 
|---|
| 1494 |  | 
|---|
| 1495 | out_put_mm: | 
|---|
| 1496 | mmput(mm); | 
|---|
| 1497 | out_put_task: | 
|---|
| 1498 | put_task_struct(priv->task); | 
|---|
| 1499 | priv->task = NULL; | 
|---|
| 1500 |  | 
|---|
| 1501 | return ret; | 
|---|
| 1502 | } | 
|---|
| 1503 | #undef SEQ_PUT_DEC | 
|---|
| 1504 |  | 
|---|
| 1505 | static const struct seq_operations proc_pid_smaps_op = { | 
|---|
| 1506 | .start	= m_start, | 
|---|
| 1507 | .next	= m_next, | 
|---|
| 1508 | .stop	= m_stop, | 
|---|
| 1509 | .show	= show_smap | 
|---|
| 1510 | }; | 
|---|
| 1511 |  | 
|---|
| 1512 | static int pid_smaps_open(struct inode *inode, struct file *file) | 
|---|
| 1513 | { | 
|---|
| 1514 | return do_maps_open(inode, file, &proc_pid_smaps_op); | 
|---|
| 1515 | } | 
|---|
| 1516 |  | 
|---|
| 1517 | static int smaps_rollup_open(struct inode *inode, struct file *file) | 
|---|
| 1518 | { | 
|---|
| 1519 | int ret; | 
|---|
| 1520 | struct proc_maps_private *priv; | 
|---|
| 1521 |  | 
|---|
| 1522 | priv = kzalloc(sizeof(*priv), GFP_KERNEL_ACCOUNT); | 
|---|
| 1523 | if (!priv) | 
|---|
| 1524 | return -ENOMEM; | 
|---|
| 1525 |  | 
|---|
| 1526 | ret = single_open(file, show_smaps_rollup, priv); | 
|---|
| 1527 | if (ret) | 
|---|
| 1528 | goto out_free; | 
|---|
| 1529 |  | 
|---|
| 1530 | priv->inode = inode; | 
|---|
| 1531 | priv->lock_ctx.mm = proc_mem_open(inode, PTRACE_MODE_READ); | 
|---|
| 1532 | if (IS_ERR_OR_NULL(priv->lock_ctx.mm)) { | 
|---|
| 1533 | ret = priv->lock_ctx.mm ? PTR_ERR(priv->lock_ctx.mm) : -ESRCH; | 
|---|
| 1534 |  | 
|---|
| 1535 | single_release(inode, file); | 
|---|
| 1536 | goto out_free; | 
|---|
| 1537 | } | 
|---|
| 1538 |  | 
|---|
| 1539 | return 0; | 
|---|
| 1540 |  | 
|---|
| 1541 | out_free: | 
|---|
| 1542 | kfree(priv); | 
|---|
| 1543 | return ret; | 
|---|
| 1544 | } | 
|---|
| 1545 |  | 
|---|
| 1546 | static int smaps_rollup_release(struct inode *inode, struct file *file) | 
|---|
| 1547 | { | 
|---|
| 1548 | struct seq_file *seq = file->private_data; | 
|---|
| 1549 | struct proc_maps_private *priv = seq->private; | 
|---|
| 1550 |  | 
|---|
| 1551 | if (priv->lock_ctx.mm) | 
|---|
| 1552 | mmdrop(priv->lock_ctx.mm); | 
|---|
| 1553 |  | 
|---|
| 1554 | kfree(priv); | 
|---|
| 1555 | return single_release(inode, file); | 
|---|
| 1556 | } | 
|---|
| 1557 |  | 
|---|
| 1558 | const struct file_operations proc_pid_smaps_operations = { | 
|---|
| 1559 | .open		= pid_smaps_open, | 
|---|
| 1560 | .read		= seq_read, | 
|---|
| 1561 | .llseek		= seq_lseek, | 
|---|
| 1562 | .release	= proc_map_release, | 
|---|
| 1563 | }; | 
|---|
| 1564 |  | 
|---|
| 1565 | const struct file_operations proc_pid_smaps_rollup_operations = { | 
|---|
| 1566 | .open		= smaps_rollup_open, | 
|---|
| 1567 | .read		= seq_read, | 
|---|
| 1568 | .llseek		= seq_lseek, | 
|---|
| 1569 | .release	= smaps_rollup_release, | 
|---|
| 1570 | }; | 
|---|
| 1571 |  | 
|---|
| 1572 | enum clear_refs_types { | 
|---|
| 1573 | CLEAR_REFS_ALL = 1, | 
|---|
| 1574 | CLEAR_REFS_ANON, | 
|---|
| 1575 | CLEAR_REFS_MAPPED, | 
|---|
| 1576 | CLEAR_REFS_SOFT_DIRTY, | 
|---|
| 1577 | CLEAR_REFS_MM_HIWATER_RSS, | 
|---|
| 1578 | CLEAR_REFS_LAST, | 
|---|
| 1579 | }; | 
|---|
| 1580 |  | 
|---|
| 1581 | struct clear_refs_private { | 
|---|
| 1582 | enum clear_refs_types type; | 
|---|
| 1583 | }; | 
|---|
| 1584 |  | 
|---|
| 1585 | #ifdef CONFIG_MEM_SOFT_DIRTY | 
|---|
| 1586 |  | 
|---|
| 1587 | static inline bool pte_is_pinned(struct vm_area_struct *vma, unsigned long addr, pte_t pte) | 
|---|
| 1588 | { | 
|---|
| 1589 | struct folio *folio; | 
|---|
| 1590 |  | 
|---|
| 1591 | if (!pte_write(pte)) | 
|---|
| 1592 | return false; | 
|---|
| 1593 | if (!is_cow_mapping(vma->vm_flags)) | 
|---|
| 1594 | return false; | 
|---|
| 1595 | if (likely(!mm_flags_test(MMF_HAS_PINNED, vma->vm_mm))) | 
|---|
| 1596 | return false; | 
|---|
| 1597 | folio = vm_normal_folio(vma, addr, pte); | 
|---|
| 1598 | if (!folio) | 
|---|
| 1599 | return false; | 
|---|
| 1600 | return folio_maybe_dma_pinned(folio); | 
|---|
| 1601 | } | 
|---|
| 1602 |  | 
|---|
| 1603 | static inline void clear_soft_dirty(struct vm_area_struct *vma, | 
|---|
| 1604 | unsigned long addr, pte_t *pte) | 
|---|
| 1605 | { | 
|---|
| 1606 | /* | 
|---|
| 1607 | * The soft-dirty tracker uses #PF-s to catch writes | 
|---|
| 1608 | * to pages, so write-protect the pte as well. See | 
|---|
| 1609 | * Documentation/admin-guide/mm/soft-dirty.rst for a full description | 
|---|
| 1610 | * of how soft-dirty works. | 
|---|
| 1611 | */ | 
|---|
| 1612 | pte_t ptent = ptep_get(pte); | 
|---|
| 1613 |  | 
|---|
| 1614 | if (pte_present(ptent)) { | 
|---|
| 1615 | pte_t old_pte; | 
|---|
| 1616 |  | 
|---|
| 1617 | if (pte_is_pinned(vma, addr, ptent)) | 
|---|
| 1618 | return; | 
|---|
| 1619 | old_pte = ptep_modify_prot_start(vma, addr, pte); | 
|---|
| 1620 | ptent = pte_wrprotect(old_pte); | 
|---|
| 1621 | ptent = pte_clear_soft_dirty(ptent); | 
|---|
| 1622 | ptep_modify_prot_commit(vma, addr, pte, old_pte, ptent); | 
|---|
| 1623 | } else if (is_swap_pte(ptent)) { | 
|---|
| 1624 | ptent = pte_swp_clear_soft_dirty(ptent); | 
|---|
| 1625 | set_pte_at(vma->vm_mm, addr, pte, ptent); | 
|---|
| 1626 | } | 
|---|
| 1627 | } | 
|---|
| 1628 | #else | 
|---|
| 1629 | static inline void clear_soft_dirty(struct vm_area_struct *vma, | 
|---|
| 1630 | unsigned long addr, pte_t *pte) | 
|---|
| 1631 | { | 
|---|
| 1632 | } | 
|---|
| 1633 | #endif | 
|---|
| 1634 |  | 
|---|
| 1635 | #if defined(CONFIG_MEM_SOFT_DIRTY) && defined(CONFIG_TRANSPARENT_HUGEPAGE) | 
|---|
| 1636 | static inline void clear_soft_dirty_pmd(struct vm_area_struct *vma, | 
|---|
| 1637 | unsigned long addr, pmd_t *pmdp) | 
|---|
| 1638 | { | 
|---|
| 1639 | pmd_t old, pmd = *pmdp; | 
|---|
| 1640 |  | 
|---|
| 1641 | if (pmd_present(pmd)) { | 
|---|
| 1642 | /* See comment in change_huge_pmd() */ | 
|---|
| 1643 | old = pmdp_invalidate(vma, addr, pmdp); | 
|---|
| 1644 | if (pmd_dirty(old)) | 
|---|
| 1645 | pmd = pmd_mkdirty(pmd); | 
|---|
| 1646 | if (pmd_young(old)) | 
|---|
| 1647 | pmd = pmd_mkyoung(pmd); | 
|---|
| 1648 |  | 
|---|
| 1649 | pmd = pmd_wrprotect(pmd); | 
|---|
| 1650 | pmd = pmd_clear_soft_dirty(pmd); | 
|---|
| 1651 |  | 
|---|
| 1652 | set_pmd_at(vma->vm_mm, addr, pmdp, pmd); | 
|---|
| 1653 | } else if (is_migration_entry(pmd_to_swp_entry(pmd))) { | 
|---|
| 1654 | pmd = pmd_swp_clear_soft_dirty(pmd); | 
|---|
| 1655 | set_pmd_at(vma->vm_mm, addr, pmdp, pmd); | 
|---|
| 1656 | } | 
|---|
| 1657 | } | 
|---|
| 1658 | #else | 
|---|
| 1659 | static inline void clear_soft_dirty_pmd(struct vm_area_struct *vma, | 
|---|
| 1660 | unsigned long addr, pmd_t *pmdp) | 
|---|
| 1661 | { | 
|---|
| 1662 | } | 
|---|
| 1663 | #endif | 
|---|
| 1664 |  | 
|---|
| 1665 | static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr, | 
|---|
| 1666 | unsigned long end, struct mm_walk *walk) | 
|---|
| 1667 | { | 
|---|
| 1668 | struct clear_refs_private *cp = walk->private; | 
|---|
| 1669 | struct vm_area_struct *vma = walk->vma; | 
|---|
| 1670 | pte_t *pte, ptent; | 
|---|
| 1671 | spinlock_t *ptl; | 
|---|
| 1672 | struct folio *folio; | 
|---|
| 1673 |  | 
|---|
| 1674 | ptl = pmd_trans_huge_lock(pmd, vma); | 
|---|
| 1675 | if (ptl) { | 
|---|
| 1676 | if (cp->type == CLEAR_REFS_SOFT_DIRTY) { | 
|---|
| 1677 | clear_soft_dirty_pmd(vma, addr, pmd); | 
|---|
| 1678 | goto out; | 
|---|
| 1679 | } | 
|---|
| 1680 |  | 
|---|
| 1681 | if (!pmd_present(*pmd)) | 
|---|
| 1682 | goto out; | 
|---|
| 1683 |  | 
|---|
| 1684 | folio = pmd_folio(*pmd); | 
|---|
| 1685 |  | 
|---|
| 1686 | /* Clear accessed and referenced bits. */ | 
|---|
| 1687 | pmdp_test_and_clear_young(vma, addr, pmd); | 
|---|
| 1688 | folio_test_clear_young(folio); | 
|---|
| 1689 | folio_clear_referenced(folio); | 
|---|
| 1690 | out: | 
|---|
| 1691 | spin_unlock(ptl); | 
|---|
| 1692 | return 0; | 
|---|
| 1693 | } | 
|---|
| 1694 |  | 
|---|
| 1695 | pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); | 
|---|
| 1696 | if (!pte) { | 
|---|
| 1697 | walk->action = ACTION_AGAIN; | 
|---|
| 1698 | return 0; | 
|---|
| 1699 | } | 
|---|
| 1700 | for (; addr != end; pte++, addr += PAGE_SIZE) { | 
|---|
| 1701 | ptent = ptep_get(pte); | 
|---|
| 1702 |  | 
|---|
| 1703 | if (cp->type == CLEAR_REFS_SOFT_DIRTY) { | 
|---|
| 1704 | clear_soft_dirty(vma, addr, pte); | 
|---|
| 1705 | continue; | 
|---|
| 1706 | } | 
|---|
| 1707 |  | 
|---|
| 1708 | if (!pte_present(ptent)) | 
|---|
| 1709 | continue; | 
|---|
| 1710 |  | 
|---|
| 1711 | folio = vm_normal_folio(vma, addr, ptent); | 
|---|
| 1712 | if (!folio) | 
|---|
| 1713 | continue; | 
|---|
| 1714 |  | 
|---|
| 1715 | /* Clear accessed and referenced bits. */ | 
|---|
| 1716 | ptep_test_and_clear_young(vma, addr, pte); | 
|---|
| 1717 | folio_test_clear_young(folio); | 
|---|
| 1718 | folio_clear_referenced(folio); | 
|---|
| 1719 | } | 
|---|
| 1720 | pte_unmap_unlock(pte - 1, ptl); | 
|---|
| 1721 | cond_resched(); | 
|---|
| 1722 | return 0; | 
|---|
| 1723 | } | 
|---|
| 1724 |  | 
|---|
| 1725 | static int clear_refs_test_walk(unsigned long start, unsigned long end, | 
|---|
| 1726 | struct mm_walk *walk) | 
|---|
| 1727 | { | 
|---|
| 1728 | struct clear_refs_private *cp = walk->private; | 
|---|
| 1729 | struct vm_area_struct *vma = walk->vma; | 
|---|
| 1730 |  | 
|---|
| 1731 | if (vma->vm_flags & VM_PFNMAP) | 
|---|
| 1732 | return 1; | 
|---|
| 1733 |  | 
|---|
| 1734 | /* | 
|---|
| 1735 | * Writing 1 to /proc/pid/clear_refs affects all pages. | 
|---|
| 1736 | * Writing 2 to /proc/pid/clear_refs only affects anonymous pages. | 
|---|
| 1737 | * Writing 3 to /proc/pid/clear_refs only affects file mapped pages. | 
|---|
| 1738 | * Writing 4 to /proc/pid/clear_refs affects all pages. | 
|---|
| 1739 | */ | 
|---|
| 1740 | if (cp->type == CLEAR_REFS_ANON && vma->vm_file) | 
|---|
| 1741 | return 1; | 
|---|
| 1742 | if (cp->type == CLEAR_REFS_MAPPED && !vma->vm_file) | 
|---|
| 1743 | return 1; | 
|---|
| 1744 | return 0; | 
|---|
| 1745 | } | 
|---|
| 1746 |  | 
|---|
| 1747 | static const struct mm_walk_ops clear_refs_walk_ops = { | 
|---|
| 1748 | .pmd_entry		= clear_refs_pte_range, | 
|---|
| 1749 | .test_walk		= clear_refs_test_walk, | 
|---|
| 1750 | .walk_lock		= PGWALK_WRLOCK, | 
|---|
| 1751 | }; | 
|---|
| 1752 |  | 
|---|
| 1753 | static ssize_t clear_refs_write(struct file *file, const char __user *buf, | 
|---|
| 1754 | size_t count, loff_t *ppos) | 
|---|
| 1755 | { | 
|---|
| 1756 | struct task_struct *task; | 
|---|
| 1757 | char buffer[PROC_NUMBUF] = {}; | 
|---|
| 1758 | struct mm_struct *mm; | 
|---|
| 1759 | struct vm_area_struct *vma; | 
|---|
| 1760 | enum clear_refs_types type; | 
|---|
| 1761 | int itype; | 
|---|
| 1762 | int rv; | 
|---|
| 1763 |  | 
|---|
| 1764 | if (count > sizeof(buffer) - 1) | 
|---|
| 1765 | count = sizeof(buffer) - 1; | 
|---|
| 1766 | if (copy_from_user(buffer, buf, count)) | 
|---|
| 1767 | return -EFAULT; | 
|---|
| 1768 | rv = kstrtoint(strstrip(buffer), 10, &itype); | 
|---|
| 1769 | if (rv < 0) | 
|---|
| 1770 | return rv; | 
|---|
| 1771 | type = (enum clear_refs_types)itype; | 
|---|
| 1772 | if (type < CLEAR_REFS_ALL || type >= CLEAR_REFS_LAST) | 
|---|
| 1773 | return -EINVAL; | 
|---|
| 1774 |  | 
|---|
| 1775 | task = get_proc_task(file_inode(file)); | 
|---|
| 1776 | if (!task) | 
|---|
| 1777 | return -ESRCH; | 
|---|
| 1778 | mm = get_task_mm(task); | 
|---|
| 1779 | if (mm) { | 
|---|
| 1780 | VMA_ITERATOR(vmi, mm, 0); | 
|---|
| 1781 | struct mmu_notifier_range range; | 
|---|
| 1782 | struct clear_refs_private cp = { | 
|---|
| 1783 | .type = type, | 
|---|
| 1784 | }; | 
|---|
| 1785 |  | 
|---|
| 1786 | if (mmap_write_lock_killable(mm)) { | 
|---|
| 1787 | count = -EINTR; | 
|---|
| 1788 | goto out_mm; | 
|---|
| 1789 | } | 
|---|
| 1790 | if (type == CLEAR_REFS_MM_HIWATER_RSS) { | 
|---|
| 1791 | /* | 
|---|
| 1792 | * Writing 5 to /proc/pid/clear_refs resets the peak | 
|---|
| 1793 | * resident set size to this mm's current rss value. | 
|---|
| 1794 | */ | 
|---|
| 1795 | reset_mm_hiwater_rss(mm); | 
|---|
| 1796 | goto out_unlock; | 
|---|
| 1797 | } | 
|---|
| 1798 |  | 
|---|
| 1799 | if (type == CLEAR_REFS_SOFT_DIRTY) { | 
|---|
| 1800 | for_each_vma(vmi, vma) { | 
|---|
| 1801 | if (!(vma->vm_flags & VM_SOFTDIRTY)) | 
|---|
| 1802 | continue; | 
|---|
| 1803 | vm_flags_clear(vma, VM_SOFTDIRTY); | 
|---|
| 1804 | vma_set_page_prot(vma); | 
|---|
| 1805 | } | 
|---|
| 1806 |  | 
|---|
| 1807 | inc_tlb_flush_pending(mm); | 
|---|
| 1808 | mmu_notifier_range_init(&range, MMU_NOTIFY_SOFT_DIRTY, | 
|---|
| 1809 | 0, mm, 0, -1UL); | 
|---|
| 1810 | mmu_notifier_invalidate_range_start(&range); | 
|---|
| 1811 | } | 
|---|
| 1812 | walk_page_range(mm, 0, -1, &clear_refs_walk_ops, &cp); | 
|---|
| 1813 | if (type == CLEAR_REFS_SOFT_DIRTY) { | 
|---|
| 1814 | mmu_notifier_invalidate_range_end(&range); | 
|---|
| 1815 | flush_tlb_mm(mm); | 
|---|
| 1816 | dec_tlb_flush_pending(mm); | 
|---|
| 1817 | } | 
|---|
| 1818 | out_unlock: | 
|---|
| 1819 | mmap_write_unlock(mm); | 
|---|
| 1820 | out_mm: | 
|---|
| 1821 | mmput(mm); | 
|---|
| 1822 | } | 
|---|
| 1823 | put_task_struct(task); | 
|---|
| 1824 |  | 
|---|
| 1825 | return count; | 
|---|
| 1826 | } | 
|---|
| 1827 |  | 
|---|
| 1828 | const struct file_operations proc_clear_refs_operations = { | 
|---|
| 1829 | .write		= clear_refs_write, | 
|---|
| 1830 | .llseek		= noop_llseek, | 
|---|
| 1831 | }; | 
|---|
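|  | /* | 
|---|
|  |  * Minimal userspace sketch (not kernel code; the pid, vaddr and | 
|---|
|  |  * page_size below are placeholders) of the soft-dirty cycle served by | 
|---|
|  |  * the code above: write "4" (CLEAR_REFS_SOFT_DIRTY) to clear_refs, let | 
|---|
|  |  * the task run, then test bit 55 (PM_SOFT_DIRTY) of the pagemap entry: | 
|---|
|  |  * | 
|---|
|  |  *	int fd = open("/proc/1234/clear_refs", O_WRONLY); | 
|---|
|  |  *	write(fd, "4", 1); | 
|---|
|  |  *	close(fd); | 
|---|
|  |  *	// ... the task runs, possibly writing to the page ... | 
|---|
|  |  *	uint64_t ent; | 
|---|
|  |  *	int pm = open("/proc/1234/pagemap", O_RDONLY); | 
|---|
|  |  *	pread(pm, &ent, sizeof(ent), (vaddr / page_size) * sizeof(ent)); | 
|---|
|  |  *	if (ent & (1ULL << 55)) | 
|---|
|  |  *		printf("page was written since the clear\n"); | 
|---|
|  |  */ | 
|---|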
| 1832 |  | 
|---|
| 1833 | typedef struct { | 
|---|
| 1834 | u64 pme; | 
|---|
| 1835 | } pagemap_entry_t; | 
|---|
| 1836 |  | 
|---|
| 1837 | struct pagemapread { | 
|---|
| 1838 | int pos, len;		/* units: PM_ENTRY_BYTES, not bytes */ | 
|---|
| 1839 | pagemap_entry_t *buffer; | 
|---|
| 1840 | bool show_pfn; | 
|---|
| 1841 | }; | 
|---|
| 1842 |  | 
|---|
| 1843 | #define PAGEMAP_WALK_SIZE	(PMD_SIZE) | 
|---|
| 1844 | #define PAGEMAP_WALK_MASK	(PMD_MASK) | 
|---|
| 1845 |  | 
|---|
| 1846 | #define PM_ENTRY_BYTES		sizeof(pagemap_entry_t) | 
|---|
| 1847 | #define PM_PFRAME_BITS		55 | 
|---|
| 1848 | #define PM_PFRAME_MASK		GENMASK_ULL(PM_PFRAME_BITS - 1, 0) | 
|---|
| 1849 | #define PM_SOFT_DIRTY		BIT_ULL(55) | 
|---|
| 1850 | #define PM_MMAP_EXCLUSIVE	BIT_ULL(56) | 
|---|
| 1851 | #define PM_UFFD_WP		BIT_ULL(57) | 
|---|
| 1852 | #define PM_GUARD_REGION		BIT_ULL(58) | 
|---|
| 1853 | #define PM_FILE			BIT_ULL(61) | 
|---|
| 1854 | #define PM_SWAP			BIT_ULL(62) | 
|---|
| 1855 | #define PM_PRESENT		BIT_ULL(63) | 
|---|
| 1856 |  | 
|---|
| 1857 | #define PM_END_OF_BUFFER    1 | 
|---|
| 1858 |  | 
|---|
| 1859 | static inline pagemap_entry_t make_pme(u64 frame, u64 flags) | 
|---|
| 1860 | { | 
|---|
| 1861 | return (pagemap_entry_t) { .pme = (frame & PM_PFRAME_MASK) | flags }; | 
|---|
| 1862 | } | 
|---|
| 1863 |  | 
|---|
| 1864 | static int add_to_pagemap(pagemap_entry_t *pme, struct pagemapread *pm) | 
|---|
| 1865 | { | 
|---|
| 1866 | pm->buffer[pm->pos++] = *pme; | 
|---|
| 1867 | if (pm->pos >= pm->len) | 
|---|
| 1868 | return PM_END_OF_BUFFER; | 
|---|
| 1869 | return 0; | 
|---|
| 1870 | } | 
|---|
| 1871 |  | 
|---|
| 1872 | static bool __folio_page_mapped_exclusively(struct folio *folio, struct page *page) | 
|---|
| 1873 | { | 
|---|
| 1874 | if (IS_ENABLED(CONFIG_PAGE_MAPCOUNT)) | 
|---|
| 1875 | return folio_precise_page_mapcount(folio, page) == 1; | 
|---|
| 1876 | return !folio_maybe_mapped_shared(folio); | 
|---|
| 1877 | } | 
|---|
| 1878 |  | 
|---|
| 1879 | static int pagemap_pte_hole(unsigned long start, unsigned long end, | 
|---|
| 1880 | __always_unused int depth, struct mm_walk *walk) | 
|---|
| 1881 | { | 
|---|
| 1882 | struct pagemapread *pm = walk->private; | 
|---|
| 1883 | unsigned long addr = start; | 
|---|
| 1884 | int err = 0; | 
|---|
| 1885 |  | 
|---|
| 1886 | while (addr < end) { | 
|---|
| 1887 | struct vm_area_struct *vma = find_vma(walk->mm, addr); | 
|---|
| 1888 | pagemap_entry_t pme = make_pme(0, 0); | 
|---|
| 1889 | /* End of address space hole, which we mark as non-present. */ | 
|---|
| 1890 | unsigned long hole_end; | 
|---|
| 1891 |  | 
|---|
| 1892 | if (vma) | 
|---|
| 1893 | hole_end = min(end, vma->vm_start); | 
|---|
| 1894 | else | 
|---|
| 1895 | hole_end = end; | 
|---|
| 1896 |  | 
|---|
| 1897 | for (; addr < hole_end; addr += PAGE_SIZE) { | 
|---|
| 1898 | err = add_to_pagemap(&pme, pm); | 
|---|
| 1899 | if (err) | 
|---|
| 1900 | goto out; | 
|---|
| 1901 | } | 
|---|
| 1902 |  | 
|---|
| 1903 | if (!vma) | 
|---|
| 1904 | break; | 
|---|
| 1905 |  | 
|---|
| 1906 | /* Addresses in the VMA. */ | 
|---|
| 1907 | if (vma->vm_flags & VM_SOFTDIRTY) | 
|---|
| 1908 | pme = make_pme(0, PM_SOFT_DIRTY); | 
|---|
| 1909 | for (; addr < min(end, vma->vm_end); addr += PAGE_SIZE) { | 
|---|
| 1910 | err = add_to_pagemap(&pme, pm); | 
|---|
| 1911 | if (err) | 
|---|
| 1912 | goto out; | 
|---|
| 1913 | } | 
|---|
| 1914 | } | 
|---|
| 1915 | out: | 
|---|
| 1916 | return err; | 
|---|
| 1917 | } | 
|---|
| 1918 |  | 
|---|
| 1919 | static pagemap_entry_t pte_to_pagemap_entry(struct pagemapread *pm, | 
|---|
| 1920 | struct vm_area_struct *vma, unsigned long addr, pte_t pte) | 
|---|
| 1921 | { | 
|---|
| 1922 | u64 frame = 0, flags = 0; | 
|---|
| 1923 | struct page *page = NULL; | 
|---|
| 1924 | struct folio *folio; | 
|---|
| 1925 |  | 
|---|
| 1926 | if (pte_present(pte)) { | 
|---|
| 1927 | if (pm->show_pfn) | 
|---|
| 1928 | frame = pte_pfn(pte); | 
|---|
| 1929 | flags |= PM_PRESENT; | 
|---|
| 1930 | page = vm_normal_page(vma, addr, pte); | 
|---|
| 1931 | if (pte_soft_dirty(pte)) | 
|---|
| 1932 | flags |= PM_SOFT_DIRTY; | 
|---|
| 1933 | if (pte_uffd_wp(pte)) | 
|---|
| 1934 | flags |= PM_UFFD_WP; | 
|---|
| 1935 | } else if (is_swap_pte(pte)) { | 
|---|
| 1936 | swp_entry_t entry; | 
|---|
| 1937 | if (pte_swp_soft_dirty(pte)) | 
|---|
| 1938 | flags |= PM_SOFT_DIRTY; | 
|---|
| 1939 | if (pte_swp_uffd_wp(pte)) | 
|---|
| 1940 | flags |= PM_UFFD_WP; | 
|---|
| 1941 | entry = pte_to_swp_entry(pte); | 
|---|
| 1942 | if (pm->show_pfn) { | 
|---|
| 1943 | pgoff_t offset; | 
|---|
| 1944 | /* | 
|---|
| 1945 | * For PFN swap entries, keep the offset field as a bare | 
|---|
| 1946 | * PFN, to stay compatible with old smaps. | 
|---|
| 1947 | */ | 
|---|
| 1948 | if (is_pfn_swap_entry(entry)) | 
|---|
| 1949 | offset = swp_offset_pfn(entry); | 
|---|
| 1950 | else | 
|---|
| 1951 | offset = swp_offset(entry); | 
|---|
| 1952 | frame = swp_type(entry) | | 
|---|
| 1953 | (offset << MAX_SWAPFILES_SHIFT); | 
|---|
| 1954 | } | 
|---|
| 1955 | flags |= PM_SWAP; | 
|---|
| 1956 | if (is_pfn_swap_entry(entry)) | 
|---|
| 1957 | page = pfn_swap_entry_to_page(entry); | 
|---|
| 1958 | if (pte_marker_entry_uffd_wp(entry)) | 
|---|
| 1959 | flags |= PM_UFFD_WP; | 
|---|
| 1960 | if (is_guard_swp_entry(entry)) | 
|---|
| 1961 | flags |= PM_GUARD_REGION; | 
|---|
| 1962 | } | 
|---|
| 1963 |  | 
|---|
| 1964 | if (page) { | 
|---|
| 1965 | folio = page_folio(page); | 
|---|
| 1966 | if (!folio_test_anon(folio)) | 
|---|
| 1967 | flags |= PM_FILE; | 
|---|
| 1968 | if ((flags & PM_PRESENT) && | 
|---|
| 1969 | __folio_page_mapped_exclusively(folio, page)) | 
|---|
| 1970 | flags |= PM_MMAP_EXCLUSIVE; | 
|---|
| 1971 | } | 
|---|
| 1972 | if (vma->vm_flags & VM_SOFTDIRTY) | 
|---|
| 1973 | flags |= PM_SOFT_DIRTY; | 
|---|
| 1974 |  | 
|---|
| 1975 | return make_pme(frame, flags); | 
|---|
| 1976 | } | 
|---|
| 1977 |  | 
|---|
| 1978 | static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end, | 
|---|
| 1979 | struct mm_walk *walk) | 
|---|
| 1980 | { | 
|---|
| 1981 | struct vm_area_struct *vma = walk->vma; | 
|---|
| 1982 | struct pagemapread *pm = walk->private; | 
|---|
| 1983 | spinlock_t *ptl; | 
|---|
| 1984 | pte_t *pte, *orig_pte; | 
|---|
| 1985 | int err = 0; | 
|---|
| 1986 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE | 
|---|
| 1987 |  | 
|---|
| 1988 | ptl = pmd_trans_huge_lock(pmdp, vma); | 
|---|
| 1989 | if (ptl) { | 
|---|
| 1990 | unsigned int idx = (addr & ~PMD_MASK) >> PAGE_SHIFT; | 
|---|
| 1991 | u64 flags = 0, frame = 0; | 
|---|
| 1992 | pmd_t pmd = *pmdp; | 
|---|
| 1993 | struct page *page = NULL; | 
|---|
| 1994 | struct folio *folio = NULL; | 
|---|
| 1995 |  | 
|---|
| 1996 | if (vma->vm_flags & VM_SOFTDIRTY) | 
|---|
| 1997 | flags |= PM_SOFT_DIRTY; | 
|---|
| 1998 |  | 
|---|
| 1999 | if (pmd_present(pmd)) { | 
|---|
| 2000 | page = pmd_page(pmd); | 
|---|
| 2001 |  | 
|---|
| 2002 | flags |= PM_PRESENT; | 
|---|
| 2003 | if (pmd_soft_dirty(pmd)) | 
|---|
| 2004 | flags |= PM_SOFT_DIRTY; | 
|---|
| 2005 | if (pmd_uffd_wp(pmd)) | 
|---|
| 2006 | flags |= PM_UFFD_WP; | 
|---|
| 2007 | if (pm->show_pfn) | 
|---|
| 2008 | frame = pmd_pfn(pmd) + idx; | 
|---|
| 2009 | } | 
|---|
| 2010 | #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION | 
|---|
| 2011 | else if (is_swap_pmd(pmd)) { | 
|---|
| 2012 | swp_entry_t entry = pmd_to_swp_entry(pmd); | 
|---|
| 2013 | unsigned long offset; | 
|---|
| 2014 |  | 
|---|
| 2015 | if (pm->show_pfn) { | 
|---|
| 2016 | if (is_pfn_swap_entry(entry)) | 
|---|
| 2017 | offset = swp_offset_pfn(entry) + idx; | 
|---|
| 2018 | else | 
|---|
| 2019 | offset = swp_offset(entry) + idx; | 
|---|
| 2020 | frame = swp_type(entry) | | 
|---|
| 2021 | (offset << MAX_SWAPFILES_SHIFT); | 
|---|
| 2022 | } | 
|---|
| 2023 | flags |= PM_SWAP; | 
|---|
| 2024 | if (pmd_swp_soft_dirty(pmd)) | 
|---|
| 2025 | flags |= PM_SOFT_DIRTY; | 
|---|
| 2026 | if (pmd_swp_uffd_wp(pmd)) | 
|---|
| 2027 | flags |= PM_UFFD_WP; | 
|---|
| 2028 | VM_BUG_ON(!is_pmd_migration_entry(pmd)); | 
|---|
| 2029 | page = pfn_swap_entry_to_page(entry); | 
|---|
| 2030 | } | 
|---|
| 2031 | #endif | 
|---|
| 2032 |  | 
|---|
| 2033 | if (page) { | 
|---|
| 2034 | folio = page_folio(page); | 
|---|
| 2035 | if (!folio_test_anon(folio)) | 
|---|
| 2036 | flags |= PM_FILE; | 
|---|
| 2037 | } | 
|---|
| 2038 |  | 
|---|
| 2039 | for (; addr != end; addr += PAGE_SIZE, idx++) { | 
|---|
| 2040 | u64 cur_flags = flags; | 
|---|
| 2041 | pagemap_entry_t pme; | 
|---|
| 2042 |  | 
|---|
| 2043 | if (folio && (flags & PM_PRESENT) && | 
|---|
| 2044 | __folio_page_mapped_exclusively(folio, page)) | 
|---|
| 2045 | cur_flags |= PM_MMAP_EXCLUSIVE; | 
|---|
| 2046 |  | 
|---|
| 2047 | pme = make_pme(frame, cur_flags); | 
|---|
| 2048 | err = add_to_pagemap(&pme, pm); | 
|---|
| 2049 | if (err) | 
|---|
| 2050 | break; | 
|---|
| 2051 | if (pm->show_pfn) { | 
|---|
| 2052 | if (flags & PM_PRESENT) | 
|---|
| 2053 | frame++; | 
|---|
| 2054 | else if (flags & PM_SWAP) | 
|---|
| 2055 | frame += (1 << MAX_SWAPFILES_SHIFT); | 
|---|
| 2056 | } | 
|---|
| 2057 | } | 
|---|
| 2058 | spin_unlock(ptl); | 
|---|
| 2059 | return err; | 
|---|
| 2060 | } | 
|---|
| 2061 | #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ | 
|---|
| 2062 |  | 
|---|
| 2063 | /* | 
|---|
| 2064 | * We can assume that @vma always points to a valid VMA and @end never | 
|---|
| 2065 | * goes beyond vma->vm_end. | 
|---|
| 2066 | */ | 
|---|
| 2067 | orig_pte = pte = pte_offset_map_lock(walk->mm, pmdp, addr, &ptl); | 
|---|
| 2068 | if (!pte) { | 
|---|
| 2069 | walk->action = ACTION_AGAIN; | 
|---|
| 2070 | return err; | 
|---|
| 2071 | } | 
|---|
| 2072 | for (; addr < end; pte++, addr += PAGE_SIZE) { | 
|---|
| 2073 | pagemap_entry_t pme; | 
|---|
| 2074 |  | 
|---|
| 2075 | pme = pte_to_pagemap_entry(pm, vma, addr, ptep_get(pte)); | 
|---|
| 2076 | err = add_to_pagemap(&pme, pm); | 
|---|
| 2077 | if (err) | 
|---|
| 2078 | break; | 
|---|
| 2079 | } | 
|---|
| 2080 | pte_unmap_unlock(orig_pte, ptl); | 
|---|
| 2081 |  | 
|---|
| 2082 | cond_resched(); | 
|---|
| 2083 |  | 
|---|
| 2084 | return err; | 
|---|
| 2085 | } | 
|---|
| 2086 |  | 
|---|
| 2087 | #ifdef CONFIG_HUGETLB_PAGE | 
|---|
| 2088 | /* This function walks within one hugetlb entry in the single call */ | 
|---|
| 2089 | static int pagemap_hugetlb_range(pte_t *ptep, unsigned long hmask, | 
|---|
| 2090 | unsigned long addr, unsigned long end, | 
|---|
| 2091 | struct mm_walk *walk) | 
|---|
| 2092 | { | 
|---|
| 2093 | struct pagemapread *pm = walk->private; | 
|---|
| 2094 | struct vm_area_struct *vma = walk->vma; | 
|---|
| 2095 | u64 flags = 0, frame = 0; | 
|---|
| 2096 | spinlock_t *ptl; | 
|---|
| 2097 | int err = 0; | 
|---|
| 2098 | pte_t pte; | 
|---|
| 2099 |  | 
|---|
| 2100 | if (vma->vm_flags & VM_SOFTDIRTY) | 
|---|
| 2101 | flags |= PM_SOFT_DIRTY; | 
|---|
| 2102 |  | 
|---|
| 2103 | ptl = huge_pte_lock(hstate_vma(vma), walk->mm, ptep); | 
|---|
| 2104 | pte = huge_ptep_get(walk->mm, addr, ptep); | 
|---|
| 2105 | if (pte_present(pte)) { | 
|---|
| 2106 | struct folio *folio = page_folio(pte_page(pte)); | 
|---|
| 2107 |  | 
|---|
| 2108 | if (!folio_test_anon(folio)) | 
|---|
| 2109 | flags |= PM_FILE; | 
|---|
| 2110 |  | 
|---|
| 2111 | if (!folio_maybe_mapped_shared(folio) && | 
|---|
| 2112 | !hugetlb_pmd_shared(ptep)) | 
|---|
| 2113 | flags |= PM_MMAP_EXCLUSIVE; | 
|---|
| 2114 |  | 
|---|
| 2115 | if (huge_pte_uffd_wp(pte)) | 
|---|
| 2116 | flags |= PM_UFFD_WP; | 
|---|
| 2117 |  | 
|---|
| 2118 | flags |= PM_PRESENT; | 
|---|
| 2119 | if (pm->show_pfn) | 
|---|
| 2120 | frame = pte_pfn(pte) + | 
|---|
| 2121 | ((addr & ~hmask) >> PAGE_SHIFT); | 
|---|
| 2122 | } else if (pte_swp_uffd_wp_any(pte)) { | 
|---|
| 2123 | flags |= PM_UFFD_WP; | 
|---|
| 2124 | } | 
|---|
| 2125 |  | 
|---|
| 2126 | for (; addr != end; addr += PAGE_SIZE) { | 
|---|
| 2127 | pagemap_entry_t pme = make_pme(frame, flags); | 
|---|
| 2128 |  | 
|---|
| 2129 | err = add_to_pagemap(&pme, pm); | 
|---|
| 2130 | if (err) | 
|---|
| 2131 | break; | 
|---|
| 2132 | if (pm->show_pfn && (flags & PM_PRESENT)) | 
|---|
| 2133 | frame++; | 
|---|
| 2134 | } | 
|---|
| 2135 |  | 
|---|
| 2136 | spin_unlock(ptl); | 
|---|
| 2137 | cond_resched(); | 
|---|
| 2138 |  | 
|---|
| 2139 | return err; | 
|---|
| 2140 | } | 
|---|
| 2141 | #else | 
|---|
| 2142 | #define pagemap_hugetlb_range	NULL | 
|---|
| 2143 | #endif /* HUGETLB_PAGE */ | 
|---|
| 2144 |  | 
|---|
| 2145 | static const struct mm_walk_ops pagemap_ops = { | 
|---|
| 2146 | .pmd_entry	= pagemap_pmd_range, | 
|---|
| 2147 | .pte_hole	= pagemap_pte_hole, | 
|---|
| 2148 | .hugetlb_entry	= pagemap_hugetlb_range, | 
|---|
| 2149 | .walk_lock	= PGWALK_RDLOCK, | 
|---|
| 2150 | }; | 
|---|
| 2151 |  | 
|---|
| 2152 | /* | 
|---|
| 2153 | * /proc/pid/pagemap - an array mapping virtual pages to pfns | 
|---|
| 2154 | * | 
|---|
| 2155 | * For each page in the address space, this file contains one 64-bit entry | 
|---|
| 2156 | * consisting of the following: | 
|---|
| 2157 | * | 
|---|
| 2158 | * Bits 0-54  page frame number (PFN) if present | 
|---|
| 2159 | * Bits 0-4   swap type if swapped | 
|---|
| 2160 | * Bits 5-54  swap offset if swapped | 
|---|
| 2161 | * Bit  55    pte is soft-dirty (see Documentation/admin-guide/mm/soft-dirty.rst) | 
|---|
| 2162 | * Bit  56    page exclusively mapped | 
|---|
| 2163 | * Bit  57    pte is uffd-wp write-protected | 
|---|
| 2164 | * Bit  58    pte is a guard region | 
|---|
| 2165 | * Bits 59-60 zero | 
|---|
| 2166 | * Bit  61    page is file-page or shared-anon | 
|---|
| 2167 | * Bit  62    page swapped | 
|---|
| 2168 | * Bit  63    page present | 
|---|
| 2169 | * | 
|---|
| 2170 | * If the page is not present but in swap, then the PFN contains an | 
|---|
| 2171 | * encoding of the swap file number and the page's offset into the | 
|---|
| 2172 | * swap. Unmapped pages return a null PFN. This allows determining | 
|---|
| 2173 | * precisely which pages are mapped (or in swap) and comparing mapped | 
|---|
| 2174 | * pages between processes. | 
|---|
| 2175 | * | 
|---|
| 2176 | * Efficient users of this interface will use /proc/pid/maps to | 
|---|
| 2177 | * determine which areas of memory are actually mapped and llseek to | 
|---|
| 2178 | * skip over unmapped regions. | 
|---|
| 2179 | */ | 
|---|
| 2180 | static ssize_t pagemap_read(struct file *file, char __user *buf, | 
|---|
| 2181 | size_t count, loff_t *ppos) | 
|---|
| 2182 | { | 
|---|
| 2183 | struct mm_struct *mm = file->private_data; | 
|---|
| 2184 | struct pagemapread pm; | 
|---|
| 2185 | unsigned long src; | 
|---|
| 2186 | unsigned long svpfn; | 
|---|
| 2187 | unsigned long start_vaddr; | 
|---|
| 2188 | unsigned long end_vaddr; | 
|---|
| 2189 | int ret = 0, copied = 0; | 
|---|
| 2190 |  | 
|---|
| 2191 | if (!mm || !mmget_not_zero(mm)) | 
|---|
| 2192 | goto out; | 
|---|
| 2193 |  | 
|---|
| 2194 | ret = -EINVAL; | 
|---|
| 2195 | /* file position must be aligned */ | 
|---|
| 2196 | if ((*ppos % PM_ENTRY_BYTES) || (count % PM_ENTRY_BYTES)) | 
|---|
| 2197 | goto out_mm; | 
|---|
| 2198 |  | 
|---|
| 2199 | ret = 0; | 
|---|
| 2200 | if (!count) | 
|---|
| 2201 | goto out_mm; | 
|---|
| 2202 |  | 
|---|
| 2203 | /* do not disclose physical addresses: attack vector */ | 
|---|
| 2204 | pm.show_pfn = file_ns_capable(file, &init_user_ns, CAP_SYS_ADMIN); | 
|---|
| 2205 |  | 
|---|
| 2206 | pm.len = (PAGEMAP_WALK_SIZE >> PAGE_SHIFT); | 
|---|
| 2207 | pm.buffer = kmalloc_array(pm.len, PM_ENTRY_BYTES, GFP_KERNEL); | 
|---|
| 2208 | ret = -ENOMEM; | 
|---|
| 2209 | if (!pm.buffer) | 
|---|
| 2210 | goto out_mm; | 
|---|
| 2211 |  | 
|---|
| 2212 | src = *ppos; | 
|---|
| 2213 | svpfn = src / PM_ENTRY_BYTES; | 
|---|
| 2214 | end_vaddr = mm->task_size; | 
|---|
| 2215 |  | 
|---|
| 2216 | /* watch out for wraparound */ | 
|---|
| 2217 | start_vaddr = end_vaddr; | 
|---|
| 2218 | if (svpfn <= (ULONG_MAX >> PAGE_SHIFT)) { | 
|---|
| 2219 | unsigned long end; | 
|---|
| 2220 |  | 
|---|
| 2221 | ret = mmap_read_lock_killable(mm); | 
|---|
| 2222 | if (ret) | 
|---|
| 2223 | goto out_free; | 
|---|
| 2224 | start_vaddr = untagged_addr_remote(mm, svpfn << PAGE_SHIFT); | 
|---|
| 2225 | mmap_read_unlock(mm); | 
|---|
| 2226 |  | 
|---|
| 2227 | end = start_vaddr + ((count / PM_ENTRY_BYTES) << PAGE_SHIFT); | 
|---|
| 2228 | if (end >= start_vaddr && end < mm->task_size) | 
|---|
| 2229 | end_vaddr = end; | 
|---|
| 2230 | } | 
|---|
| 2231 |  | 
|---|
| 2232 | /* Ensure the address is inside the task */ | 
|---|
| 2233 | if (start_vaddr > mm->task_size) | 
|---|
| 2234 | start_vaddr = end_vaddr; | 
|---|
| 2235 |  | 
|---|
| 2236 | ret = 0; | 
|---|
| 2237 | while (count && (start_vaddr < end_vaddr)) { | 
|---|
| 2238 | int len; | 
|---|
| 2239 | unsigned long end; | 
|---|
| 2240 |  | 
|---|
| 2241 | pm.pos = 0; | 
|---|
| 2242 | end = (start_vaddr + PAGEMAP_WALK_SIZE) & PAGEMAP_WALK_MASK; | 
|---|
| 2243 | /* overflow ? */ | 
|---|
| 2244 | if (end < start_vaddr || end > end_vaddr) | 
|---|
| 2245 | end = end_vaddr; | 
|---|
| 2246 | ret = mmap_read_lock_killable(mm); | 
|---|
| 2247 | if (ret) | 
|---|
| 2248 | goto out_free; | 
|---|
| 2249 | ret = walk_page_range(mm, start_vaddr, end, &pagemap_ops, &pm); | 
|---|
| 2250 | mmap_read_unlock(mm); | 
|---|
| 2251 | start_vaddr = end; | 
|---|
| 2252 |  | 
|---|
| 2253 | len = min(count, PM_ENTRY_BYTES * pm.pos); | 
|---|
| 2254 | if (copy_to_user(buf, pm.buffer, len)) { | 
|---|
| 2255 | ret = -EFAULT; | 
|---|
| 2256 | goto out_free; | 
|---|
| 2257 | } | 
|---|
| 2258 | copied += len; | 
|---|
| 2259 | buf += len; | 
|---|
| 2260 | count -= len; | 
|---|
| 2261 | } | 
|---|
| 2262 | *ppos += copied; | 
|---|
| 2263 | if (!ret || ret == PM_END_OF_BUFFER) | 
|---|
| 2264 | ret = copied; | 
|---|
| 2265 |  | 
|---|
| 2266 | out_free: | 
|---|
| 2267 | kfree(pm.buffer); | 
|---|
| 2268 | out_mm: | 
|---|
| 2269 | mmput(mm); | 
|---|
| 2270 | out: | 
|---|
| 2271 | return ret; | 
|---|
| 2272 | } | 
|---|
| 2273 |  | 
|---|
| 2274 | static int pagemap_open(struct inode *inode, struct file *file) | 
|---|
| 2275 | { | 
|---|
| 2276 | struct mm_struct *mm; | 
|---|
| 2277 |  | 
|---|
| 2278 | mm = proc_mem_open(inode, PTRACE_MODE_READ); | 
|---|
| 2279 | if (IS_ERR_OR_NULL(mm)) | 
|---|
| 2280 | return mm ? PTR_ERR(mm) : -ESRCH; | 
|---|
| 2281 | file->private_data = mm; | 
|---|
| 2282 | return 0; | 
|---|
| 2283 | } | 
|---|
| 2284 |  | 
|---|
| 2285 | static int pagemap_release(struct inode *inode, struct file *file) | 
|---|
| 2286 | { | 
|---|
| 2287 | struct mm_struct *mm = file->private_data; | 
|---|
| 2288 |  | 
|---|
| 2289 | if (mm) | 
|---|
| 2290 | mmdrop(mm); | 
|---|
| 2291 | return 0; | 
|---|
| 2292 | } | 
|---|
| 2293 |  | 
|---|
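|  | /* | 
|---|
|  |  * Illustrative userspace decode of one pagemap entry (error handling | 
|---|
|  |  * omitted; vaddr and page_size are placeholders), matching the bit | 
|---|
|  |  * layout documented above pagemap_read(). Note that without | 
|---|
|  |  * CAP_SYS_ADMIN the PFN field reads back as zero, see pm.show_pfn: | 
|---|
|  |  * | 
|---|
|  |  *	uint64_t ent; | 
|---|
|  |  *	int fd = open("/proc/self/pagemap", O_RDONLY); | 
|---|
|  |  *	pread(fd, &ent, sizeof(ent), (vaddr / page_size) * sizeof(ent)); | 
|---|
|  |  *	if (ent & (1ULL << 63))		// PM_PRESENT | 
|---|
|  |  *		printf("pfn %llu\n", ent & ((1ULL << 55) - 1)); | 
|---|
|  |  *	else if (ent & (1ULL << 62))	// PM_SWAP | 
|---|
|  |  *		printf("swap type %llu offset %llu\n", | 
|---|
|  |  *		       ent & 0x1f, (ent >> 5) & ((1ULL << 50) - 1)); | 
|---|
|  |  */ | 
|---|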
| 2294 | #define PM_SCAN_CATEGORIES	(PAGE_IS_WPALLOWED | PAGE_IS_WRITTEN |	\ | 
|---|
| 2295 | PAGE_IS_FILE | PAGE_IS_PRESENT |	\ | 
|---|
| 2296 | PAGE_IS_SWAPPED | PAGE_IS_PFNZERO |	\ | 
|---|
| 2297 | PAGE_IS_HUGE | PAGE_IS_SOFT_DIRTY |	\ | 
|---|
| 2298 | PAGE_IS_GUARD) | 
|---|
| 2299 | #define PM_SCAN_FLAGS		(PM_SCAN_WP_MATCHING | PM_SCAN_CHECK_WPASYNC) | 
|---|
| 2300 |  | 
|---|
| 2301 | struct pagemap_scan_private { | 
|---|
| 2302 | struct pm_scan_arg arg; | 
|---|
| 2303 | unsigned long masks_of_interest, cur_vma_category; | 
|---|
| 2304 | struct page_region *vec_buf; | 
|---|
| 2305 | unsigned long vec_buf_len, vec_buf_index, found_pages; | 
|---|
| 2306 | struct page_region __user *vec_out; | 
|---|
| 2307 | }; | 
|---|
| 2308 |  | 
|---|
| 2309 | static unsigned long pagemap_page_category(struct pagemap_scan_private *p, | 
|---|
| 2310 | struct vm_area_struct *vma, | 
|---|
| 2311 | unsigned long addr, pte_t pte) | 
|---|
| 2312 | { | 
|---|
| 2313 | unsigned long categories = 0; | 
|---|
| 2314 |  | 
|---|
| 2315 | if (pte_present(pte)) { | 
|---|
| 2316 | struct page *page; | 
|---|
| 2317 |  | 
|---|
| 2318 | categories |= PAGE_IS_PRESENT; | 
|---|
| 2319 | if (!pte_uffd_wp(pte)) | 
|---|
| 2320 | categories |= PAGE_IS_WRITTEN; | 
|---|
| 2321 |  | 
|---|
| 2322 | if (p->masks_of_interest & PAGE_IS_FILE) { | 
|---|
| 2323 | page = vm_normal_page(vma, addr, pte); | 
|---|
| 2324 | if (page && !PageAnon(page)) | 
|---|
| 2325 | categories |= PAGE_IS_FILE; | 
|---|
| 2326 | } | 
|---|
| 2327 |  | 
|---|
| 2328 | if (is_zero_pfn(pte_pfn(pte))) | 
|---|
| 2329 | categories |= PAGE_IS_PFNZERO; | 
|---|
| 2330 | if (pte_soft_dirty(pte)) | 
|---|
| 2331 | categories |= PAGE_IS_SOFT_DIRTY; | 
|---|
| 2332 | } else if (is_swap_pte(pte)) { | 
|---|
| 2333 | swp_entry_t swp; | 
|---|
| 2334 |  | 
|---|
| 2335 | categories |= PAGE_IS_SWAPPED; | 
|---|
| 2336 | if (!pte_swp_uffd_wp_any(pte)) | 
|---|
| 2337 | categories |= PAGE_IS_WRITTEN; | 
|---|
| 2338 |  | 
|---|
| 2339 | swp = pte_to_swp_entry(pte); | 
|---|
| 2340 | if (is_guard_swp_entry(swp)) | 
|---|
| 2341 | categories |= PAGE_IS_GUARD; | 
|---|
| 2342 | else if ((p->masks_of_interest & PAGE_IS_FILE) && | 
|---|
| 2343 | is_pfn_swap_entry(swp) && | 
|---|
| 2344 | !folio_test_anon(pfn_swap_entry_folio(swp))) | 
|---|
| 2345 | categories |= PAGE_IS_FILE; | 
|---|
| 2346 |  | 
|---|
| 2347 | if (pte_swp_soft_dirty(pte)) | 
|---|
| 2348 | categories |= PAGE_IS_SOFT_DIRTY; | 
|---|
| 2349 | } | 
|---|
| 2350 |  | 
|---|
| 2351 | return categories; | 
|---|
| 2352 | } | 
|---|
| 2353 |  | 
|---|
| 2354 | static void make_uffd_wp_pte(struct vm_area_struct *vma, | 
|---|
| 2355 | unsigned long addr, pte_t *pte, pte_t ptent) | 
|---|
| 2356 | { | 
|---|
| 2357 | if (pte_present(ptent)) { | 
|---|
| 2358 | pte_t old_pte; | 
|---|
| 2359 |  | 
|---|
| 2360 | old_pte = ptep_modify_prot_start(vma, addr, pte); | 
|---|
| 2361 | ptent = pte_mkuffd_wp(old_pte); | 
|---|
| 2362 | ptep_modify_prot_commit(vma, addr, pte, old_pte, ptent); | 
|---|
| 2363 | } else if (is_swap_pte(ptent)) { | 
|---|
| 2364 | ptent = pte_swp_mkuffd_wp(ptent); | 
|---|
| 2365 | set_pte_at(vma->vm_mm, addr, pte, ptent); | 
|---|
| 2366 | } else { | 
|---|
| 2367 | set_pte_at(vma->vm_mm, addr, pte, | 
|---|
| 2368 | make_pte_marker(PTE_MARKER_UFFD_WP)); | 
|---|
| 2369 | } | 
|---|
| 2370 | } | 
|---|
| 2371 |  | 
|---|
| 2372 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE | 
|---|
| 2373 | static unsigned long pagemap_thp_category(struct pagemap_scan_private *p, | 
|---|
| 2374 | struct vm_area_struct *vma, | 
|---|
| 2375 | unsigned long addr, pmd_t pmd) | 
|---|
| 2376 | { | 
|---|
| 2377 | unsigned long categories = PAGE_IS_HUGE; | 
|---|
| 2378 |  | 
|---|
| 2379 | if (pmd_present(pmd)) { | 
|---|
| 2380 | struct page *page; | 
|---|
| 2381 |  | 
|---|
| 2382 | categories |= PAGE_IS_PRESENT; | 
|---|
| 2383 | if (!pmd_uffd_wp(pmd)) | 
|---|
| 2384 | categories |= PAGE_IS_WRITTEN; | 
|---|
| 2385 |  | 
|---|
| 2386 | if (p->masks_of_interest & PAGE_IS_FILE) { | 
|---|
| 2387 | page = vm_normal_page_pmd(vma, addr, pmd); | 
|---|
| 2388 | if (page && !PageAnon(page)) | 
|---|
| 2389 | categories |= PAGE_IS_FILE; | 
|---|
| 2390 | } | 
|---|
| 2391 |  | 
|---|
| 2392 | if (is_huge_zero_pmd(pmd)) | 
|---|
| 2393 | categories |= PAGE_IS_PFNZERO; | 
|---|
| 2394 | if (pmd_soft_dirty(pmd)) | 
|---|
| 2395 | categories |= PAGE_IS_SOFT_DIRTY; | 
|---|
| 2396 | } else if (is_swap_pmd(pmd)) { | 
|---|
| 2397 | swp_entry_t swp; | 
|---|
| 2398 |  | 
|---|
| 2399 | categories |= PAGE_IS_SWAPPED; | 
|---|
| 2400 | if (!pmd_swp_uffd_wp(pmd)) | 
|---|
| 2401 | categories |= PAGE_IS_WRITTEN; | 
|---|
| 2402 | if (pmd_swp_soft_dirty(pmd)) | 
|---|
| 2403 | categories |= PAGE_IS_SOFT_DIRTY; | 
|---|
| 2404 |  | 
|---|
| 2405 | if (p->masks_of_interest & PAGE_IS_FILE) { | 
|---|
| 2406 | swp = pmd_to_swp_entry(pmd); | 
|---|
| 2407 | if (is_pfn_swap_entry(swp) && | 
|---|
| 2408 | !folio_test_anon(pfn_swap_entry_folio(swp))) | 
|---|
| 2409 | categories |= PAGE_IS_FILE; | 
|---|
| 2410 | } | 
|---|
| 2411 | } | 
|---|
| 2412 |  | 
|---|
| 2413 | return categories; | 
|---|
| 2414 | } | 
|---|
| 2415 |  | 
|---|
| 2416 | static void make_uffd_wp_pmd(struct vm_area_struct *vma, | 
|---|
| 2417 | unsigned long addr, pmd_t *pmdp) | 
|---|
| 2418 | { | 
|---|
| 2419 | pmd_t old, pmd = *pmdp; | 
|---|
| 2420 |  | 
|---|
| 2421 | if (pmd_present(pmd)) { | 
|---|
| 2422 | old = pmdp_invalidate_ad(vma, addr, pmdp); | 
|---|
| 2423 | pmd = pmd_mkuffd_wp(old); | 
|---|
| 2424 | set_pmd_at(vma->vm_mm, addr, pmdp, pmd); | 
|---|
| 2425 | } else if (is_migration_entry(pmd_to_swp_entry(pmd))) { | 
|---|
| 2426 | pmd = pmd_swp_mkuffd_wp(pmd); | 
|---|
| 2427 | set_pmd_at(vma->vm_mm, addr, pmdp, pmd); | 
|---|
| 2428 | } | 
|---|
| 2429 | } | 
|---|
| 2430 | #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ | 
|---|
| 2431 |  | 
|---|
| 2432 | #ifdef CONFIG_HUGETLB_PAGE | 
|---|
| 2433 | static unsigned long pagemap_hugetlb_category(pte_t pte) | 
|---|
| 2434 | { | 
|---|
| 2435 | unsigned long categories = PAGE_IS_HUGE; | 
|---|
| 2436 |  | 
|---|
| 2437 | /* | 
|---|
| 2438 | * According to pagemap_hugetlb_range(), a file-backed HugeTLB | 
|---|
| 2439 | * page cannot be swapped out, so PAGE_IS_FILE is not checked | 
|---|
| 2440 | * for swapped pages. | 
|---|
| 2441 | */ | 
|---|
| 2442 | if (pte_present(pte)) { | 
|---|
| 2443 | categories |= PAGE_IS_PRESENT; | 
|---|
| 2444 | if (!huge_pte_uffd_wp(pte)) | 
|---|
| 2445 | categories |= PAGE_IS_WRITTEN; | 
|---|
| 2446 | if (!PageAnon(pte_page(pte))) | 
|---|
| 2447 | categories |= PAGE_IS_FILE; | 
|---|
| 2448 | if (is_zero_pfn(pte_pfn(pte))) | 
|---|
| 2449 | categories |= PAGE_IS_PFNZERO; | 
|---|
| 2450 | if (pte_soft_dirty(pte)) | 
|---|
| 2451 | categories |= PAGE_IS_SOFT_DIRTY; | 
|---|
| 2452 | } else if (is_swap_pte(pte)) { | 
|---|
| 2453 | categories |= PAGE_IS_SWAPPED; | 
|---|
| 2454 | if (!pte_swp_uffd_wp_any(pte)) | 
|---|
| 2455 | categories |= PAGE_IS_WRITTEN; | 
|---|
| 2456 | if (pte_swp_soft_dirty(pte)) | 
|---|
| 2457 | categories |= PAGE_IS_SOFT_DIRTY; | 
|---|
| 2458 | } | 
|---|
| 2459 |  | 
|---|
| 2460 | return categories; | 
|---|
| 2461 | } | 
|---|
| 2462 |  | 
|---|
| 2463 | static void make_uffd_wp_huge_pte(struct vm_area_struct *vma, | 
|---|
| 2464 | unsigned long addr, pte_t *ptep, | 
|---|
| 2465 | pte_t ptent) | 
|---|
| 2466 | { | 
|---|
| 2467 | unsigned long psize; | 
|---|
| 2468 |  | 
|---|
| 2469 | if (is_hugetlb_entry_hwpoisoned(ptent) || is_pte_marker(ptent)) | 
|---|
| 2470 | return; | 
|---|
| 2471 |  | 
|---|
| 2472 | psize = huge_page_size(hstate_vma(vma)); | 
|---|
| 2473 |  | 
|---|
| 2474 | if (is_hugetlb_entry_migration(ptent)) | 
|---|
| 2475 | set_huge_pte_at(vma->vm_mm, addr, ptep, | 
|---|
| 2476 | pte_swp_mkuffd_wp(ptent), psize); | 
|---|
| 2477 | else if (!huge_pte_none(ptent)) | 
|---|
| 2478 | huge_ptep_modify_prot_commit(vma, addr, ptep, ptent, | 
|---|
| 2479 | huge_pte_mkuffd_wp(ptent)); | 
|---|
| 2480 | else | 
|---|
| 2481 | set_huge_pte_at(vma->vm_mm, addr, ptep, | 
|---|
| 2482 | make_pte_marker(PTE_MARKER_UFFD_WP), psize); | 
|---|
| 2483 | } | 
|---|
| 2484 | #endif /* CONFIG_HUGETLB_PAGE */ | 
|---|
| 2485 |  | 
|---|
| 2486 | #if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLB_PAGE) | 
|---|
| 2487 | static void pagemap_scan_backout_range(struct pagemap_scan_private *p, | 
|---|
| 2488 | unsigned long addr, unsigned long end) | 
|---|
| 2489 | { | 
|---|
| 2490 | struct page_region *cur_buf = &p->vec_buf[p->vec_buf_index]; | 
|---|
| 2491 |  | 
|---|
| 2492 | if (!p->vec_buf) | 
|---|
| 2493 | return; | 
|---|
| 2494 |  | 
|---|
| 2495 | if (cur_buf->start != addr) | 
|---|
| 2496 | cur_buf->end = addr; | 
|---|
| 2497 | else | 
|---|
| 2498 | cur_buf->start = cur_buf->end = 0; | 
|---|
| 2499 |  | 
|---|
| 2500 | p->found_pages -= (end - addr) / PAGE_SIZE; | 
|---|
| 2501 | } | 
|---|
| 2502 | #endif | 
|---|
| 2503 |  | 
|---|
| 2504 | static bool pagemap_scan_is_interesting_page(unsigned long categories, | 
|---|
| 2505 | const struct pagemap_scan_private *p) | 
|---|
| 2506 | { | 
|---|
| 2507 | categories ^= p->arg.category_inverted; | 
|---|
| 2508 | if ((categories & p->arg.category_mask) != p->arg.category_mask) | 
|---|
| 2509 | return false; | 
|---|
| 2510 | if (p->arg.category_anyof_mask && !(categories & p->arg.category_anyof_mask)) | 
|---|
| 2511 | return false; | 
|---|
| 2512 |  | 
|---|
| 2513 | return true; | 
|---|
| 2514 | } | 
|---|
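|  | /* | 
|---|
|  |  * Worked example of the filtering above (masks illustrative): with | 
|---|
|  |  * category_mask = category_inverted = PAGE_IS_WRITTEN and an empty | 
|---|
|  |  * category_anyof_mask, a page that was NOT written has PAGE_IS_WRITTEN | 
|---|
|  |  * flipped on by the XOR and passes the mask check, while a written page | 
|---|
|  |  * ends up with the bit clear and is filtered out. The inversion mask | 
|---|
|  |  * thus selects "clean" pages without needing a separate negative filter. | 
|---|
|  |  */ | 
|---|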
| 2515 |  | 
|---|
| 2516 | static bool pagemap_scan_is_interesting_vma(unsigned long categories, | 
|---|
| 2517 | const struct pagemap_scan_private *p) | 
|---|
| 2518 | { | 
|---|
| 2519 | unsigned long required = p->arg.category_mask & PAGE_IS_WPALLOWED; | 
|---|
| 2520 |  | 
|---|
| 2521 | categories ^= p->arg.category_inverted; | 
|---|
| 2522 | if ((categories & required) != required) | 
|---|
| 2523 | return false; | 
|---|
| 2524 |  | 
|---|
| 2525 | return true; | 
|---|
| 2526 | } | 
|---|
| 2527 |  | 
|---|
| 2528 | static int pagemap_scan_test_walk(unsigned long start, unsigned long end, | 
|---|
| 2529 | struct mm_walk *walk) | 
|---|
| 2530 | { | 
|---|
| 2531 | struct pagemap_scan_private *p = walk->private; | 
|---|
| 2532 | struct vm_area_struct *vma = walk->vma; | 
|---|
| 2533 | unsigned long vma_category = 0; | 
|---|
| 2534 | bool wp_allowed = userfaultfd_wp_async(vma) && | 
|---|
| 2535 | userfaultfd_wp_use_markers(vma); | 
|---|
| 2536 |  | 
|---|
| 2537 | if (!wp_allowed) { | 
|---|
| 2538 | /* User requested explicit failure over wp-async capability */ | 
|---|
| 2539 | if (p->arg.flags & PM_SCAN_CHECK_WPASYNC) | 
|---|
| 2540 | return -EPERM; | 
|---|
| 2541 | /* | 
|---|
| 2542 | * User requires wr-protect but allows silently skipping | 
|---|
| 2543 | * unsupported vmas. | 
|---|
| 2544 | */ | 
|---|
| 2545 | if (p->arg.flags & PM_SCAN_WP_MATCHING) | 
|---|
| 2546 | return 1; | 
|---|
| 2547 | /* | 
|---|
| 2548 | * Otherwise the request doesn't involve wr-protects at all; | 
|---|
| 2549 | * fall through to the remaining checks and allow the vma walk. | 
|---|
| 2550 | */ | 
|---|
| 2551 | } | 
|---|
| 2552 |  | 
|---|
| 2553 | if (vma->vm_flags & VM_PFNMAP) | 
|---|
| 2554 | return 1; | 
|---|
| 2555 |  | 
|---|
| 2556 | if (wp_allowed) | 
|---|
| 2557 | vma_category |= PAGE_IS_WPALLOWED; | 
|---|
| 2558 |  | 
|---|
| 2559 | if (vma->vm_flags & VM_SOFTDIRTY) | 
|---|
| 2560 | vma_category |= PAGE_IS_SOFT_DIRTY; | 
|---|
| 2561 |  | 
|---|
| 2562 | if (!pagemap_scan_is_interesting_vma(vma_category, p)) | 
|---|
| 2563 | return 1; | 
|---|
| 2564 |  | 
|---|
| 2565 | p->cur_vma_category = vma_category; | 
|---|
| 2566 |  | 
|---|
| 2567 | return 0; | 
|---|
| 2568 | } | 
|---|
| 2569 |  | 
|---|
| 2570 | static bool pagemap_scan_push_range(unsigned long categories, | 
|---|
| 2571 | struct pagemap_scan_private *p, | 
|---|
| 2572 | unsigned long addr, unsigned long end) | 
|---|
| 2573 | { | 
|---|
| 2574 | struct page_region *cur_buf = &p->vec_buf[p->vec_buf_index]; | 
|---|
| 2575 |  | 
|---|
| 2576 | /* | 
|---|
| 2577 | * When there is no output buffer provided at all, the sentinel values | 
|---|
| 2578 | * won't match here. There is no other way for `cur_buf->end` to be | 
|---|
| 2579 | * non-zero other than it being non-empty. | 
|---|
| 2580 | */ | 
|---|
| 2581 | if (addr == cur_buf->end && categories == cur_buf->categories) { | 
|---|
| 2582 | cur_buf->end = end; | 
|---|
| 2583 | return true; | 
|---|
| 2584 | } | 
|---|
| 2585 |  | 
|---|
| 2586 | if (cur_buf->end) { | 
|---|
| 2587 | if (p->vec_buf_index >= p->vec_buf_len - 1) | 
|---|
| 2588 | return false; | 
|---|
| 2589 |  | 
|---|
| 2590 | cur_buf = &p->vec_buf[++p->vec_buf_index]; | 
|---|
| 2591 | } | 
|---|
| 2592 |  | 
|---|
| 2593 | cur_buf->start = addr; | 
|---|
| 2594 | cur_buf->end = end; | 
|---|
| 2595 | cur_buf->categories = categories; | 
|---|
| 2596 |  | 
|---|
| 2597 | return true; | 
|---|
| 2598 | } | 
|---|
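|  | /* | 
|---|
|  |  * Worked example of the merge above (addresses illustrative): if the | 
|---|
|  |  * current page_region is { start = 0x1000, end = 0x3000 } with matching | 
|---|
|  |  * categories and the next range is [0x3000, 0x5000), the region is | 
|---|
|  |  * simply extended to end = 0x5000 instead of consuming another slot in | 
|---|
|  |  * the output vector. | 
|---|
|  |  */ | 
|---|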
| 2599 |  | 
|---|
| 2600 | static int pagemap_scan_output(unsigned long categories, | 
|---|
| 2601 | struct pagemap_scan_private *p, | 
|---|
| 2602 | unsigned long addr, unsigned long *end) | 
|---|
| 2603 | { | 
|---|
| 2604 | unsigned long n_pages, total_pages; | 
|---|
| 2605 | int ret = 0; | 
|---|
| 2606 |  | 
|---|
| 2607 | if (!p->vec_buf) | 
|---|
| 2608 | return 0; | 
|---|
| 2609 |  | 
|---|
| 2610 | categories &= p->arg.return_mask; | 
|---|
| 2611 |  | 
|---|
| 2612 | n_pages = (*end - addr) / PAGE_SIZE; | 
|---|
| 2613 | if (check_add_overflow(p->found_pages, n_pages, &total_pages) || | 
|---|
| 2614 | total_pages > p->arg.max_pages) { | 
|---|
| 2615 | size_t n_too_much = total_pages - p->arg.max_pages; | 
|---|
| 2616 | *end -= n_too_much * PAGE_SIZE; | 
|---|
| 2617 | n_pages -= n_too_much; | 
|---|
| 2618 | ret = -ENOSPC; | 
|---|
| 2619 | } | 
|---|
| 2620 |  | 
|---|
| 2621 | if (!pagemap_scan_push_range(categories, p, addr, *end)) { | 
|---|
| 2622 | *end = addr; | 
|---|
| 2623 | n_pages = 0; | 
|---|
| 2624 | ret = -ENOSPC; | 
|---|
| 2625 | } | 
|---|
| 2626 |  | 
|---|
| 2627 | p->found_pages += n_pages; | 
|---|
| 2628 | if (ret) | 
|---|
| 2629 | p->arg.walk_end = *end; | 
|---|
| 2630 |  | 
|---|
| 2631 | return ret; | 
|---|
| 2632 | } | 
|---|
| 2633 |  | 
|---|
| 2634 | static int pagemap_scan_thp_entry(pmd_t *pmd, unsigned long start, | 
|---|
| 2635 | unsigned long end, struct mm_walk *walk) | 
|---|
| 2636 | { | 
|---|
| 2637 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE | 
|---|
| 2638 | struct pagemap_scan_private *p = walk->private; | 
|---|
| 2639 | struct vm_area_struct *vma = walk->vma; | 
|---|
| 2640 | unsigned long categories; | 
|---|
| 2641 | spinlock_t *ptl; | 
|---|
| 2642 | int ret = 0; | 
|---|
| 2643 |  | 
|---|
| 2644 | ptl = pmd_trans_huge_lock(pmd, vma); | 
|---|
| 2645 | if (!ptl) | 
|---|
| 2646 | return -ENOENT; | 
|---|
| 2647 |  | 
|---|
| 2648 | categories = p->cur_vma_category | | 
|---|
| 2649 | pagemap_thp_category(p, vma, start, *pmd); | 
|---|
| 2650 |  | 
|---|
| 2651 | if (!pagemap_scan_is_interesting_page(categories, p)) | 
|---|
| 2652 | goto out_unlock; | 
|---|
| 2653 |  | 
|---|
| 2654 | ret = pagemap_scan_output(categories, p, start, &end); | 
|---|
| 2655 | if (start == end) | 
|---|
| 2656 | goto out_unlock; | 
|---|
| 2657 |  | 
|---|
| 2658 | if (~p->arg.flags & PM_SCAN_WP_MATCHING) | 
|---|
| 2659 | goto out_unlock; | 
|---|
| 2660 | if (~categories & PAGE_IS_WRITTEN) | 
|---|
| 2661 | goto out_unlock; | 
|---|
| 2662 |  | 
|---|
| 2663 | /* | 
|---|
| 2664 | * Break huge page into small pages if the WP operation | 
|---|
| 2665 | * needs to be performed on a portion of the huge page. | 
|---|
| 2666 | */ | 
|---|
| 2667 | if (end != start + HPAGE_SIZE) { | 
|---|
| 2668 | spin_unlock(ptl); | 
|---|
| 2669 | split_huge_pmd(vma, pmd, start); | 
|---|
| 2670 | pagemap_scan_backout_range(p, start, end); | 
|---|
| 2671 | /* Report as if there was no THP */ | 
|---|
| 2672 | return -ENOENT; | 
|---|
| 2673 | } | 
|---|
| 2674 |  | 
|---|
| 2675 | make_uffd_wp_pmd(vma, start, pmd); | 
|---|
| 2676 | flush_tlb_range(vma, start, end); | 
|---|
| 2677 | out_unlock: | 
|---|
| 2678 | spin_unlock(ptl); | 
|---|
| 2679 | return ret; | 
|---|
| 2680 | #else /* !CONFIG_TRANSPARENT_HUGEPAGE */ | 
|---|
| 2681 | return -ENOENT; | 
|---|
| 2682 | #endif | 
|---|
| 2683 | } | 
|---|
| 2684 |  | 
|---|
| 2685 | static int pagemap_scan_pmd_entry(pmd_t *pmd, unsigned long start, | 
|---|
| 2686 | unsigned long end, struct mm_walk *walk) | 
|---|
| 2687 | { | 
|---|
| 2688 | struct pagemap_scan_private *p = walk->private; | 
|---|
| 2689 | struct vm_area_struct *vma = walk->vma; | 
|---|
| 2690 | unsigned long addr, flush_end = 0; | 
|---|
| 2691 | pte_t *pte, *start_pte; | 
|---|
| 2692 | spinlock_t *ptl; | 
|---|
| 2693 | int ret; | 
|---|
| 2694 |  | 
|---|
| 2695 | ret = pagemap_scan_thp_entry(pmd, start, end, walk); | 
|---|
| 2696 | if (ret != -ENOENT) | 
|---|
| 2697 | return ret; | 
|---|
| 2698 |  | 
|---|
| 2699 | ret = 0; | 
|---|
| 2700 | start_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, start, &ptl); | 
|---|
| 2701 | if (!pte) { | 
|---|
| 2702 | walk->action = ACTION_AGAIN; | 
|---|
| 2703 | return 0; | 
|---|
| 2704 | } | 
|---|
| 2705 |  | 
|---|
| 2706 | arch_enter_lazy_mmu_mode(); | 
|---|
| 2707 |  | 
|---|
| 2708 | if ((p->arg.flags & PM_SCAN_WP_MATCHING) && !p->vec_out) { | 
|---|
| 2709 | /* Fast path for performing exclusive WP */ | 
|---|
| 2710 | for (addr = start; addr != end; pte++, addr += PAGE_SIZE) { | 
|---|
| 2711 | pte_t ptent = ptep_get(ptep: pte); | 
|---|
| 2712 |  | 
|---|
| 2713 | if ((pte_present(ptent) && pte_uffd_wp(ptent)) || |
|---|
| 2714 | pte_swp_uffd_wp_any(ptent)) |
|---|
| 2715 | continue; | 
|---|
| 2716 | make_uffd_wp_pte(vma, addr, pte, ptent); | 
|---|
| 2717 | if (!flush_end) | 
|---|
| 2718 | start = addr; | 
|---|
| 2719 | flush_end = addr + PAGE_SIZE; | 
|---|
| 2720 | } | 
|---|
| 2721 | goto flush_and_return; | 
|---|
| 2722 | } | 
|---|
| 2723 |  | 
|---|
| 2724 | if (!p->arg.category_anyof_mask && !p->arg.category_inverted && | 
|---|
| 2725 | p->arg.category_mask == PAGE_IS_WRITTEN && | 
|---|
| 2726 | p->arg.return_mask == PAGE_IS_WRITTEN) { | 
|---|
| 2727 | for (addr = start; addr < end; pte++, addr += PAGE_SIZE) { | 
|---|
| 2728 | unsigned long next = addr + PAGE_SIZE; | 
|---|
| 2729 | pte_t ptent = ptep_get(pte); |
|---|
| 2730 |  | 
|---|
| 2731 | if ((pte_present(ptent) && pte_uffd_wp(ptent)) || |
|---|
| 2732 | pte_swp_uffd_wp_any(ptent)) |
|---|
| 2733 | continue; | 
|---|
| 2734 | ret = pagemap_scan_output(p->cur_vma_category | PAGE_IS_WRITTEN, |
|---|
| 2735 | p, addr, &next); |
|---|
| 2736 | if (next == addr) | 
|---|
| 2737 | break; | 
|---|
| 2738 | if (~p->arg.flags & PM_SCAN_WP_MATCHING) | 
|---|
| 2739 | continue; | 
|---|
| 2740 | make_uffd_wp_pte(vma, addr, pte, ptent); | 
|---|
| 2741 | if (!flush_end) | 
|---|
| 2742 | start = addr; | 
|---|
| 2743 | flush_end = next; | 
|---|
| 2744 | } | 
|---|
| 2745 | goto flush_and_return; | 
|---|
| 2746 | } | 
|---|
| 2747 |  | 
|---|
| 2748 | for (addr = start; addr != end; pte++, addr += PAGE_SIZE) { | 
|---|
| 2749 | pte_t ptent = ptep_get(pte); |
|---|
| 2750 | unsigned long categories = p->cur_vma_category | | 
|---|
| 2751 | pagemap_page_category(p, vma, addr, ptent); |
|---|
| 2752 | unsigned long next = addr + PAGE_SIZE; | 
|---|
| 2753 |  | 
|---|
| 2754 | if (!pagemap_scan_is_interesting_page(categories, p)) | 
|---|
| 2755 | continue; | 
|---|
| 2756 |  | 
|---|
| 2757 | ret = pagemap_scan_output(categories, p, addr, &next); |
|---|
| 2758 | if (next == addr) | 
|---|
| 2759 | break; | 
|---|
| 2760 |  | 
|---|
| 2761 | if (~p->arg.flags & PM_SCAN_WP_MATCHING) | 
|---|
| 2762 | continue; | 
|---|
| 2763 | if (~categories & PAGE_IS_WRITTEN) | 
|---|
| 2764 | continue; | 
|---|
| 2765 |  | 
|---|
| 2766 | make_uffd_wp_pte(vma, addr, pte, ptent); | 
|---|
| 2767 | if (!flush_end) | 
|---|
| 2768 | start = addr; | 
|---|
| 2769 | flush_end = next; | 
|---|
| 2770 | } | 
|---|
| 2771 |  | 
|---|
| 2772 | flush_and_return: | 
|---|
| 2773 | if (flush_end) | 
|---|
| 2774 | flush_tlb_range(vma, start, addr); | 
|---|
| 2775 |  | 
|---|
| 2776 | arch_leave_lazy_mmu_mode(); | 
|---|
| 2777 | pte_unmap_unlock(start_pte, ptl); | 
|---|
| 2778 |  | 
|---|
| 2779 | cond_resched(); | 
|---|
| 2780 | return ret; | 
|---|
| 2781 | } | 
|---|
| 2782 |  | 
|---|
| 2783 | #ifdef CONFIG_HUGETLB_PAGE | 
|---|
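| | /* |
|---|
| | * HugeTLB variant.  The read-only case just samples the huge PTE; the |
|---|
| | * write-protect case additionally takes i_mmap_lock_write(), presumably |
|---|
| | * to serialise against huge-PMD sharing.  Partial huge-page WP is not |
|---|
| | * possible, so such a request is backed out and the walk is reported |
|---|
| | * as ending at the start of the page. |
|---|
| | */ |
|---|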
| 2784 | static int pagemap_scan_hugetlb_entry(pte_t *ptep, unsigned long hmask, | 
|---|
| 2785 | unsigned long start, unsigned long end, | 
|---|
| 2786 | struct mm_walk *walk) | 
|---|
| 2787 | { | 
|---|
| 2788 | struct pagemap_scan_private *p = walk->private; | 
|---|
| 2789 | struct vm_area_struct *vma = walk->vma; | 
|---|
| 2790 | unsigned long categories; | 
|---|
| 2791 | spinlock_t *ptl; | 
|---|
| 2792 | int ret = 0; | 
|---|
| 2793 | pte_t pte; | 
|---|
| 2794 |  | 
|---|
| 2795 | if (~p->arg.flags & PM_SCAN_WP_MATCHING) { | 
|---|
| 2796 | /* Go the short route when not write-protecting pages. */ | 
|---|
| 2797 |  | 
|---|
| 2798 | pte = huge_ptep_get(walk->mm, start, ptep); |
|---|
| 2799 | categories = p->cur_vma_category | pagemap_hugetlb_category(pte); | 
|---|
| 2800 |  | 
|---|
| 2801 | if (!pagemap_scan_is_interesting_page(categories, p)) | 
|---|
| 2802 | return 0; | 
|---|
| 2803 |  | 
|---|
| 2804 | return pagemap_scan_output(categories, p, start, &end); |
|---|
| 2805 | } | 
|---|
| 2806 |  | 
|---|
| 2807 | i_mmap_lock_write(vma->vm_file->f_mapping); |
|---|
| 2808 | ptl = huge_pte_lock(hstate_vma(vma), vma->vm_mm, ptep); |
|---|
| 2809 |  | 
|---|
| 2810 | pte = huge_ptep_get(walk->mm, start, ptep); |
|---|
| 2811 | categories = p->cur_vma_category | pagemap_hugetlb_category(pte); | 
|---|
| 2812 |  | 
|---|
| 2813 | if (!pagemap_scan_is_interesting_page(categories, p)) | 
|---|
| 2814 | goto out_unlock; | 
|---|
| 2815 |  | 
|---|
| 2816 | ret = pagemap_scan_output(categories, p, start, &end); |
|---|
| 2817 | if (start == end) | 
|---|
| 2818 | goto out_unlock; | 
|---|
| 2819 |  | 
|---|
| 2820 | if (~categories & PAGE_IS_WRITTEN) | 
|---|
| 2821 | goto out_unlock; | 
|---|
| 2822 |  | 
|---|
| 2823 | if (end != start + HPAGE_SIZE) { | 
|---|
| 2824 | /* Partial HugeTLB page WP isn't possible. */ | 
|---|
| 2825 | pagemap_scan_backout_range(p, start, end); |
|---|
| 2826 | p->arg.walk_end = start; | 
|---|
| 2827 | ret = 0; | 
|---|
| 2828 | goto out_unlock; | 
|---|
| 2829 | } | 
|---|
| 2830 |  | 
|---|
| 2831 | make_uffd_wp_huge_pte(vma, start, ptep, pte); |
|---|
| 2832 | flush_hugetlb_tlb_range(vma, start, end); | 
|---|
| 2833 |  | 
|---|
| 2834 | out_unlock: | 
|---|
| 2835 | spin_unlock(ptl); |
|---|
| 2836 | i_mmap_unlock_write(vma->vm_file->f_mapping); |
|---|
| 2837 |  | 
|---|
| 2838 | return ret; | 
|---|
| 2839 | } | 
|---|
| 2840 | #else | 
|---|
| 2841 | #define pagemap_scan_hugetlb_entry NULL | 
|---|
| 2842 | #endif | 
|---|
| 2843 |  | 
|---|
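| | /* |
|---|
| | * Holes carry no per-page state, so only the containing VMA's |
|---|
| | * categories can match; if write-protection was requested, the whole |
|---|
| | * hole is covered via uffd_wp_range(). |
|---|
| | */ |
|---|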
| 2844 | static int pagemap_scan_pte_hole(unsigned long addr, unsigned long end, | 
|---|
| 2845 | int depth, struct mm_walk *walk) | 
|---|
| 2846 | { | 
|---|
| 2847 | struct pagemap_scan_private *p = walk->private; | 
|---|
| 2848 | struct vm_area_struct *vma = walk->vma; | 
|---|
| 2849 | int ret, err; | 
|---|
| 2850 |  | 
|---|
| 2851 | if (!vma || !pagemap_scan_is_interesting_page(p->cur_vma_category, p)) |
|---|
| 2852 | return 0; | 
|---|
| 2853 |  | 
|---|
| 2854 | ret = pagemap_scan_output(p->cur_vma_category, p, addr, &end); |
|---|
| 2855 | if (addr == end) | 
|---|
| 2856 | return ret; | 
|---|
| 2857 |  | 
|---|
| 2858 | if (~p->arg.flags & PM_SCAN_WP_MATCHING) | 
|---|
| 2859 | return ret; | 
|---|
| 2860 |  | 
|---|
| 2861 | err = uffd_wp_range(vma, addr, end - addr, true); |
|---|
| 2862 | if (err < 0) | 
|---|
| 2863 | ret = err; | 
|---|
| 2864 |  | 
|---|
| 2865 | return ret; | 
|---|
| 2866 | } | 
|---|
| 2867 |  | 
|---|
| 2868 | static const struct mm_walk_ops pagemap_scan_ops = { | 
|---|
| 2869 | .test_walk = pagemap_scan_test_walk, | 
|---|
| 2870 | .pmd_entry = pagemap_scan_pmd_entry, | 
|---|
| 2871 | .pte_hole = pagemap_scan_pte_hole, | 
|---|
| 2872 | .hugetlb_entry = pagemap_scan_hugetlb_entry, | 
|---|
| 2873 | }; | 
|---|
| 2874 |  | 
|---|
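| | /* |
|---|
| | * Copy in and sanity-check the ioctl argument: the struct size must |
|---|
| | * match exactly, only known flags and categories may be set, start |
|---|
| | * must be page-aligned (end is rounded up), and both the scanned |
|---|
| | * range and the output vector must be accessible.  max_pages == 0 |
|---|
| | * means no limit. |
|---|
| | */ |
|---|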
| 2875 | static int pagemap_scan_get_args(struct pm_scan_arg *arg, | 
|---|
| 2876 | unsigned long uarg) | 
|---|
| 2877 | { | 
|---|
| 2878 | if (copy_from_user(arg, (void __user *)uarg, sizeof(*arg))) |
|---|
| 2879 | return -EFAULT; | 
|---|
| 2880 |  | 
|---|
| 2881 | if (arg->size != sizeof(struct pm_scan_arg)) | 
|---|
| 2882 | return -EINVAL; | 
|---|
| 2883 |  | 
|---|
| 2884 | /* Validate requested features */ | 
|---|
| 2885 | if (arg->flags & ~PM_SCAN_FLAGS) | 
|---|
| 2886 | return -EINVAL; | 
|---|
| 2887 | if ((arg->category_inverted | arg->category_mask | | 
|---|
| 2888 | arg->category_anyof_mask | arg->return_mask) & ~PM_SCAN_CATEGORIES) | 
|---|
| 2889 | return -EINVAL; | 
|---|
| 2890 |  | 
|---|
| 2891 | arg->start = untagged_addr((unsigned long)arg->start); | 
|---|
| 2892 | arg->end = untagged_addr((unsigned long)arg->end); | 
|---|
| 2893 | arg->vec = untagged_addr((unsigned long)arg->vec); | 
|---|
| 2894 |  | 
|---|
| 2895 | /* Validate memory pointers */ | 
|---|
| 2896 | if (!IS_ALIGNED(arg->start, PAGE_SIZE)) | 
|---|
| 2897 | return -EINVAL; | 
|---|
| 2898 | if (!access_ok((void __user *)(long)arg->start, arg->end - arg->start)) | 
|---|
| 2899 | return -EFAULT; | 
|---|
| 2900 | if (!arg->vec && arg->vec_len) | 
|---|
| 2901 | return -EINVAL; | 
|---|
| 2902 | if (UINT_MAX == SIZE_MAX && arg->vec_len > SIZE_MAX) | 
|---|
| 2903 | return -EINVAL; | 
|---|
| 2904 | if (arg->vec && !access_ok((void __user *)(long)arg->vec, | 
|---|
| 2905 | size_mul(arg->vec_len, sizeof(struct page_region)))) | 
|---|
| 2906 | return -EFAULT; | 
|---|
| 2907 |  | 
|---|
| 2908 | /* Fixup default values */ | 
|---|
| 2909 | arg->end = ALIGN(arg->end, PAGE_SIZE); | 
|---|
| 2910 | arg->walk_end = 0; | 
|---|
| 2911 | if (!arg->max_pages) | 
|---|
| 2912 | arg->max_pages = ULONG_MAX; | 
|---|
| 2913 |  | 
|---|
| 2914 | return 0; | 
|---|
| 2915 | } | 
|---|
| 2916 |  | 
|---|
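| | /* Report walk_end back into the user's copy of the arguments. */ |
|---|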
| 2917 | static int pagemap_scan_writeback_args(struct pm_scan_arg *arg, | 
|---|
| 2918 | unsigned long uargl) | 
|---|
| 2919 | { | 
|---|
| 2920 | struct pm_scan_arg __user *uarg	= (void __user *)uargl; | 
|---|
| 2921 |  | 
|---|
| 2922 | if (copy_to_user(&uarg->walk_end, &arg->walk_end, sizeof(arg->walk_end))) |
|---|
| 2923 | return -EFAULT; | 
|---|
| 2924 |  | 
|---|
| 2925 | return 0; | 
|---|
| 2926 | } | 
|---|
| 2927 |  | 
|---|
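| | /* |
|---|
| | * Output ranges are staged in a kernel bounce buffer capped at one |
|---|
| | * walk window's worth of entries, so a huge user-supplied vec_len |
|---|
| | * cannot drive an equally huge kernel allocation. |
|---|
| | */ |
|---|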
| 2928 | static int pagemap_scan_init_bounce_buffer(struct pagemap_scan_private *p) | 
|---|
| 2929 | { | 
|---|
| 2930 | if (!p->arg.vec_len) | 
|---|
| 2931 | return 0; | 
|---|
| 2932 |  | 
|---|
| 2933 | p->vec_buf_len = min_t(size_t, PAGEMAP_WALK_SIZE >> PAGE_SHIFT, | 
|---|
| 2934 | p->arg.vec_len); | 
|---|
| 2935 | p->vec_buf = kmalloc_array(p->vec_buf_len, sizeof(*p->vec_buf), | 
|---|
| 2936 | GFP_KERNEL); | 
|---|
| 2937 | if (!p->vec_buf) | 
|---|
| 2938 | return -ENOMEM; | 
|---|
| 2939 |  | 
|---|
| 2940 | p->vec_buf->start = p->vec_buf->end = 0; | 
|---|
| 2941 | p->vec_out = (struct page_region __user *)(long)p->arg.vec; | 
|---|
| 2942 |  | 
|---|
| 2943 | return 0; | 
|---|
| 2944 | } | 
|---|
| 2945 |  | 
|---|
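| | /* |
|---|
| | * Flush the staged page_region entries to userspace, including the |
|---|
| | * entry currently being built if it is non-empty.  Returns the number |
|---|
| | * of entries written, or -EFAULT. |
|---|
| | */ |
|---|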
| 2946 | static long pagemap_scan_flush_buffer(struct pagemap_scan_private *p) | 
|---|
| 2947 | { | 
|---|
| 2948 | const struct page_region *buf = p->vec_buf; | 
|---|
| 2949 | long n = p->vec_buf_index; | 
|---|
| 2950 |  | 
|---|
| 2951 | if (!p->vec_buf) | 
|---|
| 2952 | return 0; | 
|---|
| 2953 |  | 
|---|
| 2954 | if (buf[n].end != buf[n].start) | 
|---|
| 2955 | n++; | 
|---|
| 2956 |  | 
|---|
| 2957 | if (!n) | 
|---|
| 2958 | return 0; | 
|---|
| 2959 |  | 
|---|
| 2960 | if (copy_to_user(p->vec_out, buf, n * sizeof(*buf))) |
|---|
| 2961 | return -EFAULT; | 
|---|
| 2962 |  | 
|---|
| 2963 | p->arg.vec_len -= n; | 
|---|
| 2964 | p->vec_out += n; | 
|---|
| 2965 |  | 
|---|
| 2966 | p->vec_buf_index = 0; | 
|---|
| 2967 | p->vec_buf_len = min_t(size_t, p->vec_buf_len, p->arg.vec_len); | 
|---|
| 2968 | p->vec_buf->start = p->vec_buf->end = 0; | 
|---|
| 2969 |  | 
|---|
| 2970 | return n; | 
|---|
| 2971 | } | 
|---|
| 2972 |  | 
|---|
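| | /* |
|---|
| | * Top-level PAGEMAP_SCAN loop: walk the range in windows, dropping |
|---|
| | * mmap_lock and flushing the bounce buffer between windows.  -ENOSPC |
|---|
| | * from the walk only means the output filled up; the ioctl then |
|---|
| | * returns the number of ranges written, with walk_end recording how |
|---|
| | * far the scan got. |
|---|
| | */ |
|---|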
| 2973 | static long do_pagemap_scan(struct mm_struct *mm, unsigned long uarg) | 
|---|
| 2974 | { | 
|---|
| 2975 | struct pagemap_scan_private p = {0}; | 
|---|
| 2976 | unsigned long walk_start; | 
|---|
| 2977 | size_t n_ranges_out = 0; | 
|---|
| 2978 | int ret; | 
|---|
| 2979 |  | 
|---|
| 2980 | ret = pagemap_scan_get_args(&p.arg, uarg); |
|---|
| 2981 | if (ret) | 
|---|
| 2982 | return ret; | 
|---|
| 2983 |  | 
|---|
| 2984 | p.masks_of_interest = p.arg.category_mask | p.arg.category_anyof_mask | | 
|---|
| 2985 | p.arg.return_mask; | 
|---|
| 2986 | ret = pagemap_scan_init_bounce_buffer(&p); |
|---|
| 2987 | if (ret) | 
|---|
| 2988 | return ret; | 
|---|
| 2989 |  | 
|---|
| 2990 | for (walk_start = p.arg.start; walk_start < p.arg.end; | 
|---|
| 2991 | walk_start = p.arg.walk_end) { | 
|---|
| 2992 | struct mmu_notifier_range range; | 
|---|
| 2993 | long n_out; | 
|---|
| 2994 |  | 
|---|
| 2995 | if (fatal_signal_pending(current)) { | 
|---|
| 2996 | ret = -EINTR; | 
|---|
| 2997 | break; | 
|---|
| 2998 | } | 
|---|
| 2999 |  | 
|---|
| 3000 | ret = mmap_read_lock_killable(mm); | 
|---|
| 3001 | if (ret) | 
|---|
| 3002 | break; | 
|---|
| 3003 |  | 
|---|
| 3004 | /* Protection change for the range is going to happen. */ | 
|---|
| 3005 | if (p.arg.flags & PM_SCAN_WP_MATCHING) { | 
|---|
| 3006 | mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_VMA, 0, |
|---|
| 3007 | mm, walk_start, p.arg.end); |
|---|
| 3008 | mmu_notifier_invalidate_range_start(&range); |
|---|
| 3009 | } | 
|---|
| 3010 |  | 
|---|
| 3011 | ret = walk_page_range(mm, walk_start, p.arg.end, |
|---|
| 3012 | &pagemap_scan_ops, &p); |
|---|
| 3013 |  | 
|---|
| 3014 | if (p.arg.flags & PM_SCAN_WP_MATCHING) | 
|---|
| 3015 | mmu_notifier_invalidate_range_end(&range); |
|---|
| 3016 |  | 
|---|
| 3017 | mmap_read_unlock(mm); | 
|---|
| 3018 |  | 
|---|
| 3019 | n_out = pagemap_scan_flush_buffer(&p); |
|---|
| 3020 | if (n_out < 0) | 
|---|
| 3021 | ret = n_out; | 
|---|
| 3022 | else | 
|---|
| 3023 | n_ranges_out += n_out; | 
|---|
| 3024 |  | 
|---|
| 3025 | if (ret != -ENOSPC) | 
|---|
| 3026 | break; | 
|---|
| 3027 |  | 
|---|
| 3028 | if (p.arg.vec_len == 0 || p.found_pages == p.arg.max_pages) | 
|---|
| 3029 | break; | 
|---|
| 3030 | } | 
|---|
| 3031 |  | 
|---|
| 3032 | /* ENOSPC signifies early stop (buffer full) from the walk. */ | 
|---|
| 3033 | if (!ret || ret == -ENOSPC) | 
|---|
| 3034 | ret = n_ranges_out; | 
|---|
| 3035 |  | 
|---|
| 3036 | /* The walk_end isn't set when ret is zero */ | 
|---|
| 3037 | if (!p.arg.walk_end) | 
|---|
| 3038 | p.arg.walk_end = p.arg.end; | 
|---|
| 3039 | if (pagemap_scan_writeback_args(&p.arg, uarg)) |
|---|
| 3040 | ret = -EFAULT; | 
|---|
| 3041 |  | 
|---|
| 3042 | kfree(p.vec_buf); |
|---|
| 3043 | return ret; | 
|---|
| 3044 | } | 
|---|
| 3045 |  | 
|---|
| 3046 | static long do_pagemap_cmd(struct file *file, unsigned int cmd, | 
|---|
| 3047 | unsigned long arg) | 
|---|
| 3048 | { | 
|---|
| 3049 | struct mm_struct *mm = file->private_data; | 
|---|
| 3050 |  | 
|---|
| 3051 | switch (cmd) { | 
|---|
| 3052 | case PAGEMAP_SCAN: | 
|---|
| 3053 | return do_pagemap_scan(mm, arg); |
|---|
| 3054 |  | 
|---|
| 3055 | default: | 
|---|
| 3056 | return -EINVAL; | 
|---|
| 3057 | } | 
|---|
| 3058 | } | 
|---|
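| | /* |
|---|
| | * Minimal userspace sketch of the PAGEMAP_SCAN ioctl (illustrative |
|---|
| | * only; see Documentation/admin-guide/mm/pagemap.rst).  With fd open |
|---|
| | * on /proc/<pid>/pagemap, this collects the written ranges of a |
|---|
| | * region (meaningful when it is tracked with userfaultfd wp-async): |
|---|
| | * |
|---|
| | *     struct page_region regions[64]; |
|---|
| | *     struct pm_scan_arg arg = { |
|---|
| | *         .size = sizeof(arg), |
|---|
| | *         .start = (uintptr_t)area, |
|---|
| | *         .end = (uintptr_t)area + len, |
|---|
| | *         .vec = (uintptr_t)regions, |
|---|
| | *         .vec_len = 64, |
|---|
| | *         .category_mask = PAGE_IS_WRITTEN, |
|---|
| | *         .return_mask = PAGE_IS_WRITTEN, |
|---|
| | *     }; |
|---|
| | *     long n = ioctl(fd, PAGEMAP_SCAN, &arg); |
|---|
| | * |
|---|
| | * A positive return value is the number of regions[] entries filled; |
|---|
| | * arg.walk_end reports where the scan stopped. |
|---|
| | */ |
|---|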
| 3059 |  | 
|---|
| 3060 | const struct file_operations proc_pagemap_operations = { | 
|---|
| 3061 | .llseek		= mem_lseek, /* borrow this */ | 
|---|
| 3062 | .read		= pagemap_read, | 
|---|
| 3063 | .open		= pagemap_open, | 
|---|
| 3064 | .release	= pagemap_release, | 
|---|
| 3065 | .unlocked_ioctl = do_pagemap_cmd, | 
|---|
| 3066 | .compat_ioctl	= do_pagemap_cmd, | 
|---|
| 3067 | }; | 
|---|
| 3068 | #endif /* CONFIG_PROC_PAGE_MONITOR */ | 
|---|
| 3069 |  | 
|---|
| 3070 | #ifdef CONFIG_NUMA | 
|---|
| 3071 |  | 
|---|
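| | /* |
|---|
| | * Per-VMA accumulator for /proc/<pid>/numa_maps: page counts broken |
|---|
| | * down by page state and by NUMA node. |
|---|
| | */ |
|---|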
| 3072 | struct numa_maps { | 
|---|
| 3073 | unsigned long pages; | 
|---|
| 3074 | unsigned long anon; | 
|---|
| 3075 | unsigned long active; | 
|---|
| 3076 | unsigned long writeback; | 
|---|
| 3077 | unsigned long mapcount_max; | 
|---|
| 3078 | unsigned long dirty; | 
|---|
| 3079 | unsigned long swapcache; | 
|---|
| 3080 | unsigned long node[MAX_NUMNODES]; | 
|---|
| 3081 | }; | 
|---|
| 3082 |  | 
|---|
| 3083 | struct numa_maps_private { | 
|---|
| 3084 | struct proc_maps_private proc_maps; | 
|---|
| 3085 | struct numa_maps md; | 
|---|
| 3086 | }; | 
|---|
| 3087 |  | 
|---|
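| | /* |
|---|
| | * Fold one mapping of nr_pages pages into the accumulator.  Called |
|---|
| | * with the page table (or huge PTE) lock held, which keeps the page |
|---|
| | * stable while its folio flags are sampled. |
|---|
| | */ |
|---|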
| 3088 | static void gather_stats(struct page *page, struct numa_maps *md, int pte_dirty, | 
|---|
| 3089 | unsigned long nr_pages) | 
|---|
| 3090 | { | 
|---|
| 3091 | struct folio *folio = page_folio(page); | 
|---|
| 3092 | int count; | 
|---|
| 3093 |  | 
|---|
| 3094 | if (IS_ENABLED(CONFIG_PAGE_MAPCOUNT)) | 
|---|
| 3095 | count = folio_precise_page_mapcount(folio, page); | 
|---|
| 3096 | else | 
|---|
| 3097 | count = folio_average_page_mapcount(folio); | 
|---|
| 3098 |  | 
|---|
| 3099 | md->pages += nr_pages; | 
|---|
| 3100 | if (pte_dirty || folio_test_dirty(folio)) | 
|---|
| 3101 | md->dirty += nr_pages; | 
|---|
| 3102 |  | 
|---|
| 3103 | if (folio_test_swapcache(folio)) | 
|---|
| 3104 | md->swapcache += nr_pages; | 
|---|
| 3105 |  | 
|---|
| 3106 | if (folio_test_active(folio) || folio_test_unevictable(folio)) | 
|---|
| 3107 | md->active += nr_pages; | 
|---|
| 3108 |  | 
|---|
| 3109 | if (folio_test_writeback(folio)) | 
|---|
| 3110 | md->writeback += nr_pages; | 
|---|
| 3111 |  | 
|---|
| 3112 | if (folio_test_anon(folio)) | 
|---|
| 3113 | md->anon += nr_pages; | 
|---|
| 3114 |  | 
|---|
| 3115 | if (count > md->mapcount_max) | 
|---|
| 3116 | md->mapcount_max = count; | 
|---|
| 3117 |  | 
|---|
| 3118 | md->node[folio_nid(folio)] += nr_pages; | 
|---|
| 3119 | } | 
|---|
| 3120 |  | 
|---|
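| | /* |
|---|
| | * Filter for countable pages: only present, normal (not special or |
|---|
| | * zone-device), non-reserved pages on nodes that actually have memory |
|---|
| | * are gathered. |
|---|
| | */ |
|---|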
| 3121 | static struct page *can_gather_numa_stats(pte_t pte, struct vm_area_struct *vma, | 
|---|
| 3122 | unsigned long addr) | 
|---|
| 3123 | { | 
|---|
| 3124 | struct page *page; | 
|---|
| 3125 | int nid; | 
|---|
| 3126 |  | 
|---|
| 3127 | if (!pte_present(pte)) |
|---|
| 3128 | return NULL; | 
|---|
| 3129 |  | 
|---|
| 3130 | page = vm_normal_page(vma, addr, pte); | 
|---|
| 3131 | if (!page || is_zone_device_page(page)) | 
|---|
| 3132 | return NULL; | 
|---|
| 3133 |  | 
|---|
| 3134 | if (PageReserved(page)) | 
|---|
| 3135 | return NULL; | 
|---|
| 3136 |  | 
|---|
| 3137 | nid = page_to_nid(page); | 
|---|
| 3138 | if (!node_isset(nid, node_states[N_MEMORY])) | 
|---|
| 3139 | return NULL; | 
|---|
| 3140 |  | 
|---|
| 3141 | return page; | 
|---|
| 3142 | } | 
|---|
| 3143 |  | 
|---|
| 3144 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE | 
|---|
| 3145 | static struct page *can_gather_numa_stats_pmd(pmd_t pmd, | 
|---|
| 3146 | struct vm_area_struct *vma, | 
|---|
| 3147 | unsigned long addr) | 
|---|
| 3148 | { | 
|---|
| 3149 | struct page *page; | 
|---|
| 3150 | int nid; | 
|---|
| 3151 |  | 
|---|
| 3152 | if (!pmd_present(pmd)) | 
|---|
| 3153 | return NULL; | 
|---|
| 3154 |  | 
|---|
| 3155 | page = vm_normal_page_pmd(vma, addr, pmd); | 
|---|
| 3156 | if (!page) | 
|---|
| 3157 | return NULL; | 
|---|
| 3158 |  | 
|---|
| 3159 | if (PageReserved(page)) | 
|---|
| 3160 | return NULL; | 
|---|
| 3161 |  | 
|---|
| 3162 | nid = page_to_nid(page); | 
|---|
| 3163 | if (!node_isset(nid, node_states[N_MEMORY])) | 
|---|
| 3164 | return NULL; | 
|---|
| 3165 |  | 
|---|
| 3166 | return page; | 
|---|
| 3167 | } | 
|---|
| 3168 | #endif | 
|---|
| 3169 |  | 
|---|
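| | /* |
|---|
| | * pmd_entry callback for the numa_maps walk: account a THP in one |
|---|
| | * shot under the PMD lock, otherwise iterate the PTEs under the page |
|---|
| | * table lock. |
|---|
| | */ |
|---|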
| 3170 | static int gather_pte_stats(pmd_t *pmd, unsigned long addr, | 
|---|
| 3171 | unsigned long end, struct mm_walk *walk) | 
|---|
| 3172 | { | 
|---|
| 3173 | struct numa_maps *md = walk->private; | 
|---|
| 3174 | struct vm_area_struct *vma = walk->vma; | 
|---|
| 3175 | spinlock_t *ptl; | 
|---|
| 3176 | pte_t *orig_pte; | 
|---|
| 3177 | pte_t *pte; | 
|---|
| 3178 |  | 
|---|
| 3179 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE | 
|---|
| 3180 | ptl = pmd_trans_huge_lock(pmd, vma); | 
|---|
| 3181 | if (ptl) { | 
|---|
| 3182 | struct page *page; | 
|---|
| 3183 |  | 
|---|
| 3184 | page = can_gather_numa_stats_pmd(*pmd, vma, addr); | 
|---|
| 3185 | if (page) | 
|---|
| 3186 | gather_stats(page, md, pmd_dirty(*pmd), | 
|---|
| 3187 | HPAGE_PMD_SIZE/PAGE_SIZE); | 
|---|
| 3188 | spin_unlock(ptl); | 
|---|
| 3189 | return 0; | 
|---|
| 3190 | } | 
|---|
| 3191 | #endif | 
|---|
| 3192 | orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl); |
|---|
| 3193 | if (!pte) { | 
|---|
| 3194 | walk->action = ACTION_AGAIN; | 
|---|
| 3195 | return 0; | 
|---|
| 3196 | } | 
|---|
| 3197 | do { | 
|---|
| 3198 | pte_t ptent = ptep_get(pte); |
|---|
| 3199 | struct page *page = can_gather_numa_stats(ptent, vma, addr); |
|---|
| 3200 | if (!page) | 
|---|
| 3201 | continue; | 
|---|
| 3202 | gather_stats(page, md, pte_dirty(ptent), 1); |
|---|
| 3203 |  | 
|---|
| 3204 | } while (pte++, addr += PAGE_SIZE, addr != end); | 
|---|
| 3205 | pte_unmap_unlock(orig_pte, ptl); | 
|---|
| 3206 | cond_resched(); | 
|---|
| 3207 | return 0; | 
|---|
| 3208 | } | 
|---|
| 3209 | #ifdef CONFIG_HUGETLB_PAGE | 
|---|
| 3210 | static int gather_hugetlb_stats(pte_t *pte, unsigned long hmask, | 
|---|
| 3211 | unsigned long addr, unsigned long end, struct mm_walk *walk) | 
|---|
| 3212 | { | 
|---|
| 3213 | pte_t huge_pte; | 
|---|
| 3214 | struct numa_maps *md; | 
|---|
| 3215 | struct page *page; | 
|---|
| 3216 | spinlock_t *ptl; | 
|---|
| 3217 |  | 
|---|
| 3218 | ptl = huge_pte_lock(hstate_vma(walk->vma), walk->mm, pte); |
|---|
| 3219 | huge_pte = huge_ptep_get(walk->mm, addr, pte); |
|---|
| 3220 | if (!pte_present(huge_pte)) |
|---|
| 3221 | goto out; | 
|---|
| 3222 |  | 
|---|
| 3223 | page = pte_page(huge_pte); | 
|---|
| 3224 |  | 
|---|
| 3225 | md = walk->private; | 
|---|
| 3226 | gather_stats(page, md, pte_dirty(huge_pte), 1); |
|---|
| 3227 | out: | 
|---|
| 3228 | spin_unlock(ptl); |
|---|
| 3229 | return 0; | 
|---|
| 3230 | } | 
|---|
| 3231 |  | 
|---|
| 3232 | #else | 
|---|
| 3233 | static int gather_hugetlb_stats(pte_t *pte, unsigned long hmask, | 
|---|
| 3234 | unsigned long addr, unsigned long end, struct mm_walk *walk) | 
|---|
| 3235 | { | 
|---|
| 3236 | return 0; | 
|---|
| 3237 | } | 
|---|
| 3238 | #endif | 
|---|
| 3239 |  | 
|---|
| 3240 | static const struct mm_walk_ops show_numa_ops = { | 
|---|
| 3241 | .hugetlb_entry = gather_hugetlb_stats, | 
|---|
| 3242 | .pmd_entry = gather_pte_stats, | 
|---|
| 3243 | .walk_lock = PGWALK_RDLOCK, | 
|---|
| 3244 | }; | 
|---|
| 3245 |  | 
|---|
| 3246 | /* | 
|---|
| 3247 | * Display pages allocated per node and memory policy via /proc. | 
|---|
| 3248 | */ | 
|---|
| 3249 | static int show_numa_map(struct seq_file *m, void *v) | 
|---|
| 3250 | { | 
|---|
| 3251 | struct numa_maps_private *numa_priv = m->private; | 
|---|
| 3252 | struct proc_maps_private *proc_priv = &numa_priv->proc_maps; | 
|---|
| 3253 | struct vm_area_struct *vma = v; | 
|---|
| 3254 | struct numa_maps *md = &numa_priv->md; | 
|---|
| 3255 | struct file *file = vma->vm_file; | 
|---|
| 3256 | struct mm_struct *mm = vma->vm_mm; | 
|---|
| 3257 | char buffer[64]; | 
|---|
| 3258 | struct mempolicy *pol; | 
|---|
| 3259 | pgoff_t ilx; | 
|---|
| 3260 | int nid; | 
|---|
| 3261 |  | 
|---|
| 3262 | if (!mm) | 
|---|
| 3263 | return 0; | 
|---|
| 3264 |  | 
|---|
| 3265 | /* Ensure we start with an empty set of numa_maps statistics. */ | 
|---|
| 3266 | memset(md, 0, sizeof(*md)); |
|---|
| 3267 |  | 
|---|
| 3268 | pol = __get_vma_policy(vma, vma->vm_start, &ilx); |
|---|
| 3269 | if (pol) { | 
|---|
| 3270 | mpol_to_str(buffer, sizeof(buffer), pol); |
|---|
| 3271 | mpol_cond_put(pol); | 
|---|
| 3272 | } else { | 
|---|
| 3273 | mpol_to_str(buffer, sizeof(buffer), proc_priv->task_mempolicy); |
|---|
| 3274 | } | 
|---|
| 3275 |  | 
|---|
| 3276 | seq_printf(m, "%08lx %s", vma->vm_start, buffer); |
|---|
| 3277 |  | 
|---|
| 3278 | if (file) { | 
|---|
| 3279 | seq_puts(m, " file="); |
|---|
| 3280 | seq_path(m, file_user_path(file), "\n\t= "); |
|---|
| 3281 | } else if (vma_is_initial_heap(vma)) { | 
|---|
| 3282 | seq_puts(m, " heap"); |
|---|
| 3283 | } else if (vma_is_initial_stack(vma)) { | 
|---|
| 3284 | seq_puts(m, " stack"); |
|---|
| 3285 | } | 
|---|
| 3286 |  | 
|---|
| 3287 | if (is_vm_hugetlb_page(vma)) | 
|---|
| 3288 | seq_puts(m, " huge"); |
|---|
| 3289 |  | 
|---|
| 3290 | /* mmap_lock is held by m_start */ | 
|---|
| 3291 | walk_page_vma(vma, &show_numa_ops, md); |
|---|
| 3292 |  | 
|---|
| 3293 | if (!md->pages) | 
|---|
| 3294 | goto out; | 
|---|
| 3295 |  | 
|---|
| 3296 | if (md->anon) | 
|---|
| 3297 | seq_printf(m, " anon=%lu", md->anon); |
|---|
| 3298 |  | 
|---|
| 3299 | if (md->dirty) | 
|---|
| 3300 | seq_printf(m, " dirty=%lu", md->dirty); |
|---|
| 3301 |  | 
|---|
| 3302 | if (md->pages != md->anon && md->pages != md->dirty) | 
|---|
| 3303 | seq_printf(m, " mapped=%lu", md->pages); |
|---|
| 3304 |  | 
|---|
| 3305 | if (md->mapcount_max > 1) | 
|---|
| 3306 | seq_printf(m, " mapmax=%lu", md->mapcount_max); |
|---|
| 3307 |  | 
|---|
| 3308 | if (md->swapcache) | 
|---|
| 3309 | seq_printf(m, " swapcache=%lu", md->swapcache); |
|---|
| 3310 |  | 
|---|
| 3311 | if (md->active < md->pages && !is_vm_hugetlb_page(vma)) | 
|---|
| 3312 | seq_printf(m, " active=%lu", md->active); |
|---|
| 3313 |  | 
|---|
| 3314 | if (md->writeback) | 
|---|
| 3315 | seq_printf(m, " writeback=%lu", md->writeback); |
|---|
| 3316 |  | 
|---|
| 3317 | for_each_node_state(nid, N_MEMORY) | 
|---|
| 3318 | if (md->node[nid]) | 
|---|
| 3319 | seq_printf(m, " N%d=%lu", nid, md->node[nid]); |
|---|
| 3320 |  | 
|---|
| 3321 | seq_printf(m, " kernelpagesize_kB=%lu", vma_kernel_pagesize(vma) >> 10); |
|---|
| 3322 | out: | 
|---|
| 3323 | seq_putc(m, '\n'); |
|---|
| 3324 | return 0; | 
|---|
| 3325 | } | 
|---|
| 3326 |  | 
|---|
| 3327 | static const struct seq_operations proc_pid_numa_maps_op = { | 
|---|
| 3328 | .start  = m_start, | 
|---|
| 3329 | .next   = m_next, | 
|---|
| 3330 | .stop   = m_stop, | 
|---|
| 3331 | .show   = show_numa_map, | 
|---|
| 3332 | }; | 
|---|
| 3333 |  | 
|---|
| 3334 | static int pid_numa_maps_open(struct inode *inode, struct file *file) | 
|---|
| 3335 | { | 
|---|
| 3336 | return proc_maps_open(inode, file, &proc_pid_numa_maps_op, |
|---|
| 3337 | sizeof(struct numa_maps_private)); |
|---|
| 3338 | } | 
|---|
| 3339 |  | 
|---|
| 3340 | const struct file_operations proc_pid_numa_maps_operations = { | 
|---|
| 3341 | .open		= pid_numa_maps_open, | 
|---|
| 3342 | .read		= seq_read, | 
|---|
| 3343 | .llseek		= seq_lseek, | 
|---|
| 3344 | .release	= proc_map_release, | 
|---|
| 3345 | }; | 
|---|
| 3346 |  | 
|---|
| 3347 | #endif /* CONFIG_NUMA */ | 
|---|
| 3348 |  | 
|---|