/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MMAN_H
#define _LINUX_MMAN_H

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/percpu_counter.h>

#include <linux/atomic.h>
#include <uapi/linux/mman.h>

/*
 * Arrange for legacy / undefined architecture specific flags to be
 * ignored by mmap handling code.
 */
#ifndef MAP_32BIT
#define MAP_32BIT 0
#endif
#ifndef MAP_ABOVE4G
#define MAP_ABOVE4G 0
#endif
#ifndef MAP_HUGE_2MB
#define MAP_HUGE_2MB 0
#endif
#ifndef MAP_HUGE_1GB
#define MAP_HUGE_1GB 0
#endif
#ifndef MAP_UNINITIALIZED
#define MAP_UNINITIALIZED 0
#endif
#ifndef MAP_SYNC
#define MAP_SYNC 0
#endif

/*
 * The historical set of flags that all mmap implementations implicitly
 * support when a ->mmap_validate() op is not provided in file_operations.
 *
 * MAP_EXECUTABLE and MAP_DENYWRITE are completely ignored throughout the
 * kernel.
 */
#define LEGACY_MAP_MASK (MAP_SHARED \
		| MAP_PRIVATE \
		| MAP_FIXED \
		| MAP_ANONYMOUS \
		| MAP_DENYWRITE \
		| MAP_EXECUTABLE \
		| MAP_UNINITIALIZED \
		| MAP_GROWSDOWN \
		| MAP_LOCKED \
		| MAP_NORESERVE \
		| MAP_POPULATE \
		| MAP_NONBLOCK \
		| MAP_STACK \
		| MAP_HUGETLB \
		| MAP_32BIT \
		| MAP_ABOVE4G \
		| MAP_HUGE_2MB \
		| MAP_HUGE_1GB)
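
/*
 * Illustrative sketch (not part of this header's API): mmap handling code
 * can use LEGACY_MAP_MASK to reject flags that a driver lacking a
 * ->mmap_validate() op cannot be assumed to handle. The helper name below
 * is hypothetical.
 *
 *	static bool hypothetical_flags_supported(struct file *file,
 *						 unsigned long flags)
 *	{
 *		if (!file->f_op->mmap_validate)
 *			return (flags & ~LEGACY_MAP_MASK) == 0;
 *		return true;
 *	}
 */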

extern int sysctl_overcommit_memory;
extern struct percpu_counter vm_committed_as;

#ifdef CONFIG_SMP
extern s32 vm_committed_as_batch;
extern void mm_compute_batch(int overcommit_policy);
#else
#define vm_committed_as_batch 0
static inline void mm_compute_batch(int overcommit_policy)
{
}
#endif

unsigned long vm_memory_committed(void);

static inline void vm_acct_memory(long pages)
{
	percpu_counter_add_batch(&vm_committed_as, pages, vm_committed_as_batch);
}

static inline void vm_unacct_memory(long pages)
{
	vm_acct_memory(-pages);
}
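
/*
 * Usage sketch (illustrative only): accounting is symmetric, so a caller
 * that charges pages on the success path must uncharge the same amount on
 * failure or unmap. The surrounding names below are hypothetical.
 *
 *	vm_acct_memory(npages);
 *	if (setup_failed) {
 *		vm_unacct_memory(npages);
 *		return -ENOMEM;
 *	}
 */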

/*
 * Allow architectures to handle additional protection and flag bits. The
 * overriding macros must be defined in the arch-specific asm/mman.h file.
 */

#ifndef arch_calc_vm_prot_bits
#define arch_calc_vm_prot_bits(prot, pkey) 0
#endif

#ifndef arch_calc_vm_flag_bits
#define arch_calc_vm_flag_bits(file, flags) 0
#endif

#ifndef arch_validate_prot
/*
 * This is called from mprotect().  PROT_GROWSDOWN and PROT_GROWSUP have
 * already been masked out.
 *
 * Returns true if the prot flags are valid.
 */
static inline bool arch_validate_prot(unsigned long prot, unsigned long addr)
{
	return (prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC | PROT_SEM)) == 0;
}
#define arch_validate_prot arch_validate_prot
#endif
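
/*
 * Illustrative sketch of an architecture override (PROT_FOO is a
 * hypothetical extra protection bit; real overrides live in the arch's
 * asm/mman.h):
 *
 *	static inline bool arch_validate_prot(unsigned long prot,
 *					      unsigned long addr)
 *	{
 *		return (prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC |
 *				 PROT_SEM | PROT_FOO)) == 0;
 *	}
 *	#define arch_validate_prot arch_validate_prot
 */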

#ifndef arch_validate_flags
/*
 * This is called from mmap() and mprotect() with the updated vma->vm_flags.
 *
 * Returns true if the VM_* flags are valid.
 */
static inline bool arch_validate_flags(unsigned long flags)
{
	return true;
}
#define arch_validate_flags arch_validate_flags
#endif

/*
 * Optimisation macro.  It is equivalent to:
 *	(x & bit1) ? bit2 : 0
 * but this version is faster.
 * ("bit1" and "bit2" must be single bits)
 */
#define _calc_vm_trans(x, bit1, bit2) \
	((!(bit1) || !(bit2)) ? 0 : \
	((bit1) <= (bit2) ? ((x) & (bit1)) * ((bit2) / (bit1)) \
	: ((x) & (bit1)) / ((bit1) / (bit2))))
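
/*
 * Worked example (values hypothetical): with bit1 == 0x1 and bit2 == 0x8,
 * bit1 <= bit2 and the macro expands to ((x) & 0x1) * (0x8 / 0x1), so a set
 * bit1 in x yields bit2 and a clear bit1 yields 0, matching
 * (x & bit1) ? bit2 : 0 without a branch. With bit1 == 0x8 and bit2 == 0x1
 * it instead divides: ((x) & 0x8) / (0x8 / 0x1). If either bit is 0 (a flag
 * the architecture does not define), the whole expression folds to 0.
 */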

/*
 * Combine the mmap "prot" argument into "vm_flags" used internally.
 */
static inline vm_flags_t
calc_vm_prot_bits(unsigned long prot, unsigned long pkey)
{
	return _calc_vm_trans(prot, PROT_READ,  VM_READ ) |
	       _calc_vm_trans(prot, PROT_WRITE, VM_WRITE) |
	       _calc_vm_trans(prot, PROT_EXEC,  VM_EXEC) |
	       arch_calc_vm_prot_bits(prot, pkey);
}

/*
 * Combine the mmap "flags" argument into "vm_flags" used internally.
 */
static inline vm_flags_t
calc_vm_flag_bits(struct file *file, unsigned long flags)
{
	return _calc_vm_trans(flags, MAP_GROWSDOWN,  VM_GROWSDOWN ) |
	       _calc_vm_trans(flags, MAP_LOCKED,     VM_LOCKED    ) |
	       _calc_vm_trans(flags, MAP_SYNC,       VM_SYNC      ) |
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	       _calc_vm_trans(flags, MAP_STACK,      VM_NOHUGEPAGE) |
#endif
	       arch_calc_vm_flag_bits(file, flags);
}
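
/*
 * Usage sketch (illustrative; the real call site is in the mmap path): the
 * two translations are combined into the vm_flags_t for a new VMA, roughly:
 *
 *	vm_flags_t vm_flags = calc_vm_prot_bits(prot, pkey) |
 *			      calc_vm_flag_bits(file, flags) |
 *			      mm->def_flags |
 *			      VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
 */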

unsigned long vm_commit_limit(void);

#ifndef arch_memory_deny_write_exec_supported
static inline bool arch_memory_deny_write_exec_supported(void)
{
	return true;
}
#define arch_memory_deny_write_exec_supported arch_memory_deny_write_exec_supported
#endif

/*
 * Denies creating a writable executable mapping or gaining executable
 * permissions.
 *
 * This denies the following:
 *
 *	a)	mmap(PROT_WRITE | PROT_EXEC)
 *
 *	b)	mmap(PROT_WRITE)
 *		mprotect(PROT_EXEC)
 *
 *	c)	mmap(PROT_WRITE)
 *		mprotect(PROT_READ)
 *		mprotect(PROT_EXEC)
 *
 * But allows the following:
 *
 *	d)	mmap(PROT_READ | PROT_EXEC)
 *		mmap(PROT_READ | PROT_EXEC | PROT_BTI)
 *
 * This is only applicable if the user has set the Memory-Deny-Write-Execute
 * (MDWE) protection mask for the current process.
 *
 * @old specifies the VMA flags the VMA originally possessed, and @new the
 * ones we propose to set.
 *
 * Return: false if the proposed change is permitted, true if it should be
 * denied.
 */
static inline bool map_deny_write_exec(unsigned long old, unsigned long new)
{
	/* If MDWE is disabled, we have nothing to deny. */
	if (!mm_flags_test(MMF_HAS_MDWE, current->mm))
		return false;

	/* If the new VMA is not executable, we have nothing to deny. */
	if (!(new & VM_EXEC))
		return false;

	/* Under MDWE we do not accept newly writably executable VMAs... */
	if (new & VM_WRITE)
		return true;

	/* ...nor previously non-executable VMAs becoming executable. */
	if (!(old & VM_EXEC))
		return true;

	return false;
}
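
/*
 * Caller sketch (illustrative; mirrors how the mmap()/mprotect() paths can
 * use this check — the surrounding variable names are hypothetical):
 *
 *	if (map_deny_write_exec(old_vm_flags, new_vm_flags))
 *		return -EACCES;
 */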

#endif /* _LINUX_MMAN_H */