#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/kernel.h>
#include <linux/mmdebug.h>
#include <linux/mm_types.h>
#include <linux/mm_inline.h>
#include <linux/pagemap.h>
#include <linux/rcupdate.h>
#include <linux/smp.h>
#include <linux/swap.h>
#include <linux/rmap.h>

#include <asm/pgalloc.h>
#include <asm/tlb.h>

#ifndef CONFIG_MMU_GATHER_NO_GATHER

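/*
 * Advance to the next gather batch, allocating a fresh page for it with
 * GFP_NOWAIT when the chain is exhausted. Returns false if no further
 * batch can be used, in which case the caller must flush first.
 */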
static bool tlb_next_batch(struct mmu_gather *tlb)
{
	struct mmu_gather_batch *batch;

	/* Limit batching if we have delayed rmaps pending */
	if (tlb->delayed_rmap && tlb->active != &tlb->local)
		return false;

	batch = tlb->active;
	if (batch->next) {
		tlb->active = batch->next;
		return true;
	}

	if (tlb->batch_count == MAX_GATHER_BATCH_COUNT)
		return false;

	batch = (void *)__get_free_page(GFP_NOWAIT);
	if (!batch)
		return false;

	tlb->batch_count++;
	batch->next = NULL;
	batch->nr = 0;
	batch->max = MAX_GATHER_BATCH;

	tlb->active->next = batch;
	tlb->active = batch;

	return true;
}

#ifdef CONFIG_SMP
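/*
 * Do the delayed rmap removals for one batch: every entry flagged with
 * ENCODED_PAGE_BIT_DELAY_RMAP gets its rmap removed here; an entry
 * flagged with ENCODED_PAGE_BIT_NR_PAGES_NEXT is followed by the page
 * count in the next slot.
 */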
static void tlb_flush_rmap_batch(struct mmu_gather_batch *batch, struct vm_area_struct *vma)
{
	struct encoded_page **pages = batch->encoded_pages;

	for (int i = 0; i < batch->nr; i++) {
		struct encoded_page *enc = pages[i];

		if (encoded_page_flags(enc) & ENCODED_PAGE_BIT_DELAY_RMAP) {
			struct page *page = encoded_page_ptr(enc);
			unsigned int nr_pages = 1;

			if (unlikely(encoded_page_flags(enc) &
				     ENCODED_PAGE_BIT_NR_PAGES_NEXT))
				nr_pages = encoded_nr_pages(pages[++i]);

			folio_remove_rmap_ptes(page_folio(page), page, nr_pages,
					       vma);
		}
	}
}

/**
 * tlb_flush_rmaps - do pending rmap removals after we have flushed the TLB
 * @tlb: the current mmu_gather
 * @vma: The memory area from which the pages are being removed.
 *
 * Note that because of how tlb_next_batch() above works, we will
 * never start multiple new batches with pending delayed rmaps, so
 * we only need to walk through the current active batch and the
 * original local one.
 */
void tlb_flush_rmaps(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
	if (!tlb->delayed_rmap)
		return;

	tlb_flush_rmap_batch(&tlb->local, vma);
	if (tlb->active != &tlb->local)
		tlb_flush_rmap_batch(tlb->active, vma);
	tlb->delayed_rmap = 0;
}
#endif

/*
 * We might end up freeing a lot of pages. Reschedule on a regular
 * basis to avoid soft lockups in configurations without full
 * preemption enabled. The magic number of 512 folios seems to work.
 */
#define MAX_NR_FOLIOS_PER_FREE 512

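/*
 * Free the pages gathered in one batch, in chunks, calling cond_resched()
 * in between. Chunks are capped by folio count, or by page count when page
 * poisoning or init_on_free makes the freeing cost scale with memory size.
 */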
static void __tlb_batch_free_encoded_pages(struct mmu_gather_batch *batch)
{
	struct encoded_page **pages = batch->encoded_pages;
	unsigned int nr, nr_pages;

	while (batch->nr) {
		if (!page_poisoning_enabled_static() && !want_init_on_free()) {
			nr = min(MAX_NR_FOLIOS_PER_FREE, batch->nr);

			/*
			 * Make sure we cover page + nr_pages, and don't leave
			 * nr_pages behind when capping the number of entries.
			 */
			if (unlikely(encoded_page_flags(pages[nr - 1]) &
				     ENCODED_PAGE_BIT_NR_PAGES_NEXT))
				nr++;
		} else {
			/*
			 * With page poisoning and init_on_free, the time it
			 * takes to free memory grows proportionally with the
			 * actual memory size. Therefore, limit based on the
			 * actual memory size and not the number of involved
			 * folios.
			 */
			for (nr = 0, nr_pages = 0;
			     nr < batch->nr && nr_pages < MAX_NR_FOLIOS_PER_FREE;
			     nr++) {
				if (unlikely(encoded_page_flags(pages[nr]) &
					     ENCODED_PAGE_BIT_NR_PAGES_NEXT))
					nr_pages += encoded_nr_pages(pages[++nr]);
				else
					nr_pages++;
			}
		}

		free_pages_and_swap_cache(pages, nr);
		pages += nr;
		batch->nr -= nr;

		cond_resched();
	}
}

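/* Free the gathered pages from all batches and rewind to the local one. */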
static void tlb_batch_pages_flush(struct mmu_gather *tlb)
{
	struct mmu_gather_batch *batch;

	for (batch = &tlb->local; batch && batch->nr; batch = batch->next)
		__tlb_batch_free_encoded_pages(batch);
	tlb->active = &tlb->local;
}

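/* Free the batch pages themselves; only the on-stack local batch remains. */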
static void tlb_batch_list_free(struct mmu_gather *tlb)
{
	struct mmu_gather_batch *batch, *next;

	for (batch = tlb->local.next; batch; batch = next) {
		next = batch->next;
		free_pages((unsigned long)batch, 0);
	}
	tlb->local.next = NULL;
}

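/*
 * Queue @nr_pages pages starting at @page for deferred freeing. A
 * multi-page run occupies two slots: the encoded page followed by the
 * page count. Returns true when the batches are full and the caller
 * needs to flush before queueing more pages.
 */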
static bool __tlb_remove_folio_pages_size(struct mmu_gather *tlb,
		struct page *page, unsigned int nr_pages, bool delay_rmap,
		int page_size)
{
	int flags = delay_rmap ? ENCODED_PAGE_BIT_DELAY_RMAP : 0;
	struct mmu_gather_batch *batch;

	VM_BUG_ON(!tlb->end);

#ifdef CONFIG_MMU_GATHER_PAGE_SIZE
	VM_WARN_ON(tlb->page_size != page_size);
	VM_WARN_ON_ONCE(nr_pages != 1 && page_size != PAGE_SIZE);
	VM_WARN_ON_ONCE(page_folio(page) != page_folio(page + nr_pages - 1));
#endif

	batch = tlb->active;
	/*
	 * Add the page and check if we are full. If so
	 * force a flush.
	 */
	if (likely(nr_pages == 1)) {
		batch->encoded_pages[batch->nr++] = encode_page(page, flags);
	} else {
		flags |= ENCODED_PAGE_BIT_NR_PAGES_NEXT;
		batch->encoded_pages[batch->nr++] = encode_page(page, flags);
		batch->encoded_pages[batch->nr++] = encode_nr_pages(nr_pages);
	}
	/*
	 * Make sure that we can always add another "page" + "nr_pages",
	 * requiring two entries instead of only a single one.
	 */
	if (batch->nr >= batch->max - 1) {
		if (!tlb_next_batch(tlb))
			return true;
		batch = tlb->active;
	}
	VM_BUG_ON_PAGE(batch->nr > batch->max - 1, page);

	return false;
}

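/* Thin wrappers around __tlb_remove_folio_pages_size(). */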
bool __tlb_remove_folio_pages(struct mmu_gather *tlb, struct page *page,
		unsigned int nr_pages, bool delay_rmap)
{
	return __tlb_remove_folio_pages_size(tlb, page, nr_pages, delay_rmap,
					     PAGE_SIZE);
}

bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page,
		bool delay_rmap, int page_size)
{
	return __tlb_remove_folio_pages_size(tlb, page, 1, delay_rmap, page_size);
}

#endif /* CONFIG_MMU_GATHER_NO_GATHER */

#ifdef CONFIG_MMU_GATHER_TABLE_FREE

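/* Free each queued page table in the batch, then the batch page itself. */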
static void __tlb_remove_table_free(struct mmu_table_batch *batch)
{
	int i;

	for (i = 0; i < batch->nr; i++)
		__tlb_remove_table(batch->tables[i]);

	free_page((unsigned long)batch);
}

#ifdef CONFIG_MMU_GATHER_RCU_TABLE_FREE

/*
 * Semi RCU freeing of the page directories.
 *
 * This is needed by some architectures to implement software pagetable walkers.
 *
 * gup_fast() and other software pagetable walkers do a lockless page-table
 * walk and therefore need some synchronization with the freeing of the page
 * directories. The chosen means to accomplish that is by disabling IRQs over
 * the walk.
 *
 * Architectures that use IPIs to flush TLBs will then automagically DTRT,
 * since we unlink the page, flush TLBs, free the page. Since the disabling of
 * IRQs delays the completion of the TLB flush we can never observe an already
 * freed page.
 *
 * Not all systems IPI every CPU for this purpose:
 *
 * - Some architectures have HW support for cross-CPU synchronisation of TLB
 *   flushes, so there's no IPI at all.
 *
 * - Paravirt guests can do this TLB flushing in the hypervisor, or coordinate
 *   with the hypervisor to defer flushing on preempted vCPUs.
 *
 * Such systems need to delay the freeing by some other means; this is that
 * means.
 *
 * What we do is batch the freed directory pages (tables) and RCU free them.
 * We use the sched RCU variant, as that guarantees that IRQ/preempt disabling
 * holds off grace periods.
 *
 * However, in order to batch these pages we need to allocate storage, and this
 * allocation is deep inside the MM code and can thus easily fail on memory
 * pressure. To guarantee progress we fall back to single table freeing; see
 * the implementation of tlb_remove_table_one().
 */

static void tlb_remove_table_smp_sync(void *arg)
{
	/* Simply deliver the interrupt */
}

void tlb_remove_table_sync_one(void)
{
	/*
	 * This isn't an RCU grace period and hence the page-tables cannot be
	 * assumed to be actually RCU-freed.
	 *
	 * It is however sufficient for software page-table walkers that rely on
	 * IRQ disabling.
	 */
	smp_call_function(tlb_remove_table_smp_sync, NULL, 1);
}

static void tlb_remove_table_rcu(struct rcu_head *head)
{
	__tlb_remove_table_free(container_of(head, struct mmu_table_batch, rcu));
}

static void tlb_remove_table_free(struct mmu_table_batch *batch)
{
	call_rcu(&batch->rcu, tlb_remove_table_rcu);
}

#else /* !CONFIG_MMU_GATHER_RCU_TABLE_FREE */

static void tlb_remove_table_free(struct mmu_table_batch *batch)
{
	__tlb_remove_table_free(batch);
}

#endif /* CONFIG_MMU_GATHER_RCU_TABLE_FREE */

/*
 * If we want tlb_remove_table() to imply TLB invalidates.
 */
static inline void tlb_table_invalidate(struct mmu_gather *tlb)
{
	if (tlb_needs_table_invalidate()) {
		/*
		 * Invalidate page-table caches used by hardware walkers. Then
		 * we still need to RCU-sched wait while freeing the pages
		 * because software walkers can still be in-flight.
		 */
		tlb_flush_mmu_tlbonly(tlb);
	}
}

#ifdef CONFIG_PT_RECLAIM
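/*
 * With CONFIG_PT_RECLAIM, the single-table fallback can use the ptdesc's
 * own RCU head instead of the synchronous IPI broadcast used below.
 */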
static inline void __tlb_remove_table_one_rcu(struct rcu_head *head)
{
	struct ptdesc *ptdesc;

	ptdesc = container_of(head, struct ptdesc, pt_rcu_head);
	__tlb_remove_table(ptdesc);
}

static inline void __tlb_remove_table_one(void *table)
{
	struct ptdesc *ptdesc;

	ptdesc = table;
	call_rcu(&ptdesc->pt_rcu_head, __tlb_remove_table_one_rcu);
}
#else
static inline void __tlb_remove_table_one(void *table)
{
	tlb_remove_table_sync_one();
	__tlb_remove_table(table);
}
#endif /* CONFIG_PT_RECLAIM */

static void tlb_remove_table_one(void *table)
{
	__tlb_remove_table_one(table);
}

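/* Invalidate page-table caches and hand the batch over for (RCU) freeing. */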
static void tlb_table_flush(struct mmu_gather *tlb)
{
	struct mmu_table_batch **batch = &tlb->batch;

	if (*batch) {
		tlb_table_invalidate(tlb);
		tlb_remove_table_free(*batch);
		*batch = NULL;
	}
}

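/**
 * tlb_remove_table - remember a page table for deferred freeing
 * @tlb: the current mmu_gather
 * @table: the page table to free once no walker can observe it
 *
 * Batch the table so it is freed only after the TLB has been invalidated
 * (and, where configured, an RCU grace period has elapsed). If no batch
 * storage can be allocated, fall back to single-table freeing via
 * tlb_remove_table_one().
 */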
void tlb_remove_table(struct mmu_gather *tlb, void *table)
{
	struct mmu_table_batch **batch = &tlb->batch;

	if (*batch == NULL) {
		*batch = (struct mmu_table_batch *)__get_free_page(GFP_NOWAIT);
		if (*batch == NULL) {
			tlb_table_invalidate(tlb);
			tlb_remove_table_one(table);
			return;
		}
		(*batch)->nr = 0;
	}

	(*batch)->tables[(*batch)->nr++] = table;
	if ((*batch)->nr == MAX_TABLE_BATCH)
		tlb_table_flush(tlb);
}

static inline void tlb_table_init(struct mmu_gather *tlb)
{
	tlb->batch = NULL;
}

#else /* !CONFIG_MMU_GATHER_TABLE_FREE */

static inline void tlb_table_flush(struct mmu_gather *tlb) { }
static inline void tlb_table_init(struct mmu_gather *tlb) { }

#endif /* CONFIG_MMU_GATHER_TABLE_FREE */

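/* Free everything queued on the mmu_gather: page tables first, then pages. */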
static void tlb_flush_mmu_free(struct mmu_gather *tlb)
{
	tlb_table_flush(tlb);
#ifndef CONFIG_MMU_GATHER_NO_GATHER
	tlb_batch_pages_flush(tlb);
#endif
}

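/* Flush the TLB for the gathered range, then free all queued memory. */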
void tlb_flush_mmu(struct mmu_gather *tlb)
{
	tlb_flush_mmu_tlbonly(tlb);
	tlb_flush_mmu_free(tlb);
}

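/*
 * Common initialization for tlb_gather_mmu() and tlb_gather_mmu_fullmm().
 *
 * Typical caller pattern (a sketch only; actual call sites vary):
 *
 *	struct mmu_gather tlb;
 *
 *	tlb_gather_mmu(&tlb, mm);
 *	... unmap PTEs, tlb_remove_page()/tlb_remove_table() ...
 *	tlb_finish_mmu(&tlb);
 */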
static void __tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
			     bool fullmm)
{
	tlb->mm = mm;
	tlb->fullmm = fullmm;

#ifndef CONFIG_MMU_GATHER_NO_GATHER
	tlb->need_flush_all = 0;
	tlb->local.next = NULL;
	tlb->local.nr = 0;
	tlb->local.max = ARRAY_SIZE(tlb->__pages);
	tlb->active = &tlb->local;
	tlb->batch_count = 0;
#endif
	tlb->delayed_rmap = 0;

	tlb_table_init(tlb);
#ifdef CONFIG_MMU_GATHER_PAGE_SIZE
	tlb->page_size = 0;
#endif
	tlb->vma_pfn = 0;

	__tlb_reset_range(tlb);
	inc_tlb_flush_pending(tlb->mm);
}

/**
 * tlb_gather_mmu - initialize an mmu_gather structure for page-table tear-down
 * @tlb: the mmu_gather structure to initialize
 * @mm: the mm_struct of the target address space
 *
 * Called to initialize an (on-stack) mmu_gather structure for page-table
 * tear-down from @mm.
 */
void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm)
{
	__tlb_gather_mmu(tlb, mm, false);
}

/**
 * tlb_gather_mmu_fullmm - initialize an mmu_gather structure for page-table tear-down
 * @tlb: the mmu_gather structure to initialize
 * @mm: the mm_struct of the target address space
 *
 * In this case, @mm is without users and we're going to destroy the
 * full address space (exit/execve).
 *
 * Called to initialize an (on-stack) mmu_gather structure for page-table
 * tear-down from @mm.
 */
void tlb_gather_mmu_fullmm(struct mmu_gather *tlb, struct mm_struct *mm)
{
	__tlb_gather_mmu(tlb, mm, true);
}


/**
 * tlb_finish_mmu - finish an mmu_gather structure
 * @tlb: the mmu_gather structure to finish
 *
 * Called at the end of the shootdown operation to free up any resources that
 * were required.
 */
void tlb_finish_mmu(struct mmu_gather *tlb)
{
	/*
	 * If parallel threads are doing PTE changes on the same range under a
	 * non-exclusive lock (e.g., mmap_lock read-side) but defer the TLB
	 * flush by batching, one thread may end up seeing inconsistent PTEs
	 * and stale TLB entries. So flush the TLB forcefully if we detect
	 * parallel PTE batching threads.
	 *
	 * However, some syscalls, e.g. munmap(), may free page tables; this
	 * needs to force-flush everything in the given range. Otherwise we
	 * may end up with stale TLB entries on architectures, e.g. aarch64,
	 * whose TLB invalidation can target a specific page-table level.
	 */
	if (mm_tlb_flush_nested(tlb->mm)) {
		/*
		 * aarch64 yields better performance with fullmm by avoiding
		 * multiple CPUs spamming TLBI messages at the same time.
		 *
		 * On x86, non-fullmm doesn't yield a significant difference
		 * against fullmm.
		 */
		tlb->fullmm = 1;
		__tlb_reset_range(tlb);
		tlb->freed_tables = 1;
	}

	tlb_flush_mmu(tlb);

#ifndef CONFIG_MMU_GATHER_NO_GATHER
	tlb_batch_list_free(tlb);
#endif
	dec_tlb_flush_pending(tlb->mm);
}