// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
 * Copyright 2020 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christian König
 */

/* Pooling of allocated pages is necessary because changing the caching
 * attributes of the linear mapping on x86 requires a costly cross-CPU TLB
 * invalidate for those addresses.
 *
 * In addition to that, allocations from the DMA coherent API are pooled as
 * well because they are rather slow compared to alloc_pages+map.
 */
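
/*
 * Usage sketch (illustration only, not part of the original file; error
 * handling elided, assuming a device-private, coherent-DMA pool):
 *
 *	struct ttm_operation_ctx ctx = { .interruptible = true };
 *	struct ttm_pool pool;
 *
 *	ttm_pool_init(&pool, dev, NUMA_NO_NODE, true, false);
 *	err = ttm_pool_alloc(&pool, tt, &ctx);
 *	...
 *	ttm_pool_free(&pool, tt);
 *	ttm_pool_fini(&pool);
 */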

#include <linux/export.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/debugfs.h>
#include <linux/highmem.h>
#include <linux/sched/mm.h>

#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif

#include <drm/ttm/ttm_backup.h>
#include <drm/ttm/ttm_pool.h>
#include <drm/ttm/ttm_tt.h>
#include <drm/ttm/ttm_bo.h>

#include "ttm_module.h"

#ifdef CONFIG_FAULT_INJECTION
#include <linux/fault-inject.h>
static DECLARE_FAULT_ATTR(backup_fault_inject);
#else
#define should_fail(...) false
#endif

/**
 * struct ttm_pool_dma - Helper object for coherent DMA mappings
 *
 * @addr: original DMA address returned for the mapping
 * @vaddr: original vaddr returned for the mapping, with the allocation
 * order stored in the lower bits
 */
struct ttm_pool_dma {
	dma_addr_t addr;
	unsigned long vaddr;
};
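
/*
 * Note (added for clarity): coherent allocations are at least PAGE_SIZE
 * aligned, so the low PAGE_SHIFT bits of @vaddr are free to carry the
 * allocation order, as the code below does:
 *
 *	dma->vaddr = (unsigned long)vaddr | order;
 *	order = dma->vaddr & ~PAGE_MASK;
 *	vaddr = (void *)(dma->vaddr & PAGE_MASK);
 */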

/**
 * struct ttm_pool_alloc_state - Current state of the tt page allocation process
 * @pages: Pointer to the next tt page pointer to populate.
 * @caching_divide: Pointer to the first page pointer whose page has a staged but
 * not committed caching transition from write-back to @tt_caching.
 * @dma_addr: Pointer to the next tt dma_address entry to populate if any.
 * @remaining_pages: Remaining pages to populate.
 * @tt_caching: The requested cpu-caching for the pages allocated.
 */
struct ttm_pool_alloc_state {
	struct page **pages;
	struct page **caching_divide;
	dma_addr_t *dma_addr;
	pgoff_t remaining_pages;
	enum ttm_caching tt_caching;
};
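
/*
 * Illustration (added for clarity): during an allocation the tt page
 * vector is partitioned into three ranges:
 *
 *	[tt->pages, caching_divide)	caching committed to @tt_caching
 *	[caching_divide, pages)		populated, still write-back cached
 *	[pages, ...)			not yet populated
 */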

/**
 * struct ttm_pool_tt_restore - State representing restore from backup
 * @pool: The pool used for page allocation while restoring.
 * @snapshot_alloc: A snapshot of the most recent struct ttm_pool_alloc_state.
 * @alloced_page: Pointer to the page most recently allocated from a pool or system.
 * @first_dma: The dma address corresponding to @alloced_page if dma_mapping
 * is requested.
 * @alloced_pages: The number of allocated pages present in the struct ttm_tt
 * page vector from this restore session.
 * @restored_pages: The number of 4K pages restored for @alloced_page (which
 * is typically a multi-order page).
 * @page_caching: The caching requested for the struct ttm_tt.
 * @order: The order of @alloced_page.
 *
 * Recovery from backup might fail when we've recovered less than the
 * full ttm_tt. In order not to lose any data (yet), keep information
 * around that allows us to restart a failed ttm backup recovery.
 */
struct ttm_pool_tt_restore {
	struct ttm_pool *pool;
	struct ttm_pool_alloc_state snapshot_alloc;
	struct page *alloced_page;
	dma_addr_t first_dma;
	pgoff_t alloced_pages;
	pgoff_t restored_pages;
	enum ttm_caching page_caching;
	unsigned int order;
};

static unsigned long page_pool_size;

MODULE_PARM_DESC(page_pool_size, "Number of pages in the WC/UC/DMA pool");
module_param(page_pool_size, ulong, 0644);
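
/*
 * Illustration (added for clarity): the pool limit can be tuned at module
 * load time, e.g. "modprobe ttm page_pool_size=65536", or at runtime via
 * /sys/module/ttm/parameters/page_pool_size.
 */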

static atomic_long_t allocated_pages;

static struct ttm_pool_type global_write_combined[NR_PAGE_ORDERS];
static struct ttm_pool_type global_uncached[NR_PAGE_ORDERS];

static struct ttm_pool_type global_dma32_write_combined[NR_PAGE_ORDERS];
static struct ttm_pool_type global_dma32_uncached[NR_PAGE_ORDERS];

static spinlock_t shrinker_lock;
static struct list_head shrinker_list;
static struct shrinker *mm_shrinker;
static DECLARE_RWSEM(pool_shrink_rwsem);

/* Allocate pages of size 1 << order with the given gfp_flags */
static struct page *ttm_pool_alloc_page(struct ttm_pool *pool, gfp_t gfp_flags,
					unsigned int order)
{
	unsigned long attr = DMA_ATTR_FORCE_CONTIGUOUS;
	struct ttm_pool_dma *dma;
	struct page *p;
	void *vaddr;

	/* Don't set the __GFP_COMP flag for higher order allocations.
	 * Mapping pages directly into a userspace process and calling
	 * put_page() on a TTM allocated page is illegal.
	 */
	if (order)
		gfp_flags |= __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN |
			__GFP_THISNODE;

	if (!pool->use_dma_alloc) {
		p = alloc_pages_node(pool->nid, gfp_flags, order);
		if (p)
			p->private = order;
		return p;
	}

	dma = kmalloc(sizeof(*dma), GFP_KERNEL);
	if (!dma)
		return NULL;

	if (order)
		attr |= DMA_ATTR_NO_WARN;

	vaddr = dma_alloc_attrs(pool->dev, (1ULL << order) * PAGE_SIZE,
				&dma->addr, gfp_flags, attr);
	if (!vaddr)
		goto error_free;

	/* TODO: This is an illegal abuse of the DMA API, but we need to rework
	 * TTM page fault handling and extend the DMA API to clean this up.
	 */
	if (is_vmalloc_addr(vaddr))
		p = vmalloc_to_page(vaddr);
	else
		p = virt_to_page(vaddr);

	dma->vaddr = (unsigned long)vaddr | order;
	p->private = (unsigned long)dma;
	return p;

error_free:
	kfree(dma);
	return NULL;
}

/* Reset the caching and pages of size 1 << order */
static void ttm_pool_free_page(struct ttm_pool *pool, enum ttm_caching caching,
			       unsigned int order, struct page *p)
{
	unsigned long attr = DMA_ATTR_FORCE_CONTIGUOUS;
	struct ttm_pool_dma *dma;
	void *vaddr;

#ifdef CONFIG_X86
	/* We don't care that set_pages_wb is inefficient here. This is only
	 * used when we have to shrink and CPU overhead is irrelevant then.
	 */
	if (caching != ttm_cached && !PageHighMem(p))
		set_pages_wb(p, 1 << order);
#endif

	if (!pool || !pool->use_dma_alloc) {
		__free_pages(p, order);
		return;
	}

	if (order)
		attr |= DMA_ATTR_NO_WARN;

	dma = (void *)p->private;
	vaddr = (void *)(dma->vaddr & PAGE_MASK);
	dma_free_attrs(pool->dev, (1UL << order) * PAGE_SIZE, vaddr, dma->addr,
		       attr);
	kfree(dma);
}

/* Apply any cpu-caching deferred during page allocation */
static int ttm_pool_apply_caching(struct ttm_pool_alloc_state *alloc)
{
#ifdef CONFIG_X86
	unsigned int num_pages = alloc->pages - alloc->caching_divide;

	if (!num_pages)
		return 0;

	switch (alloc->tt_caching) {
	case ttm_cached:
		break;
	case ttm_write_combined:
		return set_pages_array_wc(alloc->caching_divide, num_pages);
	case ttm_uncached:
		return set_pages_array_uc(alloc->caching_divide, num_pages);
	}
#endif
	alloc->caching_divide = alloc->pages;
	return 0;
}
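
/*
 * Note (added for clarity): the array variants set_pages_array_wc() and
 * set_pages_array_uc() change the attributes of the whole range with a
 * single cross-CPU TLB flush instead of one flush per page, which is why
 * the caching transition is staged during allocation and applied above in
 * one go.
 */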

/* DMA map pages of 1 << order size and return the resulting dma_address. */
static int ttm_pool_map(struct ttm_pool *pool, unsigned int order,
			struct page *p, dma_addr_t *dma_addr)
{
	dma_addr_t addr;

	if (pool->use_dma_alloc) {
		struct ttm_pool_dma *dma = (void *)p->private;

		addr = dma->addr;
	} else {
		size_t size = (1ULL << order) * PAGE_SIZE;

		addr = dma_map_page(pool->dev, p, 0, size, DMA_BIDIRECTIONAL);
		if (dma_mapping_error(pool->dev, addr))
			return -EFAULT;
	}

	*dma_addr = addr;

	return 0;
}

/* Unmap pages of 1 << order size */
static void ttm_pool_unmap(struct ttm_pool *pool, dma_addr_t dma_addr,
			   unsigned int num_pages)
{
	/* Unmapped while freeing the page */
	if (pool->use_dma_alloc)
		return;

	dma_unmap_page(pool->dev, dma_addr, (long)num_pages << PAGE_SHIFT,
		       DMA_BIDIRECTIONAL);
}

/* Give pages into a specific pool_type */
static void ttm_pool_type_give(struct ttm_pool_type *pt, struct page *p)
{
	unsigned int i, num_pages = 1 << pt->order;

	for (i = 0; i < num_pages; ++i) {
		if (PageHighMem(p))
			clear_highpage(p + i);
		else
			clear_page(page_address(p + i));
	}

	spin_lock(&pt->lock);
	list_add(&p->lru, &pt->pages);
	spin_unlock(&pt->lock);
	atomic_long_add(1 << pt->order, &allocated_pages);
}

/* Take pages from a specific pool_type, return NULL when nothing available */
static struct page *ttm_pool_type_take(struct ttm_pool_type *pt)
{
	struct page *p;

	spin_lock(&pt->lock);
	p = list_first_entry_or_null(&pt->pages, typeof(*p), lru);
	if (p) {
		atomic_long_sub(1 << pt->order, &allocated_pages);
		list_del(&p->lru);
	}
	spin_unlock(&pt->lock);

	return p;
}

/* Initialize and add a pool type to the global shrinker list */
static void ttm_pool_type_init(struct ttm_pool_type *pt, struct ttm_pool *pool,
			       enum ttm_caching caching, unsigned int order)
{
	pt->pool = pool;
	pt->caching = caching;
	pt->order = order;
	spin_lock_init(&pt->lock);
	INIT_LIST_HEAD(&pt->pages);

	spin_lock(&shrinker_lock);
	list_add_tail(&pt->shrinker_list, &shrinker_list);
	spin_unlock(&shrinker_lock);
}

/* Remove a pool_type from the global shrinker list and free all pages */
static void ttm_pool_type_fini(struct ttm_pool_type *pt)
{
	struct page *p;

	spin_lock(&shrinker_lock);
	list_del(&pt->shrinker_list);
	spin_unlock(&shrinker_lock);

	while ((p = ttm_pool_type_take(pt)))
		ttm_pool_free_page(pt->pool, pt->caching, pt->order, p);
}

/* Return the pool_type to use for the given caching and order */
static struct ttm_pool_type *ttm_pool_select_type(struct ttm_pool *pool,
						  enum ttm_caching caching,
						  unsigned int order)
{
	if (pool->use_dma_alloc)
		return &pool->caching[caching].orders[order];

#ifdef CONFIG_X86
	switch (caching) {
	case ttm_write_combined:
		if (pool->nid != NUMA_NO_NODE)
			return &pool->caching[caching].orders[order];

		if (pool->use_dma32)
			return &global_dma32_write_combined[order];

		return &global_write_combined[order];
	case ttm_uncached:
		if (pool->nid != NUMA_NO_NODE)
			return &pool->caching[caching].orders[order];

		if (pool->use_dma32)
			return &global_dma32_uncached[order];

		return &global_uncached[order];
	default:
		break;
	}
#endif

	return NULL;
}

/* Free pages using the global shrinker list */
static unsigned int ttm_pool_shrink(void)
{
	struct ttm_pool_type *pt;
	unsigned int num_pages;
	struct page *p;

	down_read(&pool_shrink_rwsem);
	spin_lock(&shrinker_lock);
	pt = list_first_entry(&shrinker_list, typeof(*pt), shrinker_list);
	list_move_tail(&pt->shrinker_list, &shrinker_list);
	spin_unlock(&shrinker_lock);

	p = ttm_pool_type_take(pt);
	if (p) {
		ttm_pool_free_page(pt->pool, pt->caching, pt->order, p);
		num_pages = 1 << pt->order;
	} else {
		num_pages = 0;
	}
	up_read(&pool_shrink_rwsem);

	return num_pages;
}

/* Return the allocation order for a page */
static unsigned int ttm_pool_page_order(struct ttm_pool *pool, struct page *p)
{
	if (pool->use_dma_alloc) {
		struct ttm_pool_dma *dma = (void *)p->private;

		return dma->vaddr & ~PAGE_MASK;
	}

	return p->private;
}

/*
 * Split larger pages so that we can free each PAGE_SIZE page as soon
 * as it has been backed up, in order to avoid memory pressure during
 * reclaim.
 */
static void ttm_pool_split_for_swap(struct ttm_pool *pool, struct page *p)
{
	unsigned int order = ttm_pool_page_order(pool, p);
	pgoff_t nr;

	if (!order)
		return;

	split_page(p, order);
	nr = 1UL << order;
	while (nr--)
		(p++)->private = 0;
}

/**
 * DOC: Partial backup and restoration of a struct ttm_tt.
 *
 * Swapout using ttm_backup_backup_page() and swapin using
 * ttm_backup_copy_page() may fail.
 * The former most likely due to lack of swap-space or memory, the latter due
 * to lack of memory or because of signal interruption during waits.
 *
 * Backup failure is easily handled by using a ttm_tt pages vector that holds
 * both backup handles and page pointers. This has to be taken into account when
 * restoring such a ttm_tt from backup, and when freeing it while backed up.
 * When restoring, for simplicity, new pages are actually allocated from the
 * pool and the contents of any old pages are copied in and then the old pages
 * are released.
 *
 * For restoration failures, the struct ttm_pool_tt_restore holds sufficient state
 * to be able to resume an interrupted restore, and that structure is freed once
 * the restoration is complete. If the struct ttm_tt is destroyed while there
 * is a valid struct ttm_pool_tt_restore attached, that is also properly taken
 * care of.
 */
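
/*
 * Illustration (added for clarity): after a partial backup the tt page
 * vector can mix both encodings, and ttm_backup_page_ptr_is_handle() is
 * used below to tell them apart:
 *
 *	tt->pages[0] = ttm_backup_handle_to_page_ptr(handle);	<- backed up
 *	tt->pages[1] = page;	<- backup failed, plain page retained
 */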

/* Is restore ongoing for the currently allocated page? */
static bool ttm_pool_restore_valid(const struct ttm_pool_tt_restore *restore)
{
	return restore && restore->restored_pages < (1 << restore->order);
}

/* DMA unmap and free a multi-order page, either to the relevant pool or to system. */
static pgoff_t ttm_pool_unmap_and_free(struct ttm_pool *pool, struct page *page,
				       const dma_addr_t *dma_addr, enum ttm_caching caching)
{
	struct ttm_pool_type *pt = NULL;
	unsigned int order;
	pgoff_t nr;

	if (pool) {
		order = ttm_pool_page_order(pool, page);
		nr = (1UL << order);
		if (dma_addr)
			ttm_pool_unmap(pool, *dma_addr, nr);

		pt = ttm_pool_select_type(pool, caching, order);
	} else {
		order = page->private;
		nr = (1UL << order);
	}

	if (pt)
		ttm_pool_type_give(pt, page);
	else
		ttm_pool_free_page(pool, caching, order, page);

	return nr;
}

/* Populate the page-array using the most recently allocated multi-order page. */
static void ttm_pool_allocated_page_commit(struct page *allocated,
					   dma_addr_t first_dma,
					   struct ttm_pool_alloc_state *alloc,
					   pgoff_t nr)
{
	pgoff_t i;

	for (i = 0; i < nr; ++i)
		*alloc->pages++ = allocated++;

	alloc->remaining_pages -= nr;

	if (!alloc->dma_addr)
		return;

	for (i = 0; i < nr; ++i) {
		*alloc->dma_addr++ = first_dma;
		first_dma += PAGE_SIZE;
	}
}

/*
 * When restoring, restore backed-up content to the newly allocated page and
 * if successful, populate the page-table and dma-address arrays.
 */
static int ttm_pool_restore_commit(struct ttm_pool_tt_restore *restore,
				   struct file *backup,
				   const struct ttm_operation_ctx *ctx,
				   struct ttm_pool_alloc_state *alloc)
{
	pgoff_t i, nr = 1UL << restore->order;
	struct page **first_page = alloc->pages;
	struct page *p;
	int ret = 0;

	for (i = restore->restored_pages; i < nr; ++i) {
		p = first_page[i];
		if (ttm_backup_page_ptr_is_handle(p)) {
			unsigned long handle = ttm_backup_page_ptr_to_handle(p);

			if (IS_ENABLED(CONFIG_FAULT_INJECTION) && ctx->interruptible &&
			    should_fail(&backup_fault_inject, 1)) {
				ret = -EINTR;
				break;
			}

			if (handle == 0) {
				restore->restored_pages++;
				continue;
			}

			ret = ttm_backup_copy_page(backup, restore->alloced_page + i,
						   handle, ctx->interruptible);
			if (ret)
				break;

			ttm_backup_drop(backup, handle);
		} else if (p) {
			/*
			 * We could probably avoid splitting the old page
			 * using clever logic, but ATM we don't care, as
			 * we prioritize releasing memory ASAP. Note that
			 * here, the old retained page is always write-back
			 * cached.
			 */
			ttm_pool_split_for_swap(restore->pool, p);
			copy_highpage(restore->alloced_page + i, p);
			__free_pages(p, 0);
		}

		restore->restored_pages++;
		first_page[i] = ttm_backup_handle_to_page_ptr(0);
	}

	if (ret) {
		if (!restore->restored_pages) {
			dma_addr_t *dma_addr = alloc->dma_addr ? &restore->first_dma : NULL;

			ttm_pool_unmap_and_free(restore->pool, restore->alloced_page,
						dma_addr, restore->page_caching);
			restore->restored_pages = nr;
		}
		return ret;
	}

	ttm_pool_allocated_page_commit(restore->alloced_page, restore->first_dma,
				       alloc, nr);
	if (restore->page_caching == alloc->tt_caching || PageHighMem(restore->alloced_page))
		alloc->caching_divide = alloc->pages;
	restore->snapshot_alloc = *alloc;
	restore->alloced_pages += nr;

	return 0;
}

/* If restoring, save information needed for ttm_pool_restore_commit(). */
static void
ttm_pool_page_allocated_restore(struct ttm_pool *pool, unsigned int order,
				struct page *p,
				enum ttm_caching page_caching,
				dma_addr_t first_dma,
				struct ttm_pool_tt_restore *restore,
				const struct ttm_pool_alloc_state *alloc)
{
	restore->pool = pool;
	restore->order = order;
	restore->restored_pages = 0;
	restore->page_caching = page_caching;
	restore->first_dma = first_dma;
	restore->alloced_page = p;
	restore->snapshot_alloc = *alloc;
}

/*
 * Called when we got a page, either from a pool or newly allocated.
 * If needed, dma map the page and populate the dma address array.
 * Populate the page address array.
 * If the caching is consistent, update any deferred caching. Otherwise
 * stage this page for an upcoming deferred caching update.
 */
static int ttm_pool_page_allocated(struct ttm_pool *pool, unsigned int order,
				   struct page *p, enum ttm_caching page_caching,
				   struct ttm_pool_alloc_state *alloc,
				   struct ttm_pool_tt_restore *restore)
{
	bool caching_consistent;
	dma_addr_t first_dma;
	int r = 0;

	caching_consistent = (page_caching == alloc->tt_caching) || PageHighMem(p);

	if (caching_consistent) {
		r = ttm_pool_apply_caching(alloc);
		if (r)
			return r;
	}

	if (alloc->dma_addr) {
		r = ttm_pool_map(pool, order, p, &first_dma);
		if (r)
			return r;
	}

	if (restore) {
		ttm_pool_page_allocated_restore(pool, order, p, page_caching,
						first_dma, restore, alloc);
	} else {
		ttm_pool_allocated_page_commit(p, first_dma, alloc, 1UL << order);

		if (caching_consistent)
			alloc->caching_divide = alloc->pages;
	}

	return 0;
}

/**
 * ttm_pool_free_range() - Free a range of TTM pages
 * @pool: The pool used for allocating.
 * @tt: The struct ttm_tt holding the page pointers.
 * @caching: The page caching mode used by the range.
 * @start_page: index for first page to free.
 * @end_page: index for last page to free + 1.
 *
 * During allocation the ttm_tt page-vector may be populated with ranges of
 * pages with different attributes if allocation hit an error without being
 * able to completely fulfill the allocation. This function can be used
 * to free these individual ranges.
 */
static void ttm_pool_free_range(struct ttm_pool *pool, struct ttm_tt *tt,
				enum ttm_caching caching,
				pgoff_t start_page, pgoff_t end_page)
{
	struct page **pages = &tt->pages[start_page];
	struct file *backup = tt->backup;
	pgoff_t i, nr;

	for (i = start_page; i < end_page; i += nr, pages += nr) {
		struct page *p = *pages;

		nr = 1;
		if (ttm_backup_page_ptr_is_handle(p)) {
			unsigned long handle = ttm_backup_page_ptr_to_handle(p);

			if (handle != 0)
				ttm_backup_drop(backup, handle);
		} else if (p) {
			dma_addr_t *dma_addr = tt->dma_address ?
				tt->dma_address + i : NULL;

			nr = ttm_pool_unmap_and_free(pool, p, dma_addr, caching);
		}
	}
}

static void ttm_pool_alloc_state_init(const struct ttm_tt *tt,
				      struct ttm_pool_alloc_state *alloc)
{
	alloc->pages = tt->pages;
	alloc->caching_divide = tt->pages;
	alloc->dma_addr = tt->dma_address;
	alloc->remaining_pages = tt->num_pages;
	alloc->tt_caching = tt->caching;
}

/*
 * Find a suitable allocation order based on highest desired order
 * and number of remaining pages
 */
static unsigned int ttm_pool_alloc_find_order(unsigned int highest,
					      const struct ttm_pool_alloc_state *alloc)
{
	return min_t(unsigned int, highest, __fls(alloc->remaining_pages));
}
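
/*
 * Worked example (added for clarity): with 5 remaining pages and
 * highest == MAX_PAGE_ORDER, __fls(5) == 2, so a 1 << 2 == 4 page
 * allocation is attempted first and the last page is then allocated
 * at order 0.
 */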

static int __ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
			    const struct ttm_operation_ctx *ctx,
			    struct ttm_pool_alloc_state *alloc,
			    struct ttm_pool_tt_restore *restore)
{
	enum ttm_caching page_caching;
	gfp_t gfp_flags = GFP_USER;
	pgoff_t caching_divide;
	unsigned int order;
	bool allow_pools;
	struct page *p;
	int r;

	WARN_ON(!alloc->remaining_pages || ttm_tt_is_populated(tt));
	WARN_ON(alloc->dma_addr && !pool->dev);

	if (tt->page_flags & TTM_TT_FLAG_ZERO_ALLOC)
		gfp_flags |= __GFP_ZERO;

	if (ctx->gfp_retry_mayfail)
		gfp_flags |= __GFP_RETRY_MAYFAIL;

	if (pool->use_dma32)
		gfp_flags |= GFP_DMA32;
	else
		gfp_flags |= GFP_HIGHUSER;

	page_caching = tt->caching;
	allow_pools = true;
	for (order = ttm_pool_alloc_find_order(MAX_PAGE_ORDER, alloc);
	     alloc->remaining_pages;
	     order = ttm_pool_alloc_find_order(order, alloc)) {
		struct ttm_pool_type *pt;

		/* First, try to allocate a page from a pool if one exists. */
		p = NULL;
		pt = ttm_pool_select_type(pool, page_caching, order);
		if (pt && allow_pools)
			p = ttm_pool_type_take(pt);
		/*
		 * If that fails or previously failed, allocate from system.
		 * Note that this also disallows additional pool allocations using
		 * write-back cached pools of the same order. Consider removing
		 * that behaviour.
		 */
		if (!p) {
			page_caching = ttm_cached;
			allow_pools = false;
			p = ttm_pool_alloc_page(pool, gfp_flags, order);
		}
		/* If that fails, lower the order if possible and retry. */
		if (!p) {
			if (order) {
				--order;
				page_caching = tt->caching;
				allow_pools = true;
				continue;
			}
			r = -ENOMEM;
			goto error_free_all;
		}
		r = ttm_pool_page_allocated(pool, order, p, page_caching, alloc,
					    restore);
		if (r)
			goto error_free_page;

		if (ttm_pool_restore_valid(restore)) {
			r = ttm_pool_restore_commit(restore, tt->backup, ctx, alloc);
			if (r)
				goto error_free_all;
		}
	}

	r = ttm_pool_apply_caching(alloc);
	if (r)
		goto error_free_all;

	kfree(tt->restore);
	tt->restore = NULL;

	return 0;

error_free_page:
	ttm_pool_free_page(pool, page_caching, order, p);

error_free_all:
	if (tt->restore)
		return r;

	caching_divide = alloc->caching_divide - tt->pages;
	ttm_pool_free_range(pool, tt, tt->caching, 0, caching_divide);
	ttm_pool_free_range(pool, tt, ttm_cached, caching_divide,
			    tt->num_pages - alloc->remaining_pages);

	return r;
}

/**
 * ttm_pool_alloc - Fill a ttm_tt object
 *
 * @pool: ttm_pool to use
 * @tt: ttm_tt object to fill
 * @ctx: operation context
 *
 * Fill the ttm_tt object with pages and also make sure to DMA map them when
 * necessary.
 *
 * Returns: 0 on success, negative error code otherwise.
 */
int ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
		   struct ttm_operation_ctx *ctx)
{
	struct ttm_pool_alloc_state alloc;

	if (WARN_ON(ttm_tt_is_backed_up(tt)))
		return -EINVAL;

	ttm_pool_alloc_state_init(tt, &alloc);

	return __ttm_pool_alloc(pool, tt, ctx, &alloc, NULL);
}
EXPORT_SYMBOL(ttm_pool_alloc);
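
/*
 * Minimal usage sketch (illustration only, error handling elided):
 *
 *	struct ttm_operation_ctx ctx = {
 *		.interruptible = true,
 *		.gfp_retry_mayfail = true,
 *	};
 *
 *	err = ttm_pool_alloc(pool, tt, &ctx);
 *
 * On success tt->pages, and tt->dma_address if requested, are populated.
 */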

/**
 * ttm_pool_restore_and_alloc - Fill a ttm_tt, restoring previously backed-up
 * content.
 *
 * @pool: ttm_pool to use
 * @tt: ttm_tt object to fill
 * @ctx: operation context
 *
 * Fill the ttm_tt object with pages and also make sure to DMA map them when
 * necessary. Read in backed-up content.
 *
 * Returns: 0 on success, negative error code otherwise.
 */
int ttm_pool_restore_and_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
			       const struct ttm_operation_ctx *ctx)
{
	struct ttm_pool_alloc_state alloc;

	if (WARN_ON(!ttm_tt_is_backed_up(tt)))
		return -EINVAL;

	if (!tt->restore) {
		gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;

		ttm_pool_alloc_state_init(tt, &alloc);
		if (ctx->gfp_retry_mayfail)
			gfp |= __GFP_RETRY_MAYFAIL;

		tt->restore = kzalloc(sizeof(*tt->restore), gfp);
		if (!tt->restore)
			return -ENOMEM;

		tt->restore->snapshot_alloc = alloc;
		tt->restore->pool = pool;
		tt->restore->restored_pages = 1;
	} else {
		struct ttm_pool_tt_restore *restore = tt->restore;
		int ret;

		alloc = restore->snapshot_alloc;
		if (ttm_pool_restore_valid(tt->restore)) {
			ret = ttm_pool_restore_commit(restore, tt->backup, ctx, &alloc);
			if (ret)
				return ret;
		}
		if (!alloc.remaining_pages)
			return 0;
	}

	return __ttm_pool_alloc(pool, tt, ctx, &alloc, tt->restore);
}

/**
 * ttm_pool_free - Free the backing pages from a ttm_tt object
 *
 * @pool: Pool to give pages back to.
 * @tt: ttm_tt object to unpopulate
 *
 * Give the backing pages back to a pool or free them
 */
void ttm_pool_free(struct ttm_pool *pool, struct ttm_tt *tt)
{
	ttm_pool_free_range(pool, tt, tt->caching, 0, tt->num_pages);

	while (atomic_long_read(&allocated_pages) > page_pool_size)
		ttm_pool_shrink();
}
EXPORT_SYMBOL(ttm_pool_free);

/**
 * ttm_pool_drop_backed_up() - Release content of a swapped-out struct ttm_tt
 * @tt: The struct ttm_tt.
 *
 * Release handles with associated content or any remaining pages of
 * a backed-up struct ttm_tt.
 */
void ttm_pool_drop_backed_up(struct ttm_tt *tt)
{
	struct ttm_pool_tt_restore *restore;
	pgoff_t start_page = 0;

	WARN_ON(!ttm_tt_is_backed_up(tt));

	restore = tt->restore;

	/*
	 * Unmap and free any uncommitted restore page.
	 * Any tt page-array backup entries that have already been read
	 * back have already been cleared.
	 */
	if (ttm_pool_restore_valid(restore)) {
		dma_addr_t *dma_addr = tt->dma_address ? &restore->first_dma : NULL;

		ttm_pool_unmap_and_free(restore->pool, restore->alloced_page,
					dma_addr, restore->page_caching);
		restore->restored_pages = 1UL << restore->order;
	}

	/*
	 * If a restore is ongoing, part of the tt pages may have a
	 * caching different than writeback.
	 */
	if (restore) {
		pgoff_t mid = restore->snapshot_alloc.caching_divide - tt->pages;

		start_page = restore->alloced_pages;
		WARN_ON(mid > start_page);
		/* Pages that might be dma-mapped and non-cached */
		ttm_pool_free_range(restore->pool, tt, tt->caching,
				    0, mid);
		/* Pages that might be dma-mapped but cached */
		ttm_pool_free_range(restore->pool, tt, ttm_cached,
				    mid, restore->alloced_pages);
		kfree(restore);
		tt->restore = NULL;
	}

	ttm_pool_free_range(NULL, tt, ttm_cached, start_page, tt->num_pages);
}

/**
 * ttm_pool_backup() - Back up or purge a struct ttm_tt
 * @pool: The pool used when allocating the struct ttm_tt.
 * @tt: The struct ttm_tt.
 * @flags: Flags to govern the backup behaviour.
 *
 * Back up or purge a struct ttm_tt. If the purge flag in @flags is set, then
 * all pages will be freed directly to the system rather than to the pool
 * they were allocated from, making the function behave similarly to
 * ttm_pool_free(). Otherwise the pages will be backed up instead,
 * exchanged for handles.
 * A subsequent call to ttm_pool_restore_and_alloc() will then read back the content and
 * a subsequent call to ttm_pool_drop_backed_up() will drop it.
 * If backup of a page fails for whatever reason, @tt will still be
 * partially backed up, retaining those pages for which backup fails.
 * In that case, this function can be retried, possibly after freeing up
 * memory resources.
 *
 * Return: Number of pages actually backed up or freed, or negative
 * error code on error.
 */
long ttm_pool_backup(struct ttm_pool *pool, struct ttm_tt *tt,
		     const struct ttm_backup_flags *flags)
{
	struct file *backup = tt->backup;
	struct page *page;
	unsigned long handle;
	gfp_t alloc_gfp;
	gfp_t gfp;
	int ret = 0;
	pgoff_t shrunken = 0;
	pgoff_t i, num_pages;

	if (WARN_ON(ttm_tt_is_backed_up(tt)))
		return -EINVAL;

	if ((!ttm_backup_bytes_avail() && !flags->purge) ||
	    pool->use_dma_alloc || ttm_tt_is_backed_up(tt))
		return -EBUSY;

#ifdef CONFIG_X86
	/* Anything returned to the system needs to be cached. */
	if (tt->caching != ttm_cached)
		set_pages_array_wb(tt->pages, tt->num_pages);
#endif

	if (tt->dma_address || flags->purge) {
		for (i = 0; i < tt->num_pages; i += num_pages) {
			unsigned int order;

			page = tt->pages[i];
			if (unlikely(!page)) {
				num_pages = 1;
				continue;
			}

			order = ttm_pool_page_order(pool, page);
			num_pages = 1UL << order;
			if (tt->dma_address)
				ttm_pool_unmap(pool, tt->dma_address[i],
					       num_pages);
			if (flags->purge) {
				shrunken += num_pages;
				page->private = 0;
				__free_pages(page, order);
				memset(tt->pages + i, 0,
				       num_pages * sizeof(*tt->pages));
			}
		}
	}

	if (flags->purge)
		return shrunken;

	if (pool->use_dma32)
		gfp = GFP_DMA32;
	else
		gfp = GFP_HIGHUSER;

	alloc_gfp = GFP_KERNEL | __GFP_HIGH | __GFP_NOWARN | __GFP_RETRY_MAYFAIL;

	num_pages = tt->num_pages;

	/* Pretend doing fault injection by shrinking only half of the pages. */
	if (IS_ENABLED(CONFIG_FAULT_INJECTION) && should_fail(&backup_fault_inject, 1))
		num_pages = DIV_ROUND_UP(num_pages, 2);

	for (i = 0; i < num_pages; ++i) {
		s64 shandle;

		page = tt->pages[i];
		if (unlikely(!page))
			continue;

		ttm_pool_split_for_swap(pool, page);

		shandle = ttm_backup_backup_page(backup, page, flags->writeback, i,
						 gfp, alloc_gfp);
		if (shandle < 0) {
			/* We allow partially shrunken tts */
			ret = shandle;
			break;
		}
		handle = shandle;
		tt->pages[i] = ttm_backup_handle_to_page_ptr(handle);
		put_page(page);
		shrunken++;
	}

	return shrunken ? shrunken : ret;
}
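
/*
 * Illustration (added for clarity): since a failed backup leaves @tt only
 * partially backed up, a caller may retry after reclaiming memory, e.g.:
 *
 *	long ret;
 *
 *	do {
 *		ret = ttm_pool_backup(pool, tt, &flags);
 *	} while (ret < 0 && driver_reclaim_some_memory());
 *
 * where driver_reclaim_some_memory() is a hypothetical placeholder for
 * driver-specific reclaim.
 */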

/**
 * ttm_pool_init - Initialize a pool
 *
 * @pool: the pool to initialize
 * @dev: device for DMA allocations and mappings
 * @nid: NUMA node to use for allocations
 * @use_dma_alloc: true if coherent DMA alloc should be used
 * @use_dma32: true if GFP_DMA32 should be used
 *
 * Initialize the pool and its pool types.
 */
void ttm_pool_init(struct ttm_pool *pool, struct device *dev,
		   int nid, bool use_dma_alloc, bool use_dma32)
{
	unsigned int i, j;

	WARN_ON(!dev && use_dma_alloc);

	pool->dev = dev;
	pool->nid = nid;
	pool->use_dma_alloc = use_dma_alloc;
	pool->use_dma32 = use_dma32;

	for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i) {
		for (j = 0; j < NR_PAGE_ORDERS; ++j) {
			struct ttm_pool_type *pt;

			/* Initialize only pool types which are actually used */
			pt = ttm_pool_select_type(pool, i, j);
			if (pt != &pool->caching[i].orders[j])
				continue;

			ttm_pool_type_init(pt, pool, i, j);
		}
	}
}
EXPORT_SYMBOL(ttm_pool_init);

/**
 * ttm_pool_synchronize_shrinkers - Wait for all running shrinkers to complete.
 *
 * This is useful to guarantee that all shrinker invocations have seen an
 * update, before freeing memory, similar to rcu.
 */
static void ttm_pool_synchronize_shrinkers(void)
{
	down_write(&pool_shrink_rwsem);
	up_write(&pool_shrink_rwsem);
}

/**
 * ttm_pool_fini - Cleanup a pool
 *
 * @pool: the pool to clean up
 *
 * Free all pages in the pool and unregister the types from the global
 * shrinker.
 */
void ttm_pool_fini(struct ttm_pool *pool)
{
	unsigned int i, j;

	for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i) {
		for (j = 0; j < NR_PAGE_ORDERS; ++j) {
			struct ttm_pool_type *pt;

			pt = ttm_pool_select_type(pool, i, j);
			if (pt != &pool->caching[i].orders[j])
				continue;

			ttm_pool_type_fini(pt);
		}
	}

	/* We removed the pool types from the LRU, but we need to also make sure
	 * that no shrinker is concurrently freeing pages from the pool.
	 */
	ttm_pool_synchronize_shrinkers();
}
EXPORT_SYMBOL(ttm_pool_fini);

/* Free average pool number of pages. */
#define TTM_SHRINKER_BATCH ((1 << (MAX_PAGE_ORDER / 2)) * NR_PAGE_ORDERS)
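
/*
 * Worked example (values depend on the kernel configuration): with
 * MAX_PAGE_ORDER == 10 and NR_PAGE_ORDERS == 11 this evaluates to
 * (1 << 5) * 11 == 352 pages per shrinker batch.
 */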

static unsigned long ttm_pool_shrinker_scan(struct shrinker *shrink,
					    struct shrink_control *sc)
{
	unsigned long num_freed = 0;

	do
		num_freed += ttm_pool_shrink();
	while (num_freed < sc->nr_to_scan &&
	       atomic_long_read(&allocated_pages));

	sc->nr_scanned = num_freed;

	return num_freed ?: SHRINK_STOP;
}

/* Return the number of pages available or SHRINK_EMPTY if we have none */
static unsigned long ttm_pool_shrinker_count(struct shrinker *shrink,
					     struct shrink_control *sc)
{
	unsigned long num_pages = atomic_long_read(&allocated_pages);

	return num_pages ? num_pages : SHRINK_EMPTY;
}

#ifdef CONFIG_DEBUG_FS
/* Count the number of pages available in a pool_type */
static unsigned int ttm_pool_type_count(struct ttm_pool_type *pt)
{
	unsigned int count = 0;
	struct page *p;

	spin_lock(&pt->lock);
	/* Only used for debugfs, the overhead doesn't matter */
	list_for_each_entry(p, &pt->pages, lru)
		++count;
	spin_unlock(&pt->lock);

	return count;
}

/* Print a nice header for the order */
static void ttm_pool_debugfs_header(struct seq_file *m)
{
	unsigned int i;

	seq_puts(m, "\t ");
	for (i = 0; i < NR_PAGE_ORDERS; ++i)
		seq_printf(m, " ---%2u---", i);
	seq_puts(m, "\n");
}

/* Dump information about the different pool types */
static void ttm_pool_debugfs_orders(struct ttm_pool_type *pt,
				    struct seq_file *m)
{
	unsigned int i;

	for (i = 0; i < NR_PAGE_ORDERS; ++i)
		seq_printf(m, " %8u", ttm_pool_type_count(&pt[i]));
	seq_puts(m, "\n");
}

/* Dump the total amount of allocated pages */
static void ttm_pool_debugfs_footer(struct seq_file *m)
{
	seq_printf(m, "\ntotal\t: %8lu of %8lu\n",
		   atomic_long_read(&allocated_pages), page_pool_size);
}

/* Dump the information for the global pools */
static int ttm_pool_debugfs_globals_show(struct seq_file *m, void *data)
{
	ttm_pool_debugfs_header(m);

	spin_lock(&shrinker_lock);
	seq_puts(m, "wc\t:");
	ttm_pool_debugfs_orders(global_write_combined, m);
	seq_puts(m, "uc\t:");
	ttm_pool_debugfs_orders(global_uncached, m);
	seq_puts(m, "wc 32\t:");
	ttm_pool_debugfs_orders(global_dma32_write_combined, m);
	seq_puts(m, "uc 32\t:");
	ttm_pool_debugfs_orders(global_dma32_uncached, m);
	spin_unlock(&shrinker_lock);

	ttm_pool_debugfs_footer(m);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(ttm_pool_debugfs_globals);

/**
 * ttm_pool_debugfs - Debugfs dump function for a pool
 *
 * @pool: the pool to dump the information for
 * @m: seq_file to dump to
 *
 * Make a debugfs dump with the per pool and global information.
 */
int ttm_pool_debugfs(struct ttm_pool *pool, struct seq_file *m)
{
	unsigned int i;

	if (!pool->use_dma_alloc && pool->nid == NUMA_NO_NODE) {
		seq_puts(m, "unused\n");
		return 0;
	}

	ttm_pool_debugfs_header(m);

	spin_lock(&shrinker_lock);
	for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i) {
		if (!ttm_pool_select_type(pool, i, 0))
			continue;
		if (pool->use_dma_alloc)
			seq_puts(m, "DMA ");
		else
			seq_printf(m, "N%d ", pool->nid);
		switch (i) {
		case ttm_cached:
			seq_puts(m, "\t:");
			break;
		case ttm_write_combined:
			seq_puts(m, "wc\t:");
			break;
		case ttm_uncached:
			seq_puts(m, "uc\t:");
			break;
		}
		ttm_pool_debugfs_orders(pool->caching[i].orders, m);
	}
	spin_unlock(&shrinker_lock);

	ttm_pool_debugfs_footer(m);
	return 0;
}
EXPORT_SYMBOL(ttm_pool_debugfs);

/* Test the shrinker functions and dump the result */
static int ttm_pool_debugfs_shrink_show(struct seq_file *m, void *data)
{
	struct shrink_control sc = {
		.gfp_mask = GFP_NOFS,
		.nr_to_scan = TTM_SHRINKER_BATCH,
	};
	unsigned long count;

	fs_reclaim_acquire(GFP_KERNEL);
	count = ttm_pool_shrinker_count(mm_shrinker, &sc);
	seq_printf(m, "%lu/%lu\n", count,
		   ttm_pool_shrinker_scan(mm_shrinker, &sc));
	fs_reclaim_release(GFP_KERNEL);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(ttm_pool_debugfs_shrink);

#endif

/**
 * ttm_pool_mgr_init - Initialize globals
 *
 * @num_pages: default number of pages
 *
 * Initialize the global locks and lists for the MM shrinker.
 */
int ttm_pool_mgr_init(unsigned long num_pages)
{
	unsigned int i;

	if (!page_pool_size)
		page_pool_size = num_pages;

	spin_lock_init(&shrinker_lock);
	INIT_LIST_HEAD(&shrinker_list);

	for (i = 0; i < NR_PAGE_ORDERS; ++i) {
		ttm_pool_type_init(&global_write_combined[i], NULL,
				   ttm_write_combined, i);
		ttm_pool_type_init(&global_uncached[i], NULL, ttm_uncached, i);

		ttm_pool_type_init(&global_dma32_write_combined[i], NULL,
				   ttm_write_combined, i);
		ttm_pool_type_init(&global_dma32_uncached[i], NULL,
				   ttm_uncached, i);
	}

#ifdef CONFIG_DEBUG_FS
	debugfs_create_file("page_pool", 0444, ttm_debugfs_root, NULL,
			    &ttm_pool_debugfs_globals_fops);
	debugfs_create_file("page_pool_shrink", 0400, ttm_debugfs_root, NULL,
			    &ttm_pool_debugfs_shrink_fops);
#ifdef CONFIG_FAULT_INJECTION
	fault_create_debugfs_attr("backup_fault_inject", ttm_debugfs_root,
				  &backup_fault_inject);
#endif
#endif

	mm_shrinker = shrinker_alloc(0, "drm-ttm_pool");
	if (!mm_shrinker)
		return -ENOMEM;

	mm_shrinker->count_objects = ttm_pool_shrinker_count;
	mm_shrinker->scan_objects = ttm_pool_shrinker_scan;
	mm_shrinker->batch = TTM_SHRINKER_BATCH;
	mm_shrinker->seeks = 1;

	shrinker_register(mm_shrinker);

	return 0;
}

/**
 * ttm_pool_mgr_fini - Finalize globals
 *
 * Cleanup the global pools and unregister the MM shrinker.
 */
void ttm_pool_mgr_fini(void)
{
	unsigned int i;

	for (i = 0; i < NR_PAGE_ORDERS; ++i) {
		ttm_pool_type_fini(&global_write_combined[i]);
		ttm_pool_type_fini(&global_uncached[i]);

		ttm_pool_type_fini(&global_dma32_write_combined[i]);
		ttm_pool_type_fini(&global_dma32_uncached[i]);
	}

	shrinker_free(mm_shrinker);
	WARN_ON(!list_empty(&shrinker_list));
}