// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2024, Google LLC.
 * Pasha Tatashin <pasha.tatashin@soleen.com>
 */
#include "iommu-pages.h"
#include <linux/gfp.h>
#include <linux/mm.h>

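/*
 * struct ioptdesc is overlaid on struct page. Each field checked below must
 * sit at the same offset as the corresponding struct page field, and the
 * descriptor must not be larger than the page it reuses; the static_asserts
 * enforce this layout at compile time.
 */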
#define IOPTDESC_MATCH(pg_elm, elm)                    \
	static_assert(offsetof(struct page, pg_elm) == \
		      offsetof(struct ioptdesc, elm))
IOPTDESC_MATCH(flags, __page_flags);
IOPTDESC_MATCH(lru, iopt_freelist_elm); /* Ensure bit 0 is clear */
IOPTDESC_MATCH(mapping, __page_mapping);
IOPTDESC_MATCH(private, _private);
IOPTDESC_MATCH(page_type, __page_type);
IOPTDESC_MATCH(_refcount, __page_refcount);
#ifdef CONFIG_MEMCG
IOPTDESC_MATCH(memcg_data, memcg_data);
#endif
#undef IOPTDESC_MATCH
static_assert(sizeof(struct ioptdesc) <= sizeof(struct page));

/**
 * iommu_alloc_pages_node_sz - Allocate a zeroed page of a given size from a
 *                             specific NUMA node
 * @nid: memory NUMA node id
 * @gfp: buddy allocator flags
 * @size: Memory size to allocate, rounded up to a power of 2
 *
 * Returns the virtual address of the allocated page. The page must be freed
 * either by calling iommu_free_pages() or via iommu_put_pages_list(). The
 * returned allocation is roundup_pow_of_two(size) big and is physically
 * aligned to its size.
 */
void *iommu_alloc_pages_node_sz(int nid, gfp_t gfp, size_t size)
{
	unsigned long pgcnt;
	struct folio *folio;
	unsigned int order;

	/* This uses page_address() on the memory. */
	if (WARN_ON(gfp & __GFP_HIGHMEM))
		return NULL;

	/*
	 * Currently sub-page allocations result in a full page being
	 * returned.
	 */
	order = get_order(size);

	/*
	 * __folio_alloc_node() does not handle NUMA_NO_NODE like
	 * alloc_pages_node() did.
	 */
	if (nid == NUMA_NO_NODE)
		nid = numa_mem_id();

	folio = __folio_alloc_node(gfp | __GFP_ZERO, order, nid);
	if (unlikely(!folio))
		return NULL;

	/*
	 * All page allocations that should be reported as "iommu-pagetables"
	 * to userspace must use one of the functions below. This includes
	 * allocations of page-tables and other per-iommu_domain configuration
	 * structures.
	 *
	 * This is necessary for proper accounting, as IOMMU state can be
	 * rather large, i.e. multiple gigabytes in size.
	 */
	pgcnt = 1UL << order;
	mod_node_page_state(folio_pgdat(folio), NR_IOMMU_PAGES, pgcnt);
	lruvec_stat_mod_folio(folio, NR_SECONDARY_PAGETABLE, pgcnt);

	return folio_address(folio);
}
EXPORT_SYMBOL_GPL(iommu_alloc_pages_node_sz);
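/*
 * Illustrative usage sketch (not compiled here): pairing an allocation with
 * iommu_free_pages(). The device, size, and gfp flags below are example
 * placeholders, not requirements of the API.
 *
 *	void *table = iommu_alloc_pages_node_sz(dev_to_node(dev), GFP_KERNEL,
 *						SZ_4K);
 *	if (!table)
 *		return -ENOMEM;
 *	... use table as an IOMMU page table ...
 *	iommu_free_pages(table);
 */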

static void __iommu_free_desc(struct ioptdesc *iopt)
{
	struct folio *folio = ioptdesc_folio(iopt);
	const unsigned long pgcnt = 1UL << folio_order(folio);

	mod_node_page_state(folio_pgdat(folio), NR_IOMMU_PAGES, -pgcnt);
	lruvec_stat_mod_folio(folio, NR_SECONDARY_PAGETABLE, -pgcnt);
	folio_put(folio);
}

/**
 * iommu_free_pages - free pages
 * @virt: virtual address of the page to be freed.
 *
 * The page must have been allocated by iommu_alloc_pages_node_sz().
 */
void iommu_free_pages(void *virt)
{
	if (!virt)
		return;
	__iommu_free_desc(virt_to_ioptdesc(virt));
}
EXPORT_SYMBOL_GPL(iommu_free_pages);

/**
 * iommu_put_pages_list - free a list of pages.
 * @list: The list of pages to be freed
 *
 * Frees a list of pages allocated by iommu_alloc_pages_node_sz(). On return
 * the passed list is invalid; the caller must use IOMMU_PAGES_LIST_INIT to
 * reinit the list if it expects to use it again.
 */
void iommu_put_pages_list(struct iommu_pages_list *list)
{
	struct ioptdesc *iopt, *tmp;

	list_for_each_entry_safe(iopt, tmp, &list->pages, iopt_freelist_elm)
		__iommu_free_desc(iopt);
}
EXPORT_SYMBOL_GPL(iommu_put_pages_list);
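/*
 * Illustrative usage sketch (not compiled here): batching frees through a
 * page list. Assumes the iommu_pages_list_add() helper and the
 * IOMMU_PAGES_LIST_INIT initializer declared in iommu-pages.h.
 *
 *	struct iommu_pages_list freelist = IOMMU_PAGES_LIST_INIT(freelist);
 *	void *pte;
 *
 *	pte = iommu_alloc_pages_node_sz(NUMA_NO_NODE, GFP_KERNEL, SZ_4K);
 *	if (pte)
 *		iommu_pages_list_add(&freelist, pte);
 *	... queue more pages to tear down ...
 *	iommu_put_pages_list(&freelist);
 */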