// SPDX-License-Identifier: GPL-2.0-only
/* Page fragment allocator
 *
 * Page Fragment:
 * An arbitrary-length arbitrary-offset area of memory which resides within a
 * 0 or higher order page. Multiple fragments within that page are
 * individually refcounted, in the page's reference counter.
 *
 * The page_frag functions provide a simple allocation framework for page
 * fragments. This is used by the network stack and network device drivers to
 * provide a backing region of memory for use as either an sk_buff->head, or to
 * be used in the "frags" portion of skb_shared_info.
 */
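
/*
 * Usage sketch (illustrative only; error handling and locking elided --
 * the cache itself is not thread-safe and is normally per-CPU or
 * per-queue). A consumer embeds a struct page_frag_cache, carves
 * fragments out of it, and frees each fragment independently:
 *
 *	struct page_frag_cache nc;
 *	void *buf;
 *
 *	page_frag_cache_init(&nc);
 *	buf = page_frag_alloc(&nc, 256, GFP_ATOMIC);
 *	if (buf) {
 *		... use the fragment, e.g. as an skb data area ...
 *		page_frag_free(buf);
 *	}
 *	page_frag_cache_drain(&nc);
 */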

#include <linux/build_bug.h>
#include <linux/export.h>
#include <linux/gfp_types.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/page_frag_cache.h>
#include "internal.h"

static unsigned long encoded_page_create(struct page *page, unsigned int order,
					 bool pfmemalloc)
{
	BUILD_BUG_ON(PAGE_FRAG_CACHE_MAX_ORDER > PAGE_FRAG_CACHE_ORDER_MASK);
	BUILD_BUG_ON(PAGE_FRAG_CACHE_PFMEMALLOC_BIT >= PAGE_SIZE);

	return (unsigned long)page_address(page) |
	       (order & PAGE_FRAG_CACHE_ORDER_MASK) |
	       ((unsigned long)pfmemalloc * PAGE_FRAG_CACHE_PFMEMALLOC_BIT);
}

static unsigned long encoded_page_decode_order(unsigned long encoded_page)
{
	return encoded_page & PAGE_FRAG_CACHE_ORDER_MASK;
}

static void *encoded_page_decode_virt(unsigned long encoded_page)
{
	return (void *)(encoded_page & PAGE_MASK);
}

static struct page *encoded_page_decode_page(unsigned long encoded_page)
{
	return virt_to_page((void *)encoded_page);
}
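
/*
 * Layout example (illustrative): page_address() returns a page-aligned
 * pointer, so the low PAGE_SHIFT bits of the virtual address are free
 * to carry metadata; the BUILD_BUG_ON()s in encoded_page_create()
 * guarantee that the order and the pfmemalloc bit both fit in those
 * bits. An order-3, non-pfmemalloc page at 0xffff888012340000 thus
 * encodes to 0xffff888012340003. Decoding masks with PAGE_MASK to
 * recover the address, and virt_to_page() accepts the encoded value
 * as-is since it ignores offsets within a page.
 */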

static struct page *__page_frag_cache_refill(struct page_frag_cache *nc,
					     gfp_t gfp_mask)
{
	unsigned long order = PAGE_FRAG_CACHE_MAX_ORDER;
	struct page *page = NULL;
	gfp_t gfp = gfp_mask;

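	/* Opportunistically try a PAGE_FRAG_CACHE_MAX_ORDER compound
	 * page first: direct reclaim is masked off and __GFP_NOMEMALLOC
	 * is set, so the attempt stays cheap and never dips into
	 * pfmemalloc reserves. On failure, fall back to a single
	 * order-0 page allocated with the caller's original gfp_mask.
	 */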
#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
	gfp_mask = (gfp_mask & ~__GFP_DIRECT_RECLAIM) | __GFP_COMP |
		   __GFP_NOWARN | __GFP_NORETRY | __GFP_NOMEMALLOC;
	page = __alloc_pages(gfp_mask, PAGE_FRAG_CACHE_MAX_ORDER,
			     numa_mem_id(), NULL);
#endif
	if (unlikely(!page)) {
		page = __alloc_pages(gfp, 0, numa_mem_id(), NULL);
		order = 0;
	}

	nc->encoded_page = page ?
		encoded_page_create(page, order, page_is_pfmemalloc(page)) : 0;

	return page;
}

void page_frag_cache_drain(struct page_frag_cache *nc)
{
	if (!nc->encoded_page)
		return;

	__page_frag_cache_drain(encoded_page_decode_page(nc->encoded_page),
				nc->pagecnt_bias);
	nc->encoded_page = 0;
}
EXPORT_SYMBOL(page_frag_cache_drain);

void __page_frag_cache_drain(struct page *page, unsigned int count)
{
	VM_BUG_ON_PAGE(page_ref_count(page) == 0, page);

	if (page_ref_sub_and_test(page, count))
		free_frozen_pages(page, compound_order(page));
}
EXPORT_SYMBOL(__page_frag_cache_drain);

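/*
 * Reference counting is amortized via nc->pagecnt_bias: on refill the
 * page's refcount is bumped by PAGE_FRAG_CACHE_MAX_SIZE in one go, and
 * the bias records how many of those references the cache still holds.
 * Handing out a fragment then only decrements the bias, avoiding an
 * atomic operation per fragment; the leftover bias is reconciled with
 * a single page_ref_sub_and_test() when the cache moves off the page.
 * For example, after three fragments the bias has dropped to
 * PAGE_FRAG_CACHE_MAX_SIZE + 1 - 3; subtracting it leaves a refcount
 * of 3, and the page is finally freed once the three outstanding
 * page_frag_free() calls drop their references.
 */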
void *__page_frag_alloc_align(struct page_frag_cache *nc,
			      unsigned int fragsz, gfp_t gfp_mask,
			      unsigned int align_mask)
{
	unsigned long encoded_page = nc->encoded_page;
	unsigned int size, offset;
	struct page *page;

	if (unlikely(!encoded_page)) {
refill:
		page = __page_frag_cache_refill(nc, gfp_mask);
		if (!page)
			return NULL;

		encoded_page = nc->encoded_page;

		/* Even if we own the page, we do not use atomic_set().
		 * This would break get_page_unless_zero() users.
		 */
		page_ref_add(page, PAGE_FRAG_CACHE_MAX_SIZE);

		/* reset page count bias and offset to start of new frag */
		nc->pagecnt_bias = PAGE_FRAG_CACHE_MAX_SIZE + 1;
		nc->offset = 0;
	}

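	/* align_mask is ~(align - 1), i.e. -align for a power-of-two
	 * align (~0 means no alignment), so ~align_mask recovers the
	 * (align - 1) low-bits mask that __ALIGN_KERNEL_MASK() expects.
	 */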
	size = PAGE_SIZE << encoded_page_decode_order(encoded_page);
	offset = __ALIGN_KERNEL_MASK(nc->offset, ~align_mask);
	if (unlikely(offset + fragsz > size)) {
		if (unlikely(fragsz > PAGE_SIZE)) {
			/*
			 * The caller is trying to allocate a fragment
			 * with fragsz > PAGE_SIZE, but the cache isn't
			 * big enough to satisfy the request; this can
			 * happen under low-memory conditions, when only
			 * an order-0 page could be refilled. Releasing
			 * the cache page could make memory pressure
			 * worse, so simply return NULL here.
			 */
			return NULL;
		}

		page = encoded_page_decode_page(encoded_page);

		if (!page_ref_sub_and_test(page, nc->pagecnt_bias))
			goto refill;

		if (unlikely(encoded_page_decode_pfmemalloc(encoded_page))) {
			free_frozen_pages(page,
					  encoded_page_decode_order(encoded_page));
			goto refill;
		}

		/* OK, page count is 0, we can safely set it */
		set_page_count(page, PAGE_FRAG_CACHE_MAX_SIZE + 1);

		/* reset page count bias and offset to start of new frag */
		nc->pagecnt_bias = PAGE_FRAG_CACHE_MAX_SIZE + 1;
		offset = 0;
	}

	nc->pagecnt_bias--;
	nc->offset = offset + fragsz;

	return encoded_page_decode_virt(encoded_page) + offset;
}
EXPORT_SYMBOL(__page_frag_alloc_align);
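/*
 * Usage sketch (illustrative only): for a 64-byte-aligned fragment a
 * caller passes align_mask = ~63U (i.e. -64); the page_frag_alloc_align()
 * wrapper in the page_frag_cache header derives this as -align after
 * sanity-checking that align is a power of two:
 *
 *	data = __page_frag_alloc_align(nc, size, GFP_ATOMIC, ~63U);
 */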

/*
 * Frees a page fragment allocated out of either a compound or order 0 page.
 */
void page_frag_free(void *addr)
{
	struct page *page = virt_to_head_page(addr);

	if (unlikely(put_page_testzero(page)))
		free_frozen_pages(page, compound_order(page));
}
EXPORT_SYMBOL(page_frag_free);
| 172 | |