| 1 | /* SPDX-License-Identifier: GPL-2.0 */ | 
|---|
| 2 | #ifndef _NET_PAGE_POOL_MEMORY_PROVIDER_H | 
|---|
| 3 | #define _NET_PAGE_POOL_MEMORY_PROVIDER_H | 
|---|
| 4 |  | 
|---|
| 5 | #include <net/netmem.h> | 
|---|
| 6 | #include <net/page_pool/types.h> | 
|---|
| 7 |  | 
|---|
| 8 | struct netdev_rx_queue; | 
|---|
| 9 | struct netlink_ext_ack; | 
|---|
| 10 | struct sk_buff; | 
|---|
| 11 |  | 
|---|
/**
 * struct memory_provider_ops - callbacks implemented by a page pool
 *	memory provider
 * @alloc_netmems: allocate a netmem for @pool using @gfp allocation flags
 * @release_netmem: hand @netmem back to the provider; the bool return
 *	presumably tells the pool whether the provider consumed it —
 *	TODO confirm exact contract against page_pool core
 * @init: set up provider state for @pool; int return suggests 0 on
 *	success / negative errno on failure (kernel convention — verify
 *	against callers)
 * @destroy: tear down provider state for @pool
 * @nl_fill: fill netlink response skb @rsp with provider info for
 *	@rxq, using the provider's private data @mp_priv
 * @uninstall: detach the provider (private data @mp_priv) from @rxq
 */
struct memory_provider_ops {
	netmem_ref (*alloc_netmems)(struct page_pool *pool, gfp_t gfp);
	bool (*release_netmem)(struct page_pool *pool, netmem_ref netmem);
	int (*init)(struct page_pool *pool);
	void (*destroy)(struct page_pool *pool);
	int (*nl_fill)(void *mp_priv, struct sk_buff *rsp,
		       struct netdev_rx_queue *rxq);
	void (*uninstall)(void *mp_priv, struct netdev_rx_queue *rxq);
};
| 21 |  | 
|---|
/* Record DMA address @addr in @niov; bool return suggests it can fail
 * (TODO confirm the failure condition against the implementation). */
bool net_mp_niov_set_dma_addr(struct net_iov *niov, dma_addr_t addr);
/* Associate @niov with @pool. */
void net_mp_niov_set_page_pool(struct page_pool *pool, struct net_iov *niov);
/* Drop @niov's page pool association. */
void net_mp_niov_clear_page_pool(struct net_iov *niov);
| 25 |  | 
|---|
/*
 * Bind/unbind a memory provider described by @p to an RX queue of @dev.
 * The double-underscore variants are the low-level helpers: they take
 * const provider params and (for open) a netlink extack for error
 * reporting; the plain variants are the non-extack wrappers.
 *
 * Fixed: the wrapper prototypes used bare `unsigned` while the sibling
 * __net_mp_*_rxq() prototypes spell it `unsigned int` — spell it out
 * consistently (checkpatch: prefer 'unsigned int'; ABI-identical).
 */
int net_mp_open_rxq(struct net_device *dev, unsigned int ifq_idx,
		    struct pp_memory_provider_params *p);
int __net_mp_open_rxq(struct net_device *dev, unsigned int rxq_idx,
		      const struct pp_memory_provider_params *p,
		      struct netlink_ext_ack *extack);
void net_mp_close_rxq(struct net_device *dev, unsigned int ifq_idx,
		      struct pp_memory_provider_params *old_p);
void __net_mp_close_rxq(struct net_device *dev, unsigned int rxq_idx,
			const struct pp_memory_provider_params *old_p);
| 35 |  | 
|---|
| 36 | /** | 
|---|
| 37 | * net_mp_netmem_place_in_cache() - give a netmem to a page pool | 
|---|
| 38 | * @pool:      the page pool to place the netmem into | 
|---|
| 39 | * @netmem:    netmem to give | 
|---|
| 40 | * | 
|---|
| 41 | * Push an accounted netmem into the page pool's allocation cache. The caller | 
|---|
| 42 | * must ensure that there is space in the cache. It should only be called off | 
|---|
| 43 | * the mp_ops->alloc_netmems() path. | 
|---|
| 44 | */ | 
|---|
| 45 | static inline void net_mp_netmem_place_in_cache(struct page_pool *pool, | 
|---|
| 46 | netmem_ref netmem) | 
|---|
| 47 | { | 
|---|
| 48 | pool->alloc.cache[pool->alloc.count++] = netmem; | 
|---|
| 49 | } | 
|---|
| 50 |  | 
|---|
| 51 | #endif | 
|---|
| 52 |  | 
|---|