1/* SPDX-License-Identifier: GPL-2.0-only */
2/*
3 * include/linux/idr.h
4 *
5 * 2002-10-18 written by Jim Houston jim.houston@ccur.com
6 * Copyright (C) 2002 by Concurrent Computer Corporation
7 *
8 * Small id to pointer translation service avoiding fixed sized
9 * tables.
10 */
11
12#ifndef __IDR_H__
13#define __IDR_H__
14
15#include <linux/radix-tree.h>
16#include <linux/gfp.h>
17#include <linux/percpu.h>
18#include <linux/cleanup.h>
19
/*
 * An IDR maps user-visible integer IDs to pointers.  Storage is a radix
 * tree; the IDR_FREE tag on that tree tracks free space.
 */
struct idr {
	struct radix_tree_root idr_rt;	/* backing radix tree */
	unsigned int idr_base;		/* lowest ID this IDR hands out */
	unsigned int idr_next;		/* cursor for idr_alloc_cyclic() */
};
25
26/*
27 * The IDR API does not expose the tagging functionality of the radix tree
28 * to users. Use tag 0 to track whether a node has free space below it.
29 */
30#define IDR_FREE 0
31
32/* Set the IDR flag and the IDR_FREE tag */
33#define IDR_RT_MARKER (ROOT_IS_IDR | (__force gfp_t) \
34 (1 << (ROOT_TAG_SHIFT + IDR_FREE)))
35
36#define IDR_INIT_BASE(name, base) { \
37 .idr_rt = RADIX_TREE_INIT(name, IDR_RT_MARKER), \
38 .idr_base = (base), \
39 .idr_next = 0, \
40}
41
42/**
43 * IDR_INIT() - Initialise an IDR.
44 * @name: Name of IDR.
45 *
46 * A freshly-initialised IDR contains no IDs.
47 */
48#define IDR_INIT(name) IDR_INIT_BASE(name, 0)
49
50/**
51 * DEFINE_IDR() - Define a statically-allocated IDR.
52 * @name: Name of IDR.
53 *
54 * An IDR defined using this macro is ready for use with no additional
55 * initialisation required. It contains no IDs.
56 */
57#define DEFINE_IDR(name) struct idr name = IDR_INIT(name)
58
59/**
60 * idr_get_cursor - Return the current position of the cyclic allocator
61 * @idr: idr handle
62 *
63 * The value returned is the value that will be next returned from
64 * idr_alloc_cyclic() if it is free (otherwise the search will start from
65 * this position).
66 */
67static inline unsigned int idr_get_cursor(const struct idr *idr)
68{
69 return READ_ONCE(idr->idr_next);
70}
71
72/**
73 * idr_set_cursor - Set the current position of the cyclic allocator
74 * @idr: idr handle
75 * @val: new position
76 *
77 * The next call to idr_alloc_cyclic() will return @val if it is free
78 * (otherwise the search will start from this position).
79 */
80static inline void idr_set_cursor(struct idr *idr, unsigned int val)
81{
82 WRITE_ONCE(idr->idr_next, val);
83}
84
85/**
86 * DOC: idr sync
87 * idr synchronization (stolen from radix-tree.h)
88 *
89 * idr_find() is able to be called locklessly, using RCU. The caller must
90 * ensure calls to this function are made within rcu_read_lock() regions.
91 * Other readers (lock-free or otherwise) and modifications may be running
92 * concurrently.
93 *
94 * It is still required that the caller manage the synchronization and
95 * lifetimes of the items. So if RCU lock-free lookups are used, typically
96 * this would mean that the items have their own locks, or are amenable to
97 * lock-free access; and that the items are freed by RCU (or only freed after
98 * having been deleted from the idr tree *and* a synchronize_rcu() grace
99 * period).
100 */
101
102#define idr_lock(idr) xa_lock(&(idr)->idr_rt)
103#define idr_unlock(idr) xa_unlock(&(idr)->idr_rt)
104#define idr_lock_bh(idr) xa_lock_bh(&(idr)->idr_rt)
105#define idr_unlock_bh(idr) xa_unlock_bh(&(idr)->idr_rt)
106#define idr_lock_irq(idr) xa_lock_irq(&(idr)->idr_rt)
107#define idr_unlock_irq(idr) xa_unlock_irq(&(idr)->idr_rt)
108#define idr_lock_irqsave(idr, flags) \
109 xa_lock_irqsave(&(idr)->idr_rt, flags)
110#define idr_unlock_irqrestore(idr, flags) \
111 xa_unlock_irqrestore(&(idr)->idr_rt, flags)
112
/* Preload per-CPU nodes for a following GFP_ATOMIC allocation. */
void idr_preload(gfp_t gfp_mask);

/* Allocate an unused ID in [start, end); returns the ID or -errno. */
int idr_alloc(struct idr *, void *ptr, int start, int end, gfp_t);
/* As idr_alloc() but takes/returns the ID through a u32 pointer. */
int __must_check idr_alloc_u32(struct idr *, void *ptr, u32 *id,
				unsigned long max, gfp_t);
/* As idr_alloc() but continues the search from the saved cursor. */
int idr_alloc_cyclic(struct idr *, void *ptr, int start, int end, gfp_t);
/* Remove @id; returns the pointer it mapped to, or NULL. */
void *idr_remove(struct idr *, unsigned long id);
/* Look up @id; RCU-safe, see "DOC: idr sync" above. */
void *idr_find(const struct idr *, unsigned long id);
/* Call @fn for each entry; stops early if @fn returns non-zero. */
int idr_for_each(const struct idr *,
		 int (*fn)(int id, void *p, void *data), void *data);
/* Return the next populated entry at or above *nextid, updating *nextid. */
void *idr_get_next(struct idr *, int *nextid);
void *idr_get_next_ul(struct idr *, unsigned long *nextid);
/* Replace the pointer stored at @id; returns the old pointer or -errno. */
void *idr_replace(struct idr *, void *, unsigned long id);
/* Free all internal memory; entries themselves are not freed. */
void idr_destroy(struct idr *);
127
/*
 * Scope-based cleanup support (linux/cleanup.h) for IDR allocations:
 * records which IDR an ID was allocated from so it can be removed
 * automatically when the guard variable goes out of scope.
 */
struct __class_idr {
	struct idr *idr;	/* IDR the ID was allocated from */
	int id;			/* allocated ID, or negative errno */
};

/* Sentinel meaning "no allocation to clean up". */
#define idr_null ((struct __class_idr){ NULL, -1 })
/* Take ownership of the ID out of the guard, disarming its cleanup. */
#define take_idr_id(id) __get_and_null(id, idr_null)

/*
 * Constructor performs idr_alloc(); destructor removes the ID again,
 * but only if the allocation succeeded (id >= 0).
 */
DEFINE_CLASS(idr_alloc, struct __class_idr,
	     if (_T.id >= 0) idr_remove(_T.idr, _T.id),
	     ((struct __class_idr){
			.idr = idr,
			.id = idr_alloc(idr, ptr, start, end, gfp),
	     }),
	     struct idr *idr, void *ptr, int start, int end, gfp_t gfp);
143
144/**
145 * idr_init_base() - Initialise an IDR.
146 * @idr: IDR handle.
147 * @base: The base value for the IDR.
148 *
149 * This variation of idr_init() creates an IDR which will allocate IDs
150 * starting at %base.
151 */
152static inline void idr_init_base(struct idr *idr, int base)
153{
154 INIT_RADIX_TREE(&idr->idr_rt, IDR_RT_MARKER);
155 idr->idr_base = base;
156 idr->idr_next = 0;
157}
158
159/**
160 * idr_init() - Initialise an IDR.
161 * @idr: IDR handle.
162 *
163 * Initialise a dynamically allocated IDR. To initialise a
164 * statically allocated IDR, use DEFINE_IDR().
165 */
166static inline void idr_init(struct idr *idr)
167{
168 idr_init_base(idr, base: 0);
169}
170
171/**
172 * idr_is_empty() - Are there any IDs allocated?
173 * @idr: IDR handle.
174 *
175 * Return: %true if any IDs have been allocated from this IDR.
176 */
177static inline bool idr_is_empty(const struct idr *idr)
178{
179 return radix_tree_empty(root: &idr->idr_rt) &&
180 radix_tree_tagged(&idr->idr_rt, IDR_FREE);
181}
182
183/**
184 * idr_preload_end - end preload section started with idr_preload()
185 *
186 * Each idr_preload() should be matched with an invocation of this
187 * function. See idr_preload() for details.
188 */
189static inline void idr_preload_end(void)
190{
191 local_unlock(&radix_tree_preloads.lock);
192}
193
194/**
195 * idr_for_each_entry() - Iterate over an IDR's elements of a given type.
196 * @idr: IDR handle.
197 * @entry: The type * to use as cursor
198 * @id: Entry ID.
199 *
200 * @entry and @id do not need to be initialized before the loop, and
201 * after normal termination @entry is left with the value NULL. This
202 * is convenient for a "not found" value.
203 */
204#define idr_for_each_entry(idr, entry, id) \
205 for (id = 0; ((entry) = idr_get_next(idr, &(id))) != NULL; id += 1U)
206
207/**
208 * idr_for_each_entry_ul() - Iterate over an IDR's elements of a given type.
209 * @idr: IDR handle.
210 * @entry: The type * to use as cursor.
211 * @tmp: A temporary placeholder for ID.
212 * @id: Entry ID.
213 *
214 * @entry and @id do not need to be initialized before the loop, and
215 * after normal termination @entry is left with the value NULL. This
216 * is convenient for a "not found" value.
217 */
218#define idr_for_each_entry_ul(idr, entry, tmp, id) \
219 for (tmp = 0, id = 0; \
220 ((entry) = tmp <= id ? idr_get_next_ul(idr, &(id)) : NULL) != NULL; \
221 tmp = id, ++id)
222
223/**
224 * idr_for_each_entry_continue() - Continue iteration over an IDR's elements of a given type
225 * @idr: IDR handle.
226 * @entry: The type * to use as a cursor.
227 * @id: Entry ID.
228 *
229 * Continue to iterate over entries, continuing after the current position.
230 */
231#define idr_for_each_entry_continue(idr, entry, id) \
232 for ((entry) = idr_get_next((idr), &(id)); \
233 entry; \
234 ++id, (entry) = idr_get_next((idr), &(id)))
235
236/**
237 * idr_for_each_entry_continue_ul() - Continue iteration over an IDR's elements of a given type
238 * @idr: IDR handle.
239 * @entry: The type * to use as a cursor.
240 * @tmp: A temporary placeholder for ID.
241 * @id: Entry ID.
242 *
243 * Continue to iterate over entries, continuing after the current position.
244 * After normal termination @entry is left with the value NULL. This
245 * is convenient for a "not found" value.
246 */
247#define idr_for_each_entry_continue_ul(idr, entry, tmp, id) \
248 for (tmp = id; \
249 ((entry) = tmp <= id ? idr_get_next_ul(idr, &(id)) : NULL) != NULL; \
250 tmp = id, ++id)
251
252/*
253 * IDA - ID Allocator, use when translation from id to pointer isn't necessary.
254 */
255#define IDA_CHUNK_SIZE 128 /* 128 bytes per chunk */
256#define IDA_BITMAP_LONGS (IDA_CHUNK_SIZE / sizeof(long))
257#define IDA_BITMAP_BITS (IDA_BITMAP_LONGS * sizeof(long) * 8)
258
259struct ida_bitmap {
260 unsigned long bitmap[IDA_BITMAP_LONGS];
261};
262
263struct ida {
264 struct xarray xa;
265};
266
267#define IDA_INIT_FLAGS (XA_FLAGS_LOCK_IRQ | XA_FLAGS_ALLOC)
268
269#define IDA_INIT(name) { \
270 .xa = XARRAY_INIT(name, IDA_INIT_FLAGS) \
271}
272#define DEFINE_IDA(name) struct ida name = IDA_INIT(name)
273
274int ida_alloc_range(struct ida *, unsigned int min, unsigned int max, gfp_t);
275void ida_free(struct ida *, unsigned int id);
276void ida_destroy(struct ida *ida);
277int ida_find_first_range(struct ida *ida, unsigned int min, unsigned int max);
278
279/**
280 * ida_alloc() - Allocate an unused ID.
281 * @ida: IDA handle.
282 * @gfp: Memory allocation flags.
283 *
284 * Allocate an ID between 0 and %INT_MAX, inclusive.
285 *
286 * Context: Any context. It is safe to call this function without
287 * locking in your code.
288 * Return: The allocated ID, or %-ENOMEM if memory could not be allocated,
289 * or %-ENOSPC if there are no free IDs.
290 */
291static inline int ida_alloc(struct ida *ida, gfp_t gfp)
292{
293 return ida_alloc_range(ida, min: 0, max: ~0, gfp);
294}
295
296/**
297 * ida_alloc_min() - Allocate an unused ID.
298 * @ida: IDA handle.
299 * @min: Lowest ID to allocate.
300 * @gfp: Memory allocation flags.
301 *
302 * Allocate an ID between @min and %INT_MAX, inclusive.
303 *
304 * Context: Any context. It is safe to call this function without
305 * locking in your code.
306 * Return: The allocated ID, or %-ENOMEM if memory could not be allocated,
307 * or %-ENOSPC if there are no free IDs.
308 */
309static inline int ida_alloc_min(struct ida *ida, unsigned int min, gfp_t gfp)
310{
311 return ida_alloc_range(ida, min, max: ~0, gfp);
312}
313
314/**
315 * ida_alloc_max() - Allocate an unused ID.
316 * @ida: IDA handle.
317 * @max: Highest ID to allocate.
318 * @gfp: Memory allocation flags.
319 *
320 * Allocate an ID between 0 and @max, inclusive.
321 *
322 * Context: Any context. It is safe to call this function without
323 * locking in your code.
324 * Return: The allocated ID, or %-ENOMEM if memory could not be allocated,
325 * or %-ENOSPC if there are no free IDs.
326 */
327static inline int ida_alloc_max(struct ida *ida, unsigned int max, gfp_t gfp)
328{
329 return ida_alloc_range(ida, min: 0, max, gfp);
330}
331
332static inline void ida_init(struct ida *ida)
333{
334 xa_init_flags(xa: &ida->xa, IDA_INIT_FLAGS);
335}
336
337static inline bool ida_is_empty(const struct ida *ida)
338{
339 return xa_empty(xa: &ida->xa);
340}
341
342static inline bool ida_exists(struct ida *ida, unsigned int id)
343{
344 return ida_find_first_range(ida, min: id, max: id) == id;
345}
346
/**
 * ida_find_first() - Find the lowest allocated ID.
 * @ida: IDA handle.
 *
 * Return: The lowest allocated ID, or a negative errno if none are
 * allocated.
 */
static inline int ida_find_first(struct ida *ida)
{
	return ida_find_first_range(ida, 0, ~0);
}
351#endif /* __IDR_H__ */
352