/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_KASAN_H
#define _LINUX_KASAN_H

#include <linux/bug.h>
#include <linux/kasan-enabled.h>
#include <linux/kasan-tags.h>
#include <linux/kernel.h>
#include <linux/static_key.h>
#include <linux/types.h>

struct kmem_cache;
struct page;
struct slab;
struct vm_struct;
struct task_struct;

#ifdef CONFIG_KASAN

#include <linux/linkage.h>
#include <asm/kasan.h>

#endif

typedef unsigned int __bitwise kasan_vmalloc_flags_t;

#define KASAN_VMALLOC_NONE		((__force kasan_vmalloc_flags_t)0x00u)
#define KASAN_VMALLOC_INIT		((__force kasan_vmalloc_flags_t)0x01u)
#define KASAN_VMALLOC_VM_ALLOC		((__force kasan_vmalloc_flags_t)0x02u)
#define KASAN_VMALLOC_PROT_NORMAL	((__force kasan_vmalloc_flags_t)0x04u)
#define KASAN_VMALLOC_PAGE_RANGE 0x1 /* Apply existing page range */
#define KASAN_VMALLOC_TLB_FLUSH  0x2 /* TLB flush */

#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)

#include <linux/pgtable.h>

/* Software KASAN implementations use shadow memory. */

#ifdef CONFIG_KASAN_SW_TAGS
/* This matches KASAN_TAG_INVALID. */
#define KASAN_SHADOW_INIT 0xFE
#else
#define KASAN_SHADOW_INIT 0
#endif

#ifndef PTE_HWTABLE_PTRS
#define PTE_HWTABLE_PTRS 0
#endif

extern unsigned char kasan_early_shadow_page[PAGE_SIZE];
extern pte_t kasan_early_shadow_pte[MAX_PTRS_PER_PTE + PTE_HWTABLE_PTRS];
extern pmd_t kasan_early_shadow_pmd[MAX_PTRS_PER_PMD];
extern pud_t kasan_early_shadow_pud[MAX_PTRS_PER_PUD];
extern p4d_t kasan_early_shadow_p4d[MAX_PTRS_PER_P4D];

int kasan_populate_early_shadow(const void *shadow_start,
				const void *shadow_end);

#ifndef kasan_mem_to_shadow
static inline void *kasan_mem_to_shadow(const void *addr)
{
	return (void *)((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
		+ KASAN_SHADOW_OFFSET;
}
#endif
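
/*
 * Worked example (illustrative only): in the generic mode, where
 * KASAN_SHADOW_SCALE_SHIFT is 3, one shadow byte covers an 8-byte
 * granule, so the mapping above compresses the address space 8:1.
 * On x86-64, with KASAN_SHADOW_OFFSET == 0xdffffc0000000000, the
 * shadow byte for the direct-map address 0xffff888000000000 is at:
 *
 *	(0xffff888000000000 >> 3) + 0xdffffc0000000000
 *		== 0xffffed1000000000
 */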

int kasan_add_zero_shadow(void *start, unsigned long size);
void kasan_remove_zero_shadow(void *start, unsigned long size);

/* Enable reporting bugs after kasan_disable_current() */
extern void kasan_enable_current(void);

/* Disable reporting bugs for the current task */
extern void kasan_disable_current(void);

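/*
 * Usage sketch (illustrative, not part of this header): suppress
 * reports around an access that is deliberately out of bounds, e.g.
 * in a debugging helper. The access between the two calls would
 * otherwise be reported.
 *
 *	kasan_disable_current();
 *	data = READ_ONCE(*ptr);
 *	kasan_enable_current();
 */
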
#else /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

static inline int kasan_add_zero_shadow(void *start, unsigned long size)
{
	return 0;
}
static inline void kasan_remove_zero_shadow(void *start,
					    unsigned long size)
{}

static inline void kasan_enable_current(void) {}
static inline void kasan_disable_current(void) {}

#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

static inline bool kasan_has_integrated_init(void)
{
	return kasan_hw_tags_enabled();
}

#ifdef CONFIG_KASAN
void __kasan_unpoison_range(const void *addr, size_t size);
static __always_inline void kasan_unpoison_range(const void *addr, size_t size)
{
	if (kasan_enabled())
		__kasan_unpoison_range(addr, size);
}

void __kasan_poison_pages(struct page *page, unsigned int order, bool init);
static __always_inline void kasan_poison_pages(struct page *page,
					       unsigned int order, bool init)
{
	if (kasan_enabled())
		__kasan_poison_pages(page, order, init);
}

bool __kasan_unpoison_pages(struct page *page, unsigned int order, bool init);
static __always_inline bool kasan_unpoison_pages(struct page *page,
						 unsigned int order, bool init)
{
	if (kasan_enabled())
		return __kasan_unpoison_pages(page, order, init);
	return false;
}

void __kasan_poison_slab(struct slab *slab);
static __always_inline void kasan_poison_slab(struct slab *slab)
{
	if (kasan_enabled())
		__kasan_poison_slab(slab);
}

void __kasan_unpoison_new_object(struct kmem_cache *cache, void *object);
/**
 * kasan_unpoison_new_object - Temporarily unpoison a new slab object.
 * @cache: Cache the object belongs to.
 * @object: Pointer to the object.
 *
 * This function is intended for the slab allocator's internal use. It
 * temporarily unpoisons an object from a newly allocated slab without doing
 * anything else. The object must later be repoisoned by
 * kasan_poison_new_object().
 */
static __always_inline void kasan_unpoison_new_object(struct kmem_cache *cache,
						      void *object)
{
	if (kasan_enabled())
		__kasan_unpoison_new_object(cache, object);
}

void __kasan_poison_new_object(struct kmem_cache *cache, void *object);
/**
 * kasan_poison_new_object - Repoison a new slab object.
 * @cache: Cache the object belongs to.
 * @object: Pointer to the object.
 *
 * This function is intended for the slab allocator's internal use. It
 * repoisons an object that was previously unpoisoned by
 * kasan_unpoison_new_object() without doing anything else.
 */
static __always_inline void kasan_poison_new_object(struct kmem_cache *cache,
						    void *object)
{
	if (kasan_enabled())
		__kasan_poison_new_object(cache, object);
}
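
/*
 * Usage sketch (illustrative; the real callers live inside the slab
 * allocator): briefly open up a fresh object, e.g. to write
 * allocator-internal metadata stored inside it, then re-poison it.
 * write_object_metadata() is a hypothetical stand-in for that write.
 *
 *	kasan_unpoison_new_object(cache, object);
 *	write_object_metadata(cache, object);
 *	kasan_poison_new_object(cache, object);
 */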

void * __must_check __kasan_init_slab_obj(struct kmem_cache *cache,
					  const void *object);
static __always_inline void * __must_check kasan_init_slab_obj(
				struct kmem_cache *cache, const void *object)
{
	if (kasan_enabled())
		return __kasan_init_slab_obj(cache, object);
	return (void *)object;
}

bool __kasan_slab_pre_free(struct kmem_cache *s, void *object,
			   unsigned long ip);
/**
 * kasan_slab_pre_free - Check whether freeing a slab object is safe.
 * @s: Slab cache the object belongs to.
 * @object: Object to be freed.
 *
 * This function checks whether freeing the given object is safe. It may
 * check for double-free and invalid-free bugs and report them.
 *
 * This function is intended only for use by the slab allocator.
 *
 * Return: true if freeing the object is unsafe; false otherwise.
 */
static __always_inline bool kasan_slab_pre_free(struct kmem_cache *s,
						void *object)
{
	if (kasan_enabled())
		return __kasan_slab_pre_free(s, object, _RET_IP_);
	return false;
}

bool __kasan_slab_free(struct kmem_cache *s, void *object, bool init,
		       bool still_accessible, bool no_quarantine);
/**
 * kasan_slab_free - Poison, initialize, and quarantine a slab object.
 * @s: Slab cache the object belongs to.
 * @object: Object to be freed.
 * @init: Whether to initialize the object.
 * @still_accessible: Whether the object contents are still accessible.
 * @no_quarantine: Whether to bypass the quarantine (for the Generic mode).
 *
 * This function informs that a slab object has been freed and is not
 * supposed to be accessed anymore, except when @still_accessible is set
 * (indicating that the object is in a SLAB_TYPESAFE_BY_RCU cache and an RCU
 * grace period might not have passed yet).
 *
 * For KASAN modes that have integrated memory initialization
 * (kasan_has_integrated_init() == true), this function also initializes
 * the object's memory. For other modes, the @init argument is ignored.
 *
 * This function might also take ownership of the object to quarantine it.
 * When this happens, KASAN will defer freeing the object to a later
 * stage and handle it internally until then. The return value indicates
 * whether KASAN took ownership of the object.
 *
 * This function is intended only for use by the slab allocator.
 *
 * Return: true if KASAN took ownership of the object; false otherwise.
 */
static __always_inline bool kasan_slab_free(struct kmem_cache *s,
					    void *object, bool init,
					    bool still_accessible,
					    bool no_quarantine)
{
	if (kasan_enabled())
		return __kasan_slab_free(s, object, init, still_accessible,
					 no_quarantine);
	return false;
}
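
/*
 * Sketch of the intended call sequence on the free path (illustrative;
 * the authoritative callers live in the slab allocator). A true return
 * from kasan_slab_pre_free() means a double-free or invalid-free was
 * detected; a true return from kasan_slab_free() means KASAN took
 * ownership of the object (e.g. for quarantine) and the allocator must
 * not release it now. release_to_freelist() is a hypothetical stand-in.
 *
 *	if (kasan_slab_pre_free(s, object))
 *		return;
 *	if (kasan_slab_free(s, object, init, false, false))
 *		return;
 *	release_to_freelist(s, object);
 */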

void __kasan_kfree_large(void *ptr, unsigned long ip);
static __always_inline void kasan_kfree_large(void *ptr)
{
	if (kasan_enabled())
		__kasan_kfree_large(ptr, _RET_IP_);
}

void * __must_check __kasan_slab_alloc(struct kmem_cache *s,
				       void *object, gfp_t flags, bool init);
static __always_inline void * __must_check kasan_slab_alloc(
		struct kmem_cache *s, void *object, gfp_t flags, bool init)
{
	if (kasan_enabled())
		return __kasan_slab_alloc(s, object, flags, init);
	return object;
}

void * __must_check __kasan_kmalloc(struct kmem_cache *s, const void *object,
				    size_t size, gfp_t flags);
static __always_inline void * __must_check kasan_kmalloc(struct kmem_cache *s,
		const void *object, size_t size, gfp_t flags)
{
	if (kasan_enabled())
		return __kasan_kmalloc(s, object, size, flags);
	return (void *)object;
}

void * __must_check __kasan_kmalloc_large(const void *ptr,
					  size_t size, gfp_t flags);
static __always_inline void * __must_check kasan_kmalloc_large(const void *ptr,
		size_t size, gfp_t flags)
{
	if (kasan_enabled())
		return __kasan_kmalloc_large(ptr, size, flags);
	return (void *)ptr;
}

void * __must_check __kasan_krealloc(const void *object,
				     size_t new_size, gfp_t flags);
static __always_inline void * __must_check kasan_krealloc(const void *object,
		size_t new_size, gfp_t flags)
{
	if (kasan_enabled())
		return __kasan_krealloc(object, new_size, flags);
	return (void *)object;
}

bool __kasan_mempool_poison_pages(struct page *page, unsigned int order,
				  unsigned long ip);
/**
 * kasan_mempool_poison_pages - Check and poison a mempool page allocation.
 * @page: Pointer to the page allocation.
 * @order: Order of the allocation.
 *
 * This function is intended for kernel subsystems that cache page allocations
 * to reuse them instead of freeing them back to page_alloc (e.g. mempool).
 *
 * This function is similar to kasan_mempool_poison_object() but operates on
 * page allocations.
 *
 * Before the poisoned allocation can be reused, it must be unpoisoned via
 * kasan_mempool_unpoison_pages().
 *
 * Return: true if the allocation can be safely reused; false otherwise.
 */
static __always_inline bool kasan_mempool_poison_pages(struct page *page,
						       unsigned int order)
{
	if (kasan_enabled())
		return __kasan_mempool_poison_pages(page, order, _RET_IP_);
	return true;
}

void __kasan_mempool_unpoison_pages(struct page *page, unsigned int order,
				    unsigned long ip);
/**
 * kasan_mempool_unpoison_pages - Unpoison a mempool page allocation.
 * @page: Pointer to the page allocation.
 * @order: Order of the allocation.
 *
 * This function is intended for kernel subsystems that cache page allocations
 * to reuse them instead of freeing them back to page_alloc (e.g. mempool).
 *
 * This function unpoisons a page allocation that was previously poisoned by
 * kasan_mempool_poison_pages() without zeroing the allocation's memory. For
 * the tag-based modes, this function assigns a new tag to the allocation.
 */
static __always_inline void kasan_mempool_unpoison_pages(struct page *page,
							 unsigned int order)
{
	if (kasan_enabled())
		__kasan_mempool_unpoison_pages(page, order, _RET_IP_);
}
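
/*
 * Usage sketch for a subsystem that keeps a private stash of pages
 * (illustrative; struct my_pool, my_pool_put(), and my_pool_get() are
 * hypothetical). A failed poison check means the page was already
 * freed or is corrupted and must not be recycled:
 *
 *	bool my_pool_put(struct my_pool *pool, struct page *page,
 *			 unsigned int order)
 *	{
 *		if (!kasan_mempool_poison_pages(page, order))
 *			return false;
 *		list_add(&page->lru, &pool->pages);
 *		return true;
 *	}
 *
 *	struct page *my_pool_get(struct my_pool *pool, unsigned int order)
 *	{
 *		struct page *page = list_first_entry(&pool->pages,
 *						     struct page, lru);
 *
 *		list_del(&page->lru);
 *		kasan_mempool_unpoison_pages(page, order);
 *		return page;
 *	}
 */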

bool __kasan_mempool_poison_object(void *ptr, unsigned long ip);
/**
 * kasan_mempool_poison_object - Check and poison a mempool slab allocation.
 * @ptr: Pointer to the slab allocation.
 *
 * This function is intended for kernel subsystems that cache slab allocations
 * to reuse them instead of freeing them back to the slab allocator (e.g.
 * mempool).
 *
 * This function poisons a slab allocation and saves a free stack trace for it
 * without initializing the allocation's memory and without putting it into the
 * quarantine (for the Generic mode).
 *
 * This function also performs checks to detect double-free and invalid-free
 * bugs and reports them. The caller can use the return value of this function
 * to find out if the allocation is buggy.
 *
 * Before the poisoned allocation can be reused, it must be unpoisoned via
 * kasan_mempool_unpoison_object().
 *
 * This function operates on all slab allocations including large kmalloc
 * allocations (the ones returned by kmalloc_large() or by kmalloc() with the
 * size > KMALLOC_MAX_CACHE_SIZE).
 *
 * Return: true if the allocation can be safely reused; false otherwise.
 */
static __always_inline bool kasan_mempool_poison_object(void *ptr)
{
	if (kasan_enabled())
		return __kasan_mempool_poison_object(ptr, _RET_IP_);
	return true;
}

void __kasan_mempool_unpoison_object(void *ptr, size_t size, unsigned long ip);
/**
 * kasan_mempool_unpoison_object - Unpoison a mempool slab allocation.
 * @ptr: Pointer to the slab allocation.
 * @size: Size to be unpoisoned.
 *
 * This function is intended for kernel subsystems that cache slab allocations
 * to reuse them instead of freeing them back to the slab allocator (e.g.
 * mempool).
 *
 * This function unpoisons a slab allocation that was previously poisoned via
 * kasan_mempool_poison_object() and saves an alloc stack trace for it without
 * initializing the allocation's memory. For the tag-based modes, this function
 * does not assign a new tag to the allocation and instead restores the
 * original tags based on the pointer value.
 *
 * This function operates on all slab allocations including large kmalloc
 * allocations (the ones returned by kmalloc_large() or by kmalloc() with the
 * size > KMALLOC_MAX_CACHE_SIZE).
 */
static __always_inline void kasan_mempool_unpoison_object(void *ptr,
							  size_t size)
{
	if (kasan_enabled())
		__kasan_mempool_unpoison_object(ptr, size, _RET_IP_);
}
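
/*
 * Usage sketch mirroring what mempool does with cached elements
 * (illustrative; struct my_mempool, add_element(), and
 * remove_element() are hypothetical):
 *
 *	static void add_element(struct my_mempool *pool, void *element)
 *	{
 *		if (kasan_mempool_poison_object(element))
 *			pool->elements[pool->curr_nr++] = element;
 *	}
 *
 *	static void *remove_element(struct my_mempool *pool, size_t size)
 *	{
 *		void *element = pool->elements[--pool->curr_nr];
 *
 *		kasan_mempool_unpoison_object(element, size);
 *		return element;
 *	}
 */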

/*
 * Unlike kasan_check_read/write(), kasan_check_byte() is performed even for
 * the hardware tag-based mode that doesn't rely on compiler instrumentation.
 */
bool __kasan_check_byte(const void *addr, unsigned long ip);
static __always_inline bool kasan_check_byte(const void *addr)
{
	if (kasan_enabled())
		return __kasan_check_byte(addr, _RET_IP_);
	return true;
}
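
/*
 * Usage sketch (illustrative, modeled on how __ksize() handles a freed
 * pointer): probe a single byte before trusting the object; a false
 * return means the byte is poisoned and a report has been printed.
 *
 *	if (!kasan_check_byte(object))
 *		return 0;
 */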

#else /* CONFIG_KASAN */

static inline void kasan_unpoison_range(const void *address, size_t size) {}
static inline void kasan_poison_pages(struct page *page, unsigned int order,
				      bool init) {}
static inline bool kasan_unpoison_pages(struct page *page, unsigned int order,
					bool init)
{
	return false;
}
static inline void kasan_poison_slab(struct slab *slab) {}
static inline void kasan_unpoison_new_object(struct kmem_cache *cache,
					     void *object) {}
static inline void kasan_poison_new_object(struct kmem_cache *cache,
					   void *object) {}
static inline void *kasan_init_slab_obj(struct kmem_cache *cache,
					const void *object)
{
	return (void *)object;
}

static inline bool kasan_slab_pre_free(struct kmem_cache *s, void *object)
{
	return false;
}

static inline bool kasan_slab_free(struct kmem_cache *s, void *object,
				   bool init, bool still_accessible,
				   bool no_quarantine)
{
	return false;
}
static inline void kasan_kfree_large(void *ptr) {}
static inline void *kasan_slab_alloc(struct kmem_cache *s, void *object,
				     gfp_t flags, bool init)
{
	return object;
}
static inline void *kasan_kmalloc(struct kmem_cache *s, const void *object,
				  size_t size, gfp_t flags)
{
	return (void *)object;
}
static inline void *kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags)
{
	return (void *)ptr;
}
static inline void *kasan_krealloc(const void *object, size_t new_size,
				   gfp_t flags)
{
	return (void *)object;
}
static inline bool kasan_mempool_poison_pages(struct page *page, unsigned int order)
{
	return true;
}
static inline void kasan_mempool_unpoison_pages(struct page *page, unsigned int order) {}
static inline bool kasan_mempool_poison_object(void *ptr)
{
	return true;
}
static inline void kasan_mempool_unpoison_object(void *ptr, size_t size) {}

static inline bool kasan_check_byte(const void *address)
{
	return true;
}

#endif /* CONFIG_KASAN */

#if defined(CONFIG_KASAN) && defined(CONFIG_KASAN_STACK)
void kasan_unpoison_task_stack(struct task_struct *task);
asmlinkage void kasan_unpoison_task_stack_below(const void *watermark);
#else
static inline void kasan_unpoison_task_stack(struct task_struct *task) {}
static inline void kasan_unpoison_task_stack_below(const void *watermark) {}
#endif

#ifdef CONFIG_KASAN_GENERIC

struct kasan_cache {
	int alloc_meta_offset;
	int free_meta_offset;
};

size_t kasan_metadata_size(struct kmem_cache *cache, bool in_object);
void kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
			slab_flags_t *flags);

void kasan_cache_shrink(struct kmem_cache *cache);
void kasan_cache_shutdown(struct kmem_cache *cache);
void kasan_record_aux_stack(void *ptr);

#else /* CONFIG_KASAN_GENERIC */

/* Tag-based KASAN modes do not use per-object metadata. */
static inline size_t kasan_metadata_size(struct kmem_cache *cache,
					 bool in_object)
{
	return 0;
}
/* And no cache-related metadata initialization is required. */
static inline void kasan_cache_create(struct kmem_cache *cache,
				      unsigned int *size,
				      slab_flags_t *flags) {}

static inline void kasan_cache_shrink(struct kmem_cache *cache) {}
static inline void kasan_cache_shutdown(struct kmem_cache *cache) {}
static inline void kasan_record_aux_stack(void *ptr) {}

#endif /* CONFIG_KASAN_GENERIC */

#if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS)

static inline void *kasan_reset_tag(const void *addr)
{
	return (void *)arch_kasan_reset_tag(addr);
}

/**
 * kasan_report - print a report about a bad memory access detected by KASAN
 * @addr: address of the bad access
 * @size: size of the bad access
 * @is_write: whether the bad access is a write or a read
 * @ip: instruction pointer for the accessibility check or the bad access itself
 */
bool kasan_report(const void *addr, size_t size,
		  bool is_write, unsigned long ip);
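
/*
 * Call sketch (illustrative; the real callers are the KASAN runtime
 * and arch fault handlers): report a bad 8-byte write at the faulting
 * address. The return value says whether a report was actually
 * printed (reporting can be suppressed, e.g. by
 * kasan_disable_current()). bad_addr and the regs field are
 * placeholders; the instruction-pointer field name varies per arch.
 *
 *	reported = kasan_report(bad_addr, 8, true, regs->ip);
 */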

#else /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */

static inline void *kasan_reset_tag(const void *addr)
{
	return (void *)addr;
}

#endif /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */

#ifdef CONFIG_KASAN_HW_TAGS

void kasan_report_async(void);

#endif /* CONFIG_KASAN_HW_TAGS */

#ifdef CONFIG_KASAN_GENERIC
void __init kasan_init_generic(void);
#else
static inline void kasan_init_generic(void) { }
#endif

#ifdef CONFIG_KASAN_SW_TAGS
void __init kasan_init_sw_tags(void);
#else
static inline void kasan_init_sw_tags(void) { }
#endif

#ifdef CONFIG_KASAN_HW_TAGS
void kasan_init_hw_tags_cpu(void);
void __init kasan_init_hw_tags(void);
#else
static inline void kasan_init_hw_tags_cpu(void) { }
static inline void kasan_init_hw_tags(void) { }
#endif

#ifdef CONFIG_KASAN_VMALLOC

#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)

void kasan_populate_early_vm_area_shadow(void *start, unsigned long size);
int kasan_populate_vmalloc(unsigned long addr, unsigned long size, gfp_t gfp_mask);
void kasan_release_vmalloc(unsigned long start, unsigned long end,
			   unsigned long free_region_start,
			   unsigned long free_region_end,
			   unsigned long flags);

#else /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

static inline void kasan_populate_early_vm_area_shadow(void *start,
						       unsigned long size)
{ }
static inline int kasan_populate_vmalloc(unsigned long start,
					 unsigned long size, gfp_t gfp_mask)
{
	return 0;
}
static inline void kasan_release_vmalloc(unsigned long start,
					 unsigned long end,
					 unsigned long free_region_start,
					 unsigned long free_region_end,
					 unsigned long flags) { }

#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

void *__kasan_unpoison_vmalloc(const void *start, unsigned long size,
			       kasan_vmalloc_flags_t flags);
static __always_inline void *kasan_unpoison_vmalloc(const void *start,
						    unsigned long size,
						    kasan_vmalloc_flags_t flags)
{
	if (kasan_enabled())
		return __kasan_unpoison_vmalloc(start, size, flags);
	return (void *)start;
}

void __kasan_poison_vmalloc(const void *start, unsigned long size);
static __always_inline void kasan_poison_vmalloc(const void *start,
						 unsigned long size)
{
	if (kasan_enabled())
		__kasan_poison_vmalloc(start, size);
}

#else /* CONFIG_KASAN_VMALLOC */

static inline void kasan_populate_early_vm_area_shadow(void *start,
						       unsigned long size) { }
static inline int kasan_populate_vmalloc(unsigned long start,
					 unsigned long size, gfp_t gfp_mask)
{
	return 0;
}
static inline void kasan_release_vmalloc(unsigned long start,
					 unsigned long end,
					 unsigned long free_region_start,
					 unsigned long free_region_end,
					 unsigned long flags) { }

static inline void *kasan_unpoison_vmalloc(const void *start,
					    unsigned long size,
					    kasan_vmalloc_flags_t flags)
{
	return (void *)start;
}
static inline void kasan_poison_vmalloc(const void *start, unsigned long size)
{ }

#endif /* CONFIG_KASAN_VMALLOC */

#if (defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)) && \
	!defined(CONFIG_KASAN_VMALLOC)

/*
 * These functions allocate and free shadow memory for kernel modules.
 * They are only required when KASAN_VMALLOC is not supported, as otherwise
 * shadow memory is allocated by the generic vmalloc handlers.
 */
int kasan_alloc_module_shadow(void *addr, size_t size, gfp_t gfp_mask);
void kasan_free_module_shadow(const struct vm_struct *vm);
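
/*
 * Call sketch (illustrative; the real caller is the arch module
 * loader): back a module's mapping with shadow before it is used and
 * release the shadow when the mapping goes away. mod_addr, mod_size,
 * and vm are placeholders. A nonzero return means the shadow could
 * not be allocated:
 *
 *	if (kasan_alloc_module_shadow(mod_addr, mod_size, GFP_KERNEL))
 *		goto fail;
 *	...
 *	kasan_free_module_shadow(vm);
 */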

#else /* (CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS) && !CONFIG_KASAN_VMALLOC */

static inline int kasan_alloc_module_shadow(void *addr, size_t size, gfp_t gfp_mask) { return 0; }
static inline void kasan_free_module_shadow(const struct vm_struct *vm) {}

#endif /* (CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS) && !CONFIG_KASAN_VMALLOC */

#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
void kasan_non_canonical_hook(unsigned long addr);
#else /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */
static inline void kasan_non_canonical_hook(unsigned long addr) { }
#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

#endif /* _LINUX_KASAN_H */