/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MMU_CONTEXT_H
#define _LINUX_MMU_CONTEXT_H

#include <asm/mmu_context.h>
#include <asm/mmu.h>

/* Architectures that care about IRQ state in switch_mm can override this. */
#ifndef switch_mm_irqs_off
# define switch_mm_irqs_off switch_mm
#endif

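/*
 * Architectures that need to do work when a CPU stops using its current mm
 * can override leave_mm(); the generic version is a no-op.
 */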
#ifndef leave_mm
static inline void leave_mm(void) { }
#endif

/*
 * CPUs that are capable of running user task @p. Must contain at least one
 * active CPU. It is assumed that the kernel can run on all CPUs, so calling
 * this for a kernel thread is pointless.
 *
 * By default, we assume a sane, homogeneous system.
 */
#ifndef task_cpu_possible_mask
# define task_cpu_possible_mask(p)	cpu_possible_mask
# define task_cpu_possible(cpu, p)	true
# define task_cpu_fallback_mask(p)	housekeeping_cpumask(HK_TYPE_TICK)
#else
# define task_cpu_possible(cpu, p)	cpumask_test_cpu((cpu), task_cpu_possible_mask(p))
#endif

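/*
 * Mask applied to strip tag bits from user addresses. The default of all
 * ones keeps every bit; architectures with tagged user addresses override it.
 */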
#ifndef mm_untag_mask
static inline unsigned long mm_untag_mask(struct mm_struct *mm)
{
	return -1UL;
}
#endif

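/*
 * Whether @mm's page tables are compatible with DMA. The generic version
 * assumes they are; architectures can override this.
 */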
#ifndef arch_pgtable_dma_compat
static inline bool arch_pgtable_dma_compat(struct mm_struct *mm)
{
	return true;
}
#endif

#endif /* _LINUX_MMU_CONTEXT_H */