/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_GENERIC_MMIOWB_H
#define __ASM_GENERIC_MMIOWB_H

/*
 * Generic implementation of mmiowb() tracking for spinlocks.
 *
 * If your architecture doesn't ensure that writes to an I/O peripheral
 * within two spinlocked sections on two different CPUs are seen by the
 * peripheral in the order corresponding to the lock handover, then you
 * need to follow these FIVE easy steps:
 *
 *	1. Implement mmiowb() (and arch_mmiowb_state() if you're fancy)
 *	   in asm/mmiowb.h, then #include this file
 *	2. Ensure your I/O write accessors call mmiowb_set_pending()
 *	3. Select ARCH_HAS_MMIOWB
 *	4. Untangle the resulting mess of header files
 *	5. Complain to your architects
 *
 * An illustrative sketch of steps 1 and 2 follows this comment.
 */
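/*
 * Illustrative sketch of steps 1 and 2 for a made-up architecture "foo";
 * hypothetical_io_barrier() and foo_writel() are invented names standing
 * in for whatever barrier instruction and I/O write accessor a real
 * platform provides:
 *
 *	// arch/foo/include/asm/mmiowb.h (step 1)
 *	#define mmiowb()	hypothetical_io_barrier()
 *	#include <asm-generic/mmiowb.h>
 *
 *	// I/O write accessor (step 2): record that an MMIO write was
 *	// issued so that the next spin_unlock() can order it.
 *	static inline void foo_writel(u32 val, volatile void __iomem *addr)
 *	{
 *		__raw_writel(val, addr);
 *		mmiowb_set_pending();
 *	}
 */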
#ifdef CONFIG_MMIOWB

#include <linux/compiler.h>
#include <asm-generic/mmiowb_types.h>

#ifndef arch_mmiowb_state
#include <asm/percpu.h>
#include <asm/smp.h>

DECLARE_PER_CPU(struct mmiowb_state, __mmiowb_state);
#define __mmiowb_state()	raw_cpu_ptr(&__mmiowb_state)
#else
#define __mmiowb_state()	arch_mmiowb_state()
#endif	/* arch_mmiowb_state */

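/*
 * Called by the architecture's I/O write accessors: record that an MMIO
 * write has been issued, but only while at least one spinlock is held on
 * this CPU (nesting_count != 0), since otherwise there is no lock handover
 * to order the write against.
 */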
static inline void mmiowb_set_pending(void)
{
	struct mmiowb_state *ms = __mmiowb_state();

	if (likely(ms->nesting_count))
		ms->mmiowb_pending = ms->nesting_count;
}

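/* Called when a spinlock is acquired: track the lock nesting depth per CPU. */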
static inline void mmiowb_spin_lock(void)
{
	struct mmiowb_state *ms = __mmiowb_state();
	ms->nesting_count++;
}

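/*
 * Called when a spinlock is released: if an MMIO write was issued while
 * the lock was held, execute mmiowb() so the write reaches the device
 * before the lock is seen as released by the next owner.
 */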
static inline void mmiowb_spin_unlock(void)
{
	struct mmiowb_state *ms = __mmiowb_state();

	if (unlikely(ms->mmiowb_pending)) {
		ms->mmiowb_pending = 0;
		mmiowb();
	}

	ms->nesting_count--;
}
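/*
 * Rough usage sketch (illustration only, not part of this header): the
 * generic spinlock wrappers are the expected callers of these hooks,
 * along the lines of:
 *
 *	static inline void do_raw_spin_lock(raw_spinlock_t *lock)
 *	{
 *		arch_spin_lock(&lock->raw_lock);
 *		mmiowb_spin_lock();
 *	}
 *
 *	static inline void do_raw_spin_unlock(raw_spinlock_t *lock)
 *	{
 *		mmiowb_spin_unlock();
 *		arch_spin_unlock(&lock->raw_lock);
 *	}
 */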
#else
#define mmiowb_set_pending()		do { } while (0)
#define mmiowb_spin_lock()		do { } while (0)
#define mmiowb_spin_unlock()		do { } while (0)
#endif	/* CONFIG_MMIOWB */
#endif	/* __ASM_GENERIC_MMIOWB_H */