/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_MWAIT_H
#define _ASM_X86_MWAIT_H

#include <linux/sched.h>
#include <linux/sched/idle.h>

#include <asm/cpufeature.h>
#include <asm/nospec-branch.h>

#define MWAIT_SUBSTATE_MASK		0xf
#define MWAIT_CSTATE_MASK		0xf
#define MWAIT_SUBSTATE_SIZE		4
#define MWAIT_HINT2CSTATE(hint)		(((hint) >> MWAIT_SUBSTATE_SIZE) & MWAIT_CSTATE_MASK)
#define MWAIT_HINT2SUBSTATE(hint)	((hint) & MWAIT_CSTATE_MASK)
#define MWAIT_C1_SUBSTATE_MASK		0xf0

#define CPUID5_ECX_EXTENSIONS_SUPPORTED	0x1
#define CPUID5_ECX_INTERRUPT_BREAK	0x2

#define MWAIT_ECX_INTERRUPT_BREAK	0x1
#define MWAITX_ECX_TIMER_ENABLE		BIT(1)
#define MWAITX_MAX_WAIT_CYCLES		UINT_MAX
#define MWAITX_DISABLE_CSTATES		0xf0
#define TPAUSE_C01_STATE		1
#define TPAUSE_C02_STATE		0

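/*
 * Example (sketch, with illustrative locals 'cstate' and 'substate'): an
 * MWAIT hint passed in EAX carries the target C-state field in bits 7:4
 * and the sub-state in bits 3:0; the MWAIT_HINT2* macros undo that
 * encoding. Whether MWAIT honors ECX[0] (break on interrupt even when
 * interrupts are masked) is advertised in CPUID leaf 5:
 *
 *	u32 hint = (cstate << MWAIT_SUBSTATE_SIZE) |
 *		   (substate & MWAIT_SUBSTATE_MASK);
 *
 *	if (cpuid_ecx(5) & CPUID5_ECX_INTERRUPT_BREAK)
 *		mwait_idle_with_hints(hint, MWAIT_ECX_INTERRUPT_BREAK);
 */
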
static __always_inline void __monitor(const void *eax, u32 ecx, u32 edx)
{
	/*
	 * Use the instruction mnemonic with implicit operands, as the LLVM
	 * assembler fails to assemble the mnemonic with explicit operands:
	 */
	asm volatile("monitor" :: "a" (eax), "c" (ecx), "d" (edx));
}

static __always_inline void __monitorx(const void *eax, u32 ecx, u32 edx)
{
	asm volatile("monitorx" :: "a" (eax), "b" (0), "c" (ecx), "d" (edx));
}

static __always_inline void __mwait(u32 eax, u32 ecx)
{
	/*
	 * Use the instruction mnemonic with implicit operands, as the LLVM
	 * assembler fails to assemble the mnemonic with explicit operands:
	 */
	asm volatile("mwait" :: "a" (eax), "c" (ecx));
}
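
/*
 * Typical pairing (sketch; mwait_idle_with_hints() below is the canonical
 * in-kernel user): arm the monitor on an address, re-check the wakeup
 * condition to close the race against a store that landed before the
 * monitor was armed, then wait ('need_wakeup' is an illustrative
 * condition, not a real helper):
 *
 *	__monitor(addr, 0, 0);
 *	if (!need_wakeup())
 *		__mwait(hint, MWAIT_ECX_INTERRUPT_BREAK);
 */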

/*
 * MWAITX allows for a timer expiration to get the core out of a wait state
 * in addition to the default MWAIT exit condition of a store appearing at
 * a monitored virtual address.
 *
 * Registers:
 *
 * MWAITX ECX[1]: enable timer if set
 * MWAITX EBX[31:0]: max wait time expressed in SW P0 clocks. The software P0
 * frequency is the same as the TSC frequency.
 *
 * Below is a comparison between MWAIT and MWAITX on AMD processors:
 *
 *                 MWAIT                           MWAITX
 * opcode          0f 01 c9           |            0f 01 fb
 * ECX[0]                  value of RFLAGS.IF seen by instruction
 * ECX[1]          unused/#GP if set  |            enable timer if set
 * ECX[31:2]                     unused/#GP if set
 * EAX                           unused (reserved for hint)
 * EBX[31:0]       unused             |            max wait time (P0 clocks)
 *
 *                 MONITOR                         MONITORX
 * opcode          0f 01 c8           |            0f 01 fa
 * EAX                     (logical) address to monitor
 * ECX                     #GP if not zero
 */
static __always_inline void __mwaitx(u32 eax, u32 ebx, u32 ecx)
{
	/* No need for TSA buffer clearing on AMD */

	asm volatile("mwaitx" :: "a" (eax), "b" (ebx), "c" (ecx));
}

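/*
 * Example (sketch, modeled on delay_mwaitx() in arch/x86/lib/delay.c):
 * bound the wait with the MWAITX timer so the CPU wakes up after at most
 * 'cycles' P0/TSC clocks even if no store to the monitored address ever
 * arrives. A per-CPU location is used purely as a dummy monitor target:
 *
 *	__monitorx(raw_cpu_ptr(&cpu_tss_rw), 0, 0);
 *	__mwaitx(MWAITX_DISABLE_CSTATES, cycles, MWAITX_ECX_TIMER_ENABLE);
 */
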
/*
 * Re-enable interrupts right upon calling mwait in such a way that
 * no interrupt can fire _before_ the execution of mwait, i.e. no
 * instruction may be placed between "sti" and "mwait".
 *
 * This is necessary because if an interrupt queues a timer before
 * executing mwait, it would otherwise go unnoticed and the next tick
 * would not be reprogrammed accordingly before mwait ever wakes up.
 */
static __always_inline void __sti_mwait(u32 eax, u32 ecx)
{
	asm volatile("sti; mwait" :: "a" (eax), "c" (ecx));
}

/*
 * This uses the MONITOR/MWAIT instructions, new on P4 processors with PNI,
 * which can obviate the IPI otherwise needed to trigger a need_resched()
 * check. We execute MONITOR against the need_resched flag and enter an
 * optimized wait state through MWAIT. Whenever someone changes
 * need_resched, we are woken up from MWAIT (without an IPI).
 *
 * New with Core Duo processors, MWAIT can take hints based on CPU
 * capability.
 */
static __always_inline void mwait_idle_with_hints(u32 eax, u32 ecx)
{
	if (need_resched())
		return;

	x86_idle_clear_cpu_buffers();

	if (static_cpu_has_bug(X86_BUG_MONITOR) || !current_set_polling_and_test()) {
		const void *addr = &current_thread_info()->flags;

		alternative_input("", "clflush (%[addr])", X86_BUG_CLFLUSH_MONITOR,
				  [addr] "a" (addr));
		__monitor(addr, 0, 0);

		if (need_resched())
			goto out;

		if (ecx & 1) {
			__mwait(eax, ecx);
		} else {
			__sti_mwait(eax, ecx);
			raw_local_irq_disable();
		}
	}

out:
	current_clr_polling();
}
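
/*
 * Example caller (sketch): cpuidle drivers such as intel_idle pack the
 * target C-state into the EAX hint and request break-on-interrupt, so
 * the CPU leaves MWAIT when an interrupt arrives even though interrupts
 * are masked while idling:
 *
 *	mwait_idle_with_hints(hint, MWAIT_ECX_INTERRUPT_BREAK);
 */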

/*
 * Callers can specify whether to enter C0.1 (low latency, less
 * power saving) or C0.2 (saves more power, but with longer wakeup
 * latency). This may be overridden by the IA32_UMWAIT_CONTROL MSR,
 * which can force requests for C0.2 to be downgraded to C0.1.
 */
static inline void __tpause(u32 ecx, u32 edx, u32 eax)
{
	/* "tpause %ecx" */
	asm volatile(".byte 0x66, 0x0f, 0xae, 0xf1"
		     :: "c" (ecx), "d" (edx), "a" (eax));
}
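
/*
 * Example (sketch, modeled on delay_halt_tpause() in arch/x86/lib/delay.c):
 * TPAUSE takes an absolute TSC deadline in EDX:EAX and stops waiting once
 * the TSC passes it. C0.2 is the deeper state, appropriate when the wait
 * is long compared to its exit latency:
 *
 *	u64 until = rdtsc() + cycles;
 *
 *	__tpause(TPAUSE_C02_STATE, upper_32_bits(until), lower_32_bits(until));
 */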

#endif /* _ASM_X86_MWAIT_H */