/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2021-2022 Intel Corporation */
#ifndef _ASM_X86_TDX_H
#define _ASM_X86_TDX_H

#include <linux/init.h>
#include <linux/bits.h>
#include <linux/mmzone.h>

#include <asm/errno.h>
#include <asm/ptrace.h>
#include <asm/trapnr.h>
#include <asm/shared/tdx.h>

/*
 * SW-defined error codes.
 *
 * Bits 47:40 == 0xFF indicate the Reserved status code class, which is
 * never used by the TDX module.
 */
#define TDX_ERROR			_BITUL(63)
#define TDX_NON_RECOVERABLE		_BITUL(62)
#define TDX_SW_ERROR			(TDX_ERROR | GENMASK_ULL(47, 40))
#define TDX_SEAMCALL_VMFAILINVALID	(TDX_SW_ERROR | _UL(0xFFFF0000))

#define TDX_SEAMCALL_GP			(TDX_SW_ERROR | X86_TRAP_GP)
#define TDX_SEAMCALL_UD			(TDX_SW_ERROR | X86_TRAP_UD)
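/*
 * Illustrative sketch only (not part of the TDX ABI): callers can tell
 * software-synthesized errors apart from TDX module status codes by
 * checking the TDX_SW_ERROR bits; 'fn' and 'args' below stand for an
 * already prepared SEAMCALL leaf number and argument block:
 *
 *	u64 err = __seamcall(fn, &args);
 *
 *	if (err == TDX_SEAMCALL_VMFAILINVALID)
 *		pr_err("TDX module not loaded\n");
 *	else if ((err & TDX_SW_ERROR) == TDX_SW_ERROR)
 *		pr_err("SEAMCALL faulted (#GP/#UD) before reaching the module\n");
 */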

/*
 * TDX module SEAMCALL leaf function error codes
 */
#define TDX_SUCCESS		0ULL
#define TDX_RND_NO_ENTROPY	0x8000020300000000ULL

#ifndef __ASSEMBLER__

#include <uapi/asm/mce.h>
#include <asm/tdx_global_metadata.h>
#include <linux/pgtable.h>

/*
 * Used by the #VE exception handler to gather the #VE exception
 * info from the TDX module. This is a software only structure
 * and not part of the TDX module/VMM ABI.
 */
struct ve_info {
	u64 exit_reason;
	u64 exit_qual;
	/* Guest Linear (virtual) Address */
	u64 gla;
	/* Guest Physical Address */
	u64 gpa;
	u32 instr_len;
	u32 instr_info;
};

#ifdef CONFIG_INTEL_TDX_GUEST

void __init tdx_early_init(void);

void tdx_get_ve_info(struct ve_info *ve);

bool tdx_handle_virt_exception(struct pt_regs *regs, struct ve_info *ve);
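/*
 * Sketch of the expected #VE handling sequence (illustrative only; the
 * real flow lives in the kernel's #VE exception entry code, 'regs' is
 * the exception pt_regs):
 *
 *	struct ve_info ve;
 *
 *	tdx_get_ve_info(&ve);
 *	if (!tdx_handle_virt_exception(regs, &ve))
 *		...	(unhandled: the caller raises a fault)
 */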

void tdx_halt(void);

bool tdx_early_handle_ve(struct pt_regs *regs);

int tdx_mcall_get_report0(u8 *reportdata, u8 *tdreport);

int tdx_mcall_extend_rtmr(u8 index, u8 *data);

u64 tdx_hcall_get_quote(u8 *buf, size_t size);
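/*
 * Attestation sketch (illustrative only): a TDREPORT is generated over
 * caller-supplied REPORTDATA and can later be converted into a quote via
 * the VMM.  The 64/1024 byte sizes are assumptions taken from the guest
 * ABI, not constants defined in this header:
 *
 *	u8 reportdata[64], tdreport[1024];
 *
 *	if (!tdx_mcall_get_report0(reportdata, tdreport))
 *		...	pass tdreport on to tdx_hcall_get_quote() via a
 *			VMM-shared buffer
 */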

void __init tdx_dump_attributes(u64 td_attr);
void __init tdx_dump_td_ctls(u64 td_ctls);

#else

static inline void tdx_early_init(void) { }
static inline void tdx_halt(void) { }

static inline bool tdx_early_handle_ve(struct pt_regs *regs) { return false; }

#endif /* CONFIG_INTEL_TDX_GUEST */

#if defined(CONFIG_KVM_GUEST) && defined(CONFIG_INTEL_TDX_GUEST)
long tdx_kvm_hypercall(unsigned int nr, unsigned long p1, unsigned long p2,
		       unsigned long p3, unsigned long p4);
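/*
 * Roughly how the KVM paravirt wrappers are expected to reach this helper
 * when running as a TDX guest (illustrative sketch, not the exact
 * kvm_para.h code):
 *
 *	if (cpu_feature_enabled(X86_FEATURE_TDX_GUEST))
 *		return tdx_kvm_hypercall(nr, p1, p2, p3, p4);
 */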
#else
static inline long tdx_kvm_hypercall(unsigned int nr, unsigned long p1,
				     unsigned long p2, unsigned long p3,
				     unsigned long p4)
{
	return -ENODEV;
}
#endif /* CONFIG_INTEL_TDX_GUEST && CONFIG_KVM_GUEST */

#ifdef CONFIG_INTEL_TDX_HOST
u64 __seamcall(u64 fn, struct tdx_module_args *args);
u64 __seamcall_ret(u64 fn, struct tdx_module_args *args);
u64 __seamcall_saved_ret(u64 fn, struct tdx_module_args *args);
void tdx_init(void);

#include <linux/preempt.h>
#include <asm/archrandom.h>
#include <asm/processor.h>

typedef u64 (*sc_func_t)(u64 fn, struct tdx_module_args *args);

static __always_inline u64 __seamcall_dirty_cache(sc_func_t func, u64 fn,
						  struct tdx_module_args *args)
{
	lockdep_assert_preemption_disabled();

	/*
	 * SEAMCALLs are made to the TDX module and can generate dirty
	 * cachelines of TDX private memory.  Mark the cache state
	 * incoherent so that the caches can be flushed during kexec.
	 *
	 * This needs to be done before actually making the SEAMCALL,
	 * because the kexec-ing CPU could send NMIs to stop remote CPUs,
	 * in which case even disabling IRQs won't help here.
	 */
	this_cpu_write(cache_state_incoherent, true);

	return func(fn, args);
}

static __always_inline u64 sc_retry(sc_func_t func, u64 fn,
				    struct tdx_module_args *args)
{
	int retry = RDRAND_RETRY_LOOPS;
	u64 ret;

	do {
		preempt_disable();
		ret = __seamcall_dirty_cache(func, fn, args);
		preempt_enable();
	} while (ret == TDX_RND_NO_ENTROPY && --retry);

	return ret;
}

#define seamcall(_fn, _args)		sc_retry(__seamcall, (_fn), (_args))
#define seamcall_ret(_fn, _args)	sc_retry(__seamcall_ret, (_fn), (_args))
#define seamcall_saved_ret(_fn, _args)	sc_retry(__seamcall_saved_ret, (_fn), (_args))
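/*
 * Usage sketch (illustrative only): read a global metadata field with the
 * retrying helpers above.  TDH_SYS_RD and 'field_id' are placeholders for
 * values defined by the TDX module spec, not by this header:
 *
 *	struct tdx_module_args args = {
 *		.rdx = field_id,
 *	};
 *	u64 err;
 *
 *	err = seamcall_ret(TDH_SYS_RD, &args);
 *	if (err)
 *		return -EIO;
 *	val = args.r8;
 */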
int tdx_cpu_enable(void);
int tdx_enable(void);
const char *tdx_dump_mce_info(struct mce *m);
const struct tdx_sys_info *tdx_get_sysinfo(void);

int tdx_guest_keyid_alloc(void);
u32 tdx_get_nr_guest_keyids(void);
void tdx_guest_keyid_free(unsigned int keyid);

void tdx_quirk_reset_page(struct page *page);

struct tdx_td {
	/* TD root structure: */
	struct page *tdr_page;

	int tdcs_nr_pages;
	/* TD control structure: */
	struct page **tdcs_pages;

	/* Size of `tdcx_pages` in struct tdx_vp */
	int tdcx_nr_pages;
};

struct tdx_vp {
	/* TDVP root page */
	struct page *tdvpr_page;
	/* precalculated page_to_phys(tdvpr_page) for use in noinstr code */
	phys_addr_t tdvpr_pa;

	/* TD vCPU control structure: */
	struct page **tdcx_pages;
};

static inline u64 mk_keyed_paddr(u16 hkid, struct page *page)
{
	u64 ret;

	ret = page_to_phys(page);
	/* KeyID bits are just above the physical address bits: */
	ret |= (u64)hkid << boot_cpu_data.x86_phys_bits;

	return ret;
}

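/*
 * TDX Secure-EPT levels are zero-based while enum pg_level is offset by
 * PG_LEVEL_NONE, e.g. PG_LEVEL_4K maps to SEPT level 0 and PG_LEVEL_2M
 * to SEPT level 1.
 */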
static inline int pg_level_to_tdx_sept_level(enum pg_level level)
{
	WARN_ON_ONCE(level == PG_LEVEL_NONE);
	return level - 1;
}

u64 tdh_vp_enter(struct tdx_vp *vp, struct tdx_module_args *args);
u64 tdh_mng_addcx(struct tdx_td *td, struct page *tdcs_page);
u64 tdh_mem_page_add(struct tdx_td *td, u64 gpa, struct page *page, struct page *source, u64 *ext_err1, u64 *ext_err2);
u64 tdh_mem_sept_add(struct tdx_td *td, u64 gpa, int level, struct page *page, u64 *ext_err1, u64 *ext_err2);
u64 tdh_vp_addcx(struct tdx_vp *vp, struct page *tdcx_page);
u64 tdh_mem_page_aug(struct tdx_td *td, u64 gpa, int level, struct page *page, u64 *ext_err1, u64 *ext_err2);
u64 tdh_mem_range_block(struct tdx_td *td, u64 gpa, int level, u64 *ext_err1, u64 *ext_err2);
u64 tdh_mng_key_config(struct tdx_td *td);
u64 tdh_mng_create(struct tdx_td *td, u16 hkid);
u64 tdh_vp_create(struct tdx_td *td, struct tdx_vp *vp);
u64 tdh_mng_rd(struct tdx_td *td, u64 field, u64 *data);
u64 tdh_mr_extend(struct tdx_td *td, u64 gpa, u64 *ext_err1, u64 *ext_err2);
u64 tdh_mr_finalize(struct tdx_td *td);
u64 tdh_vp_flush(struct tdx_vp *vp);
u64 tdh_mng_vpflushdone(struct tdx_td *td);
u64 tdh_mng_key_freeid(struct tdx_td *td);
u64 tdh_mng_init(struct tdx_td *td, u64 td_params, u64 *extended_err);
u64 tdh_vp_init(struct tdx_vp *vp, u64 initial_rcx, u32 x2apicid);
u64 tdh_vp_rd(struct tdx_vp *vp, u64 field, u64 *data);
u64 tdh_vp_wr(struct tdx_vp *vp, u64 field, u64 data, u64 mask);
u64 tdh_phymem_page_reclaim(struct page *page, u64 *tdx_pt, u64 *tdx_owner, u64 *tdx_size);
u64 tdh_mem_track(struct tdx_td *tdr);
u64 tdh_mem_page_remove(struct tdx_td *td, u64 gpa, u64 level, u64 *ext_err1, u64 *ext_err2);
u64 tdh_phymem_cache_wb(bool resume);
u64 tdh_phymem_page_wbinvd_tdr(struct tdx_td *td);
u64 tdh_phymem_page_wbinvd_hkid(u64 hkid, struct page *page);
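/*
 * Rough TD build ordering implied by the wrappers above (a sketch of the
 * TDX module flow, not a contract defined by this header):
 * TDH.MNG.CREATE and TDH.MNG.KEY.CONFIG establish the TD and its KeyID,
 * TDH.MNG.ADDCX/TDH.MNG.INIT populate and initialize the TD control
 * structures, vCPUs follow via TDH.VP.CREATE/TDH.VP.ADDCX/TDH.VP.INIT,
 * initial memory is added with TDH.MEM.SEPT.ADD and TDH.MEM.PAGE.ADD,
 * and TDH.MR.FINALIZE completes the build before TDH.VP.ENTER runs the
 * guest.
 */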
#else
static inline void tdx_init(void) { }
static inline int tdx_cpu_enable(void) { return -ENODEV; }
static inline int tdx_enable(void) { return -ENODEV; }
static inline u32 tdx_get_nr_guest_keyids(void) { return 0; }
static inline const char *tdx_dump_mce_info(struct mce *m) { return NULL; }
static inline const struct tdx_sys_info *tdx_get_sysinfo(void) { return NULL; }
#endif /* CONFIG_INTEL_TDX_HOST */

#ifdef CONFIG_KEXEC_CORE
void tdx_cpu_flush_cache_for_kexec(void);
#else
static inline void tdx_cpu_flush_cache_for_kexec(void) { }
#endif

#endif /* !__ASSEMBLER__ */
#endif /* _ASM_X86_TDX_H */