/* SPDX-License-Identifier: GPL-2.0 */

/*
 * Linux-specific definitions for managing interactions with Microsoft's
 * Hyper-V hypervisor. The definitions in this file are architecture
 * independent. See arch/<arch>/include/asm/mshyperv.h for definitions
 * that are specific to architecture <arch>.
 *
 * Definitions that are derived from Hyper-V code or headers should not go in
 * this file, but should instead go in the relevant files in include/hyperv.
 *
 * Copyright (C) 2019, Microsoft, Inc.
 *
 * Author : Michael Kelley <mikelley@microsoft.com>
 */

#ifndef _ASM_GENERIC_MSHYPERV_H
#define _ASM_GENERIC_MSHYPERV_H

#include <linux/types.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <acpi/acpi_numa.h>
#include <linux/cpumask.h>
#include <linux/nmi.h>
#include <asm/ptrace.h>
#include <hyperv/hvhdk.h>

#define VTPM_BASE_ADDRESS 0xfed40000

enum hv_partition_type {
	HV_PARTITION_TYPE_GUEST,
	HV_PARTITION_TYPE_ROOT,
	HV_PARTITION_TYPE_L1VH,
};

struct ms_hyperv_info {
	u32 features;
	u32 priv_high;
	u32 ext_features;
	u32 misc_features;
	u32 hints;
	u32 nested_features;
	u32 max_vp_index;
	u32 max_lp_index;
	u8 vtl;
	union {
		u32 isolation_config_a;
		struct {
			u32 paravisor_present : 1;
			u32 reserved_a1 : 31;
		};
	};
	union {
		u32 isolation_config_b;
		struct {
			u32 cvm_type : 4;
			u32 reserved_b1 : 1;
			u32 shared_gpa_boundary_active : 1;
			u32 shared_gpa_boundary_bits : 6;
			u32 reserved_b2 : 20;
		};
	};
	u64 shared_gpa_boundary;
};
extern struct ms_hyperv_info ms_hyperv;
extern bool hv_nested;
extern u64 hv_current_partition_id;
extern enum hv_partition_type hv_curr_partition_type;

extern void * __percpu *hyperv_pcpu_input_arg;
extern void * __percpu *hyperv_pcpu_output_arg;

u64 hv_do_hypercall(u64 control, void *inputaddr, void *outputaddr);
u64 hv_do_fast_hypercall8(u16 control, u64 input8);
u64 hv_do_fast_hypercall16(u16 control, u64 input1, u64 input2);

bool hv_isolation_type_snp(void);
bool hv_isolation_type_tdx(void);

/*
 * On architectures where Hyper-V doesn't support AEOI (e.g., ARM64),
 * it doesn't provide a recommendation flag and AEOI must be disabled.
 */
static inline bool hv_recommend_using_aeoi(void)
{
#ifdef HV_DEPRECATING_AEOI_RECOMMENDED
	return !(ms_hyperv.hints & HV_DEPRECATING_AEOI_RECOMMENDED);
#else
	return false;
#endif
}

static inline struct hv_proximity_domain_info hv_numa_node_to_pxm_info(int node)
{
	struct hv_proximity_domain_info pxm_info = {};

	if (node != NUMA_NO_NODE) {
		pxm_info.domain_id = node_to_pxm(node);
		pxm_info.flags.proximity_info_valid = 1;
		pxm_info.flags.proximity_preferred = 1;
	}

	return pxm_info;
}

/* Helper functions that provide a consistent pattern for checking Hyper-V hypercall status. */
static inline int hv_result(u64 status)
{
	return status & HV_HYPERCALL_RESULT_MASK;
}

static inline bool hv_result_success(u64 status)
{
	return hv_result(status) == HV_STATUS_SUCCESS;
}

static inline unsigned int hv_repcomp(u64 status)
{
	/* Bits [43:32] of status have 'Reps completed' data. */
	return (status & HV_HYPERCALL_REP_COMP_MASK) >>
		HV_HYPERCALL_REP_COMP_OFFSET;
}
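
/*
 * Illustrative sketch, not part of the upstream header: the usual calling
 * pattern is to issue a hypercall and then test the returned status with
 * the helpers above. The wrapper name below is hypothetical.
 */
static inline bool hv_example_fast_hypercall_ok(u16 code, u64 arg)
{
	u64 status = hv_do_fast_hypercall8(code, arg);

	/* The low 16 bits of status carry the HV_STATUS_* result code */
	return hv_result_success(status);
}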

/*
 * Rep hypercalls. Callers of this function are expected to ensure that
 * rep_count and varhead_size comply with the Hyper-V hypercall definition.
 */
static inline u64 hv_do_rep_hypercall(u16 code, u16 rep_count, u16 varhead_size,
				      void *input, void *output)
{
	u64 control = code;
	u64 status;
	u16 rep_comp;

	control |= (u64)varhead_size << HV_HYPERCALL_VARHEAD_OFFSET;
	control |= (u64)rep_count << HV_HYPERCALL_REP_COMP_OFFSET;

	do {
		status = hv_do_hypercall(control, input, output);
		if (!hv_result_success(status))
			return status;

		rep_comp = hv_repcomp(status);

		control &= ~HV_HYPERCALL_REP_START_MASK;
		control |= (u64)rep_comp << HV_HYPERCALL_REP_START_OFFSET;

		touch_nmi_watchdog();
	} while (rep_comp < rep_count);

	return status;
}
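
/*
 * Illustrative sketch, not upstream code: shape of a typical rep hypercall
 * invocation. The wrapper name is hypothetical; real callers build the
 * fixed-size input header (normally in the per-cpu hyperv_pcpu_input_arg
 * page with interrupts disabled), append the rep list, and derive rep_count
 * from the number of list entries per the TLFS definition of the hypercall.
 */
static inline u64 hv_example_rep_call(u16 code, void *input, void *output,
				      u16 rep_count)
{
	/* This sketch assumes a hypercall with no variable header data */
	return hv_do_rep_hypercall(code, rep_count, 0, input, output);
}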

/* Generate the guest OS identifier as described in the Hyper-V TLFS */
static inline u64 hv_generate_guest_id(u64 kernel_version)
{
	u64 guest_id;

	guest_id = (((u64)HV_LINUX_VENDOR_ID) << 48);
	guest_id |= (kernel_version << 16);

	return guest_id;
}
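
/*
 * Illustrative sketch, not upstream code: early init code typically feeds
 * LINUX_VERSION_CODE into hv_generate_guest_id() and writes the result to
 * the guest OS ID synthetic register. The wrapper name is hypothetical;
 * hv_set_msr() and HV_MSR_GUEST_OS_ID are assumed to be provided by the
 * arch-specific and Hyper-V headers included before this point.
 */
static inline void hv_example_set_guest_id(u32 kernel_version)
{
	hv_set_msr(HV_MSR_GUEST_OS_ID, hv_generate_guest_id(kernel_version));
}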

#if IS_ENABLED(CONFIG_HYPERV_VMBUS)
/* Free the message slot and signal end-of-message if required */
static inline void vmbus_signal_eom(struct hv_message *msg, u32 old_msg_type)
{
	/*
	 * On crash we're reading some other CPU's message page and we need
	 * to be careful: this other CPU may have already cleared the header
	 * and the host may have already delivered some other message there.
	 * In case we blindly write msg->header.message_type we're going
	 * to lose it. We can still lose a message of the same type but
	 * we count on the fact that there can only be one
	 * CHANNELMSG_UNLOAD_RESPONSE and we don't care about other messages
	 * on crash.
	 */
	if (cmpxchg(&msg->header.message_type, old_msg_type,
		    HVMSG_NONE) != old_msg_type)
		return;

	/*
	 * The cmpxchg() above does an implicit memory barrier to
	 * ensure the write to MessageType (i.e. set to
	 * HVMSG_NONE) happens before we read the
	 * MessagePending and EOMing. Otherwise, the EOMing
	 * will not deliver any more messages since there is
	 * no empty slot.
	 */
	if (msg->header.message_flags.msg_pending) {
		/*
		 * This will cause message queue rescan to
		 * possibly deliver another msg from the
		 * hypervisor
		 */
		hv_set_msr(HV_MSR_EOM, 0);
	}
}

extern int vmbus_interrupt;
extern int vmbus_irq;
#endif /* CONFIG_HYPERV_VMBUS */

int hv_get_hypervisor_version(union hv_hypervisor_version_info *info);

void hv_setup_vmbus_handler(void (*handler)(void));
void hv_remove_vmbus_handler(void);
void hv_setup_stimer0_handler(void (*handler)(void));
void hv_remove_stimer0_handler(void);

void hv_setup_kexec_handler(void (*handler)(void));
void hv_remove_kexec_handler(void);
void hv_setup_crash_handler(void (*handler)(struct pt_regs *regs));
void hv_remove_crash_handler(void);
void hv_setup_mshv_handler(void (*handler)(void));

#if IS_ENABLED(CONFIG_HYPERV)
/*
 * The hypervisor's notion of virtual processor ID is different from
 * Linux' notion of CPU ID. This information can only be retrieved
 * in the context of the calling CPU. Set up a map for easy access
 * to this information.
 */
extern u32 *hv_vp_index;
extern u32 hv_max_vp_index;

extern u64 (*hv_read_reference_counter)(void);

/* Sentinel value for an uninitialized entry in hv_vp_index array */
#define VP_INVAL	U32_MAX

int __init hv_common_init(void);
void __init hv_get_partition_id(void);
void __init hv_common_free(void);
void __init ms_hyperv_late_init(void);
int hv_common_cpu_init(unsigned int cpu);
int hv_common_cpu_die(unsigned int cpu);
void hv_identify_partition_type(void);

/**
 * hv_cpu_number_to_vp_number() - Map CPU to VP.
 * @cpu_number: CPU number in Linux terms
 *
 * This function returns the mapping between the Linux processor
 * number and the hypervisor's virtual processor number, useful
 * in making hypercalls and such that talk about specific
 * processors.
 *
 * Return: Virtual processor number in Hyper-V terms
 */
static inline int hv_cpu_number_to_vp_number(int cpu_number)
{
	return hv_vp_index[cpu_number];
}
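
/*
 * Illustrative sketch, not upstream code: entries in hv_vp_index start out
 * as VP_INVAL, so code that can run before the mapping is populated should
 * check for it, as __cpumask_to_vpset() below does. The helper name is
 * hypothetical.
 */
static inline bool hv_example_vp_index_valid(int cpu)
{
	return (u32)hv_cpu_number_to_vp_number(cpu) != VP_INVAL;
}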

static inline int __cpumask_to_vpset(struct hv_vpset *vpset,
				     const struct cpumask *cpus,
				     bool (*func)(int cpu))
{
	int cpu, vcpu, vcpu_bank, vcpu_offset, nr_bank = 1;
	int max_vcpu_bank = hv_max_vp_index / HV_VCPUS_PER_SPARSE_BANK;

	/* vpset.valid_bank_mask can represent up to HV_MAX_SPARSE_VCPU_BANKS banks */
	if (max_vcpu_bank >= HV_MAX_SPARSE_VCPU_BANKS)
		return 0;

	/*
	 * Clear all banks up to the maximum possible bank. The hv_tlb_flush_ex
	 * structs are not cleared between calls, so we risk flushing unneeded
	 * vCPUs otherwise.
	 */
	for (vcpu_bank = 0; vcpu_bank <= max_vcpu_bank; vcpu_bank++)
		vpset->bank_contents[vcpu_bank] = 0;

	/*
	 * Some banks may end up being empty but this is acceptable.
	 */
	for_each_cpu(cpu, cpus) {
		if (func && func(cpu))
			continue;
		vcpu = hv_cpu_number_to_vp_number(cpu);
		if (vcpu == VP_INVAL)
			return -1;
		vcpu_bank = vcpu / HV_VCPUS_PER_SPARSE_BANK;
		vcpu_offset = vcpu % HV_VCPUS_PER_SPARSE_BANK;
		__set_bit(vcpu_offset, (unsigned long *)
			  &vpset->bank_contents[vcpu_bank]);
		if (vcpu_bank >= nr_bank)
			nr_bank = vcpu_bank + 1;
	}
	vpset->valid_bank_mask = GENMASK_ULL(nr_bank - 1, 0);
	return nr_bank;
}

/*
 * Convert a Linux cpumask into a Hyper-V VPset. In the _skip variant,
 * 'func' is called for each CPU present in cpumask.  If 'func' returns
 * true, that CPU is skipped -- i.e., that CPU from cpumask is *not*
 * added to the Hyper-V VPset. If 'func' is NULL, no CPUs are
 * skipped.
 */
static inline int cpumask_to_vpset(struct hv_vpset *vpset,
				   const struct cpumask *cpus)
{
	return __cpumask_to_vpset(vpset, cpus, NULL);
}

static inline int cpumask_to_vpset_skip(struct hv_vpset *vpset,
					const struct cpumask *cpus,
					bool (*func)(int cpu))
{
	return __cpumask_to_vpset(vpset, cpus, func);
}
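
/*
 * Illustrative sketch, not upstream code: a skip callback that excludes
 * offline CPUs from the resulting VPset. Both function names here are
 * hypothetical; cpu_online() comes from <linux/cpumask.h>.
 */
static inline bool hv_example_skip_offline(int cpu)
{
	/* Returning true means this CPU is left out of the VPset */
	return !cpu_online(cpu);
}

static inline int hv_example_online_vpset(struct hv_vpset *vpset,
					  const struct cpumask *cpus)
{
	return cpumask_to_vpset_skip(vpset, cpus, hv_example_skip_offline);
}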

#define _hv_status_fmt(fmt) "%s: Hyper-V status: %#x = %s: " fmt
#define hv_status_printk(level, status, fmt, ...) \
do { \
	u64 __status = (status); \
	pr_##level(_hv_status_fmt(fmt), __func__, hv_result(__status), \
		   hv_result_to_string(__status), ##__VA_ARGS__); \
} while (0)
#define hv_status_err(status, fmt, ...) \
	hv_status_printk(err, status, fmt, ##__VA_ARGS__)
#define hv_status_debug(status, fmt, ...) \
	hv_status_printk(debug, status, fmt, ##__VA_ARGS__)

const char *hv_result_to_string(u64 hv_status);
int hv_result_to_errno(u64 status);
void hyperv_report_panic(struct pt_regs *regs, long err, bool in_die);
bool hv_is_hyperv_initialized(void);
bool hv_is_hibernation_supported(void);
enum hv_isolation_type hv_get_isolation_type(void);
bool hv_is_isolation_supported(void);
bool hv_isolation_type_snp(void);
u64 hv_ghcb_hypercall(u64 control, void *input, void *output, u32 input_size);
u64 hv_tdx_hypercall(u64 control, u64 param1, u64 param2);
void hyperv_cleanup(void);
bool hv_query_ext_cap(u64 cap_query);
void hv_setup_dma_ops(struct device *dev, bool coherent);
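
/*
 * Illustrative sketch, not upstream code: typical use of the status printk
 * helpers after a failed hypercall. The function name and message text are
 * hypothetical; pr_err()/pr_debug() are assumed to be available wherever
 * this header is included.
 */
static inline void hv_example_report_status(u64 status)
{
	if (!hv_result_success(status))
		hv_status_err(status, "example hypercall failed\n");
}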
#else /* CONFIG_HYPERV */
static inline void hv_identify_partition_type(void) {}
static inline bool hv_is_hyperv_initialized(void) { return false; }
static inline bool hv_is_hibernation_supported(void) { return false; }
static inline void hyperv_cleanup(void) {}
static inline void ms_hyperv_late_init(void) {}
static inline bool hv_is_isolation_supported(void) { return false; }
static inline enum hv_isolation_type hv_get_isolation_type(void)
{
	return HV_ISOLATION_TYPE_NONE;
}
#endif /* CONFIG_HYPERV */

#if IS_ENABLED(CONFIG_MSHV_ROOT)
static inline bool hv_root_partition(void)
{
	return hv_curr_partition_type == HV_PARTITION_TYPE_ROOT;
}
static inline bool hv_l1vh_partition(void)
{
	return hv_curr_partition_type == HV_PARTITION_TYPE_L1VH;
}
static inline bool hv_parent_partition(void)
{
	return hv_root_partition() || hv_l1vh_partition();
}
int hv_call_deposit_pages(int node, u64 partition_id, u32 num_pages);
int hv_call_add_logical_proc(int node, u32 lp_index, u32 acpi_id);
int hv_call_create_vp(int node, u64 partition_id, u32 vp_index, u32 flags);

#else /* CONFIG_MSHV_ROOT */
static inline bool hv_root_partition(void) { return false; }
static inline bool hv_l1vh_partition(void) { return false; }
static inline bool hv_parent_partition(void) { return false; }
static inline int hv_call_deposit_pages(int node, u64 partition_id, u32 num_pages)
{
	return -EOPNOTSUPP;
}
static inline int hv_call_add_logical_proc(int node, u32 lp_index, u32 acpi_id)
{
	return -EOPNOTSUPP;
}
static inline int hv_call_create_vp(int node, u64 partition_id, u32 vp_index, u32 flags)
{
	return -EOPNOTSUPP;
}
#endif /* CONFIG_MSHV_ROOT */

#if IS_ENABLED(CONFIG_HYPERV_VTL_MODE)
u8 __init get_vtl(void);
#else
static inline u8 get_vtl(void) { return 0; }
#endif

#endif
|---|