LCOV - code coverage report
Current view: top level - arch/x86/kernel/cpu - vmware.c

Test:      landlock.info
Date:      2021-04-22 12:43:58

             Hit    Total    Coverage
Lines:         9      144       6.2 %
Functions:     2       21       9.5 %

          Line data    Source code
       1             : /*
       2             :  * VMware Detection code.
       3             :  *
       4             :  * Copyright (C) 2008, VMware, Inc.
       5             :  * Author : Alok N Kataria <akataria@vmware.com>
       6             :  *
       7             :  * This program is free software; you can redistribute it and/or modify
       8             :  * it under the terms of the GNU General Public License as published by
       9             :  * the Free Software Foundation; either version 2 of the License, or
      10             :  * (at your option) any later version.
      11             :  *
      12             :  * This program is distributed in the hope that it will be useful, but
      13             :  * WITHOUT ANY WARRANTY; without even the implied warranty of
      14             :  * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
      15             :  * NON INFRINGEMENT.  See the GNU General Public License for more
      16             :  * details.
      17             :  *
      18             :  * You should have received a copy of the GNU General Public License
      19             :  * along with this program; if not, write to the Free Software
      20             :  * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
      21             :  *
      22             :  */
      23             : 
      24             : #include <linux/dmi.h>
      25             : #include <linux/init.h>
      26             : #include <linux/export.h>
      27             : #include <linux/clocksource.h>
      28             : #include <linux/cpu.h>
      29             : #include <linux/reboot.h>
      30             : #include <asm/div64.h>
      31             : #include <asm/x86_init.h>
      32             : #include <asm/hypervisor.h>
      33             : #include <asm/timer.h>
      34             : #include <asm/apic.h>
      35             : #include <asm/vmware.h>
      36             : #include <asm/svm.h>
      37             : 
      38             : #undef pr_fmt
      39             : #define pr_fmt(fmt)     "vmware: " fmt
      40             : 
      41             : #define CPUID_VMWARE_INFO_LEAF               0x40000000
      42             : #define CPUID_VMWARE_FEATURES_LEAF           0x40000010
      43             : #define CPUID_VMWARE_FEATURES_ECX_VMMCALL    BIT(0)
      44             : #define CPUID_VMWARE_FEATURES_ECX_VMCALL     BIT(1)
      45             : 
      46             : #define VMWARE_HYPERVISOR_MAGIC 0x564D5868
      47             : 
      48             : #define VMWARE_CMD_GETVERSION    10
      49             : #define VMWARE_CMD_GETHZ         45
      50             : #define VMWARE_CMD_GETVCPU_INFO  68
      51             : #define VMWARE_CMD_LEGACY_X2APIC  3
      52             : #define VMWARE_CMD_VCPU_RESERVED 31
      53             : #define VMWARE_CMD_STEALCLOCK    91
      54             : 
      55             : #define STEALCLOCK_NOT_AVAILABLE (-1)
      56             : #define STEALCLOCK_DISABLED        0
      57             : #define STEALCLOCK_ENABLED         1
      58             : 
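                      : /*
                      :  * Low-bandwidth hypercall ABI: the caller loads the magic "VMXh"
                      :  * (0x564D5868) into EAX, the command number into ECX and UINT_MAX into
                      :  * EBX, then traps to the hypervisor.  VMWARE_PORT is the legacy
                      :  * "backdoor" variant: an IN from the I/O port held in DX
                      :  * (VMWARE_HYPERVISOR_PORT, from <asm/vmware.h>), with the results
                      :  * returned in EAX/EBX/ECX/EDX.
                      :  */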
      59             : #define VMWARE_PORT(cmd, eax, ebx, ecx, edx)                            \
      60             :         __asm__("inl (%%dx), %%eax" :                                 \
      61             :                 "=a"(eax), "=c"(ecx), "=d"(edx), "=b"(ebx) :            \
      62             :                 "a"(VMWARE_HYPERVISOR_MAGIC),                         \
      63             :                 "c"(VMWARE_CMD_##cmd),                                        \
      64             :                 "d"(VMWARE_HYPERVISOR_PORT), "b"(UINT_MAX) :                \
      65             :                 "memory")
      66             : 
      67             : #define VMWARE_VMCALL(cmd, eax, ebx, ecx, edx)                          \
      68             :         __asm__("vmcall" :                                            \
      69             :                 "=a"(eax), "=c"(ecx), "=d"(edx), "=b"(ebx) :            \
      70             :                 "a"(VMWARE_HYPERVISOR_MAGIC),                         \
      71             :                 "c"(VMWARE_CMD_##cmd),                                        \
      72             :                 "d"(0), "b"(UINT_MAX) :                                     \
      73             :                 "memory")
      74             : 
      75             : #define VMWARE_VMMCALL(cmd, eax, ebx, ecx, edx)                         \
      76             :         __asm__("vmmcall" :                                           \
      77             :                 "=a"(eax), "=c"(ecx), "=d"(edx), "=b"(ebx) :            \
      78             :                 "a"(VMWARE_HYPERVISOR_MAGIC),                         \
      79             :                 "c"(VMWARE_CMD_##cmd),                                        \
      80             :                 "d"(0), "b"(UINT_MAX) :                                     \
      81             :                 "memory")
      82             : 
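                      : /*
                      :  * VMWARE_CMD dispatches on vmware_hypercall_mode, chosen once at boot
                      :  * from the CPUID 0x40000010 feature bits (see vmware_select_hypercall()
                      :  * below): VMCALL on Intel VT-x, VMMCALL on AMD SVM, and the port-based
                      :  * backdoor otherwise.  A typical call site looks like:
                      :  *
                      :  *      uint32_t eax, ebx, ecx, edx;
                      :  *      VMWARE_CMD(GETVERSION, eax, ebx, ecx, edx);
                      :  */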
      83             : #define VMWARE_CMD(cmd, eax, ebx, ecx, edx) do {                \
      84             :         switch (vmware_hypercall_mode) {                        \
      85             :         case CPUID_VMWARE_FEATURES_ECX_VMCALL:                  \
      86             :                 VMWARE_VMCALL(cmd, eax, ebx, ecx, edx);         \
      87             :                 break;                                          \
      88             :         case CPUID_VMWARE_FEATURES_ECX_VMMCALL:                 \
      89             :                 VMWARE_VMMCALL(cmd, eax, ebx, ecx, edx);        \
      90             :                 break;                                          \
      91             :         default:                                                \
      92             :                 VMWARE_PORT(cmd, eax, ebx, ecx, edx);           \
      93             :                 break;                                          \
      94             :         }                                                       \
      95             :         } while (0)
      96             : 
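                      : /*
                      :  * Steal-time buffer shared with the hypervisor: 8 bytes of counter
                      :  * plus 56 bytes of padding, one full 64-byte cache line (matching the
                      :  * __aligned(64) per-cpu instances below).  The union lets 64-bit
                      :  * kernels read the counter in a single access while 32-bit kernels
                      :  * read it as two halves; the per-cpu copies live in decrypted memory,
                      :  * presumably so that the hypervisor can update them when memory
                      :  * encryption (SEV) is active.
                      :  */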
      97             : struct vmware_steal_time {
      98             :         union {
      99             :                 uint64_t clock; /* stolen time counter in units of vtsc */
     100             :                 struct {
     101             :                         /* only for little-endian */
     102             :                         uint32_t clock_low;
     103             :                         uint32_t clock_high;
     104             :                 };
     105             :         };
     106             :         uint64_t reserved[7];
     107             : };
     108             : 
     109             : static unsigned long vmware_tsc_khz __ro_after_init;
     110             : static u8 vmware_hypercall_mode     __ro_after_init;
     111             : 
     112             : static inline int __vmware_platform(void)
     113             : {
     114             :         uint32_t eax, ebx, ecx, edx;
     115             :         VMWARE_CMD(GETVERSION, eax, ebx, ecx, edx);
     116             :         return eax != (uint32_t)-1 && ebx == VMWARE_HYPERVISOR_MAGIC;
     117             : }
     118             : 
     119           0 : static unsigned long vmware_get_tsc_khz(void)
     120             : {
     121           0 :         return vmware_tsc_khz;
     122             : }
     123             : 
     124             : #ifdef CONFIG_PARAVIRT
     125             : static struct cyc2ns_data vmware_cyc2ns __ro_after_init;
     126             : static bool vmw_sched_clock __initdata = true;
     127             : static DEFINE_PER_CPU_DECRYPTED(struct vmware_steal_time, vmw_steal_time) __aligned(64);
     128             : static bool has_steal_clock;
     129             : static bool steal_acc __initdata = true; /* steal time accounting */
     130             : 
     131           0 : static __init int setup_vmw_sched_clock(char *s)
     132             : {
     133           0 :         vmw_sched_clock = false;
     134           0 :         return 0;
     135             : }
     136             : early_param("no-vmw-sched-clock", setup_vmw_sched_clock);
     137             : 
     138           0 : static __init int parse_no_stealacc(char *arg)
     139             : {
     140           0 :         steal_acc = false;
     141           0 :         return 0;
     142             : }
     143             : early_param("no-steal-acc", parse_no_stealacc);
     144             : 
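                      : /*
                      :  * TSC cycles are converted to nanoseconds by fixed-point arithmetic:
                      :  *
                      :  *      ns = (cycles * cyc2ns_mul) >> cyc2ns_shift
                      :  *
                      :  * where cyc2ns_mul / 2^cyc2ns_shift approximates 1e6 / tsc_khz, the
                      :  * number of nanoseconds per TSC cycle.  As a purely illustrative
                      :  * example, a 2.5 GHz TSC gives tsc_khz = 2500000 and thus 0.4 ns per
                      :  * cycle.  The offset computed in vmware_cyc2ns_setup() below makes the
                      :  * clock read close to zero at setup time.
                      :  */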
     145           0 : static unsigned long long notrace vmware_sched_clock(void)
     146             : {
     147           0 :         unsigned long long ns;
     148             : 
     149           0 :         ns = mul_u64_u32_shr(rdtsc(), vmware_cyc2ns.cyc2ns_mul,
     150             :                              vmware_cyc2ns.cyc2ns_shift);
     151           0 :         ns -= vmware_cyc2ns.cyc2ns_offset;
     152           0 :         return ns;
     153             : }
     154             : 
     155           0 : static void __init vmware_cyc2ns_setup(void)
     156             : {
     157           0 :         struct cyc2ns_data *d = &vmware_cyc2ns;
     158           0 :         unsigned long long tsc_now = rdtsc();
     159             : 
     160           0 :         clocks_calc_mult_shift(&d->cyc2ns_mul, &d->cyc2ns_shift,
     161             :                                vmware_tsc_khz, NSEC_PER_MSEC, 0);
     162           0 :         d->cyc2ns_offset = mul_u64_u32_shr(tsc_now, d->cyc2ns_mul,
     163             :                                            d->cyc2ns_shift);
     164             : 
     165           0 :         pr_info("using clock offset of %llu ns\n", d->cyc2ns_offset);
     166           0 : }
     167             : 
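                      : /*
                      :  * STEALCLOCK transport: arg1/arg2 travel in ESI/EDI and the
                      :  * STEALCLOCK_* status comes back in EAX.  Enabling passes the physical
                      :  * address of this CPU's steal-time buffer split into its upper and
                      :  * lower 32-bit halves; disabling passes (0, 1).  VMWARE_HYPERCALL
                      :  * (from <asm/vmware.h>) is patched at boot to use the same transport
                      :  * (port I/O, VMCALL or VMMCALL) that VMWARE_CMD selects.
                      :  */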
     168           0 : static int vmware_cmd_stealclock(uint32_t arg1, uint32_t arg2)
     169             : {
     170           0 :         uint32_t result, info;
     171             : 
     172           0 :         asm volatile (VMWARE_HYPERCALL :
     173             :                 "=a"(result),
     174             :                 "=c"(info) :
     175             :                 "a"(VMWARE_HYPERVISOR_MAGIC),
     176             :                 "b"(0),
     177             :                 "c"(VMWARE_CMD_STEALCLOCK),
     178             :                 "d"(0),
     179             :                 "S"(arg1),
     180             :                 "D"(arg2) :
     181             :                 "memory");
     182           0 :         return result;
     183             : }
     184             : 
     185           0 : static bool stealclock_enable(phys_addr_t pa)
     186             : {
     187           0 :         return vmware_cmd_stealclock(upper_32_bits(pa),
     188             :                                      lower_32_bits(pa)) == STEALCLOCK_ENABLED;
     189             : }
     190             : 
     191           0 : static int __stealclock_disable(void)
     192             : {
     193           0 :         return vmware_cmd_stealclock(0, 1);
     194             : }
     195             : 
     196           0 : static void stealclock_disable(void)
     197             : {
     198           0 :         __stealclock_disable();
     199           0 : }
     200             : 
     201           0 : static bool vmware_is_stealclock_available(void)
     202             : {
     203           0 :         return __stealclock_disable() != STEALCLOCK_NOT_AVAILABLE;
     204             : }
     205             : 
     206             : /**
     207             :  * vmware_steal_clock() - read the per-cpu steal clock
     208             :  * @cpu:            the cpu number whose steal clock we want to read
     209             :  *
      210             :  * On a 64-bit system the function reads the steal clock directly; on a
      211             :  * 32-bit system it reads it in two halves, retrying until the high part
      212             :  * is stable across the reads.
     213             :  *
     214             :  * Return:
     215             :  *      The steal clock reading in ns.
     216             :  */
     217           0 : static uint64_t vmware_steal_clock(int cpu)
     218             : {
     219           0 :         struct vmware_steal_time *steal = &per_cpu(vmw_steal_time, cpu);
     220           0 :         uint64_t clock;
     221             : 
     222           0 :         if (IS_ENABLED(CONFIG_64BIT))
     223           0 :                 clock = READ_ONCE(steal->clock);
     224             :         else {
     225             :                 uint32_t initial_high, low, high;
     226             : 
     227             :                 do {
     228             :                         initial_high = READ_ONCE(steal->clock_high);
      229             :                         /* Do not reorder the initial_high and low readings */
     230             :                         virt_rmb();
     231             :                         low = READ_ONCE(steal->clock_low);
     232             :                         /* Keep low reading in between */
     233             :                         virt_rmb();
     234             :                         high = READ_ONCE(steal->clock_high);
     235             :                 } while (initial_high != high);
     236             : 
     237             :                 clock = ((uint64_t)high << 32) | low;
     238             :         }
     239             : 
     240           0 :         return mul_u64_u32_shr(clock, vmware_cyc2ns.cyc2ns_mul,
     241             :                              vmware_cyc2ns.cyc2ns_shift);
     242             : }
     243             : 
     244           0 : static void vmware_register_steal_time(void)
     245             : {
     246           0 :         int cpu = smp_processor_id();
     247           0 :         struct vmware_steal_time *st = &per_cpu(vmw_steal_time, cpu);
     248             : 
     249           0 :         if (!has_steal_clock)
     250             :                 return;
     251             : 
     252           0 :         if (!stealclock_enable(slow_virt_to_phys(st))) {
     253           0 :                 has_steal_clock = false;
     254           0 :                 return;
     255             :         }
     256             : 
     257           0 :         pr_info("vmware-stealtime: cpu %d, pa %llx\n",
     258             :                 cpu, (unsigned long long) slow_virt_to_phys(st));
     259             : }
     260             : 
     261           0 : static void vmware_disable_steal_time(void)
     262             : {
     263           0 :         if (!has_steal_clock)
     264             :                 return;
     265             : 
     266           0 :         stealclock_disable();
     267             : }
     268             : 
     269           0 : static void vmware_guest_cpu_init(void)
     270             : {
     271           0 :         if (has_steal_clock)
     272           0 :                 vmware_register_steal_time();
     273             : }
     274             : 
     275           0 : static void vmware_pv_guest_cpu_reboot(void *unused)
     276             : {
     277           0 :         vmware_disable_steal_time();
     278           0 : }
     279             : 
     280           0 : static int vmware_pv_reboot_notify(struct notifier_block *nb,
     281             :                                 unsigned long code, void *unused)
     282             : {
     283           0 :         if (code == SYS_RESTART)
     284           0 :                 on_each_cpu(vmware_pv_guest_cpu_reboot, NULL, 1);
     285           0 :         return NOTIFY_DONE;
     286             : }
     287             : 
     288             : static struct notifier_block vmware_pv_reboot_nb = {
     289             :         .notifier_call = vmware_pv_reboot_notify,
     290             : };
     291             : 
     292             : #ifdef CONFIG_SMP
     293           0 : static void __init vmware_smp_prepare_boot_cpu(void)
     294             : {
     295           0 :         vmware_guest_cpu_init();
     296           0 :         native_smp_prepare_boot_cpu();
     297           0 : }
     298             : 
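                      : /*
                      :  * CPU hotplug callbacks.  Interrupts are disabled around the
                      :  * enable/disable hypercalls, presumably so that the registration of
                      :  * this CPU's per-cpu buffer cannot race with an interrupt while the
                      :  * buffer is being handed to (or taken back from) the hypervisor.
                      :  */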
     299           0 : static int vmware_cpu_online(unsigned int cpu)
     300             : {
     301           0 :         local_irq_disable();
     302           0 :         vmware_guest_cpu_init();
     303           0 :         local_irq_enable();
     304           0 :         return 0;
     305             : }
     306             : 
     307           0 : static int vmware_cpu_down_prepare(unsigned int cpu)
     308             : {
     309           0 :         local_irq_disable();
     310           0 :         vmware_disable_steal_time();
     311           0 :         local_irq_enable();
     312           0 :         return 0;
     313             : }
     314             : #endif
     315             : 
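                      : /*
                      :  * Flip the paravirt steal-time static keys once, at arch_initcall
                      :  * time; by then init_platform has run and has_steal_clock reflects
                      :  * whether the hypervisor advertises the steal clock.
                      :  */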
     316           1 : static __init int activate_jump_labels(void)
     317             : {
     318           1 :         if (has_steal_clock) {
     319           0 :                 static_key_slow_inc(&paravirt_steal_enabled);
     320           0 :                 if (steal_acc)
     321           0 :                         static_key_slow_inc(&paravirt_steal_rq_enabled);
     322             :         }
     323             : 
     324           1 :         return 0;
     325             : }
     326             : arch_initcall(activate_jump_labels);
     327             : 
     328           0 : static void __init vmware_paravirt_ops_setup(void)
     329             : {
     330           0 :         pv_info.name = "VMware hypervisor";
     331           0 :         pv_ops.cpu.io_delay = paravirt_nop;
     332             : 
     333           0 :         if (vmware_tsc_khz == 0)
     334             :                 return;
     335             : 
     336           0 :         vmware_cyc2ns_setup();
     337             : 
     338           0 :         if (vmw_sched_clock)
     339           0 :                 pv_ops.time.sched_clock = vmware_sched_clock;
     340             : 
     341           0 :         if (vmware_is_stealclock_available()) {
     342           0 :                 has_steal_clock = true;
     343           0 :                 pv_ops.time.steal_clock = vmware_steal_clock;
     344             : 
      345             :                 /* We use the reboot notifier only to disable the steal clock */
     346           0 :                 register_reboot_notifier(&vmware_pv_reboot_nb);
     347             : 
     348             : #ifdef CONFIG_SMP
     349           0 :                 smp_ops.smp_prepare_boot_cpu =
     350             :                         vmware_smp_prepare_boot_cpu;
     351           0 :                 if (cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
     352             :                                               "x86/vmware:online",
     353             :                                               vmware_cpu_online,
     354             :                                               vmware_cpu_down_prepare) < 0)
     355           0 :                         pr_err("vmware_guest: Failed to install cpu hotplug callbacks\n");
     356             : #else
     357             :                 vmware_guest_cpu_init();
     358             : #endif
     359             :         }
     360             : }
     361             : #else
     362             : #define vmware_paravirt_ops_setup() do {} while (0)
     363             : #endif
     364             : 
      365             : /*
      366             :  * The VMware hypervisor takes care of exporting a reliable TSC to the guest.
      367             :  * Still, due to timing differences when running on virtual cpus, the TSC can
      368             :  * be marked as unstable in some cases. For example, the TSC sync check at
      369             :  * bootup can fail due to a marginal offset between vcpus' TSCs (though the
      370             :  * TSCs do not drift from each other).  Also, the ACPI PM timer clocksource
      371             :  * is not suitable as a watchdog when running on a hypervisor because the
      372             :  * kernel may miss a wrap of the counter if the vcpu is descheduled for a
      373             :  * long time. To skip these checks at runtime we set these capability bits,
      374             :  * so that the kernel can simply trust the hypervisor to provide a
      375             :  * reliable virtual TSC that is suitable for timekeeping.
      376             :  */
     377           0 : static void __init vmware_set_capabilities(void)
     378             : {
     379           0 :         setup_force_cpu_cap(X86_FEATURE_CONSTANT_TSC);
     380           0 :         setup_force_cpu_cap(X86_FEATURE_TSC_RELIABLE);
     381           0 :         if (vmware_hypercall_mode == CPUID_VMWARE_FEATURES_ECX_VMCALL)
     382           0 :                 setup_force_cpu_cap(X86_FEATURE_VMCALL);
     383           0 :         else if (vmware_hypercall_mode == CPUID_VMWARE_FEATURES_ECX_VMMCALL)
     384           0 :                 setup_force_cpu_cap(X86_FEATURE_VMW_VMMCALL);
     385           0 : }
     386             : 
     387           0 : static void __init vmware_platform_setup(void)
     388             : {
     389           0 :         uint32_t eax, ebx, ecx, edx;
     390           0 :         uint64_t lpj, tsc_khz;
     391             : 
     392           0 :         VMWARE_CMD(GETHZ, eax, ebx, ecx, edx);
     393             : 
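                      :         /*
                      :          * GETHZ is issued with EBX preloaded with UINT_MAX (see the
                      :          * hypercall macros above); a hypervisor that supports the
                      :          * query overwrites EBX:EAX with the TSC frequency in Hz and
                      :          * ECX with the bus frequency in Hz, so EBX still reading
                      :          * UINT_MAX means the query is unsupported.
                      :          */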
     394           0 :         if (ebx != UINT_MAX) {
     395           0 :                 lpj = tsc_khz = eax | (((uint64_t)ebx) << 32);
     396           0 :                 do_div(tsc_khz, 1000);
     397           0 :                 WARN_ON(tsc_khz >> 32);
     398           0 :                 pr_info("TSC freq read from hypervisor : %lu.%03lu MHz\n",
     399             :                         (unsigned long) tsc_khz / 1000,
     400             :                         (unsigned long) tsc_khz % 1000);
     401             : 
     402           0 :                 if (!preset_lpj) {
     403           0 :                         do_div(lpj, HZ);
     404           0 :                         preset_lpj = lpj;
     405             :                 }
     406             : 
     407           0 :                 vmware_tsc_khz = tsc_khz;
     408           0 :                 x86_platform.calibrate_tsc = vmware_get_tsc_khz;
     409           0 :                 x86_platform.calibrate_cpu = vmware_get_tsc_khz;
     410             : 
     411             : #ifdef CONFIG_X86_LOCAL_APIC
     412             :                 /* Skip lapic calibration since we know the bus frequency. */
     413           0 :                 lapic_timer_period = ecx / HZ;
     414           0 :                 pr_info("Host bus clock speed read from hypervisor : %u Hz\n",
     415             :                         ecx);
     416             : #endif
     417             :         } else {
     418           0 :                 pr_warn("Failed to get TSC freq from the hypervisor\n");
     419             :         }
     420             : 
     421           0 :         vmware_paravirt_ops_setup();
     422             : 
     423             : #ifdef CONFIG_X86_IO_APIC
     424           0 :         no_timer_check = 1;
     425             : #endif
     426             : 
     427           0 :         vmware_set_capabilities();
     428           0 : }
     429             : 
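                      : /*
                      :  * Returns the hypercall mode: one of the CPUID_VMWARE_FEATURES_ECX_*
                      :  * bits from the 0x40000010 leaf, or 0 when neither VMCALL nor VMMCALL
                      :  * is advertised, in which case VMWARE_CMD falls back to port I/O.
                      :  */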
     430           0 : static u8 __init vmware_select_hypercall(void)
     431             : {
     432           0 :         int eax, ebx, ecx, edx;
     433             : 
     434           0 :         cpuid(CPUID_VMWARE_FEATURES_LEAF, &eax, &ebx, &ecx, &edx);
     435           0 :         return (ecx & (CPUID_VMWARE_FEATURES_ECX_VMMCALL |
     436             :                        CPUID_VMWARE_FEATURES_ECX_VMCALL));
     437             : }
     438             : 
      439             : /*
      440             :  * When checking the DMI string information, only the product serial key
      441             :  * needs to be checked, as it will always contain a VMware-specific string
      442             :  * when running under the VMware hypervisor.
      443             :  * If !boot_cpu_has(X86_FEATURE_HYPERVISOR), vmware_hypercall_mode
      444             :  * intentionally defaults to 0.
      445             :  */
     446           1 : static uint32_t __init vmware_platform(void)
     447             : {
     448           1 :         if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
     449           1 :                 unsigned int eax;
     450           1 :                 unsigned int hyper_vendor_id[3];
     451             : 
     452           1 :                 cpuid(CPUID_VMWARE_INFO_LEAF, &eax, &hyper_vendor_id[0],
     453             :                       &hyper_vendor_id[1], &hyper_vendor_id[2]);
     454           1 :                 if (!memcmp(hyper_vendor_id, "VMwareVMware", 12)) {
     455           0 :                         if (eax >= CPUID_VMWARE_FEATURES_LEAF)
     456           0 :                                 vmware_hypercall_mode =
     457           0 :                                         vmware_select_hypercall();
     458             : 
     459           0 :                         pr_info("hypercall mode: 0x%02x\n",
     460             :                                 (unsigned int) vmware_hypercall_mode);
     461             : 
     462           0 :                         return CPUID_VMWARE_INFO_LEAF;
     463             :                 }
     464             :         } else if (dmi_available && dmi_name_in_serial("VMware") &&
     465             :                    __vmware_platform())
     466             :                 return 1;
     467             : 
     468             :         return 0;
     469             : }
     470             : 
      471             : /* Checks if the hypervisor supports x2APIC without VT-d interrupt remapping. */
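                      : /*
                      :  * In the GETVCPU_INFO result, EAX bit VMWARE_CMD_VCPU_RESERVED (31)
                      :  * must be clear for the remaining bits to be valid; bit
                      :  * VMWARE_CMD_LEGACY_X2APIC (3) then indicates that x2APIC is usable
                      :  * without interrupt remapping.
                      :  */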
     472           0 : static bool __init vmware_legacy_x2apic_available(void)
     473             : {
     474           0 :         uint32_t eax, ebx, ecx, edx;
     475           0 :         VMWARE_CMD(GETVCPU_INFO, eax, ebx, ecx, edx);
     476           0 :         return (eax & (1 << VMWARE_CMD_VCPU_RESERVED)) == 0 &&
     477           0 :                (eax & (1 << VMWARE_CMD_LEGACY_X2APIC)) != 0;
     478             : }
     479             : 
     480             : #ifdef CONFIG_AMD_MEM_ENCRYPT
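                      : /*
                      :  * Under SEV-ES the hypervisor cannot read guest registers directly,
                      :  * so the #VC handler uses these callbacks to pass the hypercall
                      :  * arguments through the shared GHCB page and to copy the results
                      :  * back, rejecting them if the hypervisor did not fill in every
                      :  * expected register.
                      :  */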
     481             : static void vmware_sev_es_hcall_prepare(struct ghcb *ghcb,
     482             :                                         struct pt_regs *regs)
     483             : {
      484             :         /* Copy VMware-specific hypercall parameters to the GHCB */
     485             :         ghcb_set_rip(ghcb, regs->ip);
     486             :         ghcb_set_rbx(ghcb, regs->bx);
     487             :         ghcb_set_rcx(ghcb, regs->cx);
     488             :         ghcb_set_rdx(ghcb, regs->dx);
     489             :         ghcb_set_rsi(ghcb, regs->si);
     490             :         ghcb_set_rdi(ghcb, regs->di);
     491             :         ghcb_set_rbp(ghcb, regs->bp);
     492             : }
     493             : 
     494             : static bool vmware_sev_es_hcall_finish(struct ghcb *ghcb, struct pt_regs *regs)
     495             : {
     496             :         if (!(ghcb_rbx_is_valid(ghcb) &&
     497             :               ghcb_rcx_is_valid(ghcb) &&
     498             :               ghcb_rdx_is_valid(ghcb) &&
     499             :               ghcb_rsi_is_valid(ghcb) &&
     500             :               ghcb_rdi_is_valid(ghcb) &&
     501             :               ghcb_rbp_is_valid(ghcb)))
     502             :                 return false;
     503             : 
     504             :         regs->bx = ghcb_get_rbx(ghcb);
     505             :         regs->cx = ghcb_get_rcx(ghcb);
     506             :         regs->dx = ghcb_get_rdx(ghcb);
     507             :         regs->si = ghcb_get_rsi(ghcb);
     508             :         regs->di = ghcb_get_rdi(ghcb);
     509             :         regs->bp = ghcb_get_rbp(ghcb);
     510             : 
     511             :         return true;
     512             : }
     513             : #endif
     514             : 
     515             : const __initconst struct hypervisor_x86 x86_hyper_vmware = {
     516             :         .name                           = "VMware",
     517             :         .detect                         = vmware_platform,
     518             :         .type                           = X86_HYPER_VMWARE,
     519             :         .init.init_platform             = vmware_platform_setup,
     520             :         .init.x2apic_available          = vmware_legacy_x2apic_available,
     521             : #ifdef CONFIG_AMD_MEM_ENCRYPT
     522             :         .runtime.sev_es_hcall_prepare   = vmware_sev_es_hcall_prepare,
     523             :         .runtime.sev_es_hcall_finish    = vmware_sev_es_hcall_finish,
     524             : #endif
     525             : };

Generated by: LCOV version 1.14