LCOV - code coverage report
Current view: top level - arch/x86/kernel - kvm.c (source / functions)
Test: landlock.info
Date: 2021-04-22 12:43:58
                    Hit    Total    Coverage
Lines:              208      417      49.9 %
Functions:           27       52      51.9 %

          Line data    Source code
       1             : // SPDX-License-Identifier: GPL-2.0-or-later
       2             : /*
       3             :  * KVM paravirt_ops implementation
       4             :  *
       5             :  * Copyright (C) 2007, Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
       6             :  * Copyright IBM Corporation, 2007
       7             :  *   Authors: Anthony Liguori <aliguori@us.ibm.com>
       8             :  */
       9             : 
      10             : #define pr_fmt(fmt) "kvm-guest: " fmt
      11             : 
      12             : #include <linux/context_tracking.h>
      13             : #include <linux/init.h>
      14             : #include <linux/irq.h>
      15             : #include <linux/kernel.h>
      16             : #include <linux/kvm_para.h>
      17             : #include <linux/cpu.h>
      18             : #include <linux/mm.h>
      19             : #include <linux/highmem.h>
      20             : #include <linux/hardirq.h>
      21             : #include <linux/notifier.h>
      22             : #include <linux/reboot.h>
      23             : #include <linux/hash.h>
      24             : #include <linux/sched.h>
      25             : #include <linux/slab.h>
      26             : #include <linux/kprobes.h>
      27             : #include <linux/nmi.h>
      28             : #include <linux/swait.h>
      29             : #include <asm/timer.h>
      30             : #include <asm/cpu.h>
      31             : #include <asm/traps.h>
      32             : #include <asm/desc.h>
      33             : #include <asm/tlbflush.h>
      34             : #include <asm/apic.h>
      35             : #include <asm/apicdef.h>
      36             : #include <asm/hypervisor.h>
      37             : #include <asm/tlb.h>
      38             : #include <asm/cpuidle_haltpoll.h>
      39             : #include <asm/ptrace.h>
      40             : #include <asm/svm.h>
      41             : 
      42             : DEFINE_STATIC_KEY_FALSE(kvm_async_pf_enabled);
      43             : 
      44             : static int kvmapf = 1;
      45             : 
      46           0 : static int __init parse_no_kvmapf(char *arg)
      47             : {
      48           0 :         kvmapf = 0;
      49           0 :         return 0;
      50             : }
      51             : 
      52             : early_param("no-kvmapf", parse_no_kvmapf);
      53             : 
      54             : static int steal_acc = 1;
      55           0 : static int __init parse_no_stealacc(char *arg)
      56             : {
      57           0 :         steal_acc = 0;
      58           0 :         return 0;
      59             : }
      60             : 
      61             : early_param("no-steal-acc", parse_no_stealacc);
      62             : 
      63             : static DEFINE_PER_CPU_DECRYPTED(struct kvm_vcpu_pv_apf_data, apf_reason) __aligned(64);
      64             : DEFINE_PER_CPU_DECRYPTED(struct kvm_steal_time, steal_time) __aligned(64) __visible;
      65             : static int has_steal_clock = 0;
      66             : 
      67             : /*
      68             :  * No need for any "IO delay" on KVM
      69             :  */
      70           0 : static void kvm_io_delay(void)
      71             : {
      72           0 : }
      73             : 
      74             : #define KVM_TASK_SLEEP_HASHBITS 8
      75             : #define KVM_TASK_SLEEP_HASHSIZE (1<<KVM_TASK_SLEEP_HASHBITS)
      76             : 
      77             : struct kvm_task_sleep_node {
      78             :         struct hlist_node link;
      79             :         struct swait_queue_head wq;
      80             :         u32 token;
      81             :         int cpu;
      82             : };
      83             : 
      84             : static struct kvm_task_sleep_head {
      85             :         raw_spinlock_t lock;
      86             :         struct hlist_head list;
      87             : } async_pf_sleepers[KVM_TASK_SLEEP_HASHSIZE];
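
The sleeper table above is a fixed array of 1 << KVM_TASK_SLEEP_HASHBITS = 256 buckets, and a 32-bit token picks a bucket via hash_32(). A minimal user-space sketch of that bucket selection, using the same multiplicative constant as the kernel's hash_32() (the token value is illustrative):

    #include <stdint.h>
    #include <stdio.h>

    #define HASHBITS 8
    #define HASHSIZE (1u << HASHBITS)

    /* Multiplicative hash, same constant as the kernel's hash_32(). */
    static uint32_t hash_32(uint32_t val, unsigned int bits)
    {
            return (val * 0x61C88647u) >> (32 - bits);
    }

    int main(void)
    {
            uint32_t token = 0x12345678;    /* illustrative async PF token */

            printf("token %#x -> bucket %u of %u\n",
                   token, hash_32(token, HASHBITS), HASHSIZE);
            return 0;
    }
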
      88             : 
      89           0 : static struct kvm_task_sleep_node *_find_apf_task(struct kvm_task_sleep_head *b,
      90             :                                                   u32 token)
      91             : {
      92           0 :         struct hlist_node *p;
      93             : 
      94           0 :         hlist_for_each(p, &b->list) {
      95           0 :                 struct kvm_task_sleep_node *n =
      96           0 :                         hlist_entry(p, typeof(*n), link);
      97           0 :                 if (n->token == token)
      98             :                         return n;
      99             :         }
     100             : 
     101             :         return NULL;
     102             : }
     103             : 
     104           0 : static bool kvm_async_pf_queue_task(u32 token, struct kvm_task_sleep_node *n)
     105             : {
     106           0 :         u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS);
     107           0 :         struct kvm_task_sleep_head *b = &async_pf_sleepers[key];
     108           0 :         struct kvm_task_sleep_node *e;
     109             : 
     110           0 :         raw_spin_lock(&b->lock);
     111           0 :         e = _find_apf_task(b, token);
     112           0 :         if (e) {
      113             :                 /* dummy entry exists -> wakeup was delivered ahead of PF */
     114           0 :                 hlist_del(&e->link);
     115           0 :                 raw_spin_unlock(&b->lock);
     116           0 :                 kfree(e);
     117           0 :                 return false;
     118             :         }
     119             : 
     120           0 :         n->token = token;
     121           0 :         n->cpu = smp_processor_id();
     122           0 :         init_swait_queue_head(&n->wq);
     123           0 :         hlist_add_head(&n->link, &b->list);
     124           0 :         raw_spin_unlock(&b->lock);
     125           0 :         return true;
     126             : }
     127             : 
     128             : /*
     129             :  * kvm_async_pf_task_wait_schedule - Wait for pagefault to be handled
     130             :  * @token:      Token to identify the sleep node entry
     131             :  *
     132             :  * Invoked from the async pagefault handling code or from the VM exit page
     133             :  * fault handler. In both cases RCU is watching.
     134             :  */
     135           0 : void kvm_async_pf_task_wait_schedule(u32 token)
     136             : {
     137           0 :         struct kvm_task_sleep_node n;
     138           0 :         DECLARE_SWAITQUEUE(wait);
     139             : 
     140           0 :         lockdep_assert_irqs_disabled();
     141             : 
     142           0 :         if (!kvm_async_pf_queue_task(token, &n))
     143           0 :                 return;
     144             : 
     145           0 :         for (;;) {
     146           0 :                 prepare_to_swait_exclusive(&n.wq, &wait, TASK_UNINTERRUPTIBLE);
     147           0 :                 if (hlist_unhashed(&n.link))
     148             :                         break;
     149             : 
     150           0 :                 local_irq_enable();
     151           0 :                 schedule();
     152           0 :                 local_irq_disable();
     153             :         }
     154           0 :         finish_swait(&n.wq, &wait);
     155             : }
     156             : EXPORT_SYMBOL_GPL(kvm_async_pf_task_wait_schedule);
     157             : 
     158           0 : static void apf_task_wake_one(struct kvm_task_sleep_node *n)
     159             : {
     160           0 :         hlist_del_init(&n->link);
     161           0 :         if (swq_has_sleeper(&n->wq))
     162           0 :                 swake_up_one(&n->wq);
     163           0 : }
     164             : 
     165           0 : static void apf_task_wake_all(void)
     166             : {
     167           0 :         int i;
     168             : 
     169           0 :         for (i = 0; i < KVM_TASK_SLEEP_HASHSIZE; i++) {
     170           0 :                 struct kvm_task_sleep_head *b = &async_pf_sleepers[i];
     171           0 :                 struct kvm_task_sleep_node *n;
     172           0 :                 struct hlist_node *p, *next;
     173             : 
     174           0 :                 raw_spin_lock(&b->lock);
     175           0 :                 hlist_for_each_safe(p, next, &b->list) {
     176           0 :                         n = hlist_entry(p, typeof(*n), link);
     177           0 :                         if (n->cpu == smp_processor_id())
     178           0 :                                 apf_task_wake_one(n);
     179             :                 }
     180           0 :                 raw_spin_unlock(&b->lock);
     181             :         }
     182           0 : }
     183             : 
     184           0 : void kvm_async_pf_task_wake(u32 token)
     185             : {
     186           0 :         u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS);
     187           0 :         struct kvm_task_sleep_head *b = &async_pf_sleepers[key];
     188           0 :         struct kvm_task_sleep_node *n;
     189             : 
     190           0 :         if (token == ~0) {
     191           0 :                 apf_task_wake_all();
     192           0 :                 return;
     193             :         }
     194             : 
     195           0 : again:
     196           0 :         raw_spin_lock(&b->lock);
     197           0 :         n = _find_apf_task(b, token);
     198           0 :         if (!n) {
     199             :                 /*
     200             :                  * async PF was not yet handled.
     201             :                  * Add dummy entry for the token.
     202             :                  */
     203           0 :                 n = kzalloc(sizeof(*n), GFP_ATOMIC);
     204           0 :                 if (!n) {
     205             :                         /*
     206             :                          * Allocation failed! Busy wait while other cpu
     207             :                          * handles async PF.
     208             :                          */
     209           0 :                         raw_spin_unlock(&b->lock);
     210           0 :                         cpu_relax();
     211           0 :                         goto again;
     212             :                 }
     213           0 :                 n->token = token;
     214           0 :                 n->cpu = smp_processor_id();
     215           0 :                 init_swait_queue_head(&n->wq);
     216           0 :                 hlist_add_head(&n->link, &b->list);
     217             :         } else {
     218           0 :                 apf_task_wake_one(n);
     219             :         }
     220           0 :         raw_spin_unlock(&b->lock);
     221           0 :         return;
     222             : }
     223             : EXPORT_SYMBOL_GPL(kvm_async_pf_task_wake);
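
Together, kvm_async_pf_task_wait_schedule() and kvm_async_pf_task_wake() form a handshake in which the wakeup may arrive before the waiter: an early wake leaves a dummy node that the later wait consumes without ever sleeping. A condensed user-space analog of that protocol, a sketch only (pthread-based, one slot instead of a hash table, all names illustrative):

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
    static bool node_present;       /* a parked waiter, or a dummy entry */
    static bool node_is_dummy;

    static void pf_wait(void)
    {
            pthread_mutex_lock(&lock);
            if (node_present && node_is_dummy) {
                    /* wakeup was delivered ahead of the fault: consume dummy */
                    node_present = false;
                    pthread_mutex_unlock(&lock);
                    return;
            }
            node_present = true;            /* park until pf_wake() runs */
            node_is_dummy = false;
            while (node_present)
                    pthread_cond_wait(&cond, &lock);
            pthread_mutex_unlock(&lock);
    }

    static void pf_wake(void)
    {
            pthread_mutex_lock(&lock);
            if (node_present) {             /* waiter is parked: release it */
                    node_present = false;
                    pthread_cond_signal(&cond);
            } else {                        /* fault not seen yet: leave dummy */
                    node_present = true;
                    node_is_dummy = true;
            }
            pthread_mutex_unlock(&lock);
    }

    static void *waker(void *arg)
    {
            (void)arg;
            pf_wake();
            return NULL;
    }

    int main(void)
    {
            pthread_t t;

            pthread_create(&t, NULL, waker, NULL);
            pf_wait();              /* correct in either arrival order */
            pthread_join(t, NULL);
            puts("handshake completed");
            return 0;
    }
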
     224             : 
     225           0 : noinstr u32 kvm_read_and_reset_apf_flags(void)
     226             : {
     227           0 :         u32 flags = 0;
     228             : 
     229           0 :         if (__this_cpu_read(apf_reason.enabled)) {
     230           0 :                 flags = __this_cpu_read(apf_reason.flags);
     231           0 :                 __this_cpu_write(apf_reason.flags, 0);
     232             :         }
     233             : 
     234           0 :         return flags;
     235             : }
     236             : EXPORT_SYMBOL_GPL(kvm_read_and_reset_apf_flags);
     237             : 
     238           0 : noinstr bool __kvm_handle_async_pf(struct pt_regs *regs, u32 token)
     239             : {
     240           0 :         u32 flags = kvm_read_and_reset_apf_flags();
     241           0 :         irqentry_state_t state;
     242             : 
     243           0 :         if (!flags)
     244             :                 return false;
     245             : 
     246           0 :         state = irqentry_enter(regs);
     247           0 :         instrumentation_begin();
     248             : 
     249             :         /*
     250             :          * If the host managed to inject an async #PF into an interrupt
     251             :          * disabled region, then die hard as this is not going to end well
     252             :          * and the host side is seriously broken.
     253             :          */
     254           0 :         if (unlikely(!(regs->flags & X86_EFLAGS_IF)))
     255           0 :                 panic("Host injected async #PF in interrupt disabled region\n");
     256             : 
     257           0 :         if (flags & KVM_PV_REASON_PAGE_NOT_PRESENT) {
     258           0 :                 if (unlikely(!(user_mode(regs))))
     259           0 :                         panic("Host injected async #PF in kernel mode\n");
     260             :                 /* Page is swapped out by the host. */
     261           0 :                 kvm_async_pf_task_wait_schedule(token);
     262             :         } else {
     263           0 :                 WARN_ONCE(1, "Unexpected async PF flags: %x\n", flags);
     264             :         }
     265             : 
     266           0 :         instrumentation_end();
     267           0 :         irqentry_exit(regs, state);
     268           0 :         return true;
     269             : }
     270             : 
     271           0 : DEFINE_IDTENTRY_SYSVEC(sysvec_kvm_asyncpf_interrupt)
     272             : {
     273           0 :         struct pt_regs *old_regs = set_irq_regs(regs);
     274           0 :         u32 token;
     275             : 
     276           0 :         ack_APIC_irq();
     277             : 
     278           0 :         inc_irq_stat(irq_hv_callback_count);
     279             : 
     280           0 :         if (__this_cpu_read(apf_reason.enabled)) {
     281           0 :                 token = __this_cpu_read(apf_reason.token);
     282           0 :                 kvm_async_pf_task_wake(token);
     283           0 :                 __this_cpu_write(apf_reason.token, 0);
     284           0 :                 wrmsrl(MSR_KVM_ASYNC_PF_ACK, 1);
     285             :         }
     286             : 
     287           0 :         set_irq_regs(old_regs);
     288           0 : }
     289             : 
     290           1 : static void __init paravirt_ops_setup(void)
     291             : {
     292           1 :         pv_info.name = "KVM";
     293             : 
     294           2 :         if (kvm_para_has_feature(KVM_FEATURE_NOP_IO_DELAY))
     295           1 :                 pv_ops.cpu.io_delay = kvm_io_delay;
     296             : 
     297             : #ifdef CONFIG_X86_IO_APIC
     298           1 :         no_timer_check = 1;
     299             : #endif
     300           1 : }
     301             : 
     302           4 : static void kvm_register_steal_time(void)
     303             : {
     304           4 :         int cpu = smp_processor_id();
     305           4 :         struct kvm_steal_time *st = &per_cpu(steal_time, cpu);
     306             : 
     307           4 :         if (!has_steal_clock)
     308             :                 return;
     309             : 
     310           4 :         wrmsrl(MSR_KVM_STEAL_TIME, (slow_virt_to_phys(st) | KVM_MSR_ENABLED));
     311           4 :         pr_info("stealtime: cpu %d, msr %llx\n", cpu,
     312             :                 (unsigned long long) slow_virt_to_phys(st));
     313             : }
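
The steal_time area is declared __aligned(64) above, so the low six bits of its physical address are zero and bit 0 is free to carry KVM_MSR_ENABLED in the same MSR value. A small sketch of that packing (the physical address is made up for illustration):

    #include <stdint.h>
    #include <stdio.h>

    #define KVM_MSR_ENABLED 1ULL            /* bit 0, as in the kernel ABI */

    int main(void)
    {
            uint64_t pa = 0x1234ffc0ULL;    /* illustrative 64-byte-aligned PA */
            uint64_t msr_val = pa | KVM_MSR_ENABLED;

            printf("MSR_KVM_STEAL_TIME <- %#llx (area %#llx, enable bit %llu)\n",
                   (unsigned long long)msr_val,
                   (unsigned long long)(msr_val & ~0x3fULL),
                   (unsigned long long)(msr_val & KVM_MSR_ENABLED));
            return 0;
    }
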
     314             : 
     315             : static DEFINE_PER_CPU_DECRYPTED(unsigned long, kvm_apic_eoi) = KVM_PV_EOI_DISABLED;
     316             : 
     317       89323 : static notrace void kvm_guest_apic_eoi_write(u32 reg, u32 val)
     318             : {
     319             :         /**
     320             :          * This relies on __test_and_clear_bit to modify the memory
     321             :          * in a way that is atomic with respect to the local CPU.
     322             :          * The hypervisor only accesses this memory from the local CPU so
     323             :          * there's no need for lock or memory barriers.
     324             :          * An optimization barrier is implied in apic write.
     325             :          */
     326      181008 :         if (__test_and_clear_bit(KVM_PV_EOI_BIT, this_cpu_ptr(&kvm_apic_eoi)))
     327             :                 return;
     328        2149 :         apic->native_eoi_write(APIC_EOI, APIC_EOI_ACK);
     329             : }
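
The fast path above is a single non-atomic test-and-clear of a bit in memory shared with the hypervisor (which sets the bit when a real EOI write can be skipped); only when the bit was already clear does the guest fall back to an actual APIC EOI write. A user-space sketch of that decision, with a helper that mimics __test_and_clear_bit():

    #include <stdbool.h>
    #include <stdio.h>

    #define KVM_PV_EOI_BIT 0        /* bit 0 of the shared word, as in the ABI */

    /* Non-atomic test-and-clear, like the kernel's __test_and_clear_bit():
     * sufficient because only the local CPU and the hypervisor touch it. */
    static bool test_and_clear_bit(int nr, unsigned long *addr)
    {
            unsigned long mask = 1UL << nr;
            bool old = (*addr & mask) != 0;

            *addr &= ~mask;
            return old;
    }

    int main(void)
    {
            unsigned long kvm_apic_eoi = 1UL << KVM_PV_EOI_BIT; /* host armed it */

            if (test_and_clear_bit(KVM_PV_EOI_BIT, &kvm_apic_eoi))
                    puts("EOI absorbed via shared memory, APIC write skipped");
            else
                    puts("fall back to apic->native_eoi_write()");
            return 0;
    }
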
     330             : 
     331           4 : static void kvm_guest_cpu_init(void)
     332             : {
     333           8 :         if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF_INT) && kvmapf) {
      334           0 :                 u64 pa;
     335             : 
     336           0 :                 WARN_ON_ONCE(!static_branch_likely(&kvm_async_pf_enabled));
     337             : 
     338           0 :                 pa = slow_virt_to_phys(this_cpu_ptr(&apf_reason));
     339           0 :                 pa |= KVM_ASYNC_PF_ENABLED | KVM_ASYNC_PF_DELIVERY_AS_INT;
     340             : 
     341           0 :                 if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF_VMEXIT))
     342           0 :                         pa |= KVM_ASYNC_PF_DELIVERY_AS_PF_VMEXIT;
     343             : 
     344           0 :                 wrmsrl(MSR_KVM_ASYNC_PF_INT, HYPERVISOR_CALLBACK_VECTOR);
     345             : 
     346           0 :                 wrmsrl(MSR_KVM_ASYNC_PF_EN, pa);
     347           0 :                 __this_cpu_write(apf_reason.enabled, 1);
     348           0 :                 pr_info("KVM setup async PF for cpu %d\n", smp_processor_id());
     349             :         }
     350             : 
     351           8 :         if (kvm_para_has_feature(KVM_FEATURE_PV_EOI)) {
     352           4 :                 unsigned long pa;
     353             : 
     354             :                 /* Size alignment is implied but just to make it explicit. */
     355           4 :                 BUILD_BUG_ON(__alignof__(kvm_apic_eoi) < 4);
     356           4 :                 __this_cpu_write(kvm_apic_eoi, 0);
     357           4 :                 pa = slow_virt_to_phys(this_cpu_ptr(&kvm_apic_eoi))
     358             :                         | KVM_MSR_ENABLED;
     359           4 :                 wrmsrl(MSR_KVM_PV_EOI_EN, pa);
     360             :         }
     361             : 
     362           4 :         if (has_steal_clock)
     363           4 :                 kvm_register_steal_time();
     364           4 : }
     365             : 
     366           0 : static void kvm_pv_disable_apf(void)
     367             : {
     368           0 :         if (!__this_cpu_read(apf_reason.enabled))
     369             :                 return;
     370             : 
     371           0 :         wrmsrl(MSR_KVM_ASYNC_PF_EN, 0);
     372           0 :         __this_cpu_write(apf_reason.enabled, 0);
     373             : 
     374           0 :         pr_info("Unregister pv shared memory for cpu %d\n", smp_processor_id());
     375             : }
     376             : 
     377           0 : static void kvm_pv_guest_cpu_reboot(void *unused)
     378             : {
     379             :         /*
      380             :          * We disable PV EOI before we load a new kernel by kexec, since
      381             :          * MSR_KVM_PV_EOI_EN stores a pointer into the old kernel's memory.
      382             :          * The new kernel can re-enable it when it boots.
     383             :          */
     384           0 :         if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
     385           0 :                 wrmsrl(MSR_KVM_PV_EOI_EN, 0);
     386           0 :         kvm_pv_disable_apf();
     387           0 :         kvm_disable_steal_time();
     388           0 : }
     389             : 
     390           0 : static int kvm_pv_reboot_notify(struct notifier_block *nb,
     391             :                                 unsigned long code, void *unused)
     392             : {
     393           0 :         if (code == SYS_RESTART)
     394           0 :                 on_each_cpu(kvm_pv_guest_cpu_reboot, NULL, 1);
     395           0 :         return NOTIFY_DONE;
     396             : }
     397             : 
     398             : static struct notifier_block kvm_pv_reboot_nb = {
     399             :         .notifier_call = kvm_pv_reboot_notify,
     400             : };
     401             : 
     402      264445 : static u64 kvm_steal_clock(int cpu)
     403             : {
     404      264445 :         u64 steal;
     405      264445 :         struct kvm_steal_time *src;
     406      264445 :         int version;
     407             : 
     408      264445 :         src = &per_cpu(steal_time, cpu);
     409      264446 :         do {
     410      264446 :                 version = src->version;
     411      264446 :                 virt_rmb();
     412      264400 :                 steal = src->steal;
     413      264400 :                 virt_rmb();
     414      264962 :         } while ((version & 1) || (version != src->version));
     415             : 
     416      264961 :         return steal;
     417             : }
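
The retry loop above is a seqcount-style read: the host bumps version to an odd value before updating steal and back to an even value after, so the reader retries on an odd or changed version. A user-space analog of the reader side, a sketch in which C11 atomics and an acquire fence stand in for the kernel's virt_rmb() barriers:

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    struct steal_time {
            _Atomic uint32_t version;
            _Atomic uint64_t steal;
    };

    static uint64_t read_steal(struct steal_time *src)
    {
            uint32_t version;
            uint64_t steal;

            do {
                    version = atomic_load_explicit(&src->version,
                                                   memory_order_acquire);
                    steal = atomic_load_explicit(&src->steal,
                                                 memory_order_relaxed);
                    /* order the data read before the re-check, like virt_rmb() */
                    atomic_thread_fence(memory_order_acquire);
            } while ((version & 1) ||
                     version != atomic_load_explicit(&src->version,
                                                     memory_order_relaxed));
            return steal;
    }

    int main(void)
    {
            struct steal_time st = { 2, 123456 };   /* even: no update running */

            printf("steal = %llu ns\n", (unsigned long long)read_steal(&st));
            return 0;
    }
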
     418             : 
     419           0 : void kvm_disable_steal_time(void)
     420             : {
     421           0 :         if (!has_steal_clock)
     422             :                 return;
     423             : 
     424           0 :         wrmsr(MSR_KVM_STEAL_TIME, 0, 0);
     425             : }
     426             : 
     427             : static inline void __set_percpu_decrypted(void *ptr, unsigned long size)
     428             : {
     429             :         early_set_memory_decrypted((unsigned long) ptr, size);
     430             : }
     431             : 
     432             : /*
     433             :  * Iterate through all possible CPUs and map the memory region pointed
     434             :  * by apf_reason, steal_time and kvm_apic_eoi as decrypted at once.
     435             :  *
     436             :  * Note: we iterate through all possible CPUs to ensure that CPUs
       437             :  * hotplugged later will have their per-cpu variables already mapped
       438             :  * as decrypted.
     439             :  */
     440           1 : static void __init sev_map_percpu_data(void)
     441             : {
     442           1 :         int cpu;
     443             : 
     444           1 :         if (!sev_active())
     445           1 :                 return;
     446             : 
     447             :         for_each_possible_cpu(cpu) {
     448             :                 __set_percpu_decrypted(&per_cpu(apf_reason, cpu), sizeof(apf_reason));
     449             :                 __set_percpu_decrypted(&per_cpu(steal_time, cpu), sizeof(steal_time));
     450             :                 __set_percpu_decrypted(&per_cpu(kvm_apic_eoi, cpu), sizeof(kvm_apic_eoi));
     451             :         }
     452             : }
     453             : 
     454           2 : static bool pv_tlb_flush_supported(void)
     455             : {
     456           2 :         return (kvm_para_has_feature(KVM_FEATURE_PV_TLB_FLUSH) &&
     457           4 :                 !kvm_para_has_hint(KVM_HINTS_REALTIME) &&
     458           2 :                 kvm_para_has_feature(KVM_FEATURE_STEAL_TIME));
     459             : }
     460             : 
     461             : static DEFINE_PER_CPU(cpumask_var_t, __pv_cpu_mask);
     462             : 
     463             : #ifdef CONFIG_SMP
     464             : 
     465           2 : static bool pv_ipi_supported(void)
     466             : {
     467           4 :         return kvm_para_has_feature(KVM_FEATURE_PV_SEND_IPI);
     468             : }
     469             : 
     470           1 : static bool pv_sched_yield_supported(void)
     471             : {
     472           1 :         return (kvm_para_has_feature(KVM_FEATURE_PV_SCHED_YIELD) &&
     473           2 :                 !kvm_para_has_hint(KVM_HINTS_REALTIME) &&
     474           1 :             kvm_para_has_feature(KVM_FEATURE_STEAL_TIME));
     475             : }
     476             : 
     477             : #define KVM_IPI_CLUSTER_SIZE    (2 * BITS_PER_LONG)
     478             : 
     479       14431 : static void __send_ipi_mask(const struct cpumask *mask, int vector)
     480             : {
     481       14431 :         unsigned long flags;
     482       14431 :         int cpu, apic_id, icr;
     483       14431 :         int min = 0, max = 0;
     484             : #ifdef CONFIG_X86_64
     485       14431 :         __uint128_t ipi_bitmap = 0;
     486             : #else
     487             :         u64 ipi_bitmap = 0;
     488             : #endif
     489       14431 :         long ret;
     490             : 
     491       14431 :         if (cpumask_empty(mask))
     492          14 :                 return;
     493             : 
     494       28834 :         local_irq_save(flags);
     495             : 
     496       14417 :         switch (vector) {
     497             :         default:
     498             :                 icr = APIC_DM_FIXED | vector;
     499             :                 break;
     500           0 :         case NMI_VECTOR:
     501           0 :                 icr = APIC_DM_NMI;
     502           0 :                 break;
     503             :         }
     504             : 
     505       28836 :         for_each_cpu(cpu, mask) {
     506       14419 :                 apic_id = per_cpu(x86_cpu_to_apicid, cpu);
     507       14419 :                 if (!ipi_bitmap) {
     508             :                         min = max = apic_id;
     509           2 :                 } else if (apic_id < min && max - apic_id < KVM_IPI_CLUSTER_SIZE) {
     510           0 :                         ipi_bitmap <<= min - apic_id;
     511           0 :                         min = apic_id;
     512           2 :                 } else if (apic_id < min + KVM_IPI_CLUSTER_SIZE) {
     513           2 :                         max = apic_id < max ? max : apic_id;
     514             :                 } else {
     515           0 :                         ret = kvm_hypercall4(KVM_HC_SEND_IPI, (unsigned long)ipi_bitmap,
     516           0 :                                 (unsigned long)(ipi_bitmap >> BITS_PER_LONG), min, icr);
     517           0 :                         WARN_ONCE(ret < 0, "kvm-guest: failed to send PV IPI: %ld",
     518             :                                   ret);
     519           0 :                         min = max = apic_id;
     520           0 :                         ipi_bitmap = 0;
     521             :                 }
     522       14419 :                 __set_bit(apic_id - min, (unsigned long *)&ipi_bitmap);
     523             :         }
     524             : 
     525       14417 :         if (ipi_bitmap) {
     526       28834 :                 ret = kvm_hypercall4(KVM_HC_SEND_IPI, (unsigned long)ipi_bitmap,
     527       14417 :                         (unsigned long)(ipi_bitmap >> BITS_PER_LONG), min, icr);
     528       14417 :                 WARN_ONCE(ret < 0, "kvm-guest: failed to send PV IPI: %ld",
     529             :                           ret);
     530             :         }
     531             : 
     532       14862 :         local_irq_restore(flags);
     533             : }
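
Each KVM_HC_SEND_IPI hypercall carries up to 128 destinations as a bitmap anchored at the lowest APIC ID seen (min); the loop above additionally re-anchors the bitmap when a smaller ID arrives and flushes a hypercall when an ID falls outside the window. A simplified user-space sketch of the packing, with ascending, illustrative APIC IDs so re-anchoring never triggers:

    #include <stdint.h>
    #include <stdio.h>

    #define KVM_IPI_CLUSTER_SIZE 128        /* 2 * BITS_PER_LONG on x86-64 */

    int main(void)
    {
            int apic_ids[] = { 5, 7, 12, 70 };      /* illustrative, ascending */
            __uint128_t ipi_bitmap = 0;
            int min = apic_ids[0];
            unsigned int i;

            for (i = 0; i < sizeof(apic_ids) / sizeof(apic_ids[0]); i++) {
                    if (apic_ids[i] - min >= KVM_IPI_CLUSTER_SIZE)
                            break;          /* kernel would flush and restart */
                    ipi_bitmap |= (__uint128_t)1 << (apic_ids[i] - min);
            }

            printf("base apic id %d, bitmap low/high %#llx/%#llx\n", min,
                   (unsigned long long)(uint64_t)ipi_bitmap,
                   (unsigned long long)(uint64_t)(ipi_bitmap >> 64));
            return 0;
    }
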
     534             : 
     535       14430 : static void kvm_send_ipi_mask(const struct cpumask *mask, int vector)
     536             : {
     537       14430 :         __send_ipi_mask(mask, vector);
     538       14431 : }
     539             : 
     540           0 : static void kvm_send_ipi_mask_allbutself(const struct cpumask *mask, int vector)
     541             : {
     542           0 :         unsigned int this_cpu = smp_processor_id();
     543           0 :         struct cpumask *new_mask = this_cpu_cpumask_var_ptr(__pv_cpu_mask);
     544           0 :         const struct cpumask *local_mask;
     545             : 
     546           0 :         cpumask_copy(new_mask, mask);
     547           0 :         cpumask_clear_cpu(this_cpu, new_mask);
     548           0 :         local_mask = new_mask;
     549           0 :         __send_ipi_mask(local_mask, vector);
     550           0 : }
     551             : 
     552             : /*
     553             :  * Set the IPI entry points
     554             :  */
     555           1 : static void kvm_setup_pv_ipi(void)
     556             : {
     557           1 :         apic->send_IPI_mask = kvm_send_ipi_mask;
     558           1 :         apic->send_IPI_mask_allbutself = kvm_send_ipi_mask_allbutself;
     559           1 :         pr_info("setup PV IPIs\n");
     560           1 : }
     561             : 
     562          62 : static void kvm_smp_send_call_func_ipi(const struct cpumask *mask)
     563             : {
     564          62 :         int cpu;
     565             : 
     566          62 :         native_send_call_func_ipi(mask);
     567             : 
     568             :         /* Make sure other vCPUs get a chance to run if they need to. */
     569         181 :         for_each_cpu(cpu, mask) {
     570          89 :                 if (vcpu_is_preempted(cpu)) {
     571          32 :                         kvm_hypercall1(KVM_HC_SCHED_YIELD, per_cpu(x86_cpu_to_apicid, cpu));
     572          32 :                         break;
     573             :                 }
     574             :         }
     575          62 : }
     576             : 
     577           1 : static void __init kvm_smp_prepare_boot_cpu(void)
     578             : {
     579             :         /*
     580             :          * Map the per-cpu variables as decrypted before kvm_guest_cpu_init()
     581             :          * shares the guest physical address with the hypervisor.
     582             :          */
     583           1 :         sev_map_percpu_data();
     584             : 
     585           1 :         kvm_guest_cpu_init();
     586           1 :         native_smp_prepare_boot_cpu();
     587           1 :         kvm_spinlock_init();
     588           1 : }
     589             : 
     590           0 : static void kvm_guest_cpu_offline(void)
     591             : {
     592           0 :         kvm_disable_steal_time();
     593           0 :         if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
     594           0 :                 wrmsrl(MSR_KVM_PV_EOI_EN, 0);
     595           0 :         kvm_pv_disable_apf();
     596           0 :         apf_task_wake_all();
     597           0 : }
     598             : 
     599           3 : static int kvm_cpu_online(unsigned int cpu)
     600             : {
     601           3 :         local_irq_disable();
     602           3 :         kvm_guest_cpu_init();
     603           3 :         local_irq_enable();
     604           3 :         return 0;
     605             : }
     606             : 
     607           0 : static int kvm_cpu_down_prepare(unsigned int cpu)
     608             : {
     609           0 :         local_irq_disable();
     610           0 :         kvm_guest_cpu_offline();
     611           0 :         local_irq_enable();
     612           0 :         return 0;
     613             : }
     614             : #endif
     615             : 
     616        3240 : static void kvm_flush_tlb_others(const struct cpumask *cpumask,
     617             :                         const struct flush_tlb_info *info)
     618             : {
     619        3240 :         u8 state;
     620        3240 :         int cpu;
     621        3240 :         struct kvm_steal_time *src;
     622        3240 :         struct cpumask *flushmask = this_cpu_cpumask_var_ptr(__pv_cpu_mask);
     623             : 
     624        3240 :         cpumask_copy(flushmask, cpumask);
     625             :         /*
      626             :          * We have to call flush only on online vCPUs, and
      627             :          * queue flush_on_enter for preempted vCPUs.
     628             :          */
     629       12619 :         for_each_cpu(cpu, flushmask) {
     630        6139 :                 src = &per_cpu(steal_time, cpu);
     631        6139 :                 state = READ_ONCE(src->preempted);
     632        6139 :                 if ((state & KVM_VCPU_PREEMPTED)) {
     633        2677 :                         if (try_cmpxchg(&src->preempted, &state,
     634             :                                         state | KVM_VCPU_FLUSH_TLB))
     635       12056 :                                 __cpumask_clear_cpu(cpu, flushmask);
     636             :                 }
     637             :         }
     638             : 
     639        3240 :         native_flush_tlb_others(flushmask, info);
     640        3240 : }
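
For a preempted vCPU, the guest tries to set KVM_VCPU_FLUSH_TLB in the shared preempted byte so the host flushes that vCPU's TLB on its next VM entry and the flush IPI can be skipped; if the cmpxchg loses a race with the host, the vCPU simply stays in the IPI mask. A user-space analog of that single attempt, with C11 atomics in place of try_cmpxchg():

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define KVM_VCPU_PREEMPTED (1 << 0)     /* shared-byte flags, kernel ABI */
    #define KVM_VCPU_FLUSH_TLB (1 << 1)

    static bool defer_flush(_Atomic uint8_t *preempted)
    {
            uint8_t state = atomic_load(preempted);

            if (!(state & KVM_VCPU_PREEMPTED))
                    return false;           /* vCPU running: must send the IPI */

            /* One attempt only: on failure, fall back to the IPI. */
            return atomic_compare_exchange_strong(preempted, &state,
                                                  state | KVM_VCPU_FLUSH_TLB);
    }

    int main(void)
    {
            _Atomic uint8_t preempted = KVM_VCPU_PREEMPTED;

            printf("TLB flush deferred to host: %s\n",
                   defer_flush(&preempted) ? "yes" : "no");
            return 0;
    }
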
     641             : 
     642           1 : static void __init kvm_guest_init(void)
     643             : {
     644           1 :         int i;
     645             : 
     646           1 :         paravirt_ops_setup();
     647           1 :         register_reboot_notifier(&kvm_pv_reboot_nb);
     648         258 :         for (i = 0; i < KVM_TASK_SLEEP_HASHSIZE; i++)
     649         256 :                 raw_spin_lock_init(&async_pf_sleepers[i].lock);
     650             : 
     651           2 :         if (kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
     652           1 :                 has_steal_clock = 1;
     653           1 :                 pv_ops.time.steal_clock = kvm_steal_clock;
     654             :         }
     655             : 
     656           1 :         if (pv_tlb_flush_supported()) {
     657           1 :                 pv_ops.mmu.flush_tlb_others = kvm_flush_tlb_others;
     658           1 :                 pv_ops.mmu.tlb_remove_table = tlb_remove_table;
     659           1 :                 pr_info("KVM setup pv remote TLB flush\n");
     660             :         }
     661             : 
     662           2 :         if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
     663           1 :                 apic_set_eoi_write(kvm_guest_apic_eoi_write);
     664             : 
     665           2 :         if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF_INT) && kvmapf) {
     666           0 :                 static_branch_enable(&kvm_async_pf_enabled);
     667           0 :                 alloc_intr_gate(HYPERVISOR_CALLBACK_VECTOR, asm_sysvec_kvm_asyncpf_interrupt);
     668             :         }
     669             : 
     670             : #ifdef CONFIG_SMP
     671           1 :         smp_ops.smp_prepare_boot_cpu = kvm_smp_prepare_boot_cpu;
     672           1 :         if (pv_sched_yield_supported()) {
     673           1 :                 smp_ops.send_call_func_ipi = kvm_smp_send_call_func_ipi;
     674           1 :                 pr_info("setup PV sched yield\n");
     675             :         }
     676           1 :         if (cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "x86/kvm:online",
     677             :                                       kvm_cpu_online, kvm_cpu_down_prepare) < 0)
     678           0 :                 pr_err("failed to install cpu hotplug callbacks\n");
     679             : #else
     680             :         sev_map_percpu_data();
     681             :         kvm_guest_cpu_init();
     682             : #endif
     683             : 
     684             :         /*
     685             :          * Hard lockup detection is enabled by default. Disable it, as guests
     686             :          * can get false positives too easily, for example if the host is
     687             :          * overcommitted.
     688             :          */
     689           1 :         hardlockup_detector_disable();
     690           1 : }
     691             : 
     692           1 : static noinline uint32_t __kvm_cpuid_base(void)
     693             : {
     694           1 :         if (boot_cpu_data.cpuid_level < 0)
     695             :                 return 0;       /* So we don't blow up on old processors */
     696             : 
     697           1 :         if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
      698           1 :                 return hypervisor_cpuid_base("KVMKVMKVM\0\0", 0);
     699             : 
     700             :         return 0;
     701             : }
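
hypervisor_cpuid_base() compares the 12 signature bytes returned in EBX/ECX/EDX against "KVMKVMKVM\0\0" (the kernel also scans further 0x100-spaced bases). A self-contained user-space probe of the primary leaf, a sketch that is only meaningful when run inside a guest with the HYPERVISOR CPUID bit set:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            uint32_t eax, sig[3];

            /* CPUID leaf 0x40000000: hypervisor vendor signature. */
            __asm__ __volatile__("cpuid"
                                 : "=a"(eax), "=b"(sig[0]),
                                   "=c"(sig[1]), "=d"(sig[2])
                                 : "a"(0x40000000), "c"(0));

            if (!memcmp(sig, "KVMKVMKVM\0\0", 12))
                    printf("KVM signature found, max hypervisor leaf %#x\n", eax);
            else
                    printf("not KVM: \"%.12s\"\n", (char *)sig);
            return 0;
    }
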
     702             : 
     703          31 : static inline uint32_t kvm_cpuid_base(void)
     704             : {
     705          31 :         static int kvm_cpuid_base = -1;
     706             : 
     707          31 :         if (kvm_cpuid_base == -1)
     708           1 :                 kvm_cpuid_base = __kvm_cpuid_base();
     709             : 
     710          31 :         return kvm_cpuid_base;
     711             : }
     712             : 
     713           2 : bool kvm_para_available(void)
     714             : {
     715           1 :         return kvm_cpuid_base() != 0;
     716             : }
     717             : EXPORT_SYMBOL_GPL(kvm_para_available);
     718             : 
     719          24 : unsigned int kvm_arch_para_features(void)
     720             : {
     721          20 :         return cpuid_eax(kvm_cpuid_base() | KVM_CPUID_FEATURES);
     722             : }
     723             : 
     724           4 : unsigned int kvm_arch_para_hints(void)
     725             : {
     726           4 :         return cpuid_edx(kvm_cpuid_base() | KVM_CPUID_FEATURES);
     727             : }
     728             : EXPORT_SYMBOL_GPL(kvm_arch_para_hints);
     729             : 
     730           1 : static uint32_t __init kvm_detect(void)
     731             : {
     732           1 :         return kvm_cpuid_base();
     733             : }
     734             : 
     735           1 : static void __init kvm_apic_init(void)
     736             : {
     737             : #if defined(CONFIG_SMP)
     738           2 :         if (pv_ipi_supported())
     739           1 :                 kvm_setup_pv_ipi();
     740             : #endif
     741           1 : }
     742             : 
     743           0 : static bool __init kvm_msi_ext_dest_id(void)
     744             : {
     745           0 :         return kvm_para_has_feature(KVM_FEATURE_MSI_EXT_DEST_ID);
     746             : }
     747             : 
     748           1 : static void __init kvm_init_platform(void)
     749             : {
     750           1 :         kvmclock_init();
     751           1 :         x86_platform.apic_post_init = kvm_apic_init;
     752           1 : }
     753             : 
     754             : #if defined(CONFIG_AMD_MEM_ENCRYPT)
     755             : static void kvm_sev_es_hcall_prepare(struct ghcb *ghcb, struct pt_regs *regs)
     756             : {
     757             :         /* RAX and CPL are already in the GHCB */
     758             :         ghcb_set_rbx(ghcb, regs->bx);
     759             :         ghcb_set_rcx(ghcb, regs->cx);
     760             :         ghcb_set_rdx(ghcb, regs->dx);
     761             :         ghcb_set_rsi(ghcb, regs->si);
     762             : }
     763             : 
     764             : static bool kvm_sev_es_hcall_finish(struct ghcb *ghcb, struct pt_regs *regs)
     765             : {
     766             :         /* No checking of the return state needed */
     767             :         return true;
     768             : }
     769             : #endif
     770             : 
     771             : const __initconst struct hypervisor_x86 x86_hyper_kvm = {
     772             :         .name                           = "KVM",
     773             :         .detect                         = kvm_detect,
     774             :         .type                           = X86_HYPER_KVM,
     775             :         .init.guest_late_init           = kvm_guest_init,
     776             :         .init.x2apic_available          = kvm_para_available,
     777             :         .init.msi_ext_dest_id           = kvm_msi_ext_dest_id,
     778             :         .init.init_platform             = kvm_init_platform,
     779             : #if defined(CONFIG_AMD_MEM_ENCRYPT)
     780             :         .runtime.sev_es_hcall_prepare   = kvm_sev_es_hcall_prepare,
     781             :         .runtime.sev_es_hcall_finish    = kvm_sev_es_hcall_finish,
     782             : #endif
     783             : };
     784             : 
     785           1 : static __init int activate_jump_labels(void)
     786             : {
     787           1 :         if (has_steal_clock) {
     788           1 :                 static_key_slow_inc(&paravirt_steal_enabled);
     789           1 :                 if (steal_acc)
     790           1 :                         static_key_slow_inc(&paravirt_steal_rq_enabled);
     791             :         }
     792             : 
     793           1 :         return 0;
     794             : }
     795             : arch_initcall(activate_jump_labels);
     796             : 
     797           1 : static __init int kvm_alloc_cpumask(void)
     798             : {
     799           1 :         int cpu;
     800           1 :         bool alloc = false;
     801             : 
     802           1 :         if (!kvm_para_available() || nopv)
     803             :                 return 0;
     804             : 
     805           1 :         if (pv_tlb_flush_supported())
     806           1 :                 alloc = true;
     807             : 
     808             : #if defined(CONFIG_SMP)
     809           2 :         if (pv_ipi_supported())
     810             :                 alloc = true;
     811             : #endif
     812             : 
     813           0 :         if (alloc)
     814           5 :                 for_each_possible_cpu(cpu) {
     815           5 :                         zalloc_cpumask_var_node(per_cpu_ptr(&__pv_cpu_mask, cpu),
     816             :                                 GFP_KERNEL, cpu_to_node(cpu));
     817             :                 }
     818             : 
     819             :         return 0;
     820             : }
     821             : arch_initcall(kvm_alloc_cpumask);
     822             : 
     823             : #ifdef CONFIG_PARAVIRT_SPINLOCKS
     824             : 
     825             : /* Kick a cpu by its apicid. Used to wake up a halted vcpu */
     826          76 : static void kvm_kick_cpu(int cpu)
     827             : {
     828          76 :         int apicid;
     829          76 :         unsigned long flags = 0;
     830             : 
     831          76 :         apicid = per_cpu(x86_cpu_to_apicid, cpu);
     832          76 :         kvm_hypercall2(KVM_HC_KICK_CPU, flags, apicid);
     833          76 : }
     834             : 
     835             : #include <asm/qspinlock.h>
     836             : 
     837          76 : static void kvm_wait(u8 *ptr, u8 val)
     838             : {
     839          76 :         unsigned long flags;
     840             : 
     841          76 :         if (in_nmi())
     842             :                 return;
     843             : 
     844         152 :         local_irq_save(flags);
     845             : 
     846          76 :         if (READ_ONCE(*ptr) != val)
     847           0 :                 goto out;
     848             : 
     849             :         /*
      850             :          * Halt until it's our turn and we are kicked. We do a safe halt in
      851             :          * the irq-enabled case to avoid hanging if lock info is overwritten
      852             :          * in the irq spinlock slowpath and no spurious interrupt occurs.
     853             :          */
     854          76 :         if (arch_irqs_disabled_flags(flags))
     855          72 :                 halt();
     856             :         else
     857           4 :                 safe_halt();
     858             : 
     859          76 : out:
     860          76 :         local_irq_restore(flags);
     861             : }
     862             : 
     863             : #ifdef CONFIG_X86_32
     864             : __visible bool __kvm_vcpu_is_preempted(long cpu)
     865             : {
     866             :         struct kvm_steal_time *src = &per_cpu(steal_time, cpu);
     867             : 
     868             :         return !!(src->preempted & KVM_VCPU_PREEMPTED);
     869             : }
     870             : PV_CALLEE_SAVE_REGS_THUNK(__kvm_vcpu_is_preempted);
     871             : 
     872             : #else
     873             : 
     874             : #include <asm/asm-offsets.h>
     875             : 
     876             : extern bool __raw_callee_save___kvm_vcpu_is_preempted(long);
     877             : 
     878             : /*
     879             :  * Hand-optimize version for x86-64 to avoid 8 64-bit register saving and
     880             :  * restoring to/from the stack.
     881             :  */
     882             : asm(
     883             : ".pushsection .text;"
     884             : ".global __raw_callee_save___kvm_vcpu_is_preempted;"
     885             : ".type __raw_callee_save___kvm_vcpu_is_preempted, @function;"
     886             : "__raw_callee_save___kvm_vcpu_is_preempted:"
     887             : "movq      __per_cpu_offset(,%rdi,8), %rax;"
     888             : "cmpb      $0, " __stringify(KVM_STEAL_TIME_preempted) "+steal_time(%rax);"
     889             : "setne     %al;"
     890             : "ret;"
     891             : ".size __raw_callee_save___kvm_vcpu_is_preempted, .-__raw_callee_save___kvm_vcpu_is_preempted;"
     892             : ".popsection");
     893             : 
     894             : #endif
     895             : 
     896             : /*
     897             :  * Setup pv_lock_ops to exploit KVM_FEATURE_PV_UNHALT if present.
     898             :  */
     899           1 : void __init kvm_spinlock_init(void)
     900             : {
     901             :         /*
     902             :          * In case host doesn't support KVM_FEATURE_PV_UNHALT there is still an
     903             :          * advantage of keeping virt_spin_lock_key enabled: virt_spin_lock() is
     904             :          * preferred over native qspinlock when vCPU is preempted.
     905             :          */
     906           2 :         if (!kvm_para_has_feature(KVM_FEATURE_PV_UNHALT)) {
     907           0 :                 pr_info("PV spinlocks disabled, no host support\n");
     908           0 :                 return;
     909             :         }
     910             : 
     911             :         /*
     912             :          * Disable PV spinlocks and use native qspinlock when dedicated pCPUs
     913             :          * are available.
     914             :          */
     915           2 :         if (kvm_para_has_hint(KVM_HINTS_REALTIME)) {
     916           0 :                 pr_info("PV spinlocks disabled with KVM_HINTS_REALTIME hints\n");
     917           0 :                 goto out;
     918             :         }
     919             : 
     920           1 :         if (num_possible_cpus() == 1) {
     921           0 :                 pr_info("PV spinlocks disabled, single CPU\n");
     922           0 :                 goto out;
     923             :         }
     924             : 
     925           1 :         if (nopvspin) {
     926           0 :                 pr_info("PV spinlocks disabled, forced by \"nopvspin\" parameter\n");
     927           0 :                 goto out;
     928             :         }
     929             : 
     930           1 :         pr_info("PV spinlocks enabled\n");
     931             : 
     932           1 :         __pv_init_lock_hash();
     933           1 :         pv_ops.lock.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;
     934           1 :         pv_ops.lock.queued_spin_unlock =
     935             :                 PV_CALLEE_SAVE(__pv_queued_spin_unlock);
     936           1 :         pv_ops.lock.wait = kvm_wait;
     937           1 :         pv_ops.lock.kick = kvm_kick_cpu;
     938             : 
     939           2 :         if (kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
     940           1 :                 pv_ops.lock.vcpu_is_preempted =
     941             :                         PV_CALLEE_SAVE(__kvm_vcpu_is_preempted);
     942             :         }
     943             :         /*
      944             :          * When PV spinlock is enabled, which is preferred over
      945             :          * virt_spin_lock(), virt_spin_lock_key's value is meaningless.
     946             :          * Just disable it anyway.
     947             :          */
     948           0 : out:
     949           1 :         static_branch_disable(&virt_spin_lock_key);
     950             : }
     951             : 
     952             : #endif  /* CONFIG_PARAVIRT_SPINLOCKS */
     953             : 
     954             : #ifdef CONFIG_ARCH_CPUIDLE_HALTPOLL
     955             : 
     956           0 : static void kvm_disable_host_haltpoll(void *i)
     957             : {
     958           0 :         wrmsrl(MSR_KVM_POLL_CONTROL, 0);
     959           0 : }
     960             : 
     961           0 : static void kvm_enable_host_haltpoll(void *i)
     962             : {
     963           0 :         wrmsrl(MSR_KVM_POLL_CONTROL, 1);
     964           0 : }
     965             : 
     966           0 : void arch_haltpoll_enable(unsigned int cpu)
     967             : {
     968           0 :         if (!kvm_para_has_feature(KVM_FEATURE_POLL_CONTROL)) {
     969           0 :                 pr_err_once("host does not support poll control\n");
     970           0 :                 pr_err_once("host upgrade recommended\n");
     971           0 :                 return;
     972             :         }
     973             : 
      974             :         /* Enabling guest halt poll disables host halt poll */
     975           0 :         smp_call_function_single(cpu, kvm_disable_host_haltpoll, NULL, 1);
     976             : }
     977             : EXPORT_SYMBOL_GPL(arch_haltpoll_enable);
     978             : 
     979           0 : void arch_haltpoll_disable(unsigned int cpu)
     980             : {
     981           0 :         if (!kvm_para_has_feature(KVM_FEATURE_POLL_CONTROL))
     982             :                 return;
     983             : 
      984             :         /* Disabling guest halt poll re-enables host halt poll */
     985           0 :         smp_call_function_single(cpu, kvm_enable_host_haltpoll, NULL, 1);
     986             : }
     987             : EXPORT_SYMBOL_GPL(arch_haltpoll_disable);
     988             : #endif

Generated by: LCOV version 1.14