// SPDX-License-Identifier: GPL-2.0
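/*
 * Native instruction templates used to patch the indirect pv_ops
 * calls at boot when the kernel runs on bare metal: each byte array
 * below holds the exact machine code that replaces the corresponding
 * paravirt call site.
 */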
#include <linux/stringify.h>

#include <asm/paravirt.h>
#include <asm/asm-offsets.h>

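/*
 * PSTART/PEND bound the bytes of one template inside a patch_data_*
 * struct, PATCH copies those bytes into the instruction buffer via
 * paravirt_patch_insns(), and PATCH_CASE emits the matching switch
 * case for native_patch() below.
 */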
#define PSTART(d, m)	\
	patch_data_##d.m

#define PEND(d, m)	\
	(PSTART(d, m) + sizeof(patch_data_##d.m))

#define PATCH(d, m, insn_buff, len)	\
	paravirt_patch_insns(insn_buff, len, PSTART(d, m), PEND(d, m))

#define PATCH_CASE(ops, m, data, insn_buff, len)	\
	case PARAVIRT_PATCH(ops.m):			\
		return PATCH(data, ops##_##m, insn_buff, len)

#ifdef CONFIG_PARAVIRT_XXL
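/*
 * Templates for the CONFIG_PARAVIRT_XXL (Xen PV) operations; the
 * array sizes are the exact instruction lengths.
 */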
struct patch_xxl {
	const unsigned char	irq_irq_disable[1];
	const unsigned char	irq_irq_enable[1];
	const unsigned char	irq_save_fl[2];
	const unsigned char	mmu_read_cr2[3];
	const unsigned char	mmu_read_cr3[3];
	const unsigned char	mmu_write_cr3[3];
	const unsigned char	cpu_wbinvd[2];
	const unsigned char	mov64[3];
};

static const struct patch_xxl patch_data_xxl = {
	.irq_irq_disable	= { 0xfa },		// cli
	.irq_irq_enable		= { 0xfb },		// sti
	.irq_save_fl		= { 0x9c, 0x58 },	// pushf; pop %[re]ax
	.mmu_read_cr2		= { 0x0f, 0x20, 0xd0 },	// mov %cr2, %[re]ax
	.mmu_read_cr3		= { 0x0f, 0x20, 0xd8 },	// mov %cr3, %[re]ax
	.mmu_write_cr3		= { 0x0f, 0x22, 0xdf },	// mov %rdi, %cr3
	.cpu_wbinvd		= { 0x0f, 0x09 },	// wbinvd
	.mov64			= { 0x48, 0x89, 0xf8 },	// mov %rdi, %rax
};

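/*
 * Patch in "mov %rdi, %rax": the identity function, used for pv ops
 * that simply return their first argument on bare metal.
 */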
unsigned int paravirt_patch_ident_64(void *insn_buff, unsigned int len)
{
	return PATCH(xxl, mov64, insn_buff, len);
}
#endif /* CONFIG_PARAVIRT_XXL */

#ifdef CONFIG_PARAVIRT_SPINLOCKS
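/*
 * Native templates for the paravirt spinlock operations: a native
 * unlock is a single byte store of zero to the lock word, and a
 * native CPU is never preempted, so vcpu_is_preempted returns false.
 */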
struct patch_lock {
	unsigned char queued_spin_unlock[3];
	unsigned char vcpu_is_preempted[2];
};

static const struct patch_lock patch_data_lock = {
	.vcpu_is_preempted	= { 0x31, 0xc0 },	// xor %eax, %eax

# ifdef CONFIG_X86_64
	.queued_spin_unlock	= { 0xc6, 0x07, 0x00 },	// movb $0, (%rdi)
# else
	.queued_spin_unlock	= { 0xc6, 0x00, 0x00 },	// movb $0, (%eax)
# endif
};
#endif /* CONFIG_PARAVIRT_SPINLOCKS */

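/*
 * Replace the indirect pv_ops call of type @type at @addr with the
 * matching native template when one exists and the native
 * implementation is in use; otherwise fall back to the generic
 * patching in paravirt_patch_default().  Returns the number of
 * patched bytes.
 */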
unsigned int native_patch(u8 type, void *insn_buff, unsigned long addr,
			  unsigned int len)
{
	switch (type) {

#ifdef CONFIG_PARAVIRT_XXL
	PATCH_CASE(irq, save_fl, xxl, insn_buff, len);
	PATCH_CASE(irq, irq_enable, xxl, insn_buff, len);
	PATCH_CASE(irq, irq_disable, xxl, insn_buff, len);

	PATCH_CASE(mmu, read_cr2, xxl, insn_buff, len);
	PATCH_CASE(mmu, read_cr3, xxl, insn_buff, len);
	PATCH_CASE(mmu, write_cr3, xxl, insn_buff, len);

	PATCH_CASE(cpu, wbinvd, xxl, insn_buff, len);
#endif

#ifdef CONFIG_PARAVIRT_SPINLOCKS
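	/*
	 * Only patch the lock ops when the native implementations are
	 * still in place, i.e. not replaced by a hypervisor's paravirt
	 * spinlock code.
	 */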
	case PARAVIRT_PATCH(lock.queued_spin_unlock):
		if (pv_is_native_spin_unlock())
			return PATCH(lock, queued_spin_unlock, insn_buff, len);
		break;

	case PARAVIRT_PATCH(lock.vcpu_is_preempted):
		if (pv_is_native_vcpu_is_preempted())
			return PATCH(lock, vcpu_is_preempted, insn_buff, len);
		break;
#endif
	default:
		break;
	}

	return paravirt_patch_default(type, insn_buff, addr, len);
}