LCOV - code coverage report
Current view: top level - arch/x86/include/asm - text-patching.h
Test: landlock.info
Date: 2021-04-22 12:43:58
                Hit    Total    Coverage
Lines:           21       31      67.7 %
Functions:        0        0         -

          Line data    Source code
       1             : /* SPDX-License-Identifier: GPL-2.0 */
       2             : #ifndef _ASM_X86_TEXT_PATCHING_H
       3             : #define _ASM_X86_TEXT_PATCHING_H
       4             : 
       5             : #include <linux/types.h>
       6             : #include <linux/stddef.h>
       7             : #include <asm/ptrace.h>
       8             : 
       9             : struct paravirt_patch_site;
      10             : #ifdef CONFIG_PARAVIRT
      11             : void apply_paravirt(struct paravirt_patch_site *start,
      12             :                     struct paravirt_patch_site *end);
      13             : #else
      14             : static inline void apply_paravirt(struct paravirt_patch_site *start,
      15             :                                   struct paravirt_patch_site *end)
      16             : {}
      17             : #define __parainstructions      NULL
      18             : #define __parainstructions_end  NULL
      19             : #endif
      20             : 
      21             : /*
      22             :  * Currently, the max observed size in the kernel code is
      23             :  * JUMP_LABEL_NOP_SIZE/RELATIVEJUMP_SIZE, which are 5.
      24             :  * Raise it if needed.
      25             :  */
      26             : #define POKE_MAX_OPCODE_SIZE    5
      27             : 
      28             : extern void text_poke_early(void *addr, const void *opcode, size_t len);
      29             : 
      30             : /*
      31             :  * Clear and restore the kernel write-protection flag on the local CPU.
      32             :  * Allows the kernel to edit read-only pages.
      33             :  * Side-effect: any interrupt handler running between save and restore will have
      34             :  * the ability to write to read-only pages.
      35             :  *
      36             :  * Warning:
      37             :  * Code patching in the UP case is safe if NMIs and MCE handlers are stopped and
      38             :  * no thread can be preempted in the instructions being modified (no iret to an
      39             :  * invalid instruction possible) or if the instructions are changed from a
      40             :  * consistent state to another consistent state atomically.
      41             :  * On the local CPU you need to be protected against NMI or MCE handlers seeing
      42             :  * an inconsistent instruction while you patch.
      43             :  */
      44             : extern void *text_poke(void *addr, const void *opcode, size_t len);
      45             : extern void text_poke_sync(void);
      46             : extern void *text_poke_kgdb(void *addr, const void *opcode, size_t len);
      47             : extern int poke_int3_handler(struct pt_regs *regs);
      48             : extern void text_poke_bp(void *addr, const void *opcode, size_t len, const void *emulate);
      49             : 
      50             : extern void text_poke_queue(void *addr, const void *opcode, size_t len, const void *emulate);
      51             : extern void text_poke_finish(void);
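
text_poke_bp() patches a single live instruction through the INT3
breakpoint protocol, while text_poke_queue()/text_poke_finish() batch
several sites into one round of synchronization. A minimal caller
sketch, assuming a kernel context; repoint_call(), site and new_target
are hypothetical names, not part of this header:

    /* Retarget an existing 5-byte call site (hypothetical helper). */
    static void repoint_call(void *site, void *new_target)
    {
            /* Build the "call rel32" bytes aimed at new_target. */
            void *insn = text_gen_insn(CALL_INSN_OPCODE, site, new_target);

            /*
             * Install them; a NULL emulate argument means the newly
             * written instruction is the one emulated while the patch
             * is in flight.
             */
            text_poke_bp(site, insn, CALL_INSN_SIZE, NULL);
    }
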
      52             : 
      53             : #define INT3_INSN_SIZE          1
      54             : #define INT3_INSN_OPCODE        0xCC
      55             : 
      56             : #define RET_INSN_SIZE           1
      57             : #define RET_INSN_OPCODE         0xC3
      58             : 
      59             : #define CALL_INSN_SIZE          5
      60             : #define CALL_INSN_OPCODE        0xE8
      61             : 
      62             : #define JMP32_INSN_SIZE         5
      63             : #define JMP32_INSN_OPCODE       0xE9
      64             : 
      65             : #define JMP8_INSN_SIZE          2
      66             : #define JMP8_INSN_OPCODE        0xEB
      67             : 
      68             : #define DISP32_SIZE             4
      69             : 
      70         588 : static __always_inline int text_opcode_size(u8 opcode)
      71             : {
      72         588 :         int size = 0;
      73             : 
      74             : #define __CASE(insn)    \
      75             :         case insn##_INSN_OPCODE: size = insn##_INSN_SIZE; break
      76             : 
       77         588 :         switch (opcode) {
      78             :         __CASE(INT3);
      79             :         __CASE(RET);
      80             :         __CASE(CALL);
      81             :         __CASE(JMP32);
      82             :         __CASE(JMP8);
      83             :         }
      84             : 
      85             : #undef __CASE
      86             : 
      87         588 :         return size;
      88             : }
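
text_opcode_size() maps the first opcode byte of an instruction to the
length used by the patching code; opcodes outside the table yield 0. A
quick userspace-style illustration, assuming the macros above are in
scope (text_opcode_size_examples() is a hypothetical name):

    #include <assert.h>

    static void text_opcode_size_examples(void)
    {
            assert(text_opcode_size(CALL_INSN_OPCODE) == 5); /* 0xE8 */
            assert(text_opcode_size(JMP8_INSN_OPCODE) == 2); /* 0xEB */
            assert(text_opcode_size(0x90) == 0);  /* NOP: not in the table */
    }
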
      89             : 
      90             : union text_poke_insn {
      91             :         u8 text[POKE_MAX_OPCODE_SIZE];
      92             :         struct {
      93             :                 u8 opcode;
      94             :                 s32 disp;
      95             :         } __attribute__((packed));
      96             : };
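
The union gives two views of the same five bytes: text[] as the raw
stream handed to the text_poke*() routines, and the packed opcode/disp
pair matching the "one opcode byte plus rel32" encodings above. A
layout sketch with illustrative values:

    /* Both designators fill the packed anonymous struct (C11). */
    union text_poke_insn jmp = {
            .opcode = JMP32_INSN_OPCODE,    /* 0xE9 */
            .disp   = 0x11223344,
    };
    /*
     * jmp.text[] now holds E9 44 33 22 11: the displacement sits
     * little-endian immediately after the opcode byte.
     */
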
      97             : 
      98             : static __always_inline
      99         550 : void *text_gen_insn(u8 opcode, const void *addr, const void *dest)
     100             : {
     101         550 :         static union text_poke_insn insn; /* per instance */
     102         550 :         int size = text_opcode_size(opcode);
     103             : 
     104         550 :         insn.opcode = opcode;
     105             : 
     106         550 :         if (size > 1) {
     107         550 :                 insn.disp = (long)dest - (long)(addr + size);
     108         550 :                 if (size == 2) {
     109             :                         /*
      110             :                          * Ensure that for JMP8 the displacement
     111             :                          * actually fits the signed byte.
     112             :                          */
     113         550 :                         BUG_ON((insn.disp >> 31) != (insn.disp >> 7));
     114             :                 }
     115             :         }
     116             : 
     117         550 :         return &insn.text;
     118             : }
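
x86 relative branches are encoded relative to the first byte after the
instruction, which is why the displacement is computed against
addr + size. A worked example with hypothetical addresses:

    /* A JMP32 patched in at 0x1000, targeting 0x2000: */
    u8 *bytes = text_gen_insn(JMP32_INSN_OPCODE,
                              (void *)0x1000,   /* patch site  */
                              (void *)0x2000);  /* jump target */
    /*
     * disp = 0x2000 - (0x1000 + 5) = 0x0ffb, so the generated bytes
     * are E9 FB 0F 00 00.
     */
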
     119             : 
     120             : extern int after_bootmem;
     121             : extern __ro_after_init struct mm_struct *poking_mm;
     122             : extern __ro_after_init unsigned long poking_addr;
     123             : 
     124             : #ifndef CONFIG_UML_X86
     125             : static __always_inline
     126           1 : void int3_emulate_jmp(struct pt_regs *regs, unsigned long ip)
     127             : {
     128           1 :         regs->ip = ip;
     129           0 : }
     130             : 
     131             : static __always_inline
     132           1 : void int3_emulate_push(struct pt_regs *regs, unsigned long val)
     133             : {
     134             :         /*
     135             :          * The int3 handler in entry_64.S adds a gap between the
     136             :          * stack where the break point happened, and the saving of
     137             :          * pt_regs. We can extend the original stack because of
     138             :          * this gap. See the idtentry macro's create_gap option.
     139             :          *
     140             :          * Similarly entry_32.S will have a gap on the stack for (any) hardware
     141             :          * exception and pt_regs; see FIXUP_FRAME.
     142             :          */
     143           1 :         regs->sp -= sizeof(unsigned long);
     144           1 :         *(unsigned long *)regs->sp = val;
     145             : }
     146             : 
     147             : static __always_inline
     148           0 : unsigned long int3_emulate_pop(struct pt_regs *regs)
     149             : {
     150           0 :         unsigned long val = *(unsigned long *)regs->sp;
     151           0 :         regs->sp += sizeof(unsigned long);
     152           0 :         return val;
     153             : }
     154             : 
     155             : static __always_inline
     156           1 : void int3_emulate_call(struct pt_regs *regs, unsigned long func)
     157             : {
     158           1 :         int3_emulate_push(regs, regs->ip - INT3_INSN_SIZE + CALL_INSN_SIZE);
     159           1 :         int3_emulate_jmp(regs, func);
     160           0 : }
     161             : 
     162             : static __always_inline
     163           0 : void int3_emulate_ret(struct pt_regs *regs)
     164             : {
     165           0 :         unsigned long ip = int3_emulate_pop(regs);
     166           0 :         int3_emulate_jmp(regs, ip);
     167           0 : }
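
These helpers let the breakpoint handler complete an instruction that
is mid-patch by editing pt_regs instead of running the (possibly
inconsistent) bytes under the INT3. A hypothetical sketch of the kind
of dispatch poke_int3_handler() performs; emulate_poked_insn() and its
parameters are illustrative only:

    static void emulate_poked_insn(struct pt_regs *regs, u8 opcode,
                                   unsigned long site, long disp)
    {
            switch (opcode) {
            case INT3_INSN_OPCODE:  /* an explicit INT3: nothing to emulate */
                    break;
            case RET_INSN_OPCODE:   /* pop the return ip and jump there */
                    int3_emulate_ret(regs);
                    break;
            case CALL_INSN_OPCODE:  /* push return ip, jump to target */
                    int3_emulate_call(regs, site + CALL_INSN_SIZE + disp);
                    break;
            case JMP32_INSN_OPCODE:
            case JMP8_INSN_OPCODE:
                    int3_emulate_jmp(regs,
                                     site + text_opcode_size(opcode) + disp);
                    break;
            }
    }
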
     168             : #endif /* !CONFIG_UML_X86 */
     169             : 
     170             : #endif /* _ASM_X86_TEXT_PATCHING_H */

Generated by: LCOV version 1.14