LCOV - code coverage report
Current view: top level - arch/x86/kernel - step.c (source / functions)
Test: landlock.info
Date: 2021-04-22 12:43:58

                     Hit    Total    Coverage
Lines:                 6       70       8.6 %
Functions:             1        8      12.5 %

          Line data    Source code
       1             : // SPDX-License-Identifier: GPL-2.0
       2             : /*
       3             :  * x86 single-step support code, common to 32-bit and 64-bit.
       4             :  */
       5             : #include <linux/sched.h>
       6             : #include <linux/sched/task_stack.h>
       7             : #include <linux/mm.h>
       8             : #include <linux/ptrace.h>
       9             : #include <asm/desc.h>
      10             : #include <asm/mmu_context.h>
      11             : 
      12           0 : unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *regs)
      13             : {
      14           0 :         unsigned long addr, seg;
      15             : 
      16           0 :         addr = regs->ip;
      17           0 :         seg = regs->cs;
      18           0 :         if (v8086_mode(regs)) {
      19             :                 addr = (addr & 0xffff) + (seg << 4);
      20             :                 return addr;
      21             :         }
      22             : 
      23             : #ifdef CONFIG_MODIFY_LDT_SYSCALL
      24             :         /*
      25             :          * We'll assume that the code segments in the GDT
      26             :          * are all zero-based. That is largely true: the
      27             :          * TLS segments are used for data, and the PNPBIOS
      28             :          * and APM bios ones we just ignore here.
      29             :          */
      30             :         if ((seg & SEGMENT_TI_MASK) == SEGMENT_LDT) {
      31             :                 struct desc_struct *desc;
      32             :                 unsigned long base;
      33             : 
      34             :                 seg >>= 3;
      35             : 
      36             :                 mutex_lock(&child->mm->context.lock);
      37             :                 if (unlikely(!child->mm->context.ldt ||
      38             :                              seg >= child->mm->context.ldt->nr_entries))
      39             :                         addr = -1L; /* bogus selector, access would fault */
      40             :                 else {
      41             :                         desc = &child->mm->context.ldt->entries[seg];
      42             :                         base = get_desc_base(desc);
      43             : 
      44             :                         /* 16-bit code segment? */
      45             :                         if (!desc->d)
      46             :                                 addr &= 0xffff;
      47             :                         addr += base;
      48             :                 }
      49             :                 mutex_unlock(&child->mm->context.lock);
      50             :         }
      51             : #endif
      52             : 
      53           0 :         return addr;
      54             : }
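
The v8086 branch above is plain real-mode segmentation: linear = (ip & 0xffff) + (cs << 4); the protected-mode branch instead adds the LDT descriptor base, first truncating the offset to 16 bits for a 16-bit code segment. A minimal user-space sketch of the real-mode arithmetic with made-up register values (illustrative only, not part of step.c):

#include <stdio.h>

int main(void)
{
        unsigned long ip = 0x00000010;          /* hypothetical regs->ip */
        unsigned long cs = 0x1234;              /* hypothetical regs->cs */

        /* Same computation as the v8086_mode() branch of convert_ip_to_linear(). */
        unsigned long linear = (ip & 0xffff) + (cs << 4);

        printf("linear = %#lx\n", linear);      /* prints 0x12350 */
        return 0;
}
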
      55             : 
      56           0 : static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs)
      57             : {
      58           0 :         int i, copied;
      59           0 :         unsigned char opcode[15];
      60           0 :         unsigned long addr = convert_ip_to_linear(child, regs);
      61             : 
      62           0 :         copied = access_process_vm(child, addr, opcode, sizeof(opcode),
      63             :                         FOLL_FORCE);
      64           0 :         for (i = 0; i < copied; i++) {
      65           0 :                 switch (opcode[i]) {
      66             :                 /* popf and iret */
      67             :                 case 0x9d: case 0xcf:
      68             :                         return 1;
      69             : 
      70             :                         /* CHECKME: 64 65 */
      71             : 
       72             :                 /* operand-size and address-size prefixes */
      73           0 :                 case 0x66: case 0x67:
      74           0 :                         continue;
      75             :                 /* irrelevant prefixes (segment overrides and repeats) */
      76           0 :                 case 0x26: case 0x2e:
      77             :                 case 0x36: case 0x3e:
      78             :                 case 0x64: case 0x65:
      79             :                 case 0xf0: case 0xf2: case 0xf3:
      80           0 :                         continue;
      81             : 
      82             : #ifdef CONFIG_X86_64
      83             :                 case 0x40 ... 0x4f:
      84           0 :                         if (!user_64bit_mode(regs))
      85             :                                 /* 32-bit mode: register increment */
      86             :                                 return 0;
      87             :                         /* 64-bit mode: REX prefix */
      88           0 :                         continue;
      89             : #endif
      90             : 
      91             :                         /* CHECKME: f2, f3 */
      92             : 
      93             :                 /*
      94             :                  * pushf: NOTE! We should probably not let
      95             :                  * the user see the TF bit being set. But
      96             :                  * it's more pain than it's worth to avoid
      97             :                  * it, and a debugger could emulate this
      98             :                  * all in user space if it _really_ cares.
      99             :                  */
     100           0 :                 case 0x9c:
     101             :                 default:
     102           0 :                         return 0;
     103             :                 }
     104             :         }
     105             :         return 0;
     106             : }
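
is_setting_trap_flag() reads up to 15 opcode bytes at the tracee's instruction pointer and skips legacy prefixes (plus REX in 64-bit mode) until it can tell whether the instruction is popf or iret, i.e. one that rewrites EFLAGS.TF itself. A stripped-down user-space rendition of that scan over a hand-picked byte pair (no REX handling, illustrative only, not part of step.c):

#include <stddef.h>
#include <stdio.h>

static int sets_trap_flag(const unsigned char *opcode, size_t len)
{
        size_t i;

        for (i = 0; i < len; i++) {
                switch (opcode[i]) {
                case 0x9d: case 0xcf:                   /* popf, iret */
                        return 1;
                case 0x66: case 0x67:                   /* operand/address size */
                case 0x26: case 0x2e: case 0x36:        /* segment overrides */
                case 0x3e: case 0x64: case 0x65:
                case 0xf0: case 0xf2: case 0xf3:        /* lock, rep */
                        continue;
                default:                                /* anything else is harmless */
                        return 0;
                }
        }
        return 0;
}

int main(void)
{
        unsigned char popfw[] = { 0x66, 0x9d };         /* operand-size prefixed popf */

        printf("%d\n", sets_trap_flag(popfw, sizeof(popfw)));   /* prints 1 */
        return 0;
}
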
     107             : 
     108             : /*
     109             :  * Enable single-stepping.  Return nonzero if user mode is not using TF itself.
     110             :  */
     111           0 : static int enable_single_step(struct task_struct *child)
     112             : {
     113           0 :         struct pt_regs *regs = task_pt_regs(child);
     114           0 :         unsigned long oflags;
     115             : 
     116             :         /*
     117             :          * If we stepped into a sysenter/syscall insn, it trapped in
     118             :          * kernel mode; do_debug() cleared TF and set TIF_SINGLESTEP.
     119             :          * If user-mode had set TF itself, then it's still clear from
     120             :          * do_debug() and we need to set it again to restore the user
     121             :          * state so we don't wrongly set TIF_FORCED_TF below.
     122             :          * If enable_single_step() was used last and that is what
     123             :          * set TIF_SINGLESTEP, then both TF and TIF_FORCED_TF are
     124             :          * already set and our bookkeeping is fine.
     125             :          */
     126           0 :         if (unlikely(test_tsk_thread_flag(child, TIF_SINGLESTEP)))
     127           0 :                 regs->flags |= X86_EFLAGS_TF;
     128             : 
     129             :         /*
     130             :          * Always set TIF_SINGLESTEP.  This will also
     131             :          * cause us to set TF when returning to user mode.
     132             :          */
     133           0 :         set_tsk_thread_flag(child, TIF_SINGLESTEP);
     134             : 
     135             :         /*
     136             :          * Ensure that a trap is triggered once stepping out of a system
     137             :          * call prior to executing any user instruction.
     138             :          */
     139           0 :         set_task_syscall_work(child, SYSCALL_EXIT_TRAP);
     140             : 
     141           0 :         oflags = regs->flags;
     142             : 
     143             :         /* Set TF on the kernel stack.. */
     144           0 :         regs->flags |= X86_EFLAGS_TF;
     145             : 
     146             :         /*
     147             :          * ..but if TF is changed by the instruction we will trace,
     148             :          * don't mark it as being "us" that set it, so that we
     149             :          * won't clear it by hand later.
     150             :          *
     151             :          * Note that if we don't actually execute the popf because
     152             :          * of a signal arriving right now or suchlike, we will lose
     153             :          * track of the fact that it really was "us" that set it.
     154             :          */
     155           0 :         if (is_setting_trap_flag(child, regs)) {
     156           0 :                 clear_tsk_thread_flag(child, TIF_FORCED_TF);
     157           0 :                 return 0;
     158             :         }
     159             : 
     160             :         /*
     161             :          * If TF was already set, check whether it was us who set it.
     162             :          * If not, we should never attempt a block step.
     163             :          */
     164           0 :         if (oflags & X86_EFLAGS_TF)
     165           0 :                 return test_tsk_thread_flag(child, TIF_FORCED_TF);
     166             : 
     167           0 :         set_tsk_thread_flag(child, TIF_FORCED_TF);
     168             : 
     169           0 :         return 1;
     170             : }
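
The bookkeeping above reduces to an ownership rule: TIF_FORCED_TF records whether it was the debugger or the task itself that set X86_EFLAGS_TF, and only in the former case may the flag be cleared again later. A toy user-space model of just that rule, deliberately ignoring the is_setting_trap_flag() and syscall-exit cases (illustrative only, not part of step.c):

#include <stdbool.h>
#include <stdio.h>

#define TF 0x100                /* X86_EFLAGS_TF, bit 8 of EFLAGS */

static bool forced;             /* plays the role of TIF_FORCED_TF */

static void enable(unsigned long *flags)
{
        unsigned long oflags = *flags;

        *flags |= TF;
        if (!(oflags & TF))     /* TF was clear before, so it is ours */
                forced = true;
}

static void disable(unsigned long *flags)
{
        if (forced) {           /* only undo TF if we set it ourselves */
                *flags &= ~TF;
                forced = false;
        }
}

int main(void)
{
        unsigned long flags = 0x246;    /* typical EFLAGS, TF clear */

        enable(&flags);
        disable(&flags);
        printf("%#lx\n", flags);        /* 0x246: the debugger left no trace */

        flags = 0x246 | TF;             /* the task was using TF itself */
        enable(&flags);
        disable(&flags);
        printf("%#lx\n", flags);        /* 0x346: TF kept, it was never ours */
        return 0;
}
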
     171             : 
     172           0 : void set_task_blockstep(struct task_struct *task, bool on)
     173             : {
     174           0 :         unsigned long debugctl;
     175             : 
     176             :         /*
     177             :          * Ensure irq/preemption can't change debugctl in between.
     178             :          * Note also that both TIF_BLOCKSTEP and debugctl should
     179             :          * be changed atomically wrt preemption.
     180             :          *
     181             :          * NOTE: this means that set/clear TIF_BLOCKSTEP is only safe if
     182             :          * task is current or it can't be running, otherwise we can race
     183             :          * with __switch_to_xtra(). We rely on ptrace_freeze_traced() but
     184             :          * PTRACE_KILL is not safe.
     185             :          */
     186           0 :         local_irq_disable();
     187           0 :         debugctl = get_debugctlmsr();
     188           0 :         if (on) {
     189           0 :                 debugctl |= DEBUGCTLMSR_BTF;
     190           0 :                 set_tsk_thread_flag(task, TIF_BLOCKSTEP);
     191             :         } else {
     192           0 :                 debugctl &= ~DEBUGCTLMSR_BTF;
     193           0 :                 clear_tsk_thread_flag(task, TIF_BLOCKSTEP);
     194             :         }
     195           0 :         if (task == current)
     196           0 :                 update_debugctlmsr(debugctl);
     197           0 :         local_irq_enable();
     198           0 : }
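
set_task_blockstep() mirrors TIF_BLOCKSTEP into BTF (bit 1) of the IA32_DEBUGCTL MSR (address 0x1d9), which turns the TF single-step trap into a trap per taken branch. For illustration, that bit can be peeked from user space through the msr driver; this assumes the msr module is loaded and the program runs as root (not part of step.c):

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        uint64_t debugctl;
        int fd = open("/dev/cpu/0/msr", O_RDONLY);

        /* pread() at the MSR address reads IA32_DEBUGCTL for CPU 0. */
        if (fd < 0 || pread(fd, &debugctl, sizeof(debugctl), 0x1d9) != sizeof(debugctl)) {
                perror("IA32_DEBUGCTL");
                return 1;
        }
        printf("BTF is %s\n", (debugctl & 0x2) ? "set" : "clear");      /* DEBUGCTLMSR_BTF */
        close(fd);
        return 0;
}
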
     199             : 
     200             : /*
     201             :  * Enable single or block step.
     202             :  */
     203           0 : static void enable_step(struct task_struct *child, bool block)
     204             : {
     205             :         /*
     206             :          * Make sure block stepping (BTF) is not enabled unless it should be.
     207             :          * Note that we don't try to worry about any is_setting_trap_flag()
     208             :          * instructions after the first when using block stepping.
     209             :          * So no one should try to use debugger block stepping in a program
     210             :          * that uses user-mode single stepping itself.
     211             :          */
     212           0 :         if (enable_single_step(child) && block)
     213           0 :                 set_task_blockstep(child, true);
     214           0 :         else if (test_tsk_thread_flag(child, TIF_BLOCKSTEP))
     215           0 :                 set_task_blockstep(child, false);
     216           0 : }
     217             : 
     218           0 : void user_enable_single_step(struct task_struct *child)
     219             : {
     220           0 :         enable_step(child, 0);
     221           0 : }
     222             : 
     223           0 : void user_enable_block_step(struct task_struct *child)
     224             : {
     225           0 :         enable_step(child, 1);
     226           0 : }
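
user_enable_single_step() and user_enable_block_step() are reached from the generic ptrace resume path: PTRACE_SINGLESTEP selects the former, PTRACE_SINGLEBLOCK (where the request is defined) the latter, and resuming without a step request goes through user_disable_single_step() below. A minimal tracer sketch using the portable PTRACE_SINGLESTEP request, with error handling trimmed (illustrative only, not part of step.c):

#include <signal.h>
#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
        int status, steps = 0;
        pid_t child = fork();

        if (child == 0) {
                /* Tracee: opt in to tracing, stop, then run a few instructions. */
                ptrace(PTRACE_TRACEME, 0, NULL, NULL);
                raise(SIGSTOP);
                for (volatile int i = 0; i < 5; i++)
                        ;
                _exit(0);
        }

        waitpid(child, &status, 0);                     /* initial SIGSTOP */
        while (WIFSTOPPED(status)) {
                /* Each request ends up in user_enable_single_step() for the child. */
                ptrace(PTRACE_SINGLESTEP, child, NULL, NULL);
                waitpid(child, &status, 0);             /* SIGTRAP after one instruction */
                steps++;
        }
        printf("single-stepped %d times\n", steps);
        return 0;
}
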
     227             : 
     228        2646 : void user_disable_single_step(struct task_struct *child)
     229             : {
     230             :         /*
     231             :          * Make sure block stepping (BTF) is disabled.
     232             :          */
     233        2646 :         if (test_tsk_thread_flag(child, TIF_BLOCKSTEP))
     234           0 :                 set_task_blockstep(child, false);
     235             : 
     236             :         /* Always clear TIF_SINGLESTEP... */
     237        2646 :         clear_tsk_thread_flag(child, TIF_SINGLESTEP);
     238        2646 :         clear_task_syscall_work(child, SYSCALL_EXIT_TRAP);
     239             : 
     240             :         /* But touch TF only if it was set by us.. */
     241        2646 :         if (test_and_clear_tsk_thread_flag(child, TIF_FORCED_TF))
     242           0 :                 task_pt_regs(child)->flags &= ~X86_EFLAGS_TF;
     243        2646 : }
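
user_disable_single_step() is the only function in this file the landlock.info run exercised (2646 hits): it runs whenever a traced task is resumed without a step request, and from x86's ptrace_disable() on detach, which is presumably how the landlock ptrace selftests reach it. A minimal attach-then-detach sketch that goes through that path (error handling mostly trimmed, illustrative only, not part of step.c):

#include <stdio.h>
#include <stdlib.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>

int main(int argc, char **argv)
{
        if (argc < 2) {
                fprintf(stderr, "usage: %s <pid>\n", argv[0]);
                return 1;
        }

        pid_t pid = (pid_t)atoi(argv[1]);
        int status;

        if (ptrace(PTRACE_ATTACH, pid, NULL, NULL) == -1) {
                perror("PTRACE_ATTACH");
                return 1;
        }
        waitpid(pid, &status, 0);               /* wait for the target to stop */

        /* Detach resumes the target; the kernel side runs ptrace_disable(),
         * which on x86 calls user_disable_single_step(). */
        ptrace(PTRACE_DETACH, pid, NULL, NULL);
        printf("attached to and detached from %d\n", pid);
        return 0;
}
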

Generated by: LCOV version 1.14