LCOV - code coverage report
Current view: top level - arch/x86/include/asm - switch_to.h (source / functions)
Test: landlock.info | Lines: 6 of 7 hit (85.7 %) | Functions: 1 of 1 hit (100.0 %)
Date: 2021-04-22 12:43:58

          Line data    Source code
       1             : /* SPDX-License-Identifier: GPL-2.0 */
       2             : #ifndef _ASM_X86_SWITCH_TO_H
       3             : #define _ASM_X86_SWITCH_TO_H
       4             : 
       5             : #include <linux/sched/task_stack.h>
       6             : 
       7             : struct task_struct; /* one of the stranger aspects of C forward declarations */
       8             : 
       9             : struct task_struct *__switch_to_asm(struct task_struct *prev,
      10             :                                     struct task_struct *next);
      11             : 
      12             : __visible struct task_struct *__switch_to(struct task_struct *prev,
      13             :                                           struct task_struct *next);
      14             : 
      15             : asmlinkage void ret_from_fork(void);
      16             : 
      17             : /*
      18             :  * This is the structure pointed to by thread.sp for an inactive task.  The
      19             :  * order of the fields must match the code in __switch_to_asm().
      20             :  */
       21             : struct inactive_task_frame {
       22             : #ifdef CONFIG_X86_64
                      :         /* Callee-saved GPRs; order must match the push/pop sequence
                      :          * in __switch_to_asm() (see comment above). */
       23             :         unsigned long r15;
       24             :         unsigned long r14;
       25             :         unsigned long r13;
       26             :         unsigned long r12;
       27             : #else
                      :         /* 32-bit variant: flags plus the callee-saved si/di pair,
                      :          * again in __switch_to_asm() push order. */
       28             :         unsigned long flags;
       29             :         unsigned long si;
       30             :         unsigned long di;
       31             : #endif
       32             :         unsigned long bx;
       33             : 
       34             :         /*
       35             :          * These two fields must be together.  They form a stack frame header,
       36             :          * needed by get_frame_pointer().
       37             :          */
       38             :         unsigned long bp;
                      :         /* NOTE(review): presumably the address __switch_to_asm() returns
                      :          * to when this task is switched in — confirm against the asm. */
       39             :         unsigned long ret_addr;
       40             : };
      41             : 
                      : /*
                      :  * Initial stack contents of a newly forked task: the switch frame
                      :  * above, immediately followed by a full pt_regs.  Presumably set up
                      :  * by copy_thread() — confirm against arch/x86/kernel/process.c.
                      :  */
       42             : struct fork_frame {
       43             :         struct inactive_task_frame frame;
       44             :         struct pt_regs regs;
       45             : };
      46             : 
                      : /*
                      :  * Context-switch from @prev to @next.  @last receives the return
                      :  * value of __switch_to_asm(), i.e. the task that was running before
                      :  * control came back to this stack (may differ from @prev if further
                      :  * switches happened in between — NOTE(review): inferred from the
                      :  * three-argument form; confirm against the scheduler's use).
                      :  */
       47             : #define switch_to(prev, next, last)                                     \
       48             : do {                                                                    \
       49             :         ((last) = __switch_to_asm((prev), (next)));                     \
       50             : } while (0)
      51             : 
      52             : #ifdef CONFIG_X86_32
                      : /*
                      :  * Keep the per-CPU cached SYSENTER CS (x86_tss.ss1) and the
                      :  * MSR_IA32_SYSENTER_CS MSR in sync with @thread->sysenter_cs.
                      :  * The cached ss1 value lets us skip the (expensive) wrmsr when
                      :  * the incoming task uses the same value.  32-bit only.
                      :  */
       53             : static inline void refresh_sysenter_cs(struct thread_struct *thread)
       54             : {
       55             :         /* Only happens when SEP is enabled, no need to test "SEP"arately: */
       56             :         if (unlikely(this_cpu_read(cpu_tss_rw.x86_tss.ss1) == thread->sysenter_cs))
       57             :                 return;
       58             : 
       59             :         this_cpu_write(cpu_tss_rw.x86_tss.ss1, thread->sysenter_cs);
       60             :         wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0);
       61             : }
      62             : #endif
      63             : 
      64             : /* This is used when switching tasks or entering/exiting vm86 mode. */
                      : /*
                      :  * Point the CPU's entry-stack bookkeeping at @task's stack.
                      :  * 32-bit: write task->thread.sp0 into the per-CPU TSS (sp1), or via
                      :  * load_sp0() under Xen PV.  64-bit: only Xen PV needs an explicit
                      :  * load_sp0(); otherwise nothing to do here (see comment below).
                      :  */
       65       53474 : static inline void update_task_stack(struct task_struct *task)
       66             : {
       67             :         /* sp0 always points to the entry trampoline stack, which is constant: */
       68             : #ifdef CONFIG_X86_32
       69             :         if (static_cpu_has(X86_FEATURE_XENPV))
       70             :                 load_sp0(task->thread.sp0);
       71             :         else
       72             :                 this_cpu_write(cpu_tss_rw.x86_tss.sp1, task->thread.sp0);
       73             : #else
       74             :         /*
       75             :          * x86-64 updates x86_tss.sp1 via cpu_current_top_of_stack. That
       76             :          * doesn't work on x86-32 because sp1 and
       77             :          * cpu_current_top_of_stack have different values (because of
       78             :          * the non-zero stack-padding on 32bit).
       79             :          */
       80       53474 :         if (static_cpu_has(X86_FEATURE_XENPV))
       81           0 :                 load_sp0(task_top_of_stack(task));
       82             : #endif
       83       53475 : }
      84             : 
                      : /*
                      :  * Seed a new kernel thread's switch frame: stash the entry point
                      :  * @fun in bx and its argument @arg in a callee-saved register
                      :  * (di on 32-bit, r12 on 64-bit).  NOTE(review): presumably
                      :  * ret_from_fork() reads these to call fun(arg) — confirm against
                      :  * the entry asm; this function does not set frame->ret_addr.
                      :  */
       85          53 : static inline void kthread_frame_init(struct inactive_task_frame *frame,
       86             :                                       unsigned long fun, unsigned long arg)
       87             : {
       88          53 :         frame->bx = fun;
       89             : #ifdef CONFIG_X86_32
       90             :         frame->di = arg;
       91             : #else
       92          53 :         frame->r12 = arg;
       93             : #endif
       94             : }
      95             : 
      96             : #endif /* _ASM_X86_SWITCH_TO_H */

Generated by: LCOV version 1.14