/* SPDX-License-Identifier: GPL-2.0 */

#ifndef _ASM_X86_CPU_ENTRY_AREA_H
#define _ASM_X86_CPU_ENTRY_AREA_H

#include <linux/percpu-defs.h>
#include <asm/processor.h>
#include <asm/intel_ds.h>
#include <asm/pgtable_areas.h>

#ifdef CONFIG_X86_64

/* Macro to enforce the same ordering and stack sizes */
#define ESTACKS_MEMBERS(guardsize, optional_stack_size)         \
        char    DF_stack_guard[guardsize];                      \
        char    DF_stack[EXCEPTION_STKSZ];                      \
        char    NMI_stack_guard[guardsize];                     \
        char    NMI_stack[EXCEPTION_STKSZ];                     \
        char    DB_stack_guard[guardsize];                      \
        char    DB_stack[EXCEPTION_STKSZ];                      \
        char    MCE_stack_guard[guardsize];                     \
        char    MCE_stack[EXCEPTION_STKSZ];                     \
        char    VC_stack_guard[guardsize];                      \
        char    VC_stack[optional_stack_size];                  \
        char    VC2_stack_guard[guardsize];                     \
        char    VC2_stack[optional_stack_size];                 \
        char    IST_top_guard[guardsize];                       \

/* The exception stacks' physical storage. No guard pages required */
struct exception_stacks {
        ESTACKS_MEMBERS(0, 0)
};

/* The effective cpu entry area mapping with guard pages. */
struct cea_exception_stacks {
        ESTACKS_MEMBERS(PAGE_SIZE, EXCEPTION_STKSZ)
};
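
/*
 * Illustrative sketch, not part of the original header: expanding
 * ESTACKS_MEMBERS() with guardsize 0 makes the guard arrays vanish, so
 * struct exception_stacks is densely packed backing storage, while the
 * CEA mapping interposes one guard page below every stack. Assuming
 * offsetof() from <linux/stddef.h> and static_assert() from
 * <linux/build_bug.h> are available here:
 */
static_assert(offsetof(struct exception_stacks, DF_stack) == 0);
static_assert(offsetof(struct cea_exception_stacks, DF_stack) == PAGE_SIZE);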

/*
 * The exception stack ordering in [cea_]exception_stacks
 */
enum exception_stack_ordering {
        ESTACK_DF,
        ESTACK_NMI,
        ESTACK_DB,
        ESTACK_MCE,
        ESTACK_VC,
        ESTACK_VC2,
        N_EXCEPTION_STACKS
};
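
/*
 * Illustrative sketch, a hypothetical table that is not part of this
 * header: the enum order mirrors the member order laid down by
 * ESTACKS_MEMBERS(), so the enum can index per-stack data such as each
 * stack's offset within the CEA mapping. The CEA_ESTACK_OFFS() helper
 * below computes exactly these offsets.
 */
static const unsigned long example_estack_offs[N_EXCEPTION_STACKS] = {
        [ESTACK_DF]     = offsetof(struct cea_exception_stacks, DF_stack),
        [ESTACK_NMI]    = offsetof(struct cea_exception_stacks, NMI_stack),
        [ESTACK_DB]     = offsetof(struct cea_exception_stacks, DB_stack),
        [ESTACK_MCE]    = offsetof(struct cea_exception_stacks, MCE_stack),
        [ESTACK_VC]     = offsetof(struct cea_exception_stacks, VC_stack),
        [ESTACK_VC2]    = offsetof(struct cea_exception_stacks, VC2_stack),
};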

#define CEA_ESTACK_SIZE(st)                                     \
        sizeof(((struct cea_exception_stacks *)0)->st## _stack)

#define CEA_ESTACK_BOT(ceastp, st)                              \
        ((unsigned long)&(ceastp)->st## _stack)

#define CEA_ESTACK_TOP(ceastp, st)                              \
        (CEA_ESTACK_BOT(ceastp, st) + CEA_ESTACK_SIZE(st))

#define CEA_ESTACK_OFFS(st)                                     \
        offsetof(struct cea_exception_stacks, st## _stack)

#define CEA_ESTACK_PAGES                                        \
        (sizeof(struct cea_exception_stacks) / PAGE_SIZE)
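
/*
 * Illustrative sketch, a hypothetical helper that is not part of this
 * header: how the CEA_ESTACK_* accessors compose. Given a CPU's
 * cea_exception_stacks mapping, the usable #DF stack runs from BOT
 * (just above its guard page) up to TOP:
 */
static inline unsigned long example_df_stack_top(struct cea_exception_stacks *cea)
{
        /* CEA_ESTACK_TOP() == CEA_ESTACK_BOT() + CEA_ESTACK_SIZE() */
        return CEA_ESTACK_TOP(cea, DF);
}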

#endif

#ifdef CONFIG_X86_32
struct doublefault_stack {
        unsigned long stack[(PAGE_SIZE - sizeof(struct x86_hw_tss)) / sizeof(unsigned long)];
        struct x86_hw_tss tss;
} __aligned(PAGE_SIZE);
#endif
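
/*
 * Illustrative sketch, not part of the original header: the stack array
 * above is sized as PAGE_SIZE minus the hardware TSS, so stack plus tss
 * fill exactly one page, with the TSS at the top and the stack growing
 * down toward the guard page below. Assuming static_assert() from
 * <linux/build_bug.h> is available here:
 */
#ifdef CONFIG_X86_32
static_assert(sizeof(struct doublefault_stack) == PAGE_SIZE);
#endif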

/*
 * cpu_entry_area is a percpu region that contains things needed by the CPU
 * and early entry/exit code.  Real types aren't used for all fields here
 * to avoid circular header dependencies.
 *
 * Every field is a virtual alias of some other allocated backing store.
 * There is no direct allocation of a struct cpu_entry_area.
 */
struct cpu_entry_area {
        char gdt[PAGE_SIZE];

        /*
         * The GDT is just below entry_stack and thus serves (on x86_64) as
         * a read-only guard page. On 32-bit the GDT must be writeable, so
         * it needs an extra guard page.
         */
#ifdef CONFIG_X86_32
        char guard_entry_stack[PAGE_SIZE];
#endif
        struct entry_stack_page entry_stack_page;

#ifdef CONFIG_X86_32
        char guard_doublefault_stack[PAGE_SIZE];
        struct doublefault_stack doublefault_stack;
#endif

        /*
         * On x86_64, the TSS is mapped RO.  On x86_32, it's mapped RW because
         * we need task switches to work, and task switches write to the TSS.
         */
        struct tss_struct tss;

#ifdef CONFIG_X86_64
        /*
         * Exception stacks used for IST entries with guard pages.
         */
        struct cea_exception_stacks estacks;
#endif
        /*
         * Per CPU debug store for Intel performance monitoring. Wastes a
         * full page at the moment.
         */
        struct debug_store cpu_debug_store;
        /*
         * The actual PEBS/BTS buffers must be mapped to user space.
         * Reserve enough fixmap PTEs.
         */
        struct debug_store_buffers cpu_debug_buffers;
};

#define CPU_ENTRY_AREA_SIZE             (sizeof(struct cpu_entry_area))
#define CPU_ENTRY_AREA_ARRAY_SIZE       (CPU_ENTRY_AREA_SIZE * NR_CPUS)

/* Total size includes the readonly IDT mapping page as well: */
#define CPU_ENTRY_AREA_TOTAL_SIZE       (CPU_ENTRY_AREA_ARRAY_SIZE + PAGE_SIZE)
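
/*
 * Illustrative sketch, a hypothetical helper that is not part of this
 * header: the read-only IDT page sits at the bottom of the region and
 * the per-CPU areas follow back to back, which is what the size macros
 * above describe. Assuming CPU_ENTRY_AREA_PER_CPU comes from
 * <asm/pgtable_areas.h> (included above); the real lookup is
 * get_cpu_entry_area(), defined in arch/x86/mm/cpu_entry_area.c.
 */
static inline struct cpu_entry_area *example_cpu_entry_area(int cpu)
{
        return (struct cpu_entry_area *)
                (CPU_ENTRY_AREA_PER_CPU + cpu * CPU_ENTRY_AREA_SIZE);
}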

DECLARE_PER_CPU(struct cpu_entry_area *, cpu_entry_area);
DECLARE_PER_CPU(struct cea_exception_stacks *, cea_exception_stacks);

extern void setup_cpu_entry_areas(void);
extern void cea_set_pte(void *cea_vaddr, phys_addr_t pa, pgprot_t flags);

extern struct cpu_entry_area *get_cpu_entry_area(int cpu);

static inline struct entry_stack *cpu_entry_stack(int cpu)
{
        return &get_cpu_entry_area(cpu)->entry_stack_page.stack;
}
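
/*
 * Illustrative sketch, a hypothetical helper that is not part of this
 * header: the entry stack grows down, so its initial stack pointer is
 * the address one past the end of the struct returned above.
 */
static inline unsigned long example_entry_stack_top(int cpu)
{
        return (unsigned long)(cpu_entry_stack(cpu) + 1);
}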

#define __this_cpu_ist_top_va(name)                                     \
        CEA_ESTACK_TOP(__this_cpu_read(cea_exception_stacks), name)

#define __this_cpu_ist_bottom_va(name)                                  \
        CEA_ESTACK_BOT(__this_cpu_read(cea_exception_stacks), name)
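
/*
 * Illustrative sketch, not part of this header and only a rough
 * adaptation of the TSS setup logic in arch/x86/kernel/cpu/common.c:
 * each TSS IST slot is pointed at the top of the current CPU's guarded
 * exception stack. Assumes the IST_INDEX_* constants from
 * <asm/page_64_types.h>; the VC stacks (SEV-ES) are omitted here since
 * they are only mapped when that feature is active.
 */
#ifdef CONFIG_X86_64
static inline void example_setup_ist(struct tss_struct *tss)
{
        tss->x86_tss.ist[IST_INDEX_DF]  = __this_cpu_ist_top_va(DF);
        tss->x86_tss.ist[IST_INDEX_NMI] = __this_cpu_ist_top_va(NMI);
        tss->x86_tss.ist[IST_INDEX_DB]  = __this_cpu_ist_top_va(DB);
        tss->x86_tss.ist[IST_INDEX_MCE] = __this_cpu_ist_top_va(MCE);
}
#endif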

#endif
