LCOV - code coverage report
Current view: top level - arch/x86/kernel - tls.c (source / functions)
Test: landlock.info
Date: 2021-04-22 12:43:58
                 Hit    Total    Coverage
    Lines:         0      112       0.0 %
    Functions:     0       14       0.0 %

          Line data    Source code
       1             : // SPDX-License-Identifier: GPL-2.0
       2             : #include <linux/kernel.h>
       3             : #include <linux/errno.h>
       4             : #include <linux/sched.h>
       5             : #include <linux/user.h>
       6             : #include <linux/regset.h>
       7             : #include <linux/syscalls.h>
       8             : #include <linux/nospec.h>
       9             : 
      10             : #include <linux/uaccess.h>
      11             : #include <asm/desc.h>
      12             : #include <asm/ldt.h>
      13             : #include <asm/processor.h>
      14             : #include <asm/proto.h>
      15             : 
      16             : #include "tls.h"
      17             : 
      18             : /*
       19             :  * get_free_idx: find a not-yet-used TLS descriptor index (for set_thread_area).
      20             :  */
      21           0 : static int get_free_idx(void)
      22             : {
      23           0 :         struct thread_struct *t = &current->thread;
      24           0 :         int idx;
      25             : 
      26           0 :         for (idx = 0; idx < GDT_ENTRY_TLS_ENTRIES; idx++)
      27           0 :                 if (desc_empty(&t->tls_array[idx]))
      28           0 :                         return idx + GDT_ENTRY_TLS_MIN;
      29             :         return -ESRCH;
      30             : }
      31             : 
      32           0 : static bool tls_desc_okay(const struct user_desc *info)
      33             : {
      34             :         /*
      35             :          * For historical reasons (i.e. no one ever documented how any
      36             :          * of the segmentation APIs work), user programs can and do
      37             :          * assume that a struct user_desc that's all zeros except for
      38             :          * entry_number means "no segment at all".  This never actually
      39             :          * worked.  In fact, up to Linux 3.19, a struct user_desc like
      40             :          * this would create a 16-bit read-write segment with base and
      41             :          * limit both equal to zero.
      42             :          *
      43             :          * That was close enough to "no segment at all" until we
      44             :          * hardened this function to disallow 16-bit TLS segments.  Fix
      45             :          * it up by interpreting these zeroed segments the way that they
      46             :          * were almost certainly intended to be interpreted.
      47             :          *
      48             :          * The correct way to ask for "no segment at all" is to specify
      49             :          * a user_desc that satisfies LDT_empty.  To keep everything
      50             :          * working, we accept both.
      51             :          *
      52             :          * Note that there's a similar kludge in modify_ldt -- look at
      53             :          * the distinction between modes 1 and 0x11.
      54             :          */
      55           0 :         if (LDT_empty(info) || LDT_zero(info))
      56             :                 return true;
      57             : 
      58             :         /*
      59             :          * espfix is required for 16-bit data segments, but espfix
      60             :          * only works for LDT segments.
      61             :          */
      62           0 :         if (!info->seg_32bit)
      63             :                 return false;
      64             : 
      65             :         /* Only allow data segments in the TLS array. */
      66           0 :         if (info->contents > 1)
      67             :                 return false;
      68             : 
      69             :         /*
      70             :          * Non-present segments with DPL 3 present an interesting attack
      71             :          * surface.  The kernel should handle such segments correctly,
      72             :          * but TLS is very difficult to protect in a sandbox, so prevent
      73             :          * such segments from being created.
      74             :          *
      75             :          * If userspace needs to remove a TLS entry, it can still delete
      76             :          * it outright.
      77             :          */
      78           0 :         if (info->seg_not_present)
      79           0 :                 return false;
      80             : 
      81             :         return true;
      82             : }
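
The LDT_zero/LDT_empty distinction described in the comment above is easiest to see from userspace. Below is a minimal sketch (not part of tls.c; the helper names are illustrative) of the two struct user_desc shapes that tls_desc_okay() accepts as "no segment at all":

    #include <string.h>
    #include <asm/ldt.h>                    /* struct user_desc */

    /* Historical form: all zeros except entry_number (what LDT_zero() matches). */
    static void make_zeroed_desc(struct user_desc *d, unsigned int idx)
    {
            memset(d, 0, sizeof(*d));
            d->entry_number = idx;
    }

    /* Documented form: the field pattern LDT_empty() expects for an empty slot. */
    static void make_empty_desc(struct user_desc *d, unsigned int idx)
    {
            memset(d, 0, sizeof(*d));
            d->entry_number    = idx;
            d->read_exec_only  = 1;
            d->seg_not_present = 1;
    }

Either shape, passed to set_thread_area(), clears the slot; anything else has to be a present 32-bit data segment to pass the checks that follow.
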
      83             : 
      84           0 : static void set_tls_desc(struct task_struct *p, int idx,
      85             :                          const struct user_desc *info, int n)
      86             : {
      87           0 :         struct thread_struct *t = &p->thread;
      88           0 :         struct desc_struct *desc = &t->tls_array[idx - GDT_ENTRY_TLS_MIN];
      89           0 :         int cpu;
      90             : 
      91             :         /*
      92             :          * We must not get preempted while modifying the TLS.
      93             :          */
      94           0 :         cpu = get_cpu();
      95             : 
      96           0 :         while (n-- > 0) {
      97           0 :                 if (LDT_empty(info) || LDT_zero(info))
      98           0 :                         memset(desc, 0, sizeof(*desc));
      99             :                 else
     100           0 :                         fill_ldt(desc, info);
     101           0 :                 ++info;
     102           0 :                 ++desc;
     103             :         }
     104             : 
     105           0 :         if (t == &current->thread)
     106           0 :                 load_TLS(t, cpu);
     107             : 
     108           0 :         put_cpu();
     109           0 : }
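
For orientation, here is a small standalone sketch (the GDT constants are assumptions noted in the comments, not taken from this file) of the slot numbering behind the idx - GDT_ENTRY_TLS_MIN indexing above and the (idx << 3) | 3 selector form used later in do_set_thread_area():

    #include <stdio.h>

    int main(void)
    {
            /*
             * Assumed GDT layout: GDT_ENTRY_TLS_MIN is 12 on 64-bit kernels
             * (6 on 32-bit) and GDT_ENTRY_TLS_ENTRIES is 3 on both.
             */
            unsigned int tls_min = 12;
            unsigned int i;

            for (i = 0; i < 3; i++)
                    printf("TLS slot %u -> GDT entry %u, selector 0x%x\n",
                           i, tls_min + i, ((tls_min + i) << 3) | 3);
            return 0;
    }

With the 64-bit layout this prints selectors 0x63, 0x6b and 0x73; the 32-bit layout gives 0x33, 0x3b and 0x43.
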
     110             : 
     111             : /*
     112             :  * Set a given TLS descriptor:
     113             :  */
     114           0 : int do_set_thread_area(struct task_struct *p, int idx,
     115             :                        struct user_desc __user *u_info,
     116             :                        int can_allocate)
     117             : {
     118           0 :         struct user_desc info;
     119           0 :         unsigned short __maybe_unused sel, modified_sel;
     120             : 
     121           0 :         if (copy_from_user(&info, u_info, sizeof(info)))
     122             :                 return -EFAULT;
     123             : 
     124           0 :         if (!tls_desc_okay(&info))
     125             :                 return -EINVAL;
     126             : 
     127           0 :         if (idx == -1)
     128           0 :                 idx = info.entry_number;
     129             : 
     130             :         /*
     131             :          * index -1 means the kernel should try to find and
     132             :          * allocate an empty descriptor:
     133             :          */
     134           0 :         if (idx == -1 && can_allocate) {
     135           0 :                 idx = get_free_idx();
     136           0 :                 if (idx < 0)
     137             :                         return idx;
     138           0 :                 if (put_user(idx, &u_info->entry_number))
     139             :                         return -EFAULT;
     140             :         }
     141             : 
     142           0 :         if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
     143             :                 return -EINVAL;
     144             : 
     145           0 :         set_tls_desc(p, idx, &info, 1);
     146             : 
     147             :         /*
     148             :          * If DS, ES, FS, or GS points to the modified segment, forcibly
     149             :          * refresh it.  Only needed on x86_64 because x86_32 reloads them
     150             :          * on return to user mode.
     151             :          */
     152           0 :         modified_sel = (idx << 3) | 3;
     153             : 
     154           0 :         if (p == current) {
     155             : #ifdef CONFIG_X86_64
     156           0 :                 savesegment(ds, sel);
     157           0 :                 if (sel == modified_sel)
     158           0 :                         loadsegment(ds, sel);
     159             : 
     160           0 :                 savesegment(es, sel);
     161           0 :                 if (sel == modified_sel)
     162           0 :                         loadsegment(es, sel);
     163             : 
     164           0 :                 savesegment(fs, sel);
     165           0 :                 if (sel == modified_sel)
     166           0 :                         loadsegment(fs, sel);
     167             : 
     168           0 :                 savesegment(gs, sel);
     169           0 :                 if (sel == modified_sel)
     170           0 :                         load_gs_index(sel);
     171             : #endif
     172             : 
     173             : #ifdef CONFIG_X86_32_LAZY_GS
     174             :                 savesegment(gs, sel);
     175             :                 if (sel == modified_sel)
     176             :                         loadsegment(gs, sel);
     177             : #endif
     178             :         } else {
     179             : #ifdef CONFIG_X86_64
     180           0 :                 if (p->thread.fsindex == modified_sel)
     181           0 :                         p->thread.fsbase = info.base_addr;
     182             : 
     183           0 :                 if (p->thread.gsindex == modified_sel)
     184           0 :                         p->thread.gsbase = info.base_addr;
     185             : #endif
     186             :         }
     187             : 
     188             :         return 0;
     189             : }
     190             : 
     191           0 : SYSCALL_DEFINE1(set_thread_area, struct user_desc __user *, u_info)
     192             : {
     193           0 :         return do_set_thread_area(current, -1, u_info, 1);
     194             : }
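
A hedged userspace sketch of the allocation path above (not part of this file; 32-bit x86 is assumed, and install_tls()/tls_area are illustrative names): passing entry_number == -1 lets get_free_idx() pick a free slot, and the kernel writes the chosen index back through u_info.

    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/syscall.h>
    #include <asm/ldt.h>

    static char tls_area[4096];

    int install_tls(void)
    {
            struct user_desc d;

            memset(&d, 0, sizeof(d));
            d.entry_number = -1;                    /* let the kernel pick a slot */
            d.base_addr    = (unsigned int)(unsigned long)tls_area;
            d.limit        = sizeof(tls_area) - 1;  /* byte granularity (limit_in_pages = 0) */
            d.seg_32bit    = 1;                     /* required by tls_desc_okay() */
            d.useable      = 1;

            if (syscall(SYS_set_thread_area, &d) != 0)
                    return -1;

            /* Same selector form as the modified_sel computation above. */
            printf("got slot %u, selector 0x%x\n",
                   d.entry_number, (d.entry_number << 3) | 3);
            return 0;
    }
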
     195             : 
     196             : 
     197             : /*
     198             :  * Get the current Thread-Local Storage area:
     199             :  */
     200             : 
     201           0 : static void fill_user_desc(struct user_desc *info, int idx,
     202             :                            const struct desc_struct *desc)
     203             : 
     204             : {
     205           0 :         memset(info, 0, sizeof(*info));
     206           0 :         info->entry_number = idx;
     207           0 :         info->base_addr = get_desc_base(desc);
     208           0 :         info->limit = get_desc_limit(desc);
     209           0 :         info->seg_32bit = desc->d;
     210           0 :         info->contents = desc->type >> 2;
     211           0 :         info->read_exec_only = !(desc->type & 2);
     212           0 :         info->limit_in_pages = desc->g;
     213           0 :         info->seg_not_present = !desc->p;
     214           0 :         info->useable = desc->avl;
     215             : #ifdef CONFIG_X86_64
     216           0 :         info->lm = desc->l;
     217             : #endif
     218           0 : }
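
A small illustrative decoder (standalone sketch, not kernel code) for the type-field unpacking above: contents takes the top two type bits, and read_exec_only is the inverted read/write bit.

    #include <stdio.h>

    static void decode_type(unsigned int type)
    {
            printf("type 0x%x -> contents=%u read_exec_only=%u\n",
                   type, type >> 2, !(type & 2));
    }

    int main(void)
    {
            decode_type(0x3);       /* accessed read/write data: contents 0 */
            decode_type(0xb);       /* accessed readable code: contents 2 (rejected for TLS) */
            return 0;
    }
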
     219             : 
     220           0 : int do_get_thread_area(struct task_struct *p, int idx,
     221             :                        struct user_desc __user *u_info)
     222             : {
     223           0 :         struct user_desc info;
     224           0 :         int index;
     225             : 
     226           0 :         if (idx == -1 && get_user(idx, &u_info->entry_number))
     227             :                 return -EFAULT;
     228             : 
     229           0 :         if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
     230             :                 return -EINVAL;
     231             : 
     232           0 :         index = idx - GDT_ENTRY_TLS_MIN;
     233           0 :         index = array_index_nospec(index,
     234             :                         GDT_ENTRY_TLS_MAX - GDT_ENTRY_TLS_MIN + 1);
     235             : 
     236           0 :         fill_user_desc(&info, idx, &p->thread.tls_array[index]);
     237             : 
     238           0 :         if (copy_to_user(u_info, &info, sizeof(info)))
     239           0 :                 return -EFAULT;
     240             :         return 0;
     241             : }
     242             : 
     243           0 : SYSCALL_DEFINE1(get_thread_area, struct user_desc __user *, u_info)
     244             : {
     245           0 :         return do_get_thread_area(current, -1, u_info);
     246             : }
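
The read-back direction, as a companion sketch (read_tls() is an illustrative name): only entry_number needs to be filled in before calling get_thread_area, and it must be one of the TLS slots checked above.

    #include <string.h>
    #include <unistd.h>
    #include <sys/syscall.h>
    #include <asm/ldt.h>

    int read_tls(unsigned int idx, struct user_desc *out)
    {
            memset(out, 0, sizeof(*out));
            out->entry_number = idx;        /* e.g. the value set_thread_area returned */
            return syscall(SYS_get_thread_area, out);
    }
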
     247             : 
     248           0 : int regset_tls_active(struct task_struct *target,
     249             :                       const struct user_regset *regset)
     250             : {
     251           0 :         struct thread_struct *t = &target->thread;
     252           0 :         int n = GDT_ENTRY_TLS_ENTRIES;
     253           0 :         while (n > 0 && desc_empty(&t->tls_array[n - 1]))
     254             :                 --n;
     255           0 :         return n;
     256             : }
     257             : 
     258           0 : int regset_tls_get(struct task_struct *target, const struct user_regset *regset,
     259             :                    struct membuf to)
     260             : {
     261           0 :         const struct desc_struct *tls;
     262           0 :         struct user_desc v;
     263           0 :         int pos;
     264             : 
     265           0 :         for (pos = 0, tls = target->thread.tls_array; to.left; pos++, tls++) {
     266           0 :                 fill_user_desc(&v, GDT_ENTRY_TLS_MIN + pos, tls);
     267           0 :                 membuf_write(&to, &v, sizeof(v));
     268             :         }
     269           0 :         return 0;
     270             : }
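
From userspace, regset_tls_get() is reached through ptrace. A hedged sketch (dump_tls() is an illustrative name; the tracee is assumed to be attached and stopped) that fetches the NT_386_TLS regset as an array of struct user_desc:

    #include <stdio.h>
    #include <elf.h>                /* NT_386_TLS */
    #include <sys/ptrace.h>
    #include <sys/types.h>
    #include <sys/uio.h>            /* struct iovec */
    #include <asm/ldt.h>

    int dump_tls(pid_t pid)
    {
            struct user_desc tls[3];        /* GDT_ENTRY_TLS_ENTRIES slots on x86 */
            struct iovec iov = { .iov_base = tls, .iov_len = sizeof(tls) };
            unsigned int i;

            if (ptrace(PTRACE_GETREGSET, pid, NT_386_TLS, &iov) != 0)
                    return -1;

            for (i = 0; i < iov.iov_len / sizeof(tls[0]); i++)
                    printf("slot %u: base 0x%x limit 0x%x\n",
                           tls[i].entry_number, tls[i].base_addr, tls[i].limit);
            return 0;
    }

The kernel clamps and writes back iov_len, which is why the loop re-reads it after the call.
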
     271             : 
     272           0 : int regset_tls_set(struct task_struct *target, const struct user_regset *regset,
     273             :                    unsigned int pos, unsigned int count,
     274             :                    const void *kbuf, const void __user *ubuf)
     275             : {
     276           0 :         struct user_desc infobuf[GDT_ENTRY_TLS_ENTRIES];
     277           0 :         const struct user_desc *info;
     278           0 :         int i;
     279             : 
     280           0 :         if (pos >= GDT_ENTRY_TLS_ENTRIES * sizeof(struct user_desc) ||
     281           0 :             (pos % sizeof(struct user_desc)) != 0 ||
     282           0 :             (count % sizeof(struct user_desc)) != 0)
     283             :                 return -EINVAL;
     284             : 
     285           0 :         if (kbuf)
     286             :                 info = kbuf;
     287           0 :         else if (__copy_from_user(infobuf, ubuf, count))
     288             :                 return -EFAULT;
     289             :         else
     290             :                 info = infobuf;
     291             : 
     292           0 :         for (i = 0; i < count / sizeof(struct user_desc); i++)
     293           0 :                 if (!tls_desc_okay(info + i))
     294             :                         return -EINVAL;
     295             : 
     296           0 :         set_tls_desc(target,
     297           0 :                      GDT_ENTRY_TLS_MIN + (pos / sizeof(struct user_desc)),
     298             :                      info, count / sizeof(struct user_desc));
     299             : 
     300           0 :         return 0;
     301             : }
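
And the write direction, matching regset_tls_set() above (again a hedged sketch with an illustrative helper name): PTRACE_SETREGSET takes the same NT_386_TLS iovec, so iov_len must be a whole number of struct user_desc entries to pass the size checks at the top of the function.

    #include <elf.h>
    #include <sys/ptrace.h>
    #include <sys/types.h>
    #include <sys/uio.h>
    #include <asm/ldt.h>

    int write_tls(pid_t pid, const struct user_desc *tls, unsigned int n)
    {
            struct iovec iov = {
                    .iov_base = (void *)tls,        /* n descriptors, each validated by tls_desc_okay() */
                    .iov_len  = n * sizeof(*tls),
            };

            return ptrace(PTRACE_SETREGSET, pid, NT_386_TLS, &iov) ? -1 : 0;
    }
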

Generated by: LCOV version 1.14