LCOV - code coverage report
Current view: top level - arch/x86/kernel/fpu - signal.c
Test: landlock.info
Date: 2021-04-22 12:43:58
                 Hit   Total   Coverage
Lines:           113     207     54.6 %
Functions:        10      12     83.3 %

          Line data    Source code
       1             : // SPDX-License-Identifier: GPL-2.0
       2             : /*
       3             :  * FPU signal frame handling routines.
       4             :  */
       5             : 
       6             : #include <linux/compat.h>
       7             : #include <linux/cpu.h>
       8             : #include <linux/pagemap.h>
       9             : 
      10             : #include <asm/fpu/internal.h>
      11             : #include <asm/fpu/signal.h>
      12             : #include <asm/fpu/regset.h>
      13             : #include <asm/fpu/xstate.h>
      14             : 
      15             : #include <asm/sigframe.h>
      16             : #include <asm/trace/fpu.h>
      17             : 
      18             : static struct _fpx_sw_bytes fx_sw_reserved, fx_sw_reserved_ia32;
      19             : 
      20             : /*
       21             :  * Check for the presence of extended state information in the user
       22             :  * fpstate area pointed to by the sigcontext's fpstate pointer.
      23             :  */
      24        1306 : static inline int check_for_xstate(struct fxregs_state __user *buf,
      25             :                                    void __user *fpstate,
      26             :                                    struct _fpx_sw_bytes *fx_sw)
      27             : {
      28        1306 :         int min_xstate_size = sizeof(struct fxregs_state) +
      29             :                               sizeof(struct xstate_header);
      30        1306 :         unsigned int magic2;
      31             : 
      32        2612 :         if (__copy_from_user(fx_sw, &buf->sw_reserved[0], sizeof(*fx_sw)))
      33             :                 return -1;
      34             : 
      35             :         /* Check for the first magic field and other error scenarios. */
      36        1306 :         if (fx_sw->magic1 != FP_XSTATE_MAGIC1 ||
      37        1306 :             fx_sw->xstate_size < min_xstate_size ||
      38        1306 :             fx_sw->xstate_size > fpu_user_xstate_size ||
      39        1306 :             fx_sw->xstate_size > fx_sw->extended_size)
      40             :                 return -1;
      41             : 
      42             :         /*
       43             :          * Check for the presence of the second magic word at the end of the
       44             :          * memory layout. This detects the case where the user just copied the
       45             :          * legacy fpstate layout without copying the extended state information
       46             :          * in the memory layout.
      47             :          */
      48        1306 :         if (__get_user(magic2, (__u32 __user *)(fpstate + fx_sw->xstate_size))
      49        1306 :             || magic2 != FP_XSTATE_MAGIC2)
      50           0 :                 return -1;
      51             : 
      52             :         return 0;
      53             : }
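
check_for_xstate() above is the kernel side of a layout contract that userspace can verify too: FP_XSTATE_MAGIC1 lives in the sw_reserved bytes at the end of the 512-byte fxsave image, and FP_XSTATE_MAGIC2 sits immediately after the xstate image (written by save_xstate_epilog() further down). The following standalone sketch exercises that validation against a synthetic buffer; it assumes only the uapi definitions from <asm/sigcontext.h>, and xstate_present() is an illustrative helper, not part of signal.c.

/* Illustrative userspace sketch (not part of signal.c): validate the two
 * FP_XSTATE magic words the kernel writes into a 64-bit signal frame. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <asm/sigcontext.h>     /* struct _fpx_sw_bytes, FP_XSTATE_MAGIC1/2 */

/* 'fpstate' is the 64-byte aligned [f]xsave area of a signal frame; 'sw' is a
 * copy of its sw_reserved bytes. Returns 1 if extended state is present. */
static int xstate_present(const void *fpstate, const struct _fpx_sw_bytes *sw)
{
	uint32_t magic2;

	if (sw->magic1 != FP_XSTATE_MAGIC1)
		return 0;               /* legacy frame: no extended state */

	/* MAGIC2 is placed right after the xstate image by save_xstate_epilog(). */
	memcpy(&magic2, (const uint8_t *)fpstate + sw->xstate_size, sizeof(magic2));
	return magic2 == FP_XSTATE_MAGIC2;
}

int main(void)
{
	/* Synthetic frame: a 512-byte legacy area plus a 64-byte xstate header is
	 * the smallest layout check_for_xstate() accepts (min_xstate_size). */
	unsigned char frame[512 + 64 + FP_XSTATE_MAGIC2_SIZE] = { 0 };
	struct _fpx_sw_bytes sw = {
		.magic1      = FP_XSTATE_MAGIC1,
		.xstate_size = 512 + 64,
	};
	uint32_t magic2 = FP_XSTATE_MAGIC2;

	memcpy(frame + sw.xstate_size, &magic2, sizeof(magic2));
	printf("xstate present: %d\n", xstate_present(frame, &sw));
	return 0;
}
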
      54             : 
      55             : /*
      56             :  * Signal frame handlers.
      57             :  */
      58           0 : static inline int save_fsave_header(struct task_struct *tsk, void __user *buf)
      59             : {
      60           0 :         if (use_fxsr()) {
      61           0 :                 struct xregs_state *xsave = &tsk->thread.fpu.state.xsave;
      62           0 :                 struct user_i387_ia32_struct env;
      63           0 :                 struct _fpstate_32 __user *fp = buf;
      64             : 
      65           0 :                 fpregs_lock();
      66           0 :                 if (!test_thread_flag(TIF_NEED_FPU_LOAD))
      67           0 :                         copy_fxregs_to_kernel(&tsk->thread.fpu);
      68           0 :                 fpregs_unlock();
      69             : 
      70           0 :                 convert_from_fxsr(&env, tsk);
      71             : 
      72           0 :                 if (__copy_to_user(buf, &env, sizeof(env)) ||
      73           0 :                     __put_user(xsave->i387.swd, &fp->status) ||
      74           0 :                     __put_user(X86_FXSR_MAGIC, &fp->magic))
      75           0 :                         return -1;
      76             :         } else {
      77             :                 struct fregs_state __user *fp = buf;
      78             :                 u32 swd;
      79             :                 if (__get_user(swd, &fp->swd) || __put_user(swd, &fp->status))
      80             :                         return -1;
      81             :         }
      82             : 
      83           0 :         return 0;
      84             : }
      85             : 
      86        1306 : static inline int save_xstate_epilog(void __user *buf, int ia32_frame)
      87             : {
      88        1306 :         struct xregs_state __user *x = buf;
      89        1306 :         struct _fpx_sw_bytes *sw_bytes;
      90        1306 :         u32 xfeatures;
      91        1306 :         int err;
      92             : 
       93             :         /* Set up the bytes not touched by the [f]xsave and reserved for SW. */
      94        1306 :         sw_bytes = ia32_frame ? &fx_sw_reserved_ia32 : &fx_sw_reserved;
      95        1306 :         err = __copy_to_user(&x->i387.sw_reserved, sw_bytes, sizeof(*sw_bytes));
      96             : 
      97        1306 :         if (!use_xsave())
      98             :                 return err;
      99             : 
     100        1306 :         err |= __put_user(FP_XSTATE_MAGIC2,
     101             :                           (__u32 __user *)(buf + fpu_user_xstate_size));
     102             : 
     103             :         /*
      104             :          * Read the xfeatures value which we copied (directly from the CPU or
      105             :          * from the state in the task struct) to the user buffer.
     106             :          */
     107        1306 :         err |= __get_user(xfeatures, (__u32 __user *)&x->header.xfeatures);
     108             : 
     109             :         /*
      110             :          * For legacy compatibility, we always set the FP/SSE bits in the bit
      111             :          * vector while saving the state to the user context. This lets us
      112             :          * capture any changes made (during sigreturn) to the FP/SSE bits by
      113             :          * legacy applications which don't touch the xfeatures field in the
      114             :          * xsave header.
      115             :          *
      116             :          * xsave-aware apps can change the xfeatures in the xsave header as
      117             :          * well as any contents in the memory layout. xrstor, as part of
      118             :          * sigreturn, will capture all of those changes.
     119             :          */
     120        1306 :         xfeatures |= XFEATURE_MASK_FPSSE;
     121             : 
     122        1306 :         err |= __put_user(xfeatures, (__u32 __user *)&x->header.xfeatures);
     123             : 
     124        1306 :         return err;
     125             : }
     126             : 
     127        1644 : static inline int copy_fpregs_to_sigframe(struct xregs_state __user *buf)
     128             : {
     129        1644 :         int err;
     130             : 
     131        1644 :         if (use_xsave())
     132        1644 :                 err = copy_xregs_to_user(buf);
     133           0 :         else if (use_fxsr())
     134           0 :                 err = copy_fxregs_to_user((struct fxregs_state __user *) buf);
     135             :         else
     136             :                 err = copy_fregs_to_user((struct fregs_state __user *) buf);
     137             : 
     138        1644 :         if (unlikely(err) && __clear_user(buf, fpu_user_xstate_size))
     139         338 :                 err = -EFAULT;
     140        1644 :         return err;
     141             : }
     142             : 
     143             : /*
      144             :  * Save the FPU and extended register state to the user signal frame.
      145             :  *
      146             :  * 'buf_fx' is the 64-byte aligned pointer at which the [f|fx|x]save
      147             :  *  state is copied.
      148             :  *  'buf' points to 'buf_fx' or to the fsave header followed by 'buf_fx'.
     149             :  *
     150             :  *      buf == buf_fx for 64-bit frames and 32-bit fsave frame.
     151             :  *      buf != buf_fx for 32-bit frames with fxstate.
     152             :  *
      153             :  * Try to save it directly to the user frame with the page fault handler
      154             :  * disabled. If this fails, fault in the user memory at 'buf_fx' and retry
      155             :  * the direct save (see the retry label in the function body), returning
      156             :  * -EFAULT if the fault cannot be resolved.
     157             :  *
     158             :  * If this is a 32-bit frame with fxstate, put a fsave header before
     159             :  * the aligned state at 'buf_fx'.
     160             :  *
     161             :  * For [f]xsave state, update the SW reserved fields in the [f]xsave frame
     162             :  * indicating the absence/presence of the extended state to the user.
     163             :  */
     164        1306 : int copy_fpstate_to_sigframe(void __user *buf, void __user *buf_fx, int size)
     165             : {
     166        1306 :         struct task_struct *tsk = current;
     167        1306 :         int ia32_fxstate = (buf != buf_fx);
     168        1306 :         int ret;
     169             : 
     170        1306 :         ia32_fxstate &= (IS_ENABLED(CONFIG_X86_32) ||
     171             :                          IS_ENABLED(CONFIG_IA32_EMULATION));
     172             : 
     173        1306 :         if (!static_cpu_has(X86_FEATURE_FPU)) {
     174             :                 struct user_i387_ia32_struct fp;
     175             :                 fpregs_soft_get(current, NULL, (struct membuf){.p = &fp,
     176             :                                                 .left = sizeof(fp)});
     177             :                 return copy_to_user(buf, &fp, sizeof(fp)) ? -EFAULT : 0;
     178             :         }
     179             : 
     180        2612 :         if (!access_ok(buf, size))
     181             :                 return -EACCES;
     182        1306 : retry:
     183             :         /*
     184             :          * Load the FPU registers if they are not valid for the current task.
     185             :          * With a valid FPU state we can attempt to save the state directly to
     186             :          * userland's stack frame which will likely succeed. If it does not,
     187             :          * resolve the fault in the user memory and try again.
     188             :          */
     189        1644 :         fpregs_lock();
     190        1644 :         if (test_thread_flag(TIF_NEED_FPU_LOAD))
     191        1268 :                 __fpregs_load_activate();
     192             : 
     193        1644 :         pagefault_disable();
     194        1644 :         ret = copy_fpregs_to_sigframe(buf_fx);
     195        1644 :         pagefault_enable();
     196        1644 :         fpregs_unlock();
     197             : 
     198        1644 :         if (ret) {
     199         338 :                 if (!fault_in_pages_writeable(buf_fx, fpu_user_xstate_size))
     200         338 :                         goto retry;
     201             :                 return -EFAULT;
     202             :         }
     203             : 
     204             :         /* Save the fsave header for the 32-bit frames. */
     205        1306 :         if ((ia32_fxstate || !use_fxsr()) && save_fsave_header(tsk, buf))
     206             :                 return -1;
     207             : 
     208        1306 :         if (use_fxsr() && save_xstate_epilog(buf_fx, ia32_fxstate))
     209           0 :                 return -1;
     210             : 
     211             :         return 0;
     212             : }
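
The comment and retry loop in the function body above are the heart of the fast path: attempt the copy with page faults disabled, and if that fails, fault the destination in and try again. Below is a toy, userspace-buildable sketch of just that control flow; every function in it is a hypothetical stand-in for the kernel primitive named in its comment, not a real API.

/* Toy sketch of the fast-path/retry pattern used by copy_fpstate_to_sigframe().
 * All functions here are hypothetical stand-ins, for illustration only. */
#include <stdbool.h>
#include <stdio.h>

static bool dest_resident;              /* pretend state of the destination page */

/* Stand-in for copy_fpregs_to_sigframe() with page faults disabled: it must
 * not sleep, so it simply fails if the destination is not resident. */
static int copy_state_nofault(void)
{
	return dest_resident ? 0 : -1;
}

/* Stand-in for fault_in_pages_writeable(): make the destination resident. */
static int fault_in_destination(void)
{
	dest_resident = true;
	return 0;
}

int main(void)
{
	int ret;
retry:
	ret = copy_state_nofault();
	if (ret) {
		if (!fault_in_destination())
			goto retry;             /* fault resolved: try again */
		return 1;                       /* -EFAULT in the kernel */
	}
	puts("state saved directly to the (now resident) destination");
	return 0;
}
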
     213             : 
     214             : static inline void
     215           0 : sanitize_restored_user_xstate(union fpregs_state *state,
     216             :                               struct user_i387_ia32_struct *ia32_env,
     217             :                               u64 user_xfeatures, int fx_only)
     218             : {
     219           0 :         struct xregs_state *xsave = &state->xsave;
     220           0 :         struct xstate_header *header = &xsave->header;
     221             : 
     222           0 :         if (use_xsave()) {
     223             :                 /*
     224             :                  * Note: we don't need to zero the reserved bits in the
     225             :                  * xstate_header here because we either didn't copy them at all,
     226             :                  * or we checked earlier that they aren't set.
     227             :                  */
     228             : 
     229             :                 /*
     230             :                  * 'user_xfeatures' might have bits clear which are
     231             :                  * set in header->xfeatures. This represents features that
     232             :                  * were in init state prior to a signal delivery, and need
     233             :                  * to be reset back to the init state.  Clear any user
     234             :                  * feature bits which are set in the kernel buffer to get
     235             :                  * them back to the init state.
     236             :                  *
     237             :                  * Supervisor state is unchanged by input from userspace.
     238             :                  * Ensure supervisor state bits stay set and supervisor
     239             :                  * state is not modified.
     240             :                  */
     241           0 :                 if (fx_only)
     242           0 :                         header->xfeatures = XFEATURE_MASK_FPSSE;
     243             :                 else
     244           0 :                         header->xfeatures &= user_xfeatures |
     245             :                                              xfeatures_mask_supervisor();
     246             :         }
     247             : 
     248           0 :         if (use_fxsr()) {
     249             :                 /*
      250             :                  * mxcsr reserved bits must be masked to zero for security
     251             :                  * reasons.
     252             :                  */
     253           0 :                 xsave->i387.mxcsr &= mxcsr_feature_mask;
     254             : 
     255           0 :                 if (ia32_env)
     256           0 :                         convert_to_fxsr(&state->fxsave, ia32_env);
     257             :         }
     258           0 : }
     259             : 
     260             : /*
     261             :  * Restore the extended state if present. Otherwise, restore the FP/SSE state.
     262             :  */
     263        1306 : static int copy_user_to_fpregs_zeroing(void __user *buf, u64 xbv, int fx_only)
     264             : {
     265        1306 :         u64 init_bv;
     266        1306 :         int r;
     267             : 
     268        1306 :         if (use_xsave()) {
     269        1306 :                 if (fx_only) {
     270           0 :                         init_bv = xfeatures_mask_user() & ~XFEATURE_MASK_FPSSE;
     271             : 
     272           0 :                         r = copy_user_to_fxregs(buf);
     273           0 :                         if (!r)
     274           0 :                                 copy_kernel_to_xregs(&init_fpstate.xsave, init_bv);
     275           0 :                         return r;
     276             :                 } else {
     277        1306 :                         init_bv = xfeatures_mask_user() & ~xbv;
     278             : 
     279        1306 :                         r = copy_user_to_xregs(buf, xbv);
     280        1306 :                         if (!r && unlikely(init_bv))
     281           0 :                                 copy_kernel_to_xregs(&init_fpstate.xsave, init_bv);
     282        1306 :                         return r;
     283             :                 }
     284           0 :         } else if (use_fxsr()) {
     285           0 :                 return copy_user_to_fxregs(buf);
     286             :         } else
     287             :                 return copy_user_to_fregs(buf);
     288             : }
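
The zeroing in copy_user_to_fpregs_zeroing() is plain mask arithmetic: any user feature that the frame's xstate_bv ('xbv') does not carry is re-initialised from init_fpstate. A small standalone illustration follows; the mask values are hypothetical examples, while the bit positions (x87 = bit 0, SSE = bit 1, AVX = bit 2) match the XSAVE feature numbering.

/* Standalone illustration of the init_bv computation above. The example mask
 * values are hypothetical; the bit positions match the XSAVE feature bits. */
#include <stdint.h>
#include <stdio.h>

#define XFEAT_FP   (1ULL << 0)   /* x87 */
#define XFEAT_SSE  (1ULL << 1)
#define XFEAT_YMM  (1ULL << 2)   /* AVX upper halves */

int main(void)
{
	uint64_t user_mask = XFEAT_FP | XFEAT_SSE | XFEAT_YMM; /* xfeatures_mask_user() */
	uint64_t xbv       = XFEAT_FP | XFEAT_SSE;             /* frame's xstate_bv     */

	/* Features the user may have, but which the signal frame does not carry:
	 * these must be reset to their init state around the restore. */
	uint64_t init_bv = user_mask & ~xbv;

	printf("init_bv = %#llx (here: the AVX state gets re-initialised)\n",
	       (unsigned long long)init_bv);
	return 0;
}
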
     289             : 
     290        1306 : static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
     291             : {
     292        1306 :         struct user_i387_ia32_struct *envp = NULL;
     293        1306 :         int state_size = fpu_kernel_xstate_size;
     294        1306 :         int ia32_fxstate = (buf != buf_fx);
     295        1306 :         struct task_struct *tsk = current;
     296        1306 :         struct fpu *fpu = &tsk->thread.fpu;
     297        1306 :         struct user_i387_ia32_struct env;
     298        1306 :         u64 user_xfeatures = 0;
     299        1306 :         int fx_only = 0;
     300        1306 :         int ret = 0;
     301             : 
     302        1306 :         ia32_fxstate &= (IS_ENABLED(CONFIG_X86_32) ||
     303             :                          IS_ENABLED(CONFIG_IA32_EMULATION));
     304             : 
     305        1306 :         if (!buf) {
     306           0 :                 fpu__clear_user_states(fpu);
     307           0 :                 return 0;
     308             :         }
     309             : 
     310        2612 :         if (!access_ok(buf, size))
     311             :                 return -EACCES;
     312             : 
     313        1306 :         if (!static_cpu_has(X86_FEATURE_FPU))
     314             :                 return fpregs_soft_set(current, NULL,
     315             :                                        0, sizeof(struct user_i387_ia32_struct),
     316             :                                        NULL, buf) != 0;
     317             : 
     318        1306 :         if (use_xsave()) {
     319        1306 :                 struct _fpx_sw_bytes fx_sw_user;
     320        1306 :                 if (unlikely(check_for_xstate(buf_fx, buf_fx, &fx_sw_user))) {
     321             :                         /*
     322             :                          * Couldn't find the extended state information in the
     323             :                          * memory layout. Restore just the FP/SSE and init all
     324             :                          * the other extended state.
     325             :                          */
     326           0 :                         state_size = sizeof(struct fxregs_state);
     327           0 :                         fx_only = 1;
     328           0 :                         trace_x86_fpu_xstate_check_failed(fpu);
     329             :                 } else {
     330        1306 :                         state_size = fx_sw_user.xstate_size;
     331        1306 :                         user_xfeatures = fx_sw_user.xfeatures;
     332             :                 }
     333             :         }
     334             : 
     335        1306 :         if ((unsigned long)buf_fx % 64)
     336           0 :                 fx_only = 1;
     337             : 
     338        1306 :         if (!ia32_fxstate) {
     339             :                 /*
     340             :                  * Attempt to restore the FPU registers directly from user
     341             :                  * memory. For that to succeed, the user access cannot cause
      342             :                  * page faults. If it does, fall back to the slow path below,
      343             :                  * going through the kernel buffer with the page fault handler
      344             :                  * enabled.
     345             :                  */
     346        1306 :                 fpregs_lock();
     347        1306 :                 pagefault_disable();
     348        1306 :                 ret = copy_user_to_fpregs_zeroing(buf_fx, user_xfeatures, fx_only);
     349        1306 :                 pagefault_enable();
     350        1306 :                 if (!ret) {
     351             : 
     352             :                         /*
      353             :                          * Restore supervisor states: a previous context switch,
      354             :                          * etc., has done XSAVES and saved the supervisor states
     355             :                          * in the kernel buffer from which they can be restored
     356             :                          * now.
     357             :                          *
     358             :                          * We cannot do a single XRSTORS here - which would
     359             :                          * be nice - because the rest of the FPU registers are
     360             :                          * being restored from a user buffer directly. The
     361             :                          * single XRSTORS happens below, when the user buffer
     362             :                          * has been copied to the kernel one.
     363             :                          */
     364        1306 :                         if (test_thread_flag(TIF_NEED_FPU_LOAD) &&
     365             :                             xfeatures_mask_supervisor())
     366           0 :                                 copy_kernel_to_xregs(&fpu->state.xsave,
     367             :                                                      xfeatures_mask_supervisor());
     368        1306 :                         fpregs_mark_activate();
     369        1306 :                         fpregs_unlock();
     370        1306 :                         return 0;
     371             :                 }
     372           0 :                 fpregs_unlock();
     373             :         } else {
     374             :                 /*
     375             :                  * For 32-bit frames with fxstate, copy the fxstate so it can
     376             :                  * be reconstructed later.
     377             :                  */
     378           0 :                 ret = __copy_from_user(&env, buf, sizeof(env));
     379           0 :                 if (ret)
     380           0 :                         goto err_out;
     381             :                 envp = &env;
     382             :         }
     383             : 
     384             :         /*
      385             :          * Setting TIF_NEED_FPU_LOAD ensures that our xstate is not modified
      386             :          * on context switch and that the xstate is considered to need
      387             :          * loading again on return to userland (overriding last_cpu avoids
      388             :          * the optimisation).
     389             :          */
     390           0 :         fpregs_lock();
     391             : 
     392           0 :         if (!test_thread_flag(TIF_NEED_FPU_LOAD)) {
     393             : 
     394             :                 /*
     395             :                  * Supervisor states are not modified by user space input.  Save
     396             :                  * current supervisor states first and invalidate the FPU regs.
     397             :                  */
     398           0 :                 if (xfeatures_mask_supervisor())
     399           0 :                         copy_supervisor_to_kernel(&fpu->state.xsave);
     400           0 :                 set_thread_flag(TIF_NEED_FPU_LOAD);
     401             :         }
     402           0 :         __fpu_invalidate_fpregs_state(fpu);
     403           0 :         fpregs_unlock();
     404             : 
     405           0 :         if (use_xsave() && !fx_only) {
     406           0 :                 u64 init_bv = xfeatures_mask_user() & ~user_xfeatures;
     407             : 
     408           0 :                 if (using_compacted_format()) {
     409           0 :                         ret = copy_user_to_xstate(&fpu->state.xsave, buf_fx);
     410             :                 } else {
     411           0 :                         ret = __copy_from_user(&fpu->state.xsave, buf_fx, state_size);
     412             : 
     413           0 :                         if (!ret && state_size > offsetof(struct xregs_state, header))
     414           0 :                                 ret = validate_user_xstate_header(&fpu->state.xsave.header);
     415             :                 }
     416           0 :                 if (ret)
     417           0 :                         goto err_out;
     418             : 
     419           0 :                 sanitize_restored_user_xstate(&fpu->state, envp, user_xfeatures,
     420             :                                               fx_only);
     421             : 
     422           0 :                 fpregs_lock();
     423           0 :                 if (unlikely(init_bv))
     424           0 :                         copy_kernel_to_xregs(&init_fpstate.xsave, init_bv);
     425             : 
     426             :                 /*
     427             :                  * Restore previously saved supervisor xstates along with
     428             :                  * copied-in user xstates.
     429             :                  */
     430           0 :                 ret = copy_kernel_to_xregs_err(&fpu->state.xsave,
     431             :                                                user_xfeatures | xfeatures_mask_supervisor());
     432             : 
     433           0 :         } else if (use_fxsr()) {
     434           0 :                 ret = __copy_from_user(&fpu->state.fxsave, buf_fx, state_size);
     435           0 :                 if (ret) {
     436           0 :                         ret = -EFAULT;
     437           0 :                         goto err_out;
     438             :                 }
     439             : 
     440           0 :                 sanitize_restored_user_xstate(&fpu->state, envp, user_xfeatures,
     441             :                                               fx_only);
     442             : 
     443           0 :                 fpregs_lock();
     444           0 :                 if (use_xsave()) {
     445           0 :                         u64 init_bv;
     446             : 
     447           0 :                         init_bv = xfeatures_mask_user() & ~XFEATURE_MASK_FPSSE;
     448           0 :                         copy_kernel_to_xregs(&init_fpstate.xsave, init_bv);
     449             :                 }
     450             : 
     451           0 :                 ret = copy_kernel_to_fxregs_err(&fpu->state.fxsave);
     452             :         } else {
     453             :                 ret = __copy_from_user(&fpu->state.fsave, buf_fx, state_size);
     454             :                 if (ret)
     455             :                         goto err_out;
     456             : 
     457             :                 fpregs_lock();
     458             :                 ret = copy_kernel_to_fregs_err(&fpu->state.fsave);
     459             :         }
     460           0 :         if (!ret)
     461           0 :                 fpregs_mark_activate();
     462             :         else
     463           0 :                 fpregs_deactivate(fpu);
     464           0 :         fpregs_unlock();
     465             : 
     466           0 : err_out:
     467           0 :         if (ret)
     468           0 :                 fpu__clear_user_states(fpu);
     469             :         return ret;
     470             : }
     471             : 
     472        2612 : static inline int xstate_sigframe_size(void)
     473             : {
     474        5224 :         return use_xsave() ? fpu_user_xstate_size + FP_XSTATE_MAGIC2_SIZE :
     475             :                         fpu_user_xstate_size;
     476             : }
     477             : 
     478             : /*
     479             :  * Restore FPU state from a sigframe:
     480             :  */
     481        1306 : int fpu__restore_sig(void __user *buf, int ia32_frame)
     482             : {
     483        1306 :         void __user *buf_fx = buf;
     484        1306 :         int size = xstate_sigframe_size();
     485             : 
     486        1306 :         if (ia32_frame && use_fxsr()) {
     487           0 :                 buf_fx = buf + sizeof(struct fregs_state);
     488           0 :                 size += sizeof(struct fregs_state);
     489             :         }
     490             : 
     491        1306 :         return __fpu__restore_sig(buf, buf_fx, size);
     492             : }
     493             : 
     494             : unsigned long
     495        1306 : fpu__alloc_mathframe(unsigned long sp, int ia32_frame,
     496             :                      unsigned long *buf_fx, unsigned long *size)
     497             : {
     498        1306 :         unsigned long frame_size = xstate_sigframe_size();
     499             : 
     500        1306 :         *buf_fx = sp = round_down(sp - frame_size, 64);
     501        1306 :         if (ia32_frame && use_fxsr()) {
     502           0 :                 frame_size += sizeof(struct fregs_state);
     503           0 :                 sp -= sizeof(struct fregs_state);
     504             :         }
     505             : 
     506        1306 :         *size = frame_size;
     507             : 
     508        1306 :         return sp;
     509             : }
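
fpu__alloc_mathframe() carves the frame out of the signal stack by subtracting the frame size from the stack pointer and rounding the result down to a 64-byte boundary, since XSAVE requires 64-byte alignment of the save area. A quick standalone check of that arithmetic, with a hypothetical stack pointer and a hypothetical xstate size:

/* Standalone check of the stack-frame arithmetic in fpu__alloc_mathframe()
 * for a 64-bit frame. The stack pointer and sizes are hypothetical. */
#include <stdio.h>

/* Power-of-two round-down, as the kernel's round_down() does. */
#define round_down(x, a)  ((x) & ~((unsigned long)(a) - 1))

int main(void)
{
	unsigned long sp         = 0x7ffc0d2fbe90UL;  /* example user stack pointer */
	unsigned long frame_size = 832 + 4;           /* example xstate + MAGIC2    */
	unsigned long buf_fx     = round_down(sp - frame_size, 64);

	printf("sp=%#lx frame_size=%lu -> buf_fx=%#lx (64-byte aligned: %s)\n",
	       sp, frame_size, buf_fx, (buf_fx % 64) ? "no" : "yes");
	return 0;
}
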
     510             : /*
      511             :  * Prepare the SW reserved portion of the fxsave memory layout, indicating
      512             :  * the presence of the extended state information in the memory layout
      513             :  * pointed to by the fpstate pointer in the sigcontext.
      514             :  * This will be saved whenever the FP and extended state context is
      515             :  * saved on the user stack during signal delivery to the user.
     516             :  */
     517           1 : void fpu__init_prepare_fx_sw_frame(void)
     518             : {
     519           1 :         int size = fpu_user_xstate_size + FP_XSTATE_MAGIC2_SIZE;
     520             : 
     521           1 :         fx_sw_reserved.magic1 = FP_XSTATE_MAGIC1;
     522           1 :         fx_sw_reserved.extended_size = size;
     523           1 :         fx_sw_reserved.xfeatures = xfeatures_mask_user();
     524           1 :         fx_sw_reserved.xstate_size = fpu_user_xstate_size;
     525             : 
     526           1 :         if (IS_ENABLED(CONFIG_IA32_EMULATION) ||
     527             :             IS_ENABLED(CONFIG_X86_32)) {
     528           1 :                 int fsave_header_size = sizeof(struct fregs_state);
     529             : 
     530           1 :                 fx_sw_reserved_ia32 = fx_sw_reserved;
     531           1 :                 fx_sw_reserved_ia32.extended_size = size + fsave_header_size;
     532             :         }
     533           1 : }
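
The sw_reserved contents prepared here record two sizes: xstate_size, the xstate image itself, and extended_size, everything laid down at the fpstate pointer (the xstate image plus the trailing MAGIC2 word, plus the fsave header for ia32 frames). A small sketch with hypothetical sizes; FSAVE_HDR stands in for sizeof(struct fregs_state) and is an assumption for illustration only.

/* Sketch of the sizes fpu__init_prepare_fx_sw_frame() records, using a
 * hypothetical user xstate size; FSAVE_HDR is an assumed stand-in for
 * sizeof(struct fregs_state). */
#include <stdio.h>

#define MAGIC2_SIZE   4      /* FP_XSTATE_MAGIC2_SIZE */
#define FSAVE_HDR     112    /* assumed sizeof(struct fregs_state) */

int main(void)
{
	unsigned int user_xstate_size = 832;   /* e.g. x87 + SSE + AVX, standard format */
	unsigned int size = user_xstate_size + MAGIC2_SIZE;

	printf("64-bit frames:  xstate_size=%u extended_size=%u\n",
	       user_xstate_size, size);
	printf("ia32 fx frames: xstate_size=%u extended_size=%u\n",
	       user_xstate_size, size + FSAVE_HDR);
	return 0;
}
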
     534             : 

Generated by: LCOV version 1.14