LCOV - code coverage report
Current view: top level - kernel/time - vsyscall.c (source / functions) Hit Total Coverage
Test: landlock.info Lines: 66 80 82.5 %
Date: 2021-04-22 12:43:58 Functions: 3 5 60.0 %

          Line data    Source code
       1             : // SPDX-License-Identifier: GPL-2.0
       2             : /*
       3             :  * Copyright 2019 ARM Ltd.
       4             :  *
       5             :  * Generic implementation of update_vsyscall and update_vsyscall_tz.
       6             :  *
       7             :  * Based on the x86 specific implementation.
       8             :  */
       9             : 
      10             : #include <linux/hrtimer.h>
      11             : #include <linux/timekeeper_internal.h>
      12             : #include <vdso/datapage.h>
      13             : #include <vdso/helpers.h>
      14             : #include <vdso/vsyscall.h>
      15             : 
      16             : #include "timekeeping_internal.h"
      17             : 
      18        8288 : static inline void update_vdso_data(struct vdso_data *vdata,
      19             :                                     struct timekeeper *tk)
      20             : {
      21        8288 :         struct vdso_timestamp *vdso_ts;
      22        8288 :         u64 nsec, sec;
      23             : 
      24        8288 :         vdata[CS_HRES_COARSE].cycle_last        = tk->tkr_mono.cycle_last;
      25        8288 :         vdata[CS_HRES_COARSE].mask              = tk->tkr_mono.mask;
      26        8288 :         vdata[CS_HRES_COARSE].mult              = tk->tkr_mono.mult;
      27        8288 :         vdata[CS_HRES_COARSE].shift             = tk->tkr_mono.shift;
      28        8288 :         vdata[CS_RAW].cycle_last                = tk->tkr_raw.cycle_last;
      29        8288 :         vdata[CS_RAW].mask                      = tk->tkr_raw.mask;
      30        8288 :         vdata[CS_RAW].mult                      = tk->tkr_raw.mult;
      31        8288 :         vdata[CS_RAW].shift                     = tk->tkr_raw.shift;
      32             : 
      33             :         /* CLOCK_MONOTONIC */
      34        8288 :         vdso_ts         = &vdata[CS_HRES_COARSE].basetime[CLOCK_MONOTONIC];
      35        8288 :         vdso_ts->sec = tk->xtime_sec + tk->wall_to_monotonic.tv_sec;
      36             : 
      37        8288 :         nsec = tk->tkr_mono.xtime_nsec;
      38        8288 :         nsec += ((u64)tk->wall_to_monotonic.tv_nsec << tk->tkr_mono.shift);
      39       10851 :         while (nsec >= (((u64)NSEC_PER_SEC) << tk->tkr_mono.shift)) {
      40        2563 :                 nsec -= (((u64)NSEC_PER_SEC) << tk->tkr_mono.shift);
      41        2563 :                 vdso_ts->sec++;
      42             :         }
      43        8288 :         vdso_ts->nsec        = nsec;
      44             : 
      45             :         /* Copy MONOTONIC time for BOOTTIME */
      46        8288 :         sec     = vdso_ts->sec;
      47             :         /* Add the boot offset */
      48        8288 :         sec     += tk->monotonic_to_boot.tv_sec;
      49        8288 :         nsec    += (u64)tk->monotonic_to_boot.tv_nsec << tk->tkr_mono.shift;
      50             : 
      51             :         /* CLOCK_BOOTTIME */
      52        8288 :         vdso_ts         = &vdata[CS_HRES_COARSE].basetime[CLOCK_BOOTTIME];
      53        8288 :         vdso_ts->sec = sec;
      54             : 
      55        8288 :         while (nsec >= (((u64)NSEC_PER_SEC) << tk->tkr_mono.shift)) {
      56           0 :                 nsec -= (((u64)NSEC_PER_SEC) << tk->tkr_mono.shift);
      57           0 :                 vdso_ts->sec++;
      58             :         }
      59        8288 :         vdso_ts->nsec        = nsec;
      60             : 
      61             :         /* CLOCK_MONOTONIC_RAW */
      62        8288 :         vdso_ts         = &vdata[CS_RAW].basetime[CLOCK_MONOTONIC_RAW];
      63        8288 :         vdso_ts->sec = tk->raw_sec;
      64        8288 :         vdso_ts->nsec        = tk->tkr_raw.xtime_nsec;
      65             : 
      66             :         /* CLOCK_TAI */
      67        8288 :         vdso_ts         = &vdata[CS_HRES_COARSE].basetime[CLOCK_TAI];
      68        8288 :         vdso_ts->sec = tk->xtime_sec + (s64)tk->tai_offset;
      69        8288 :         vdso_ts->nsec        = tk->tkr_mono.xtime_nsec;
      70        8288 : }
      71             : 
      72        8311 : void update_vsyscall(struct timekeeper *tk)
      73             : {
      74        8311 :         struct vdso_data *vdata = __arch_get_k_vdso_data();
      75        8311 :         struct vdso_timestamp *vdso_ts;
      76        8311 :         s32 clock_mode;
      77        8311 :         u64 nsec;
      78             : 
      79             :         /* copy vsyscall data */
      80        8311 :         vdso_write_begin(vdata);
      81             : 
      82        8311 :         clock_mode = tk->tkr_mono.clock->vdso_clock_mode;
      83        8311 :         vdata[CS_HRES_COARSE].clock_mode        = clock_mode;
      84        8311 :         vdata[CS_RAW].clock_mode                = clock_mode;
      85             : 
      86             :         /* CLOCK_REALTIME also required for time() */
      87        8311 :         vdso_ts         = &vdata[CS_HRES_COARSE].basetime[CLOCK_REALTIME];
      88        8311 :         vdso_ts->sec = tk->xtime_sec;
      89        8311 :         vdso_ts->nsec        = tk->tkr_mono.xtime_nsec;
      90             : 
      91             :         /* CLOCK_REALTIME_COARSE */
      92        8311 :         vdso_ts         = &vdata[CS_HRES_COARSE].basetime[CLOCK_REALTIME_COARSE];
      93        8311 :         vdso_ts->sec = tk->xtime_sec;
      94        8311 :         vdso_ts->nsec        = tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift;
      95             : 
      96             :         /* CLOCK_MONOTONIC_COARSE */
      97        8311 :         vdso_ts         = &vdata[CS_HRES_COARSE].basetime[CLOCK_MONOTONIC_COARSE];
      98        8311 :         vdso_ts->sec = tk->xtime_sec + tk->wall_to_monotonic.tv_sec;
      99        8311 :         nsec            = tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift;
     100        8311 :         nsec            = nsec + tk->wall_to_monotonic.tv_nsec;
     101        8311 :         vdso_ts->sec += __iter_div_u64_rem(nsec, NSEC_PER_SEC, &vdso_ts->nsec);
     102             : 
     103             :         /*
     104             :          * Read without the seqlock held by clock_getres().
     105             :          * Note: No need to have a second copy.
     106             :          */
     107        8311 :         WRITE_ONCE(vdata[CS_HRES_COARSE].hrtimer_res, hrtimer_resolution);
     108             : 
     109             :         /*
     110             :          * If the current clocksource is not VDSO capable, then spare the
     111             :          * update of the high reolution parts.
     112             :          */
     113        8311 :         if (clock_mode != VDSO_CLOCKMODE_NONE)
     114        8288 :                 update_vdso_data(vdata, tk);
     115             : 
     116        8311 :         __arch_update_vsyscall(vdata, tk);
     117             : 
     118        8311 :         vdso_write_end(vdata);
     119             : 
     120        8311 :         __arch_sync_vdso_data(vdata);
     121        8311 : }
     122             : 
     123           1 : void update_vsyscall_tz(void)
     124             : {
     125           1 :         struct vdso_data *vdata = __arch_get_k_vdso_data();
     126             : 
     127           1 :         vdata[CS_HRES_COARSE].tz_minuteswest = sys_tz.tz_minuteswest;
     128           1 :         vdata[CS_HRES_COARSE].tz_dsttime = sys_tz.tz_dsttime;
     129             : 
     130           1 :         __arch_sync_vdso_data(vdata);
     131           1 : }
     132             : 
     133             : /**
     134             :  * vdso_update_begin - Start of a VDSO update section
     135             :  *
     136             :  * Allows architecture code to safely update the architecture specific VDSO
     137             :  * data. Disables interrupts, acquires timekeeper lock to serialize against
     138             :  * concurrent updates from timekeeping and invalidates the VDSO data
     139             :  * sequence counter to prevent concurrent readers from accessing
     140             :  * inconsistent data.
     141             :  *
     142             :  * Returns: Saved interrupt flags which need to be handed in to
     143             :  * vdso_update_end().
     144             :  */
     145           0 : unsigned long vdso_update_begin(void)
     146             : {
     147           0 :         struct vdso_data *vdata = __arch_get_k_vdso_data();
     148           0 :         unsigned long flags;
     149             : 
     150           0 :         raw_spin_lock_irqsave(&timekeeper_lock, flags);
     151           0 :         vdso_write_begin(vdata);
     152           0 :         return flags;
     153             : }
     154             : 
     155             : /**
     156             :  * vdso_update_end - End of a VDSO update section
     157             :  * @flags:      Interrupt flags as returned from vdso_update_begin()
     158             :  *
     159             :  * Pairs with vdso_update_begin(). Marks vdso data consistent, invokes data
     160             :  * synchronization if the architecture requires it, drops timekeeper lock
     161             :  * and restores interrupt flags.
     162             :  */
     163           0 : void vdso_update_end(unsigned long flags)
     164             : {
     165           0 :         struct vdso_data *vdata = __arch_get_k_vdso_data();
     166             : 
     167           0 :         vdso_write_end(vdata);
     168           0 :         __arch_sync_vdso_data(vdata);
     169           0 :         raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
     170           0 : }

Generated by: LCOV version 1.14