LCOV - code coverage report
Current view: top level - arch/x86/include/asm - qspinlock.h (source / functions)
Test:         landlock.info
Date:         2021-04-22 12:43:58

                 Hit   Total   Coverage
Lines:            10      20     50.0 %
Functions:         3       4     75.0 %

          Line data    Source code
       1             : /* SPDX-License-Identifier: GPL-2.0 */
       2             : #ifndef _ASM_X86_QSPINLOCK_H
       3             : #define _ASM_X86_QSPINLOCK_H
       4             : 
       5             : #include <linux/jump_label.h>
       6             : #include <asm/cpufeature.h>
       7             : #include <asm-generic/qspinlock_types.h>
       8             : #include <asm/paravirt.h>
       9             : #include <asm/rmwcc.h>
      10             : 
      11             : #define _Q_PENDING_LOOPS        (1 << 9)
      12             : 
      13             : #define queued_fetch_set_pending_acquire queued_fetch_set_pending_acquire
      14           0 : static __always_inline u32 queued_fetch_set_pending_acquire(struct qspinlock *lock)
      15             : {
      16           0 :         u32 val;
      17             : 
      18             :         /*
      19             :          * We can't use GEN_BINARY_RMWcc() inside an if() stmt because asm goto
       20             :          * and CONFIG_PROFILE_ALL_BRANCHES=y result in a label inside a
      21             :          * statement expression, which GCC doesn't like.
      22             :          */
      23           0 :         val = GEN_BINARY_RMWcc(LOCK_PREFIX "btsl", lock->val.counter, c,
      24             :                                "I", _Q_PENDING_OFFSET) * _Q_PENDING_VAL;
      25           0 :         val |= atomic_read(&lock->val) & ~_Q_PENDING_MASK;
      26             : 
      27           0 :         return val;
      28             : }
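
The x86 routine above leans on LOCK BTSL: the instruction atomically sets the pending bit and reports only that bit's previous value (through the carry flag), so the rest of the old lock word is filled in with a separate plain atomic_read(). For readers outside the kernel tree, the self-contained C11 sketch below shows the generic shape of the operation; the myqspinlock type and MY_PENDING_* constants are illustrative stand-ins, not the kernel's definitions, and it uses a plain fetch_or where the header above deliberately avoids one.

#include <stdatomic.h>
#include <stdint.h>

#define MY_PENDING_OFFSET 8                         /* assumed layout: locked byte in bits 0-7 */
#define MY_PENDING_VAL    (1U << MY_PENDING_OFFSET) /* pending bit just above it */

struct myqspinlock { _Atomic uint32_t val; };

/*
 * Atomically set the pending bit and return the previous value of the whole
 * lock word, with acquire ordering.  On x86 a fetch_or that needs its return
 * value generally compiles to a cmpxchg loop, which is exactly the cost the
 * btsl-based version in the header avoids.
 */
static inline uint32_t my_fetch_set_pending_acquire(struct myqspinlock *lock)
{
        return atomic_fetch_or_explicit(&lock->val, MY_PENDING_VAL,
                                        memory_order_acquire);
}
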
      29             : 
      30             : #ifdef CONFIG_PARAVIRT_SPINLOCKS
      31             : extern void native_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
      32             : extern void __pv_init_lock_hash(void);
      33             : extern void __pv_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
      34             : extern void __raw_callee_save___pv_queued_spin_unlock(struct qspinlock *lock);
      35             : extern bool nopvspin;
      36             : 
      37             : #define queued_spin_unlock queued_spin_unlock
      38             : /**
      39             :  * queued_spin_unlock - release a queued spinlock
      40             :  * @lock : Pointer to queued spinlock structure
      41             :  *
      42             :  * A smp_store_release() on the least-significant byte.
      43             :  */
      44        1900 : static inline void native_queued_spin_unlock(struct qspinlock *lock)
      45             : {
      46        1900 :         smp_store_release(&lock->locked, 0);
      47             : }
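
Because the locked byte occupies the least-significant eight bits of the 32-bit lock word, dropping the lock needs nothing more than a byte-sized store of zero with release semantics, which is exactly what the smp_store_release() above expresses. The following self-contained C11 sketch is an illustrative analogue, not kernel code; the layout assumes little-endian, as on x86.

#include <stdatomic.h>
#include <stdint.h>

union myqspinlock {
        _Atomic uint32_t val;                /* the whole 32-bit lock word */
        struct {
                _Atomic uint8_t locked;      /* least-significant byte     */
                uint8_t pending;
                uint16_t tail;
        };
};

static inline void my_queued_spin_unlock(union myqspinlock *lock)
{
        /*
         * Release store: every write made inside the critical section is
         * ordered before the moment the lock word reads as unlocked.
         */
        atomic_store_explicit(&lock->locked, 0, memory_order_release);
}
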
      48             : 
      49      379203 : static inline void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
      50             : {
      51      379203 :         pv_queued_spin_lock_slowpath(lock, val);
      52      379299 : }
      53             : 
      54     9487782 : static inline void queued_spin_unlock(struct qspinlock *lock)
      55             : {
      56     9487782 :         pv_queued_spin_unlock(lock);
      57     9570753 : }
      58             : 
      59             : #define vcpu_is_preempted vcpu_is_preempted
      60      344120 : static inline bool vcpu_is_preempted(long cpu)
      61             : {
      62      344120 :         return pv_vcpu_is_preempted(cpu);
      63             : }
      64             : #endif
      65             : 
      66             : #ifdef CONFIG_PARAVIRT
      67             : /*
      68             :  * virt_spin_lock_key - enables (by default) the virt_spin_lock() hijack.
      69             :  *
      70             :  * Native (and PV wanting native due to vCPU pinning) should disable this key.
      71             :  * It is done in this backwards fashion to only have a single direction change,
       72             :  * which removes ordering between native_pv_lock_init() and HV setup.
      73             :  */
      74             : DECLARE_STATIC_KEY_TRUE(virt_spin_lock_key);
      75             : 
      76             : void native_pv_lock_init(void) __init;
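
native_pv_lock_init() itself is defined outside this header (so it never shows up in this file's coverage). Consistent with the comment above, its job is the single direction change: the key starts enabled and bare metal switches it off exactly once during boot. The sketch below is an assumption about its shape, not a quote of the actual definition.

/* Sketch: disable the virt_spin_lock() hijack when no hypervisor is present. */
void __init native_pv_lock_init(void)
{
        if (!boot_cpu_has(X86_FEATURE_HYPERVISOR))
                static_branch_disable(&virt_spin_lock_key);
}
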
      77             : 
      78             : /*
      79             :  * Shortcut for the queued_spin_lock_slowpath() function that allows
      80             :  * virt to hijack it.
      81             :  *
      82             :  * Returns:
      83             :  *   true - lock has been negotiated, all done;
      84             :  *   false - queued_spin_lock_slowpath() will do its thing.
      85             :  */
      86             : #define virt_spin_lock virt_spin_lock
      87           0 : static inline bool virt_spin_lock(struct qspinlock *lock)
      88             : {
      89           0 :         if (!static_branch_likely(&virt_spin_lock_key))
      90             :                 return false;
      91             : 
      92             :         /*
      93             :          * On hypervisors without PARAVIRT_SPINLOCKS support we fall
      94             :          * back to a Test-and-Set spinlock, because fair locks have
      95             :          * horrible lock 'holder' preemption issues.
      96             :          */
      97             : 
      98             :         do {
      99           0 :                 while (atomic_read(&lock->val) != 0)
     100           0 :                         cpu_relax();
     101           0 :         } while (atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL) != 0);
     102             : 
     103             :         return true;
     104             : }
     105             : #else
     106             : static inline void native_pv_lock_init(void)
     107             : {
     108             : }
     109             : #endif /* CONFIG_PARAVIRT */
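
The fallback inside virt_spin_lock() is a classic test-and-set spinlock: spin with plain reads until the lock word looks free, then try to claim it with a single compare-and-swap. It is unfair, but under a hypervisor without PARAVIRT_SPINLOCKS support that is the point: whichever waiter happens to be running can take the lock, which sidesteps the lock-holder preemption problems a fair, queued lock suffers when vCPUs are scheduled out. A self-contained C11 analogue follows; tas_spin_lock and MY_LOCKED_VAL are illustrative stand-ins, not kernel names.

#include <stdatomic.h>
#include <stdint.h>

#define MY_LOCKED_VAL 1U              /* stand-in for _Q_LOCKED_VAL */

static inline void tas_spin_lock(_Atomic uint32_t *val)
{
        uint32_t expected;

        do {
                /* test: wait until the lock word reads as free */
                while (atomic_load_explicit(val, memory_order_relaxed) != 0)
                        ;       /* the kernel calls cpu_relax() here */

                /* and set: try to swing 0 -> locked, acquiring the lock */
                expected = 0;
        } while (!atomic_compare_exchange_strong_explicit(val, &expected,
                                                          MY_LOCKED_VAL,
                                                          memory_order_acquire,
                                                          memory_order_relaxed));
}
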
     110             : 
     111             : #include <asm-generic/qspinlock.h>
     112             : 
     113             : #endif /* _ASM_X86_QSPINLOCK_H */

Generated by: LCOV version 1.14