LCOV - code coverage report
Current view: top level - kernel/locking - qrwlock.c (source / functions) Hit Total Coverage
Test: landlock.info Lines: 20 22 90.9 %
Date: 2021-04-22 12:43:58 Functions: 2 2 100.0 %

          Line data    Source code
       1             : // SPDX-License-Identifier: GPL-2.0-or-later
       2             : /*
       3             :  * Queued read/write locks
       4             :  *
       5             :  * (C) Copyright 2013-2014 Hewlett-Packard Development Company, L.P.
       6             :  *
       7             :  * Authors: Waiman Long <waiman.long@hp.com>
       8             :  */
       9             : #include <linux/smp.h>
      10             : #include <linux/bug.h>
      11             : #include <linux/cpumask.h>
      12             : #include <linux/percpu.h>
      13             : #include <linux/hardirq.h>
      14             : #include <linux/spinlock.h>
      15             : 
      16             : /**
      17             :  * queued_read_lock_slowpath - acquire read lock of a queue rwlock
      18             :  * @lock: Pointer to queue rwlock structure
      19             :  */
/**
 * queued_read_lock_slowpath - acquire read lock of a queue rwlock
 * @lock: Pointer to queue rwlock structure
 *
 * NOTE(review): callers are assumed to have already added _QR_BIAS to
 * lock->cnts on the fast path before falling into this slowpath — the
 * atomic_sub() below drops that bias before queueing. Confirm against
 * the fast-path caller in qrwlock.h.
 */
void queued_read_lock_slowpath(struct qrwlock *lock)
{
	/*
	 * Readers come here when they cannot get the lock without waiting
	 */
	if (unlikely(in_interrupt())) {
		/*
		 * Readers in interrupt context will get the lock immediately
		 * if the writer is just waiting (not holding the lock yet),
		 * so spin with ACQUIRE semantics until the lock is available
		 * without waiting in the queue.
		 */
		atomic_cond_read_acquire(&lock->cnts, !(VAL & _QW_LOCKED));
		return;
	}
	/* Undo the fast-path reader-count increment before joining the queue. */
	atomic_sub(_QR_BIAS, &lock->cnts);

	/*
	 * Put the reader into the wait queue
	 */
	arch_spin_lock(&lock->wait_lock);
	/* Re-assert our reader count now that we hold the queue head slot. */
	atomic_add(_QR_BIAS, &lock->cnts);

	/*
	 * The ACQUIRE semantics of the following spinning code ensure
	 * that accesses can't leak upwards out of our subsequent critical
	 * section in the case that the lock is currently held for write.
	 */
	atomic_cond_read_acquire(&lock->cnts, !(VAL & _QW_LOCKED));

	/*
	 * Signal the next one in queue to become queue head
	 */
	arch_spin_unlock(&lock->wait_lock);
}
EXPORT_SYMBOL(queued_read_lock_slowpath);
      56             : 
      57             : /**
      58             :  * queued_write_lock_slowpath - acquire write lock of a queue rwlock
      59             :  * @lock : Pointer to queue rwlock structure
      60             :  */
      61          22 : void queued_write_lock_slowpath(struct qrwlock *lock)
      62             : {
      63             :         /* Put the writer into the wait queue */
      64          22 :         arch_spin_lock(&lock->wait_lock);
      65             : 
      66             :         /* Try to acquire the lock directly if no reader is present */
      67          22 :         if (!atomic_read(&lock->cnts) &&
      68           6 :             (atomic_cmpxchg_acquire(&lock->cnts, 0, _QW_LOCKED) == 0))
      69           3 :                 goto unlock;
      70             : 
      71             :         /* Set the waiting flag to notify readers that a writer is pending */
      72          19 :         atomic_add(_QW_WAITING, &lock->cnts);
      73             : 
      74             :         /* When no more readers or writers, set the locked flag */
      75          19 :         do {
      76       20788 :                 atomic_cond_read_acquire(&lock->cnts, VAL == _QW_WAITING);
      77          19 :         } while (atomic_cmpxchg_relaxed(&lock->cnts, _QW_WAITING,
      78          19 :                                         _QW_LOCKED) != _QW_WAITING);
      79          19 : unlock:
      80          22 :         arch_spin_unlock(&lock->wait_lock);
      81          22 : }
      82             : EXPORT_SYMBOL(queued_write_lock_slowpath);

Generated by: LCOV version 1.14