LCOV - code coverage report
Current view: top level - kernel/locking - spinlock_debug.c (source / functions) Hit Total Coverage
Test: landlock.info Lines: 81 112 72.3 %
Date: 2021-04-22 12:43:58 Functions: 13 18 72.2 %

          Line data    Source code
       1             : /*
       2             :  * Copyright 2005, Red Hat, Inc., Ingo Molnar
       3             :  * Released under the General Public License (GPL).
       4             :  *
       5             :  * This file contains the spinlock/rwlock implementations for
       6             :  * DEBUG_SPINLOCK.
       7             :  */
       8             : 
       9             : #include <linux/spinlock.h>
      10             : #include <linux/nmi.h>
      11             : #include <linux/interrupt.h>
      12             : #include <linux/debug_locks.h>
      13             : #include <linux/delay.h>
      14             : #include <linux/export.h>
      15             : 
      16     1019918 : void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
      17             :                           struct lock_class_key *key, short inner)
      18             : {
      19             : #ifdef CONFIG_DEBUG_LOCK_ALLOC
      20             :         /*
      21             :          * Make sure we are not reinitializing a held lock:
      22             :          */
      23     1019918 :         debug_check_no_locks_freed((void *)lock, sizeof(*lock));
      24     1020250 :         lockdep_init_map_wait(&lock->dep_map, name, key, 0, inner);
      25             : #endif
      26     1020146 :         lock->raw_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
      27     1020146 :         lock->magic = SPINLOCK_MAGIC;
      28     1020146 :         lock->owner = SPINLOCK_OWNER_INIT;
      29     1020146 :         lock->owner_cpu = -1;
      30     1020146 : }
      31             : 
      32             : EXPORT_SYMBOL(__raw_spin_lock_init);
      33             : 
      34       65519 : void __rwlock_init(rwlock_t *lock, const char *name,
      35             :                    struct lock_class_key *key)
      36             : {
      37             : #ifdef CONFIG_DEBUG_LOCK_ALLOC
      38             :         /*
      39             :          * Make sure we are not reinitializing a held lock:
      40             :          */
      41       65519 :         debug_check_no_locks_freed((void *)lock, sizeof(*lock));
      42       65519 :         lockdep_init_map_wait(&lock->dep_map, name, key, 0, LD_WAIT_CONFIG);
      43             : #endif
      44       65519 :         lock->raw_lock = (arch_rwlock_t) __ARCH_RW_LOCK_UNLOCKED;
      45       65519 :         lock->magic = RWLOCK_MAGIC;
      46       65519 :         lock->owner = SPINLOCK_OWNER_INIT;
      47       65519 :         lock->owner_cpu = -1;
      48       65519 : }
      49             : 
      50             : EXPORT_SYMBOL(__rwlock_init);
      51             : 
      52           0 : static void spin_dump(raw_spinlock_t *lock, const char *msg)
      53             : {
      54           0 :         struct task_struct *owner = READ_ONCE(lock->owner);
      55             : 
      56           0 :         if (owner == SPINLOCK_OWNER_INIT)
      57           0 :                 owner = NULL;
      58           0 :         printk(KERN_EMERG "BUG: spinlock %s on CPU#%d, %s/%d\n",
      59           0 :                 msg, raw_smp_processor_id(),
      60           0 :                 current->comm, task_pid_nr(current));
      61           0 :         printk(KERN_EMERG " lock: %pS, .magic: %08x, .owner: %s/%d, "
      62             :                         ".owner_cpu: %d\n",
      63           0 :                 lock, READ_ONCE(lock->magic),
      64             :                 owner ? owner->comm : "<none>",
      65           0 :                 owner ? task_pid_nr(owner) : -1,
      66           0 :                 READ_ONCE(lock->owner_cpu));
      67           0 :         dump_stack();
      68           0 : }
      69             : 
      70           0 : static void spin_bug(raw_spinlock_t *lock, const char *msg)
      71             : {
      72           0 :         if (!debug_locks_off())
      73             :                 return;
      74             : 
      75           0 :         spin_dump(lock, msg);
      76             : }
      77             : 
      78             : #define SPIN_BUG_ON(cond, lock, msg) if (unlikely(cond)) spin_bug(lock, msg)
      79             : 
      80             : static inline void
      81     9023154 : debug_spin_lock_before(raw_spinlock_t *lock)
      82             : {
      83     9023154 :         SPIN_BUG_ON(READ_ONCE(lock->magic) != SPINLOCK_MAGIC, lock, "bad magic");
      84     9023154 :         SPIN_BUG_ON(READ_ONCE(lock->owner) == current, lock, "recursion");
      85     9023154 :         SPIN_BUG_ON(READ_ONCE(lock->owner_cpu) == raw_smp_processor_id(),
      86             :                                                         lock, "cpu recursion");
      87     9042564 : }
      88             : 
      89     9120948 : static inline void debug_spin_lock_after(raw_spinlock_t *lock)
      90             : {
      91    18244187 :         WRITE_ONCE(lock->owner_cpu, raw_smp_processor_id());
      92       26146 :         WRITE_ONCE(lock->owner, current);
      93       26146 : }
      94             : 
      95     9045181 : static inline void debug_spin_unlock(raw_spinlock_t *lock)
      96             : {
      97     9045181 :         SPIN_BUG_ON(lock->magic != SPINLOCK_MAGIC, lock, "bad magic");
      98     9045181 :         SPIN_BUG_ON(!raw_spin_is_locked(lock), lock, "already unlocked");
      99     9050270 :         SPIN_BUG_ON(lock->owner != current, lock, "wrong owner");
     100     9050270 :         SPIN_BUG_ON(lock->owner_cpu != raw_smp_processor_id(),
     101             :                                                         lock, "wrong CPU");
     102     9053679 :         WRITE_ONCE(lock->owner, SPINLOCK_OWNER_INIT);
     103     9053679 :         WRITE_ONCE(lock->owner_cpu, -1);
     104     9053679 : }
     105             : 
     106             : /*
     107             :  * We are now relying on the NMI watchdog to detect lockup instead of doing
     108             :  * the detection here with an unfair lock which can cause problem of its own.
     109             :  */
     110     9018160 : void do_raw_spin_lock(raw_spinlock_t *lock)
     111             : {
     112     9018160 :         debug_spin_lock_before(lock);
     113     9049702 :         arch_spin_lock(&lock->raw_lock);
     114     9094802 :         mmiowb_spin_lock();
     115     9094802 :         debug_spin_lock_after(lock);
     116     9097093 : }
     117             : 
     118       26795 : int do_raw_spin_trylock(raw_spinlock_t *lock)
     119             : {
     120       26795 :         int ret = arch_spin_trylock(&lock->raw_lock);
     121             : 
     122       26907 :         if (ret) {
     123       26146 :                 mmiowb_spin_lock();
     124       26146 :                 debug_spin_lock_after(lock);
     125             :         }
     126             : #ifndef CONFIG_SMP
     127             :         /*
     128             :          * Must not happen on UP:
     129             :          */
     130             :         SPIN_BUG_ON(!ret, lock, "trylock failure on UP");
     131             : #endif
     132       26907 :         return ret;
     133             : }
     134             : 
     135     9043141 : void do_raw_spin_unlock(raw_spinlock_t *lock)
     136             : {
     137     9043141 :         mmiowb_spin_unlock();
     138     9043141 :         debug_spin_unlock(lock);
     139     9052798 :         arch_spin_unlock(&lock->raw_lock);
     140     9126197 : }
     141             : 
     142           0 : static void rwlock_bug(rwlock_t *lock, const char *msg)
     143             : {
     144           0 :         if (!debug_locks_off())
     145             :                 return;
     146             : 
     147           0 :         printk(KERN_EMERG "BUG: rwlock %s on CPU#%d, %s/%d, %p\n",
     148           0 :                 msg, raw_smp_processor_id(), current->comm,
     149             :                 task_pid_nr(current), lock);
     150           0 :         dump_stack();
     151             : }
     152             : 
     153             : #define RWLOCK_BUG_ON(cond, lock, msg) if (unlikely(cond)) rwlock_bug(lock, msg)
     154             : 
     155       41882 : void do_raw_read_lock(rwlock_t *lock)
     156             : {
     157       41882 :         RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
     158       41882 :         arch_read_lock(&lock->raw_lock);
     159       41884 : }
     160             : 
     161           0 : int do_raw_read_trylock(rwlock_t *lock)
     162             : {
     163           0 :         int ret = arch_read_trylock(&lock->raw_lock);
     164             : 
     165             : #ifndef CONFIG_SMP
     166             :         /*
     167             :          * Must not happen on UP:
     168             :          */
     169             :         RWLOCK_BUG_ON(!ret, lock, "trylock failure on UP");
     170             : #endif
     171           0 :         return ret;
     172             : }
     173             : 
     174       41880 : void do_raw_read_unlock(rwlock_t *lock)
     175             : {
     176       41880 :         RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
     177       41880 :         arch_read_unlock(&lock->raw_lock);
     178       41884 : }
     179             : 
     180       22680 : static inline void debug_write_lock_before(rwlock_t *lock)
     181             : {
     182       22680 :         RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
     183       22680 :         RWLOCK_BUG_ON(lock->owner == current, lock, "recursion");
     184       22680 :         RWLOCK_BUG_ON(lock->owner_cpu == raw_smp_processor_id(),
     185             :                                                         lock, "cpu recursion");
     186       22680 : }
     187             : 
     188       22682 : static inline void debug_write_lock_after(rwlock_t *lock)
     189             : {
     190       45364 :         WRITE_ONCE(lock->owner_cpu, raw_smp_processor_id());
     191           0 :         WRITE_ONCE(lock->owner, current);
     192           0 : }
     193             : 
     194       22679 : static inline void debug_write_unlock(rwlock_t *lock)
     195             : {
     196       22679 :         RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
     197       22679 :         RWLOCK_BUG_ON(lock->owner != current, lock, "wrong owner");
     198       22679 :         RWLOCK_BUG_ON(lock->owner_cpu != raw_smp_processor_id(),
     199             :                                                         lock, "wrong CPU");
     200       22679 :         WRITE_ONCE(lock->owner, SPINLOCK_OWNER_INIT);
     201       22679 :         WRITE_ONCE(lock->owner_cpu, -1);
     202       22679 : }
     203             : 
     204       22680 : void do_raw_write_lock(rwlock_t *lock)
     205             : {
     206       22680 :         debug_write_lock_before(lock);
     207       22680 :         arch_write_lock(&lock->raw_lock);
     208       22682 :         debug_write_lock_after(lock);
     209       22682 : }
     210             : 
     211           0 : int do_raw_write_trylock(rwlock_t *lock)
     212             : {
     213           0 :         int ret = arch_write_trylock(&lock->raw_lock);
     214             : 
     215           0 :         if (ret)
     216           0 :                 debug_write_lock_after(lock);
     217             : #ifndef CONFIG_SMP
     218             :         /*
     219             :          * Must not happen on UP:
     220             :          */
     221             :         RWLOCK_BUG_ON(!ret, lock, "trylock failure on UP");
     222             : #endif
     223           0 :         return ret;
     224             : }
     225             : 
     226       22679 : void do_raw_write_unlock(rwlock_t *lock)
     227             : {
     228       22679 :         debug_write_unlock(lock);
     229       22680 :         arch_write_unlock(&lock->raw_lock);
     230       22681 : }

Generated by: LCOV version 1.14