// SPDX-License-Identifier: GPL-2.0
/*
 * Ldisc rw semaphore
 *
 * The ldisc semaphore is semantically a rw_semaphore but enforces
 * an alternate policy, namely:
 *   1) Supports lock wait timeouts
 *   2) Write waiter has priority
 *   3) Downgrading is not supported
 *
 * Implementation notes:
 *   1) Upper half of semaphore count is a wait count (differs from rwsem
 *      in that rwsem normalizes the upper half to the wait bias)
 *   2) Lacks overflow checking
 *
 * The generic counting was copied and modified from include/asm-generic/rwsem.h
 * by Paul Mackerras <paulus@samba.org>.
 *
 * The scheduling policy was copied and modified from lib/rwsem.c
 * Written by David Howells (dhowells@redhat.com).
 *
 * This implementation incorporates the write lock stealing work of
 * Michel Lespinasse <walken@google.com>.
 *
 * Copyright (C) 2013 Peter Hurley <peter@hurleysoftware.com>
 */

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/atomic.h>
#include <linux/tty.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>


#if BITS_PER_LONG == 64
# define LDSEM_ACTIVE_MASK      0xffffffffL
#else
# define LDSEM_ACTIVE_MASK      0x0000ffffL
#endif

#define LDSEM_UNLOCKED          0L
#define LDSEM_ACTIVE_BIAS       1L
#define LDSEM_WAIT_BIAS         (-LDSEM_ACTIVE_MASK-1)
#define LDSEM_READ_BIAS         LDSEM_ACTIVE_BIAS
#define LDSEM_WRITE_BIAS        (LDSEM_WAIT_BIAS + LDSEM_ACTIVE_BIAS)
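
/*
 * Worked example of the count arithmetic (illustrative, 64-bit values):
 *   LDSEM_WAIT_BIAS  = -0x100000000  (adds one to the upper-half wait count)
 *   LDSEM_READ_BIAS  =  0x000000001  (adds one active reader)
 *   LDSEM_WRITE_BIAS = -0x0ffffffff  (one waiter *and* one active holder)
 * An uncontended write lock takes count from 0 to -0xffffffff, whose
 * active part (count & LDSEM_ACTIVE_MASK) is 1; a negative count with a
 * zero active part therefore means "waiters exist, but the lock is free".
 */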

struct ldsem_waiter {
        struct list_head list;
        struct task_struct *task;
};

/*
 * Initialize an ldsem:
 */
void __init_ldsem(struct ld_semaphore *sem, const char *name,
                  struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
        /*
         * Make sure we are not reinitializing a held semaphore:
         */
        debug_check_no_locks_freed((void *)sem, sizeof(*sem));
        lockdep_init_map(&sem->dep_map, name, key, 0);
#endif
        atomic_long_set(&sem->count, LDSEM_UNLOCKED);
        sem->wait_readers = 0;
        raw_spin_lock_init(&sem->wait_lock);
        INIT_LIST_HEAD(&sem->read_wait);
        INIT_LIST_HEAD(&sem->write_wait);
}

static void __ldsem_wake_readers(struct ld_semaphore *sem)
{
        struct ldsem_waiter *waiter, *next;
        struct task_struct *tsk;
        long adjust, count;

        /*
         * Try to grant read locks to all readers on the read wait list.
         * Note the 'active part' of the count is incremented by
         * the number of readers before waking any processes up.
         */
        adjust = sem->wait_readers * (LDSEM_ACTIVE_BIAS - LDSEM_WAIT_BIAS);
        count = atomic_long_add_return(adjust, &sem->count);
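        /*
         * If the count is still <= 0, a writer arrived concurrently
         * (write waiters have priority); try to back the grant out again.
         * On cmpxchg failure, count is reloaded and the check repeats.
         */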
        do {
                if (count > 0)
                        break;
                if (atomic_long_try_cmpxchg(&sem->count, &count, count - adjust))
                        return;
        } while (1);

        list_for_each_entry_safe(waiter, next, &sem->read_wait, list) {
                tsk = waiter->task;
                smp_store_release(&waiter->task, NULL);
                wake_up_process(tsk);
                put_task_struct(tsk);
        }
        INIT_LIST_HEAD(&sem->read_wait);
        sem->wait_readers = 0;
}

static inline int writer_trylock(struct ld_semaphore *sem)
{
        /*
         * Only wake this writer if the active part of the count can be
         * transitioned from 0 -> 1
         */
        long count = atomic_long_add_return(LDSEM_ACTIVE_BIAS, &sem->count);
        do {
                if ((count & LDSEM_ACTIVE_MASK) == LDSEM_ACTIVE_BIAS)
                        return 1;
                if (atomic_long_try_cmpxchg(&sem->count, &count, count - LDSEM_ACTIVE_BIAS))
                        return 0;
        } while (1);
}

static void __ldsem_wake_writer(struct ld_semaphore *sem)
{
        struct ldsem_waiter *waiter;

        waiter = list_entry(sem->write_wait.next, struct ldsem_waiter, list);
        wake_up_process(waiter->task);
}

/*
 * handle the lock release when processes blocked on it can now run
 * - if we come here from up_xxxx(), then:
 *   - the 'active part' of count (&0x0000ffff) reached 0 (but may have changed)
 *   - the 'waiting part' of count (&0xffff0000) is -ve (and will still be so)
 * - the spinlock must be held by the caller
 * - woken process blocks are discarded from the list after having task zeroed
 */
static void __ldsem_wake(struct ld_semaphore *sem)
{
        if (!list_empty(&sem->write_wait))
                __ldsem_wake_writer(sem);
        else if (!list_empty(&sem->read_wait))
                __ldsem_wake_readers(sem);
}

static void ldsem_wake(struct ld_semaphore *sem)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&sem->wait_lock, flags);
        __ldsem_wake(sem);
        raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
}

/*
 * wait for the read lock to be granted
 */
static struct ld_semaphore __sched *
down_read_failed(struct ld_semaphore *sem, long count, long timeout)
{
        struct ldsem_waiter waiter;
        long adjust = -LDSEM_ACTIVE_BIAS + LDSEM_WAIT_BIAS;

        /* set up my own style of waitqueue */
        raw_spin_lock_irq(&sem->wait_lock);

        /*
         * Try to reverse the lock attempt but if the count has changed
         * so that reversing fails, check if there are no waiters,
         * and early-out if so.
         */
        do {
                if (atomic_long_try_cmpxchg(&sem->count, &count, count + adjust)) {
                        count += adjust;
                        break;
                }
                if (count > 0) {
                        raw_spin_unlock_irq(&sem->wait_lock);
                        return sem;
                }
        } while (1);

        list_add_tail(&waiter.list, &sem->read_wait);
        sem->wait_readers++;

        waiter.task = current;
        get_task_struct(current);

        /* if there are no active locks, wake the new lock owner(s) */
        if ((count & LDSEM_ACTIVE_MASK) == 0)
                __ldsem_wake(sem);

        raw_spin_unlock_irq(&sem->wait_lock);

        /* wait to be given the lock */
        for (;;) {
                set_current_state(TASK_UNINTERRUPTIBLE);

                if (!smp_load_acquire(&waiter.task))
                        break;
                if (!timeout)
                        break;
                timeout = schedule_timeout(timeout);
        }

        __set_current_state(TASK_RUNNING);

        if (!timeout) {
                /*
                 * Lock timed out but check if this task was just
                 * granted lock ownership - if so, pretend there
                 * was no timeout; otherwise, cleanup lock wait.
                 */
                raw_spin_lock_irq(&sem->wait_lock);
                if (waiter.task) {
                        atomic_long_add_return(-LDSEM_WAIT_BIAS, &sem->count);
                        sem->wait_readers--;
                        list_del(&waiter.list);
                        raw_spin_unlock_irq(&sem->wait_lock);
                        put_task_struct(waiter.task);
                        return NULL;
                }
                raw_spin_unlock_irq(&sem->wait_lock);
        }

        return sem;
}

/*
 * wait for the write lock to be granted
 */
static struct ld_semaphore __sched *
down_write_failed(struct ld_semaphore *sem, long count, long timeout)
{
        struct ldsem_waiter waiter;
        long adjust = -LDSEM_ACTIVE_BIAS;
        int locked = 0;

        /* set up my own style of waitqueue */
        raw_spin_lock_irq(&sem->wait_lock);

        /*
         * Try to reverse the lock attempt but if the count has changed
         * so that reversing fails, check if the lock is now owned,
         * and early-out if so.
         */
        do {
                if (atomic_long_try_cmpxchg(&sem->count, &count, count + adjust))
                        break;
                if ((count & LDSEM_ACTIVE_MASK) == LDSEM_ACTIVE_BIAS) {
                        raw_spin_unlock_irq(&sem->wait_lock);
                        return sem;
                }
        } while (1);

        list_add_tail(&waiter.list, &sem->write_wait);

        waiter.task = current;

        set_current_state(TASK_UNINTERRUPTIBLE);
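        /* wait to be granted the lock; each wakeup re-tries under the wait_lock */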
        for (;;) {
                if (!timeout)
                        break;
                raw_spin_unlock_irq(&sem->wait_lock);
                timeout = schedule_timeout(timeout);
                raw_spin_lock_irq(&sem->wait_lock);
                set_current_state(TASK_UNINTERRUPTIBLE);
                locked = writer_trylock(sem);
                if (locked)
                        break;
        }

        if (!locked)
                atomic_long_add_return(-LDSEM_WAIT_BIAS, &sem->count);
        list_del(&waiter.list);

        /*
         * In case of timeout, wake up every reader that gave way to the
         * writer. This prevents the readers from being split into two
         * groups: one holding the semaphore and another still sleeping
         * (when there is no remaining contention with a writer).
         */
        if (!locked && list_empty(&sem->write_wait))
                __ldsem_wake_readers(sem);

        raw_spin_unlock_irq(&sem->wait_lock);

        __set_current_state(TASK_RUNNING);

        /* lock wait may have timed out */
        if (!locked)
                return NULL;
        return sem;
}


static int __ldsem_down_read_nested(struct ld_semaphore *sem,
                                           int subclass, long timeout)
{
        long count;

        rwsem_acquire_read(&sem->dep_map, subclass, 0, _RET_IP_);

        count = atomic_long_add_return(LDSEM_READ_BIAS, &sem->count);
        if (count <= 0) {
                lock_contended(&sem->dep_map, _RET_IP_);
                if (!down_read_failed(sem, count, timeout)) {
                        rwsem_release(&sem->dep_map, _RET_IP_);
                        return 0;
                }
        }
        lock_acquired(&sem->dep_map, _RET_IP_);
        return 1;
}

static int __ldsem_down_write_nested(struct ld_semaphore *sem,
                                            int subclass, long timeout)
{
        long count;

        rwsem_acquire(&sem->dep_map, subclass, 0, _RET_IP_);

        count = atomic_long_add_return(LDSEM_WRITE_BIAS, &sem->count);
        if ((count & LDSEM_ACTIVE_MASK) != LDSEM_ACTIVE_BIAS) {
                lock_contended(&sem->dep_map, _RET_IP_);
                if (!down_write_failed(sem, count, timeout)) {
                        rwsem_release(&sem->dep_map, _RET_IP_);
                        return 0;
                }
        }
        lock_acquired(&sem->dep_map, _RET_IP_);
        return 1;
}


/*
 * lock for reading -- returns 1 if successful, 0 if timed out
 */
int __sched ldsem_down_read(struct ld_semaphore *sem, long timeout)
{
        might_sleep();
        return __ldsem_down_read_nested(sem, 0, timeout);
}

/*
 * trylock for reading -- returns 1 if successful, 0 if contention
 */
int ldsem_down_read_trylock(struct ld_semaphore *sem)
{
        long count = atomic_long_read(&sem->count);

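        /*
         * count >= 0 means no writer holds the lock and nobody is waiting;
         * on cmpxchg failure, count is reloaded and the loop re-checks.
         */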
        while (count >= 0) {
                if (atomic_long_try_cmpxchg(&sem->count, &count, count + LDSEM_READ_BIAS)) {
                        rwsem_acquire_read(&sem->dep_map, 0, 1, _RET_IP_);
                        lock_acquired(&sem->dep_map, _RET_IP_);
                        return 1;
                }
        }
        return 0;
}

/*
 * lock for writing -- returns 1 if successful, 0 if timed out
 */
int __sched ldsem_down_write(struct ld_semaphore *sem, long timeout)
{
        might_sleep();
        return __ldsem_down_write_nested(sem, 0, timeout);
}

/*
 * trylock for writing -- returns 1 if successful, 0 if contention
 */
int ldsem_down_write_trylock(struct ld_semaphore *sem)
{
        long count = atomic_long_read(&sem->count);

        while ((count & LDSEM_ACTIVE_MASK) == 0) {
                if (atomic_long_try_cmpxchg(&sem->count, &count, count + LDSEM_WRITE_BIAS)) {
                        rwsem_acquire(&sem->dep_map, 0, 1, _RET_IP_);
                        lock_acquired(&sem->dep_map, _RET_IP_);
                        return 1;
                }
        }
        return 0;
}

/*
 * release a read lock
 */
void ldsem_up_read(struct ld_semaphore *sem)
{
        long count;

        rwsem_release(&sem->dep_map, _RET_IP_);

        count = atomic_long_add_return(-LDSEM_READ_BIAS, &sem->count);
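        /* wake only if waiters exist (count < 0) and this was the last active reader */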
        if (count < 0 && (count & LDSEM_ACTIVE_MASK) == 0)
                ldsem_wake(sem);
}

/*
 * release a write lock
 */
void ldsem_up_write(struct ld_semaphore *sem)
{
        long count;

        rwsem_release(&sem->dep_map, _RET_IP_);

        count = atomic_long_add_return(-LDSEM_WRITE_BIAS, &sem->count);
        if (count < 0)
                ldsem_wake(sem);
}


#ifdef CONFIG_DEBUG_LOCK_ALLOC

int ldsem_down_read_nested(struct ld_semaphore *sem, int subclass, long timeout)
{
        might_sleep();
        return __ldsem_down_read_nested(sem, subclass, timeout);
}

int ldsem_down_write_nested(struct ld_semaphore *sem, int subclass,
                            long timeout)
{
        might_sleep();
        return __ldsem_down_write_nested(sem, subclass, timeout);
}

#endif
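
/*
 * Usage sketch (illustrative only, not part of the original file). The
 * ld_semaphore guarding a tty's line discipline is tty->ldisc_sem; a
 * reader takes it with a jiffies timeout and must cope with a 0 return
 * (timed out). The helper name and the 5-second timeout below are
 * hypothetical values chosen for the example.
 */
#if 0	/* example, not compiled */
static int example_ldisc_read_section(struct tty_struct *tty)
{
        /* ldsem_down_read() returns 1 on success, 0 on timeout */
        if (!ldsem_down_read(&tty->ldisc_sem, 5 * HZ))
                return -EBUSY;          /* lock wait timed out */

        /* ... the line discipline cannot be changed while the lock is held ... */

        ldsem_up_read(&tty->ldisc_sem);
        return 0;
}
#endif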
