LCOV - code coverage report
Current view: top level - kernel/printk - printk_safe.c (source / functions)
Test: landlock.info
Coverage: lines 53/145 (36.6 %), functions 9/16 (56.2 %)
Date: 2021-04-22 12:43:58

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * printk_safe.c - Safe printk for printk-deadlock-prone contexts
 */

#include <linux/preempt.h>
#include <linux/spinlock.h>
#include <linux/debug_locks.h>
#include <linux/kdb.h>
#include <linux/smp.h>
#include <linux/cpumask.h>
#include <linux/irq_work.h>
#include <linux/printk.h>
#include <linux/kprobes.h>

#include "internal.h"

/*
 * printk() cannot take logbuf_lock in NMI context. Instead,
 * it uses an alternative implementation that temporarily stores
 * the strings into a per-CPU buffer. The content of the buffer
 * is later flushed into the main ring buffer via IRQ work.
 *
 * The alternative implementation is chosen transparently
 * by examining the current printk() context mask stored in the
 * @printk_context per-CPU variable.
 *
 * The implementation also allows flushing the strings from another CPU.
 * There are situations when we want to make sure that all buffers
 * have been handled, or when IRQs are blocked.
 */

#define SAFE_LOG_BUF_LEN ((1 << CONFIG_PRINTK_SAFE_LOG_BUF_SHIFT) -       \
                                sizeof(atomic_t) -                      \
                                sizeof(atomic_t) -                      \
                                sizeof(struct irq_work))

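/*
 * A sizing sketch (assuming the default CONFIG_PRINTK_SAFE_LOG_BUF_SHIFT
 * of 13): the three subtracted terms mirror the header fields of struct
 * printk_safe_seq_buf below, so that, padding aside, the whole per-CPU
 * struct occupies exactly 1 << 13 = 8192 bytes, of which everything but
 * the two atomic_t counters and the embedded irq_work is text buffer.
 */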
struct printk_safe_seq_buf {
        atomic_t                len;    /* length of written data */
        atomic_t                message_lost;
        struct irq_work         work;   /* IRQ work that flushes the buffer */
        unsigned char           buffer[SAFE_LOG_BUF_LEN];
};

static DEFINE_PER_CPU(struct printk_safe_seq_buf, safe_print_seq);
static DEFINE_PER_CPU(int, printk_context);

static DEFINE_RAW_SPINLOCK(safe_read_lock);

#ifdef CONFIG_PRINTK_NMI
static DEFINE_PER_CPU(struct printk_safe_seq_buf, nmi_print_seq);
#endif

/* Gets flushed in a safer context. */
static void queue_flush_work(struct printk_safe_seq_buf *s)
{
        if (printk_percpu_data_ready())
                irq_work_queue(&s->work);
}

/*
 * Add a message to the per-CPU, context-dependent buffer. NMI and
 * printk-safe contexts have dedicated buffers, because otherwise
 * a printk-safe writer preempted by NMI-printk would overwrite the
 * NMI messages.
 *
 * The messages are flushed from irq work (or from panic()), possibly
 * from another CPU, concurrently with printk_safe_log_store(). Should this
 * happen, printk_safe_log_store() will notice the buffer->len mismatch
 * and repeat the write.
 */
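/*
 * A concrete run of that retry (a sketch): the writer samples len == L
 * and formats its text at offset L. Meanwhile a flush on another CPU
 * copies the data out and resets len to 0. The writer's
 * atomic_cmpxchg(&s->len, L, L + add) now fails because len is 0, so it
 * jumps back to the "again" label, re-reads len == 0, issues smp_rmb()
 * to order against the reader, and rewrites the message at the start of
 * the buffer.
 */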
static __printf(2, 0) int printk_safe_log_store(struct printk_safe_seq_buf *s,
                                                const char *fmt, va_list args)
{
        int add;
        size_t len;
        va_list ap;

again:
        len = atomic_read(&s->len);

        /* The trailing '\0' is not counted into len. */
        if (len >= sizeof(s->buffer) - 1) {
                atomic_inc(&s->message_lost);
                queue_flush_work(s);
                return 0;
        }

        /*
         * Make sure that all old data have been read before the buffer
         * was reset. This is not needed when we just append data.
         */
        if (!len)
                smp_rmb();

        va_copy(ap, args);
        add = vscnprintf(s->buffer + len, sizeof(s->buffer) - len, fmt, ap);
        va_end(ap);
        if (!add)
                return 0;

        /*
         * Do it once again if the buffer has been flushed in the meantime.
         * Note that atomic_cmpxchg() is an implicit memory barrier that
         * makes sure that the data were written before updating s->len.
         */
        if (atomic_cmpxchg(&s->len, len, len + add) != len)
                goto again;

        queue_flush_work(s);
        return add;
}

static inline void printk_safe_flush_line(const char *text, int len)
{
        /*
         * Avoid any console driver calls from here, because we may be
         * in NMI or printk_safe context (when in panic). The messages
         * must go only into the ring buffer at this stage. Consoles will
         * get explicitly called later, when a crashdump is not generated.
         */
        printk_deferred("%.*s", len, text);
}

/* printk() the contents of the temporary buffer, line by line */
static int printk_safe_flush_buffer(const char *start, size_t len)
{
        const char *c, *end;
        bool header;

        c = start;
        end = start + len;
        header = true;

        /* Print line by line. */
        while (c < end) {
                if (*c == '\n') {
                        printk_safe_flush_line(start, c - start + 1);
                        start = ++c;
                        header = true;
                        continue;
                }

                /* Handle continuation lines or a missing newline. */
                if ((c + 1 < end) && printk_get_level(c)) {
                        if (header) {
                                c = printk_skip_level(c);
                                continue;
                        }

                        printk_safe_flush_line(start, c - start);
                        start = c++;
                        header = true;
                        continue;
                }

                header = false;
                c++;
        }

        /* Check if there was a partial line. Ignore a pure header. */
        if (start < end && !header) {
                static const char newline[] = KERN_CONT "\n";

                printk_safe_flush_line(start, end - start);
                printk_safe_flush_line(newline, strlen(newline));
        }

        return len;
}

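/*
 * Example of the splitting (a sketch): if the buffer holds
 * "<6>first line\n<4>partial", the loop above emits "<6>first line\n"
 * as one deferred record, then the partial-line check flushes
 * "<4>partial" followed by KERN_CONT "\n" so the record is properly
 * terminated. ("<6>" and "<4>" stand for the KERN_SOH level prefixes
 * that printk_get_level() recognizes.)
 */
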
static void report_message_lost(struct printk_safe_seq_buf *s)
{
        int lost = atomic_xchg(&s->message_lost, 0);

        if (lost)
                printk_deferred("Lost %d message(s)!\n", lost);
}

/*
 * Flush data from the associated per-CPU buffer. The function
 * can be called either via IRQ work or independently.
 */
static void __printk_safe_flush(struct irq_work *work)
{
        struct printk_safe_seq_buf *s =
                container_of(work, struct printk_safe_seq_buf, work);
        unsigned long flags;
        size_t len;
        int i;

        /*
         * The lock has two functions. First, one reader has to flush all
         * available messages to make the lockless synchronization with
         * writers easier. Second, we do not want to mix messages from
         * different CPUs. This is especially important when printing
         * a backtrace.
         */
        raw_spin_lock_irqsave(&safe_read_lock, flags);

        i = 0;
more:
        len = atomic_read(&s->len);

        /*
         * This is just a paranoid check that nobody has manipulated
         * the buffer in an unexpected way. If we printed something then
         * @len must only increase. Also it should never overflow the
         * buffer size.
         */
        if ((i && i >= len) || len > sizeof(s->buffer)) {
                const char *msg = "printk_safe_flush: internal error\n";

                printk_safe_flush_line(msg, strlen(msg));
                len = 0;
        }

        if (!len)
                goto out; /* Someone else has already flushed the buffer. */

        /* Make sure that the data have been written up to @len. */
        smp_rmb();
        i += printk_safe_flush_buffer(s->buffer + i, len - i);

        /*
         * Check that nothing has been added in the meantime and truncate
         * the buffer. Note that atomic_cmpxchg() is an implicit memory
         * barrier that makes sure that the data were copied before
         * updating s->len.
         */
        if (atomic_cmpxchg(&s->len, len, 0) != len)
                goto more;

out:
        report_message_lost(s);
        raw_spin_unlock_irqrestore(&safe_read_lock, flags);
}

/**
 * printk_safe_flush - flush all per-CPU NMI and printk-safe buffers.
 *
 * The buffers are flushed automatically via IRQ work. This function
 * is useful only when someone wants to be sure that all buffers have
 * been flushed at some point.
 */
void printk_safe_flush(void)
{
        int cpu;

        for_each_possible_cpu(cpu) {
#ifdef CONFIG_PRINTK_NMI
                __printk_safe_flush(&per_cpu(nmi_print_seq, cpu).work);
#endif
                __printk_safe_flush(&per_cpu(safe_print_seq, cpu).work);
        }
}

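/*
 * Usage sketch (callers live outside this file): the NMI backtrace code
 * is one expected user, calling printk_safe_flush() after it has asked
 * remote CPUs for backtraces, so that messages stored from NMI context
 * reach the main ring buffer before the requester continues.
 */
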
/**
 * printk_safe_flush_on_panic - flush all per-CPU buffers when the system
 *      goes down.
 *
 * Similar to printk_safe_flush() but it can be called even in NMI context
 * when the system goes down. It makes a best effort to get NMI messages
 * into the main ring buffer.
 *
 * Note that it could try harder when there is only one CPU online.
 */
void printk_safe_flush_on_panic(void)
{
        /*
         * Make sure that we can access the main ring buffer.
         * Do not risk a double release when more CPUs are up.
         */
        if (raw_spin_is_locked(&logbuf_lock)) {
                if (num_online_cpus() > 1)
                        return;

                debug_locks_off();
                raw_spin_lock_init(&logbuf_lock);
        }

        if (raw_spin_is_locked(&safe_read_lock)) {
                if (num_online_cpus() > 1)
                        return;

                debug_locks_off();
                raw_spin_lock_init(&safe_read_lock);
        }

        printk_safe_flush();
}

#ifdef CONFIG_PRINTK_NMI
/*
 * Safe printk() for NMI context. It uses a per-CPU buffer to
 * store the message. NMIs are not nested, so there is always only
 * one writer running. But the buffer might get flushed from another
 * CPU, so we need to be careful.
 */
static __printf(1, 0) int vprintk_nmi(const char *fmt, va_list args)
{
        struct printk_safe_seq_buf *s = this_cpu_ptr(&nmi_print_seq);

        return printk_safe_log_store(s, fmt, args);
}

void noinstr printk_nmi_enter(void)
{
        this_cpu_add(printk_context, PRINTK_NMI_CONTEXT_OFFSET);
}

void noinstr printk_nmi_exit(void)
{
        this_cpu_sub(printk_context, PRINTK_NMI_CONTEXT_OFFSET);
}

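/*
 * These are expected to be called from the core NMI entry/exit path
 * (nmi_enter()/nmi_exit()) rather than by ordinary code; adding
 * PRINTK_NMI_CONTEXT_OFFSET keeps the NMI state in the same per-CPU
 * counter that also carries the printk-safe nesting depth.
 */
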
/*
 * Marks code that might produce many messages in NMI context
 * and where the risk of losing them is more critical than possible
 * reordering.
 *
 * It has an effect only when called in NMI context. Then printk()
 * will try to store the messages into the main logbuf directly
 * and use the per-CPU buffers only as a fallback when the lock
 * is not available.
 */
void printk_nmi_direct_enter(void)
{
        if (this_cpu_read(printk_context) & PRINTK_NMI_CONTEXT_MASK)
                this_cpu_or(printk_context, PRINTK_NMI_DIRECT_CONTEXT_MASK);
}

void printk_nmi_direct_exit(void)
{
        this_cpu_and(printk_context, ~PRINTK_NMI_DIRECT_CONTEXT_MASK);
}

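/*
 * Usage sketch (hypothetical caller, for illustration): code dumping
 * many lines from NMI context, e.g. a backtrace handler, would bracket
 * the output like this:
 *
 *      printk_nmi_direct_enter();
 *      dump_stack();
 *      printk_nmi_direct_exit();
 */
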
#else

static __printf(1, 0) int vprintk_nmi(const char *fmt, va_list args)
{
        return 0;
}

#endif /* CONFIG_PRINTK_NMI */

/*
 * Lock-less printk(), to avoid deadlocks should printk() recurse
 * into itself. It uses a per-CPU buffer to store the message, just like
 * NMI.
 */
static __printf(1, 0) int vprintk_safe(const char *fmt, va_list args)
{
        struct printk_safe_seq_buf *s = this_cpu_ptr(&safe_print_seq);

        return printk_safe_log_store(s, fmt, args);
}

/* Can be preempted by NMI. */
void __printk_safe_enter(void)
{
        this_cpu_inc(printk_context);
}

/* Can be preempted by NMI. */
void __printk_safe_exit(void)
{
        this_cpu_dec(printk_context);
}

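/*
 * These low-level helpers are normally used via the
 * printk_safe_enter_irqsave()/printk_safe_exit_irqrestore() wrappers in
 * internal.h, which also disable local interrupts (a usage sketch):
 *
 *      unsigned long flags;
 *
 *      printk_safe_enter_irqsave(flags);
 *      ...                     (printk() here lands in the safe buffer)
 *      printk_safe_exit_irqrestore(flags);
 */

/*
 * Dispatch order below: the kdb trap first (if configured), then a
 * direct NMI store into the main ring buffer, then the per-CPU NMI
 * buffer, then the per-CPU safe buffer, and finally the default path.
 */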
__printf(1, 0) int vprintk_func(const char *fmt, va_list args)
{
#ifdef CONFIG_KGDB_KDB
        /* Allow passing printk() through to kdb but avoid recursion. */
        if (unlikely(kdb_trap_printk && kdb_printf_cpu < 0))
                return vkdb_printf(KDB_MSGSRC_PRINTK, fmt, args);
#endif

        /*
         * Try to use the main logbuf even in NMI. But avoid calling console
         * drivers that might have their own locks.
         */
        if ((this_cpu_read(printk_context) & PRINTK_NMI_DIRECT_CONTEXT_MASK) &&
            raw_spin_trylock(&logbuf_lock)) {
                int len;

                len = vprintk_store(0, LOGLEVEL_DEFAULT, NULL, fmt, args);
                raw_spin_unlock(&logbuf_lock);
                defer_console_output();
                return len;
        }

        /* Use the extra buffer in NMI when logbuf_lock is taken or in safe mode. */
        if (this_cpu_read(printk_context) & PRINTK_NMI_CONTEXT_MASK)
                return vprintk_nmi(fmt, args);

        /* Use the extra buffer to prevent a recursion deadlock in safe mode. */
        if (this_cpu_read(printk_context) & PRINTK_SAFE_CONTEXT_MASK)
                return vprintk_safe(fmt, args);

        /* No obstacles. */
        return vprintk_default(fmt, args);
}

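/*
 * Expected to be called once during early boot (from start_kernel()),
 * before other CPUs are up, so that every irq_work is initialized ahead
 * of the first queue_flush_work() call.
 */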
void __init printk_safe_init(void)
{
        int cpu;

        for_each_possible_cpu(cpu) {
                struct printk_safe_seq_buf *s;

                s = &per_cpu(safe_print_seq, cpu);
                init_irq_work(&s->work, __printk_safe_flush);

#ifdef CONFIG_PRINTK_NMI
                s = &per_cpu(nmi_print_seq, cpu);
                init_irq_work(&s->work, __printk_safe_flush);
#endif
        }

        /* Flush pending messages for which no IRQ work was scheduled. */
        printk_safe_flush();
}

Generated by: LCOV version 1.14