LCOV code coverage report (generated by LCOV 1.14)
File: kernel/irq/migration.c | Test: landlock.info | Date: 2021-04-22 12:43:58
Coverage: 0 of 37 lines (0.0 %), 0 of 3 functions (0.0 %)

// SPDX-License-Identifier: GPL-2.0

#include <linux/irq.h>
#include <linux/interrupt.h>

#include "internals.h"

/**
 * irq_fixup_move_pending - Clean up a pending irq move from a dying CPU
 * @desc:		Interrupt descriptor to clean up
 * @force_clear:	If set, clear the move pending bit unconditionally.
 *			If not set, clear it only when the dying CPU is the
 *			last one in the pending mask.
 *
 * Returns true if the pending bit was set and the pending mask contains an
 * online CPU other than the dying CPU.
 */
bool irq_fixup_move_pending(struct irq_desc *desc, bool force_clear)
{
	struct irq_data *data = irq_desc_get_irq_data(desc);

	if (!irqd_is_setaffinity_pending(data))
		return false;

	/*
	 * The outgoing CPU might be the last online target in a pending
	 * interrupt move. If that's the case, clear the pending move bit.
	 */
	if (cpumask_any_and(desc->pending_mask, cpu_online_mask) >= nr_cpu_ids) {
		irqd_clr_move_pending(data);
		return false;
	}
	if (force_clear)
		irqd_clr_move_pending(data);
	return true;
}

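For orientation, this helper is intended for the CPU-hotplug path, which has to decide where to point an interrupt when its current target CPU goes offline. A minimal sketch of such a caller follows; the function name and the fallback logic are illustrative assumptions modeled on the migrate-away pattern, not a copy of the kernel's actual hotplug code.

/*
 * Hypothetical caller sketch (not kernel code): migrate one interrupt
 * away from an outgoing CPU. Assumes desc->lock is held, which
 * irq_fixup_move_pending() requires for a stable pending_mask.
 */
static void example_migrate_irq_off_cpu(struct irq_desc *desc)
{
	struct irq_data *data = irq_desc_get_irq_data(desc);
	const struct cpumask *affinity;

	/*
	 * If a move was already pending and another online CPU remains
	 * in pending_mask, use that mask as the new target; a false
	 * return means the pending state was absent or unusable.
	 */
	if (irq_fixup_move_pending(desc, false))
		affinity = desc->pending_mask;
	else
		affinity = irq_data_get_affinity_mask(data);

	/* Fall back to all online CPUs if the chosen mask went stale. */
	if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids)
		affinity = cpu_online_mask;

	irq_do_set_affinity(data, affinity, false);
}
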
void irq_move_masked_irq(struct irq_data *idata)
{
	struct irq_desc *desc = irq_data_to_desc(idata);
	struct irq_data *data = &desc->irq_data;
	struct irq_chip *chip = data->chip;

	if (likely(!irqd_is_setaffinity_pending(data)))
		return;

	irqd_clr_move_pending(data);

	/*
	 * Paranoia: CPU-local interrupts shouldn't be calling in here anyway.
	 */
	if (irqd_is_per_cpu(data)) {
		WARN_ON(1);
		return;
	}

	if (unlikely(cpumask_empty(desc->pending_mask)))
		return;

	if (!chip->irq_set_affinity)
		return;

	assert_raw_spin_locked(&desc->lock);

	/*
	 * If there is a valid mask to work with, do the disable,
	 * re-program, enable sequence. This is not particularly important
	 * for level-triggered interrupts, but in the edge-triggered case
	 * we might be writing the RTE while an active trigger comes in,
	 * which could cause some IO-APICs to malfunction.
	 *
	 * For correct operation this depends on the caller having masked
	 * the irqs.
	 */
	if (cpumask_any_and(desc->pending_mask, cpu_online_mask) < nr_cpu_ids) {
		int ret;

		ret = irq_do_set_affinity(data, desc->pending_mask, false);
		/*
		 * If there is a cleanup pending in the underlying vector
		 * management, reschedule the move for the next interrupt.
		 * Leave desc->pending_mask intact.
		 */
		if (ret == -EBUSY) {
			irqd_set_move_pending(data);
			return;
		}
	}
	cpumask_clear(desc->pending_mask);
}

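The assert and the comment above spell out the calling contract: desc->lock must be held and the line must already be masked. A hypothetical call site that honors that contract might look like the fragment below; the function name and its placement in a flow handler are assumptions for illustration only.

/*
 * Hypothetical call site (not kernel code): a flow that already runs
 * with the line masked can apply the deferred move just before the
 * final unmask, so a re-programmed route cannot race with a new edge.
 */
static void example_finish_masked_irq(struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);
	/* Line is masked here; safe to re-program the affinity. */
	irq_move_masked_irq(&desc->irq_data);
	if (!irqd_irq_disabled(&desc->irq_data))
		desc->irq_data.chip->irq_unmask(&desc->irq_data);
	raw_spin_unlock(&desc->lock);
}
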
void __irq_move_irq(struct irq_data *idata)
{
	bool masked;

	/*
	 * Get the top level irq_data when CONFIG_IRQ_DOMAIN_HIERARCHY is
	 * enabled; the lookup should be optimized away when it is
	 * disabled, which avoids an "#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY"
	 * here.
	 */
	idata = irq_desc_get_irq_data(irq_data_to_desc(idata));

	if (unlikely(irqd_irq_disabled(idata)))
		return;

	/*
	 * Be careful vs. already masked interrupts. If this is a
	 * threaded interrupt with ONESHOT set, we can end up with an
	 * interrupt storm.
	 */
	masked = irqd_irq_masked(idata);
	if (!masked)
		idata->chip->irq_mask(idata);
	irq_move_masked_irq(idata);
	if (!masked)
		idata->chip->irq_unmask(idata);
}

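__irq_move_irq() itself is normally reached through a small static inline wrapper declared alongside it in include/linux/irq.h, which skips the out-of-line call in the common case where no affinity change is pending. The following is a sketch of that wrapper from memory; consult the header for the exact definition.

static inline void irq_move_irq(struct irq_data *data)
{
	/* Only take the function call when a move is actually pending. */
	if (unlikely(irqd_is_setaffinity_pending(data)))
		__irq_move_irq(data);
}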