LCOV - code coverage report
Current view: top level - block - blk-mq-cpumap.c (source / functions)
Test: landlock.info
Date: 2021-04-22 12:43:58

                 Hit    Total    Coverage
Lines:            29       32      90.6 %
Functions:         3        3     100.0 %

          Line data    Source code
       1             : // SPDX-License-Identifier: GPL-2.0
       2             : /*
       3             :  * CPU <-> hardware queue mapping helpers
       4             :  *
       5             :  * Copyright (C) 2013-2014 Jens Axboe
       6             :  */
       7             : #include <linux/kernel.h>
       8             : #include <linux/threads.h>
       9             : #include <linux/module.h>
      10             : #include <linux/mm.h>
      11             : #include <linux/smp.h>
      12             : #include <linux/cpu.h>
      13             : 
      14             : #include <linux/blk-mq.h>
      15             : #include "blk.h"
      16             : #include "blk-mq.h"
      17             : 
      18          36 : static int queue_index(struct blk_mq_queue_map *qmap,
      19             :                        unsigned int nr_queues, const int q)
      20             : {
      21          36 :         return qmap->queue_offset + (q % nr_queues);
      22             : }
      23             : 
      24          27 : static int get_first_sibling(unsigned int cpu)
      25             : {
      26          27 :         unsigned int ret;
      27             : 
      28          27 :         ret = cpumask_first(topology_sibling_cpumask(cpu));
      29          27 :         if (ret < nr_cpu_ids)
      30          27 :                 return ret;
      31             : 
      32           0 :         return cpu;
      33             : }
      34             : 
      35           9 : int blk_mq_map_queues(struct blk_mq_queue_map *qmap)
      36             : {
      37           9 :         unsigned int *map = qmap->mq_map;
      38           9 :         unsigned int nr_queues = qmap->nr_queues;
      39           9 :         unsigned int cpu, first_sibling, q = 0;
      40             : 
      41          45 :         for_each_possible_cpu(cpu)
      42          36 :                 map[cpu] = -1;
      43             : 
      44             :         /*
       45             :          * Spread queues among present CPUs first, to minimize the number
       46             :          * of dead queues, i.e. queues mapped only to non-present CPUs
      47             :          */
      48          18 :         for_each_present_cpu(cpu) {
      49          18 :                 if (q >= nr_queues)
      50             :                         break;
      51           9 :                 map[cpu] = queue_index(qmap, nr_queues, q++);
      52             :         }
      53             : 
      54          45 :         for_each_possible_cpu(cpu) {
      55          36 :                 if (map[cpu] != -1)
      56           9 :                         continue;
      57             :                 /*
      58             :                  * First do sequential mapping between CPUs and queues.
       59             :                  * If CPUs remain to be mapped and each core runs multiple
       60             :                  * hardware threads, map sibling threads to the same queue
       61             :                  * as a performance optimization.
      62             :                  */
      63          27 :                 if (q < nr_queues) {
      64           0 :                         map[cpu] = queue_index(qmap, nr_queues, q++);
      65             :                 } else {
      66          27 :                         first_sibling = get_first_sibling(cpu);
      67          27 :                         if (first_sibling == cpu)
      68          27 :                                 map[cpu] = queue_index(qmap, nr_queues, q++);
      69             :                         else
      70           0 :                                 map[cpu] = map[first_sibling];
      71             :                 }
      72             :         }
      73             : 
      74           9 :         return 0;
      75             : }
      76             : EXPORT_SYMBOL_GPL(blk_mq_map_queues);
      77             : 
      78             : /**
      79             :  * blk_mq_hw_queue_to_node - Look up the memory node for a hardware queue index
      80             :  * @qmap: CPU to hardware queue map.
      81             :  * @index: hardware queue index.
      82             :  *
      83             :  * We have no quick way of doing reverse lookups. This is only used at
      84             :  * queue init time, so runtime isn't important.
      85             :  */
      86          27 : int blk_mq_hw_queue_to_node(struct blk_mq_queue_map *qmap, unsigned int index)
      87             : {
      88          27 :         int i;
      89             : 
      90          27 :         for_each_possible_cpu(i) {
      91          27 :                 if (index == qmap->mq_map[i])
      92          27 :                         return cpu_to_node(i);
      93             :         }
      94             : 
      95             :         return NUMA_NO_NODE;
      96             : }
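
A usage note, outside the report proper: the sketch below shows how the exported blk_mq_map_queues() covered above is commonly reached from a driver. It assumes a kernel of roughly the vintage this report was generated against (circa v5.12, per the report date), where the blk_mq_ops->map_queues callback returns int; the function name example_map_queues is hypothetical and not part of the covered source.

          /* Hypothetical driver callback, for illustration only. */
          #include <linux/blk-mq.h>

          static int example_map_queues(struct blk_mq_tag_set *set)
          {
                  /*
                   * Spread the default hardware-queue type across all CPUs with
                   * the generic helper covered above. Drivers with no special
                   * topology needs can also leave ->map_queues NULL, in which
                   * case the block layer of this era falls back to
                   * blk_mq_map_queues() itself.
                   */
                  return blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
          }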

Generated by: LCOV version 1.14