LCOV - code coverage report
Current view: top level - drivers/md - dm-stats.c (source / functions) Hit Total Coverage
Test: landlock.info Lines: 4 637 0.6 %
Date: 2021-04-22 12:43:58 Functions: 1 33 3.0 %

          Line data    Source code
       1             : // SPDX-License-Identifier: GPL-2.0
       2             : #include <linux/errno.h>
       3             : #include <linux/numa.h>
       4             : #include <linux/slab.h>
       5             : #include <linux/rculist.h>
       6             : #include <linux/threads.h>
       7             : #include <linux/preempt.h>
       8             : #include <linux/irqflags.h>
       9             : #include <linux/vmalloc.h>
      10             : #include <linux/mm.h>
      11             : #include <linux/module.h>
      12             : #include <linux/device-mapper.h>
      13             : 
      14             : #include "dm-core.h"
      15             : #include "dm-stats.h"
      16             : 
      17             : #define DM_MSG_PREFIX "stats"
      18             : 
      19             : static int dm_stat_need_rcu_barrier;
      20             : 
      21             : /*
      22             :  * Using 64-bit values to avoid overflow (which is a
      23             :  * problem that block/genhd.c's IO accounting has).
      24             :  */
      25             : struct dm_stat_percpu {
      26             :         unsigned long long sectors[2];
      27             :         unsigned long long ios[2];
      28             :         unsigned long long merges[2];
      29             :         unsigned long long ticks[2];
      30             :         unsigned long long io_ticks[2];
      31             :         unsigned long long io_ticks_total;
      32             :         unsigned long long time_in_queue;
      33             :         unsigned long long *histogram;
      34             : };
      35             : 
      36             : struct dm_stat_shared {
      37             :         atomic_t in_flight[2];
      38             :         unsigned long long stamp;
      39             :         struct dm_stat_percpu tmp;
      40             : };
      41             : 
      42             : struct dm_stat {
      43             :         struct list_head list_entry;
      44             :         int id;
      45             :         unsigned stat_flags;
      46             :         size_t n_entries;
      47             :         sector_t start;
      48             :         sector_t end;
      49             :         sector_t step;
      50             :         unsigned n_histogram_entries;
      51             :         unsigned long long *histogram_boundaries;
      52             :         const char *program_id;
      53             :         const char *aux_data;
      54             :         struct rcu_head rcu_head;
      55             :         size_t shared_alloc_size;
      56             :         size_t percpu_alloc_size;
      57             :         size_t histogram_alloc_size;
      58             :         struct dm_stat_percpu *stat_percpu[NR_CPUS];
      59             :         struct dm_stat_shared stat_shared[];
      60             : };
      61             : 
      62             : #define STAT_PRECISE_TIMESTAMPS         1
      63             : 
      64             : struct dm_stats_last_position {
      65             :         sector_t last_sector;
      66             :         unsigned last_rw;
      67             : };
      68             : 
      69             : /*
      70             :  * A typo on the command line could possibly make the kernel run out of memory
      71             :  * and crash. To prevent the crash we account all used memory. We fail if we
      72             :  * exhaust 1/4 of all memory or 1/2 of vmalloc space.
      73             :  */
      74             : #define DM_STATS_MEMORY_FACTOR          4
      75             : #define DM_STATS_VMALLOC_FACTOR         2
      76             : 
      77             : static DEFINE_SPINLOCK(shared_memory_lock);
      78             : 
      79             : static unsigned long shared_memory_amount;
      80             : 
      81           0 : static bool __check_shared_memory(size_t alloc_size)
      82             : {
      83           0 :         size_t a;
      84             : 
      85           0 :         a = shared_memory_amount + alloc_size;
      86           0 :         if (a < shared_memory_amount)
      87             :                 return false;
      88           0 :         if (a >> PAGE_SHIFT > totalram_pages() / DM_STATS_MEMORY_FACTOR)
      89             :                 return false;
      90             : #ifdef CONFIG_MMU
      91           0 :         if (a > (VMALLOC_END - VMALLOC_START) / DM_STATS_VMALLOC_FACTOR)
      92           0 :                 return false;
      93             : #endif
      94             :         return true;
      95             : }
      96             : 
      97           0 : static bool check_shared_memory(size_t alloc_size)
      98             : {
      99           0 :         bool ret;
     100             : 
     101           0 :         spin_lock_irq(&shared_memory_lock);
     102             : 
     103           0 :         ret = __check_shared_memory(alloc_size);
     104             : 
     105           0 :         spin_unlock_irq(&shared_memory_lock);
     106             : 
     107           0 :         return ret;
     108             : }
     109             : 
     110           0 : static bool claim_shared_memory(size_t alloc_size)
     111             : {
     112           0 :         spin_lock_irq(&shared_memory_lock);
     113             : 
     114           0 :         if (!__check_shared_memory(alloc_size)) {
     115           0 :                 spin_unlock_irq(&shared_memory_lock);
     116           0 :                 return false;
     117             :         }
     118             : 
     119           0 :         shared_memory_amount += alloc_size;
     120             : 
     121           0 :         spin_unlock_irq(&shared_memory_lock);
     122             : 
     123           0 :         return true;
     124             : }
     125             : 
     126           0 : static void free_shared_memory(size_t alloc_size)
     127             : {
     128           0 :         unsigned long flags;
     129             : 
     130           0 :         spin_lock_irqsave(&shared_memory_lock, flags);
     131             : 
     132           0 :         if (WARN_ON_ONCE(shared_memory_amount < alloc_size)) {
     133           0 :                 spin_unlock_irqrestore(&shared_memory_lock, flags);
     134           0 :                 DMCRIT("Memory usage accounting bug.");
     135           0 :                 return;
     136             :         }
     137             : 
     138           0 :         shared_memory_amount -= alloc_size;
     139             : 
     140           0 :         spin_unlock_irqrestore(&shared_memory_lock, flags);
     141             : }
     142             : 
     143           0 : static void *dm_kvzalloc(size_t alloc_size, int node)
     144             : {
     145           0 :         void *p;
     146             : 
     147           0 :         if (!claim_shared_memory(alloc_size))
     148             :                 return NULL;
     149             : 
     150           0 :         p = kvzalloc_node(alloc_size, GFP_KERNEL | __GFP_NOMEMALLOC, node);
     151           0 :         if (p)
     152             :                 return p;
     153             : 
     154           0 :         free_shared_memory(alloc_size);
     155             : 
     156           0 :         return NULL;
     157             : }
     158             : 
     159           0 : static void dm_kvfree(void *ptr, size_t alloc_size)
     160             : {
     161           0 :         if (!ptr)
     162             :                 return;
     163             : 
     164           0 :         free_shared_memory(alloc_size);
     165             : 
     166           0 :         kvfree(ptr);
     167             : }
     168             : 
/*
 * Free a dm_stat and everything hanging off it.  Used both directly and as
 * an RCU callback (the rcu_head is embedded in struct dm_stat).
 *
 * Note the order: all per-CPU buffers and the shared histogram are released
 * before 's' itself, because their pointers and the recorded allocation
 * sizes live inside 's'.
 */
static void dm_stat_free(struct rcu_head *head)
{
	int cpu;
	struct dm_stat *s = container_of(head, struct dm_stat, rcu_head);

	kfree(s->histogram_boundaries);
	kfree(s->program_id);
	kfree(s->aux_data);
	for_each_possible_cpu(cpu) {
		/* entry 0 holds the base pointer of the whole histogram area */
		dm_kvfree(s->stat_percpu[cpu][0].histogram, s->histogram_alloc_size);
		dm_kvfree(s->stat_percpu[cpu], s->percpu_alloc_size);
	}
	dm_kvfree(s->stat_shared[0].tmp.histogram, s->histogram_alloc_size);
	dm_kvfree(s, s->shared_alloc_size);	/* must be last */
}
     184             : 
     185           0 : static int dm_stat_in_flight(struct dm_stat_shared *shared)
     186             : {
     187           0 :         return atomic_read(&shared->in_flight[READ]) +
     188           0 :                atomic_read(&shared->in_flight[WRITE]);
     189             : }
     190             : 
/*
 * Initialize a dm_stats container: empty region list, mutex, and a per-CPU
 * "last position" record used for merge detection, primed with sentinel
 * values so the first bio never looks like a merge.
 */
void dm_stats_init(struct dm_stats *stats)
{
	int cpu;
	struct dm_stats_last_position *last;

	mutex_init(&stats->mutex);
	INIT_LIST_HEAD(&stats->list);
	/*
	 * NOTE(review): alloc_percpu() can fail but the result is not
	 * checked here; per_cpu_ptr() below would then dereference a NULL
	 * percpu cookie.  Fixing this needs an int return type (interface
	 * change) — confirm against callers before changing.
	 */
	stats->last = alloc_percpu(struct dm_stats_last_position);
	for_each_possible_cpu(cpu) {
		last = per_cpu_ptr(stats->last, cpu);
		last->last_sector = (sector_t)ULLONG_MAX;	/* impossible sector */
		last->last_rw = UINT_MAX;			/* impossible rw value */
	}
}
     205             : 
/*
 * Tear down a dm_stats container, freeing every remaining region.
 * Before freeing each region, verify that no I/O is still accounted as
 * in-flight in any of its entries; a non-zero counter here means an
 * accounting leak, which is reported loudly but does not abort cleanup.
 */
void dm_stats_cleanup(struct dm_stats *stats)
{
	size_t ni;
	struct dm_stat *s;
	struct dm_stat_shared *shared;

	while (!list_empty(&stats->list)) {
		s = container_of(stats->list.next, struct dm_stat, list_entry);
		list_del(&s->list_entry);
		for (ni = 0; ni < s->n_entries; ni++) {
			shared = &s->stat_shared[ni];
			if (WARN_ON(dm_stat_in_flight(shared))) {
				DMCRIT("leaked in-flight counter at index %lu "
				       "(start %llu, end %llu, step %llu): reads %d, writes %d",
				       (unsigned long)ni,
				       (unsigned long long)s->start,
				       (unsigned long long)s->end,
				       (unsigned long long)s->step,
				       atomic_read(&shared->in_flight[READ]),
				       atomic_read(&shared->in_flight[WRITE]));
			}
		}
		/* direct free is safe: nothing can reference s any more */
		dm_stat_free(&s->rcu_head);
	}
	free_percpu(stats->last);
	mutex_destroy(&stats->mutex);
}
     233             : 
/*
 * Create a new statistics region covering sectors [start, end) divided into
 * areas of @step sectors, allocate all per-entry / per-CPU / histogram
 * storage, and insert the region into stats->list (kept sorted by id, using
 * the lowest free id).  Returns the new region's non-negative id, or a
 * negative errno on failure.
 *
 * The device is suspended and resumed around the list insertion so that the
 * new counters start out exact (see comment below).  All failures funnel
 * through dm_stat_free(), which tolerates partially populated regions
 * because dm_kvzalloc() zero-fills.
 */
static int dm_stats_create(struct dm_stats *stats, sector_t start, sector_t end,
			   sector_t step, unsigned stat_flags,
			   unsigned n_histogram_entries,
			   unsigned long long *histogram_boundaries,
			   const char *program_id, const char *aux_data,
			   void (*suspend_callback)(struct mapped_device *),
			   void (*resume_callback)(struct mapped_device *),
			   struct mapped_device *md)
{
	struct list_head *l;
	struct dm_stat *s, *tmp_s;
	sector_t n_entries;
	size_t ni;
	size_t shared_alloc_size;
	size_t percpu_alloc_size;
	size_t histogram_alloc_size;
	struct dm_stat_percpu *p;
	int cpu;
	int ret_id;
	int r;

	if (end < start || !step)
		return -EINVAL;

	/* number of areas, rounding the last partial area up */
	n_entries = end - start;
	if (dm_sector_div64(n_entries, step))
		n_entries++;

	/* reject counts that don't survive the sector_t -> size_t narrowing */
	if (n_entries != (size_t)n_entries || !(size_t)(n_entries + 1))
		return -EOVERFLOW;

	/* each of the three size computations is re-checked for overflow */
	shared_alloc_size = struct_size(s, stat_shared, n_entries);
	if ((shared_alloc_size - sizeof(struct dm_stat)) / sizeof(struct dm_stat_shared) != n_entries)
		return -EOVERFLOW;

	percpu_alloc_size = (size_t)n_entries * sizeof(struct dm_stat_percpu);
	if (percpu_alloc_size / sizeof(struct dm_stat_percpu) != n_entries)
		return -EOVERFLOW;

	/* n_histogram_entries boundaries define n_histogram_entries + 1 buckets */
	histogram_alloc_size = (n_histogram_entries + 1) * (size_t)n_entries * sizeof(unsigned long long);
	if (histogram_alloc_size / (n_histogram_entries + 1) != (size_t)n_entries * sizeof(unsigned long long))
		return -EOVERFLOW;

	/* pre-flight the total budget before committing to any allocation */
	if (!check_shared_memory(shared_alloc_size + histogram_alloc_size +
				 num_possible_cpus() * (percpu_alloc_size + histogram_alloc_size)))
		return -ENOMEM;

	s = dm_kvzalloc(shared_alloc_size, NUMA_NO_NODE);
	if (!s)
		return -ENOMEM;

	s->stat_flags = stat_flags;
	s->n_entries = n_entries;
	s->start = start;
	s->end = end;
	s->step = step;
	/* record allocation sizes so dm_stat_free() can return the accounting */
	s->shared_alloc_size = shared_alloc_size;
	s->percpu_alloc_size = percpu_alloc_size;
	s->histogram_alloc_size = histogram_alloc_size;

	s->n_histogram_entries = n_histogram_entries;
	s->histogram_boundaries = kmemdup(histogram_boundaries,
					  s->n_histogram_entries * sizeof(unsigned long long), GFP_KERNEL);
	if (!s->histogram_boundaries) {
		r = -ENOMEM;
		goto out;
	}

	s->program_id = kstrdup(program_id, GFP_KERNEL);
	if (!s->program_id) {
		r = -ENOMEM;
		goto out;
	}
	s->aux_data = kstrdup(aux_data, GFP_KERNEL);
	if (!s->aux_data) {
		r = -ENOMEM;
		goto out;
	}

	for (ni = 0; ni < n_entries; ni++) {
		atomic_set(&s->stat_shared[ni].in_flight[READ], 0);
		atomic_set(&s->stat_shared[ni].in_flight[WRITE], 0);
	}

	if (s->n_histogram_entries) {
		unsigned long long *hi;
		/* one contiguous area, carved into per-entry slices below */
		hi = dm_kvzalloc(s->histogram_alloc_size, NUMA_NO_NODE);
		if (!hi) {
			r = -ENOMEM;
			goto out;
		}
		for (ni = 0; ni < n_entries; ni++) {
			s->stat_shared[ni].tmp.histogram = hi;
			hi += s->n_histogram_entries + 1;
		}
	}

	for_each_possible_cpu(cpu) {
		/* allocate each CPU's counters on that CPU's NUMA node */
		p = dm_kvzalloc(percpu_alloc_size, cpu_to_node(cpu));
		if (!p) {
			r = -ENOMEM;
			goto out;
		}
		s->stat_percpu[cpu] = p;
		if (s->n_histogram_entries) {
			unsigned long long *hi;
			hi = dm_kvzalloc(s->histogram_alloc_size, cpu_to_node(cpu));
			if (!hi) {
				r = -ENOMEM;
				goto out;
			}
			for (ni = 0; ni < n_entries; ni++) {
				p[ni].histogram = hi;
				hi += s->n_histogram_entries + 1;
			}
		}
	}

	/*
	 * Suspend/resume to make sure there is no i/o in flight,
	 * so that newly created statistics will be exact.
	 *
	 * (note: we couldn't suspend earlier because we must not
	 * allocate memory while suspended)
	 */
	suspend_callback(md);

	/* find the lowest unused id; the list stays sorted ascending */
	mutex_lock(&stats->mutex);
	s->id = 0;
	list_for_each(l, &stats->list) {
		tmp_s = container_of(l, struct dm_stat, list_entry);
		if (WARN_ON(tmp_s->id < s->id)) {
			r = -EINVAL;
			goto out_unlock_resume;
		}
		if (tmp_s->id > s->id)
			break;	/* gap found: reuse this id */
		if (unlikely(s->id == INT_MAX)) {
			r = -ENFILE;
			goto out_unlock_resume;
		}
		s->id++;
	}
	ret_id = s->id;
	list_add_tail_rcu(&s->list_entry, l);
	mutex_unlock(&stats->mutex);

	resume_callback(md);

	return ret_id;

out_unlock_resume:
	mutex_unlock(&stats->mutex);
	resume_callback(md);
out:
	dm_stat_free(&s->rcu_head);
	return r;
}
     392             : 
     393           0 : static struct dm_stat *__dm_stats_find(struct dm_stats *stats, int id)
     394             : {
     395           0 :         struct dm_stat *s;
     396             : 
     397           0 :         list_for_each_entry(s, &stats->list, list_entry) {
     398           0 :                 if (s->id > id)
     399             :                         break;
     400           0 :                 if (s->id == id)
     401             :                         return s;
     402             :         }
     403             : 
     404             :         return NULL;
     405             : }
     406             : 
     407           0 : static int dm_stats_delete(struct dm_stats *stats, int id)
     408             : {
     409           0 :         struct dm_stat *s;
     410           0 :         int cpu;
     411             : 
     412           0 :         mutex_lock(&stats->mutex);
     413             : 
     414           0 :         s = __dm_stats_find(stats, id);
     415           0 :         if (!s) {
     416           0 :                 mutex_unlock(&stats->mutex);
     417           0 :                 return -ENOENT;
     418             :         }
     419             : 
     420           0 :         list_del_rcu(&s->list_entry);
     421           0 :         mutex_unlock(&stats->mutex);
     422             : 
     423             :         /*
     424             :          * vfree can't be called from RCU callback
     425             :          */
     426           0 :         for_each_possible_cpu(cpu)
     427           0 :                 if (is_vmalloc_addr(s->stat_percpu) ||
     428           0 :                     is_vmalloc_addr(s->stat_percpu[cpu][0].histogram))
     429           0 :                         goto do_sync_free;
     430           0 :         if (is_vmalloc_addr(s) ||
     431           0 :             is_vmalloc_addr(s->stat_shared[0].tmp.histogram)) {
     432           0 : do_sync_free:
     433           0 :                 synchronize_rcu_expedited();
     434           0 :                 dm_stat_free(&s->rcu_head);
     435             :         } else {
     436           0 :                 WRITE_ONCE(dm_stat_need_rcu_barrier, 1);
     437           0 :                 call_rcu(&s->rcu_head, dm_stat_free);
     438             :         }
     439             :         return 0;
     440             : }
     441             : 
/*
 * Emit one line per region (optionally filtered to regions whose program_id
 * matches @program) into @result.  Always returns 1 (message-consumed
 * convention for dm messages).
 *
 * Note: DMEMIT is a macro that implicitly appends to 'result' using the
 * local variables 'sz' and 'maxlen', so 'sz' must keep exactly this name.
 */
static int dm_stats_list(struct dm_stats *stats, const char *program,
			 char *result, unsigned maxlen)
{
	struct dm_stat *s;
	sector_t len;
	unsigned sz = 0;

	/*
	 * Output format:
	 *   <region_id>: <start_sector>+<length> <step> <program_id> <aux_data>
	 */

	mutex_lock(&stats->mutex);
	list_for_each_entry(s, &stats->list, list_entry) {
		/* NULL program means "list everything" */
		if (!program || !strcmp(program, s->program_id)) {
			len = s->end - s->start;
			DMEMIT("%d: %llu+%llu %llu %s %s", s->id,
				(unsigned long long)s->start,
				(unsigned long long)len,
				(unsigned long long)s->step,
				s->program_id,
				s->aux_data);
			if (s->stat_flags & STAT_PRECISE_TIMESTAMPS)
				DMEMIT(" precise_timestamps");
			if (s->n_histogram_entries) {
				unsigned i;
				/* comma-separated boundary list, e.g. histogram:1,5,10 */
				DMEMIT(" histogram:");
				for (i = 0; i < s->n_histogram_entries; i++) {
					if (i)
						DMEMIT(",");
					DMEMIT("%llu", s->histogram_boundaries[i]);
				}
			}
			DMEMIT("\n");
		}
	}
	mutex_unlock(&stats->mutex);

	return 1;
}
     482             : 
     483           0 : static void dm_stat_round(struct dm_stat *s, struct dm_stat_shared *shared,
     484             :                           struct dm_stat_percpu *p)
     485             : {
     486             :         /*
     487             :          * This is racy, but so is part_round_stats_single.
     488             :          */
     489           0 :         unsigned long long now, difference;
     490           0 :         unsigned in_flight_read, in_flight_write;
     491             : 
     492           0 :         if (likely(!(s->stat_flags & STAT_PRECISE_TIMESTAMPS)))
     493           0 :                 now = jiffies;
     494             :         else
     495           0 :                 now = ktime_to_ns(ktime_get());
     496             : 
     497           0 :         difference = now - shared->stamp;
     498           0 :         if (!difference)
     499             :                 return;
     500             : 
     501           0 :         in_flight_read = (unsigned)atomic_read(&shared->in_flight[READ]);
     502           0 :         in_flight_write = (unsigned)atomic_read(&shared->in_flight[WRITE]);
     503           0 :         if (in_flight_read)
     504           0 :                 p->io_ticks[READ] += difference;
     505           0 :         if (in_flight_write)
     506           0 :                 p->io_ticks[WRITE] += difference;
     507           0 :         if (in_flight_read + in_flight_write) {
     508           0 :                 p->io_ticks_total += difference;
     509           0 :                 p->time_in_queue += (in_flight_read + in_flight_write) * difference;
     510             :         }
     511           0 :         shared->stamp = now;
     512             : }
     513             : 
/*
 * Account one bio event (start when !end, completion when end) against a
 * single area @entry of region @s, for direction @idx (READ/WRITE).
 * On completion, @len sectors, merge count, duration and (optionally) a
 * latency histogram bucket are credited to this CPU's counters.
 */
static void dm_stat_for_entry(struct dm_stat *s, size_t entry,
			      int idx, sector_t len,
			      struct dm_stats_aux *stats_aux, bool end,
			      unsigned long duration_jiffies)
{
	struct dm_stat_shared *shared = &s->stat_shared[entry];
	struct dm_stat_percpu *p;

	/*
	 * For strict correctness we should use local_irq_save/restore
	 * instead of preempt_disable/enable.
	 *
	 * preempt_disable/enable is racy if the driver finishes bios
	 * from non-interrupt context as well as from interrupt context
	 * or from more different interrupts.
	 *
	 * On 64-bit architectures the race only results in not counting some
	 * events, so it is acceptable.  On 32-bit architectures the race could
	 * cause the counter going off by 2^32, so we need to do proper locking
	 * there.
	 *
	 * part_stat_lock()/part_stat_unlock() have this race too.
	 */
#if BITS_PER_LONG == 32
	unsigned long flags;
	local_irq_save(flags);
#else
	preempt_disable();
#endif
	p = &s->stat_percpu[smp_processor_id()][entry];

	if (!end) {
		/* bio start: roll forward the time accounting, then count it in */
		dm_stat_round(s, shared, p);
		atomic_inc(&shared->in_flight[idx]);
	} else {
		unsigned long long duration;
		dm_stat_round(s, shared, p);
		atomic_dec(&shared->in_flight[idx]);
		p->sectors[idx] += len;
		p->ios[idx] += 1;
		p->merges[idx] += stats_aux->merged;
		/* ticks and histogram use jiffies/msecs normally, ns when precise */
		if (!(s->stat_flags & STAT_PRECISE_TIMESTAMPS)) {
			p->ticks[idx] += duration_jiffies;
			duration = jiffies_to_msecs(duration_jiffies);
		} else {
			p->ticks[idx] += stats_aux->duration_ns;
			duration = stats_aux->duration_ns;
		}
		if (s->n_histogram_entries) {
			/*
			 * Binary search for the histogram bucket: boundaries
			 * are sorted, bucket 'lo' holds durations in
			 * [boundary[lo-1], boundary[lo]).
			 */
			unsigned lo = 0, hi = s->n_histogram_entries + 1;
			while (lo + 1 < hi) {
				unsigned mid = (lo + hi) / 2;
				if (s->histogram_boundaries[mid - 1] > duration) {
					hi = mid;
				} else {
					lo = mid;
				}

			}
			p->histogram[lo]++;
		}
	}

#if BITS_PER_LONG == 32
	local_irq_restore(flags);
#else
	preempt_enable();
#endif
}
     583             : 
     584           0 : static void __dm_stat_bio(struct dm_stat *s, int bi_rw,
     585             :                           sector_t bi_sector, sector_t end_sector,
     586             :                           bool end, unsigned long duration_jiffies,
     587             :                           struct dm_stats_aux *stats_aux)
     588             : {
     589           0 :         sector_t rel_sector, offset, todo, fragment_len;
     590           0 :         size_t entry;
     591             : 
     592           0 :         if (end_sector <= s->start || bi_sector >= s->end)
     593             :                 return;
     594           0 :         if (unlikely(bi_sector < s->start)) {
     595           0 :                 rel_sector = 0;
     596           0 :                 todo = end_sector - s->start;
     597             :         } else {
     598           0 :                 rel_sector = bi_sector - s->start;
     599           0 :                 todo = end_sector - bi_sector;
     600             :         }
     601           0 :         if (unlikely(end_sector > s->end))
     602           0 :                 todo -= (end_sector - s->end);
     603             : 
     604           0 :         offset = dm_sector_div64(rel_sector, s->step);
     605           0 :         entry = rel_sector;
     606           0 :         do {
     607           0 :                 if (WARN_ON_ONCE(entry >= s->n_entries)) {
     608           0 :                         DMCRIT("Invalid area access in region id %d", s->id);
     609           0 :                         return;
     610             :                 }
     611           0 :                 fragment_len = todo;
     612           0 :                 if (fragment_len > s->step - offset)
     613             :                         fragment_len = s->step - offset;
     614           0 :                 dm_stat_for_entry(s, entry, bi_rw, fragment_len,
     615             :                                   stats_aux, end, duration_jiffies);
     616           0 :                 todo -= fragment_len;
     617           0 :                 entry++;
     618           0 :                 offset = 0;
     619           0 :         } while (unlikely(todo != 0));
     620             : }
     621             : 
     622           0 : void dm_stats_account_io(struct dm_stats *stats, unsigned long bi_rw,
     623             :                          sector_t bi_sector, unsigned bi_sectors, bool end,
     624             :                          unsigned long duration_jiffies,
     625             :                          struct dm_stats_aux *stats_aux)
     626             : {
     627           0 :         struct dm_stat *s;
     628           0 :         sector_t end_sector;
     629           0 :         struct dm_stats_last_position *last;
     630           0 :         bool got_precise_time;
     631             : 
     632           0 :         if (unlikely(!bi_sectors))
     633             :                 return;
     634             : 
     635           0 :         end_sector = bi_sector + bi_sectors;
     636             : 
     637           0 :         if (!end) {
     638             :                 /*
     639             :                  * A race condition can at worst result in the merged flag being
     640             :                  * misrepresented, so we don't have to disable preemption here.
     641             :                  */
     642           0 :                 last = raw_cpu_ptr(stats->last);
     643           0 :                 stats_aux->merged =
     644           0 :                         (bi_sector == (READ_ONCE(last->last_sector) &&
     645           0 :                                        ((bi_rw == WRITE) ==
     646           0 :                                         (READ_ONCE(last->last_rw) == WRITE))
     647             :                                        ));
     648           0 :                 WRITE_ONCE(last->last_sector, end_sector);
     649           0 :                 WRITE_ONCE(last->last_rw, bi_rw);
     650             :         }
     651             : 
     652           0 :         rcu_read_lock();
     653             : 
     654           0 :         got_precise_time = false;
     655           0 :         list_for_each_entry_rcu(s, &stats->list, list_entry) {
     656           0 :                 if (s->stat_flags & STAT_PRECISE_TIMESTAMPS && !got_precise_time) {
     657           0 :                         if (!end)
     658           0 :                                 stats_aux->duration_ns = ktime_to_ns(ktime_get());
     659             :                         else
     660           0 :                                 stats_aux->duration_ns = ktime_to_ns(ktime_get()) - stats_aux->duration_ns;
     661             :                         got_precise_time = true;
     662             :                 }
     663           0 :                 __dm_stat_bio(s, bi_rw, bi_sector, end_sector, end, duration_jiffies, stats_aux);
     664             :         }
     665             : 
     666           0 :         rcu_read_unlock();
     667             : }
     668             : 
     669           0 : static void __dm_stat_init_temporary_percpu_totals(struct dm_stat_shared *shared,
     670             :                                                    struct dm_stat *s, size_t x)
     671             : {
     672           0 :         int cpu;
     673           0 :         struct dm_stat_percpu *p;
     674             : 
     675           0 :         local_irq_disable();
     676           0 :         p = &s->stat_percpu[smp_processor_id()][x];
     677           0 :         dm_stat_round(s, shared, p);
     678           0 :         local_irq_enable();
     679             : 
     680           0 :         shared->tmp.sectors[READ] = 0;
     681           0 :         shared->tmp.sectors[WRITE] = 0;
     682           0 :         shared->tmp.ios[READ] = 0;
     683           0 :         shared->tmp.ios[WRITE] = 0;
     684           0 :         shared->tmp.merges[READ] = 0;
     685           0 :         shared->tmp.merges[WRITE] = 0;
     686           0 :         shared->tmp.ticks[READ] = 0;
     687           0 :         shared->tmp.ticks[WRITE] = 0;
     688           0 :         shared->tmp.io_ticks[READ] = 0;
     689           0 :         shared->tmp.io_ticks[WRITE] = 0;
     690           0 :         shared->tmp.io_ticks_total = 0;
     691           0 :         shared->tmp.time_in_queue = 0;
     692             : 
     693           0 :         if (s->n_histogram_entries)
     694           0 :                 memset(shared->tmp.histogram, 0, (s->n_histogram_entries + 1) * sizeof(unsigned long long));
     695             : 
     696           0 :         for_each_possible_cpu(cpu) {
     697           0 :                 p = &s->stat_percpu[cpu][x];
     698           0 :                 shared->tmp.sectors[READ] += READ_ONCE(p->sectors[READ]);
     699           0 :                 shared->tmp.sectors[WRITE] += READ_ONCE(p->sectors[WRITE]);
     700           0 :                 shared->tmp.ios[READ] += READ_ONCE(p->ios[READ]);
     701           0 :                 shared->tmp.ios[WRITE] += READ_ONCE(p->ios[WRITE]);
     702           0 :                 shared->tmp.merges[READ] += READ_ONCE(p->merges[READ]);
     703           0 :                 shared->tmp.merges[WRITE] += READ_ONCE(p->merges[WRITE]);
     704           0 :                 shared->tmp.ticks[READ] += READ_ONCE(p->ticks[READ]);
     705           0 :                 shared->tmp.ticks[WRITE] += READ_ONCE(p->ticks[WRITE]);
     706           0 :                 shared->tmp.io_ticks[READ] += READ_ONCE(p->io_ticks[READ]);
     707           0 :                 shared->tmp.io_ticks[WRITE] += READ_ONCE(p->io_ticks[WRITE]);
     708           0 :                 shared->tmp.io_ticks_total += READ_ONCE(p->io_ticks_total);
     709           0 :                 shared->tmp.time_in_queue += READ_ONCE(p->time_in_queue);
     710           0 :                 if (s->n_histogram_entries) {
     711             :                         unsigned i;
     712           0 :                         for (i = 0; i < s->n_histogram_entries + 1; i++)
     713           0 :                                 shared->tmp.histogram[i] += READ_ONCE(p->histogram[i]);
     714             :                 }
     715             :         }
     716           0 : }
     717             : 
     718           0 : static void __dm_stat_clear(struct dm_stat *s, size_t idx_start, size_t idx_end,
     719             :                             bool init_tmp_percpu_totals)
     720             : {
     721           0 :         size_t x;
     722           0 :         struct dm_stat_shared *shared;
     723           0 :         struct dm_stat_percpu *p;
     724             : 
     725           0 :         for (x = idx_start; x < idx_end; x++) {
     726           0 :                 shared = &s->stat_shared[x];
     727           0 :                 if (init_tmp_percpu_totals)
     728           0 :                         __dm_stat_init_temporary_percpu_totals(shared, s, x);
     729           0 :                 local_irq_disable();
     730           0 :                 p = &s->stat_percpu[smp_processor_id()][x];
     731           0 :                 p->sectors[READ] -= shared->tmp.sectors[READ];
     732           0 :                 p->sectors[WRITE] -= shared->tmp.sectors[WRITE];
     733           0 :                 p->ios[READ] -= shared->tmp.ios[READ];
     734           0 :                 p->ios[WRITE] -= shared->tmp.ios[WRITE];
     735           0 :                 p->merges[READ] -= shared->tmp.merges[READ];
     736           0 :                 p->merges[WRITE] -= shared->tmp.merges[WRITE];
     737           0 :                 p->ticks[READ] -= shared->tmp.ticks[READ];
     738           0 :                 p->ticks[WRITE] -= shared->tmp.ticks[WRITE];
     739           0 :                 p->io_ticks[READ] -= shared->tmp.io_ticks[READ];
     740           0 :                 p->io_ticks[WRITE] -= shared->tmp.io_ticks[WRITE];
     741           0 :                 p->io_ticks_total -= shared->tmp.io_ticks_total;
     742           0 :                 p->time_in_queue -= shared->tmp.time_in_queue;
     743           0 :                 local_irq_enable();
     744           0 :                 if (s->n_histogram_entries) {
     745             :                         unsigned i;
     746           0 :                         for (i = 0; i < s->n_histogram_entries + 1; i++) {
     747           0 :                                 local_irq_disable();
     748           0 :                                 p = &s->stat_percpu[smp_processor_id()][x];
     749           0 :                                 p->histogram[i] -= shared->tmp.histogram[i];
     750           0 :                                 local_irq_enable();
     751             :                         }
     752             :                 }
     753             :         }
     754           0 : }
     755             : 
     756           0 : static int dm_stats_clear(struct dm_stats *stats, int id)
     757             : {
     758           0 :         struct dm_stat *s;
     759             : 
     760           0 :         mutex_lock(&stats->mutex);
     761             : 
     762           0 :         s = __dm_stats_find(stats, id);
     763           0 :         if (!s) {
     764           0 :                 mutex_unlock(&stats->mutex);
     765           0 :                 return -ENOENT;
     766             :         }
     767             : 
     768           0 :         __dm_stat_clear(s, 0, s->n_entries, true);
     769             : 
     770           0 :         mutex_unlock(&stats->mutex);
     771             : 
     772           0 :         return 1;
     773             : }
     774             : 
     775             : /*
     776             :  * This is like jiffies_to_msec, but works for 64-bit values.
     777             :  */
     778           0 : static unsigned long long dm_jiffies_to_msec64(struct dm_stat *s, unsigned long long j)
     779             : {
     780           0 :         unsigned long long result;
     781           0 :         unsigned mult;
     782             : 
     783           0 :         if (s->stat_flags & STAT_PRECISE_TIMESTAMPS)
     784             :                 return j;
     785             : 
     786           0 :         result = 0;
     787           0 :         if (j)
     788           0 :                 result = jiffies_to_msecs(j & 0x3fffff);
     789           0 :         if (j >= 1 << 22) {
     790           0 :                 mult = jiffies_to_msecs(1 << 22);
     791           0 :                 result += (unsigned long long)mult * (unsigned long long)jiffies_to_msecs((j >> 22) & 0x3fffff);
     792             :         }
     793           0 :         if (j >= 1ULL << 44)
     794           0 :                 result += (unsigned long long)mult * (unsigned long long)mult * (unsigned long long)jiffies_to_msecs(j >> 44);
     795             : 
     796             :         return result;
     797             : }
     798             : 
/*
 * Format the counters of region @id, for areas [idx_start,
 * idx_start + idx_len), into @result (at most @maxlen bytes).
 * If @clear, the printed counters are reset afterwards using the totals
 * snapshotted during printing, so no updates are lost in between.
 * Returns 1 (message handled) or -ENOENT if no such region.
 */
static int dm_stats_print(struct dm_stats *stats, int id,
                          size_t idx_start, size_t idx_len,
                          bool clear, char *result, unsigned maxlen)
{
        unsigned sz = 0;        /* current offset into result; DMEMIT appends via sz */
        struct dm_stat *s;
        size_t x;
        sector_t start, end, step;
        size_t idx_end;
        struct dm_stat_shared *shared;

        /*
         * Output format:
         *   <start_sector>+<length> counters
         */

        mutex_lock(&stats->mutex);

        s = __dm_stats_find(stats, id);
        if (!s) {
                mutex_unlock(&stats->mutex);
                return -ENOENT;
        }

        /* Clamp the requested window to the region; idx_end < idx_start
         * catches idx_start + idx_len overflow (idx_len may be ULONG_MAX). */
        idx_end = idx_start + idx_len;
        if (idx_end < idx_start ||
            idx_end > s->n_entries)
                idx_end = s->n_entries;

        if (idx_start > idx_end)
                idx_start = idx_end;

        step = s->step;
        start = s->start + (step * idx_start);

        /* One output line per area; start advances by one step per iteration. */
        for (x = idx_start; x < idx_end; x++, start = end) {
                shared = &s->stat_shared[x];
                end = start + step;
                if (unlikely(end > s->end))
                        end = s->end;   /* the last area may be shorter than step */

                /* Snapshot all-CPU totals for this area into shared->tmp. */
                __dm_stat_init_temporary_percpu_totals(shared, s, x);

                DMEMIT("%llu+%llu %llu %llu %llu %llu %llu %llu %llu %llu %d %llu %llu %llu %llu",
                       (unsigned long long)start,
                       (unsigned long long)step,
                       shared->tmp.ios[READ],
                       shared->tmp.merges[READ],
                       shared->tmp.sectors[READ],
                       dm_jiffies_to_msec64(s, shared->tmp.ticks[READ]),
                       shared->tmp.ios[WRITE],
                       shared->tmp.merges[WRITE],
                       shared->tmp.sectors[WRITE],
                       dm_jiffies_to_msec64(s, shared->tmp.ticks[WRITE]),
                       dm_stat_in_flight(shared),
                       dm_jiffies_to_msec64(s, shared->tmp.io_ticks_total),
                       dm_jiffies_to_msec64(s, shared->tmp.time_in_queue),
                       dm_jiffies_to_msec64(s, shared->tmp.io_ticks[READ]),
                       dm_jiffies_to_msec64(s, shared->tmp.io_ticks[WRITE]));
                if (s->n_histogram_entries) {
                        unsigned i;
                        /* Histogram buckets appended as " b0:b1:...:bn". */
                        for (i = 0; i < s->n_histogram_entries + 1; i++) {
                                DMEMIT("%s%llu", !i ? " " : ":", shared->tmp.histogram[i]);
                        }
                }
                DMEMIT("\n");

                /* Stop early if the output buffer is (nearly) full; userspace
                 * retries with a larger buffer.  Skips the clear below so no
                 * counts are discarded without being reported. */
                if (unlikely(sz + 1 >= maxlen))
                        goto buffer_overflow;
        }

        if (clear)
                /* shared->tmp still holds the printed snapshots; reuse them. */
                __dm_stat_clear(s, idx_start, idx_end, false);

buffer_overflow:
        mutex_unlock(&stats->mutex);

        return 1;
}
     878             : 
     879           0 : static int dm_stats_set_aux(struct dm_stats *stats, int id, const char *aux_data)
     880             : {
     881           0 :         struct dm_stat *s;
     882           0 :         const char *new_aux_data;
     883             : 
     884           0 :         mutex_lock(&stats->mutex);
     885             : 
     886           0 :         s = __dm_stats_find(stats, id);
     887           0 :         if (!s) {
     888           0 :                 mutex_unlock(&stats->mutex);
     889           0 :                 return -ENOENT;
     890             :         }
     891             : 
     892           0 :         new_aux_data = kstrdup(aux_data, GFP_KERNEL);
     893           0 :         if (!new_aux_data) {
     894           0 :                 mutex_unlock(&stats->mutex);
     895           0 :                 return -ENOMEM;
     896             :         }
     897             : 
     898           0 :         kfree(s->aux_data);
     899           0 :         s->aux_data = new_aux_data;
     900             : 
     901           0 :         mutex_unlock(&stats->mutex);
     902             : 
     903           0 :         return 0;
     904             : }
     905             : 
     906           0 : static int parse_histogram(const char *h, unsigned *n_histogram_entries,
     907             :                            unsigned long long **histogram_boundaries)
     908             : {
     909           0 :         const char *q;
     910           0 :         unsigned n;
     911           0 :         unsigned long long last;
     912             : 
     913           0 :         *n_histogram_entries = 1;
     914           0 :         for (q = h; *q; q++)
     915           0 :                 if (*q == ',')
     916           0 :                         (*n_histogram_entries)++;
     917             : 
     918           0 :         *histogram_boundaries = kmalloc_array(*n_histogram_entries,
     919             :                                               sizeof(unsigned long long),
     920             :                                               GFP_KERNEL);
     921           0 :         if (!*histogram_boundaries)
     922             :                 return -ENOMEM;
     923             : 
     924             :         n = 0;
     925             :         last = 0;
     926           0 :         while (1) {
     927           0 :                 unsigned long long hi;
     928           0 :                 int s;
     929           0 :                 char ch;
     930           0 :                 s = sscanf(h, "%llu%c", &hi, &ch);
     931           0 :                 if (!s || (s == 2 && ch != ','))
     932           0 :                         return -EINVAL;
     933           0 :                 if (hi <= last)
     934             :                         return -EINVAL;
     935           0 :                 last = hi;
     936           0 :                 (*histogram_boundaries)[n] = hi;
     937           0 :                 if (s == 1)
     938             :                         return 0;
     939           0 :                 h = strchr(h, ',') + 1;
     940           0 :                 n++;
     941             :         }
     942             : }
     943             : 
/*
 * Handle the "@stats_create" message: create a new statistics region on
 * @md and print its id into @result.
 * Returns 1 (message handled, id in @result), a negative errno on
 * failure, or 1 with an overflowed buffer to make userspace retry with
 * a larger one.
 */
static int message_stats_create(struct mapped_device *md,
                                unsigned argc, char **argv,
                                char *result, unsigned maxlen)
{
        int r;
        int id;
        char dummy;     /* catches trailing junk after sscanf conversions */
        unsigned long long start, end, len, step;
        unsigned divisor;
        const char *program_id, *aux_data;
        unsigned stat_flags = 0;

        unsigned n_histogram_entries = 0;
        unsigned long long *histogram_boundaries = NULL;

        struct dm_arg_set as, as_backup;
        const char *a;
        unsigned feature_args;

        /*
         * Input format:
         *   <range> <step> [<extra_parameters> <parameters>] [<program_id> [<aux_data>]]
         */

        if (argc < 3)
                goto ret_einval;

        as.argc = argc;
        as.argv = argv;
        dm_consume_args(&as, 1);        /* skip the "@stats_create" token itself */

        /* <range>: "-" means the whole device, else "<start>+<length>". */
        a = dm_shift_arg(&as);
        if (!strcmp(a, "-")) {
                start = 0;
                len = dm_get_size(md);
                if (!len)
                        len = 1;
        } else if (sscanf(a, "%llu+%llu%c", &start, &len, &dummy) != 2 ||
                   start != (sector_t)start || len != (sector_t)len)
                /* the (sector_t) round-trip rejects values that don't fit */
                goto ret_einval;

        end = start + len;
        if (start >= end)
                goto ret_einval;

        /* <step>: "/<n>" divides the range into n areas (rounding the step
         * up), a plain number is the area size in sectors. */
        a = dm_shift_arg(&as);
        if (sscanf(a, "/%u%c", &divisor, &dummy) == 1) {
                if (!divisor)
                        return -EINVAL; /* nothing allocated yet, direct return is safe */
                step = end - start;
                if (do_div(step, divisor))
                        step++;         /* round up so divisor areas cover the range */
                if (!step)
                        step = 1;
        } else if (sscanf(a, "%llu%c", &step, &dummy) != 1 ||
                   step != (sector_t)step || !step)
                goto ret_einval;

        /* Optional feature list: "<count> <feature>...".  Back up the arg
         * cursor first: if the next token is not a count, it is the
         * program_id and parsing resumes from the backup. */
        as_backup = as;
        a = dm_shift_arg(&as);
        if (a && sscanf(a, "%u%c", &feature_args, &dummy) == 1) {
                while (feature_args--) {
                        a = dm_shift_arg(&as);
                        if (!a)
                                goto ret_einval;
                        if (!strcasecmp(a, "precise_timestamps"))
                                stat_flags |= STAT_PRECISE_TIMESTAMPS;
                        else if (!strncasecmp(a, "histogram:", 10)) {
                                if (n_histogram_entries)
                                        goto ret_einval;        /* at most one histogram spec */
                                if ((r = parse_histogram(a + 10, &n_histogram_entries, &histogram_boundaries)))
                                        goto ret;
                        } else
                                goto ret_einval;
                }
        } else {
                as = as_backup;
        }

        program_id = "-";
        aux_data = "-";

        a = dm_shift_arg(&as);
        if (a)
                program_id = a;

        a = dm_shift_arg(&as);
        if (a)
                aux_data = a;

        if (as.argc)
                goto ret_einval;        /* trailing unconsumed arguments */

        /*
         * If a buffer overflow happens after we created the region,
         * it's too late (the userspace would retry with a larger
         * buffer, but the region id that caused the overflow is already
         * leaked).  So we must detect buffer overflow in advance.
         */
        snprintf(result, maxlen, "%d", INT_MAX);
        if (dm_message_test_buffer_overflow(result, maxlen)) {
                r = 1;
                goto ret;
        }

        /* dm_stats_create takes ownership of histogram_boundaries on
         * success; it is set to NULL-safe kfree below either way. */
        id = dm_stats_create(dm_get_stats(md), start, end, step, stat_flags,
                             n_histogram_entries, histogram_boundaries, program_id, aux_data,
                             dm_internal_suspend_fast, dm_internal_resume_fast, md);
        if (id < 0) {
                r = id;
                goto ret;
        }

        snprintf(result, maxlen, "%d", id);

        r = 1;
        goto ret;

ret_einval:
        r = -EINVAL;
ret:
        kfree(histogram_boundaries);
        return r;
}
    1068             : 
    1069           0 : static int message_stats_delete(struct mapped_device *md,
    1070             :                                 unsigned argc, char **argv)
    1071             : {
    1072           0 :         int id;
    1073           0 :         char dummy;
    1074             : 
    1075           0 :         if (argc != 2)
    1076             :                 return -EINVAL;
    1077             : 
    1078           0 :         if (sscanf(argv[1], "%d%c", &id, &dummy) != 1 || id < 0)
    1079             :                 return -EINVAL;
    1080             : 
    1081           0 :         return dm_stats_delete(dm_get_stats(md), id);
    1082             : }
    1083             : 
    1084           0 : static int message_stats_clear(struct mapped_device *md,
    1085             :                                unsigned argc, char **argv)
    1086             : {
    1087           0 :         int id;
    1088           0 :         char dummy;
    1089             : 
    1090           0 :         if (argc != 2)
    1091             :                 return -EINVAL;
    1092             : 
    1093           0 :         if (sscanf(argv[1], "%d%c", &id, &dummy) != 1 || id < 0)
    1094             :                 return -EINVAL;
    1095             : 
    1096           0 :         return dm_stats_clear(dm_get_stats(md), id);
    1097             : }
    1098             : 
    1099           0 : static int message_stats_list(struct mapped_device *md,
    1100             :                               unsigned argc, char **argv,
    1101             :                               char *result, unsigned maxlen)
    1102             : {
    1103           0 :         int r;
    1104           0 :         const char *program = NULL;
    1105             : 
    1106           0 :         if (argc < 1 || argc > 2)
    1107             :                 return -EINVAL;
    1108             : 
    1109           0 :         if (argc > 1) {
    1110           0 :                 program = kstrdup(argv[1], GFP_KERNEL);
    1111           0 :                 if (!program)
    1112             :                         return -ENOMEM;
    1113             :         }
    1114             : 
    1115           0 :         r = dm_stats_list(dm_get_stats(md), program, result, maxlen);
    1116             : 
    1117           0 :         kfree(program);
    1118             : 
    1119           0 :         return r;
    1120             : }
    1121             : 
    1122           0 : static int message_stats_print(struct mapped_device *md,
    1123             :                                unsigned argc, char **argv, bool clear,
    1124             :                                char *result, unsigned maxlen)
    1125             : {
    1126           0 :         int id;
    1127           0 :         char dummy;
    1128           0 :         unsigned long idx_start = 0, idx_len = ULONG_MAX;
    1129             : 
    1130           0 :         if (argc != 2 && argc != 4)
    1131             :                 return -EINVAL;
    1132             : 
    1133           0 :         if (sscanf(argv[1], "%d%c", &id, &dummy) != 1 || id < 0)
    1134             :                 return -EINVAL;
    1135             : 
    1136           0 :         if (argc > 3) {
    1137           0 :                 if (strcmp(argv[2], "-") &&
    1138           0 :                     sscanf(argv[2], "%lu%c", &idx_start, &dummy) != 1)
    1139             :                         return -EINVAL;
    1140           0 :                 if (strcmp(argv[3], "-") &&
    1141           0 :                     sscanf(argv[3], "%lu%c", &idx_len, &dummy) != 1)
    1142             :                         return -EINVAL;
    1143             :         }
    1144             : 
    1145           0 :         return dm_stats_print(dm_get_stats(md), id, idx_start, idx_len, clear,
    1146             :                               result, maxlen);
    1147             : }
    1148             : 
    1149           0 : static int message_stats_set_aux(struct mapped_device *md,
    1150             :                                  unsigned argc, char **argv)
    1151             : {
    1152           0 :         int id;
    1153           0 :         char dummy;
    1154             : 
    1155           0 :         if (argc != 3)
    1156             :                 return -EINVAL;
    1157             : 
    1158           0 :         if (sscanf(argv[1], "%d%c", &id, &dummy) != 1 || id < 0)
    1159             :                 return -EINVAL;
    1160             : 
    1161           0 :         return dm_stats_set_aux(dm_get_stats(md), id, argv[2]);
    1162             : }
    1163             : 
    1164           0 : int dm_stats_message(struct mapped_device *md, unsigned argc, char **argv,
    1165             :                      char *result, unsigned maxlen)
    1166             : {
    1167           0 :         int r;
    1168             : 
    1169             :         /* All messages here must start with '@' */
    1170           0 :         if (!strcasecmp(argv[0], "@stats_create"))
    1171           0 :                 r = message_stats_create(md, argc, argv, result, maxlen);
    1172           0 :         else if (!strcasecmp(argv[0], "@stats_delete"))
    1173           0 :                 r = message_stats_delete(md, argc, argv);
    1174           0 :         else if (!strcasecmp(argv[0], "@stats_clear"))
    1175           0 :                 r = message_stats_clear(md, argc, argv);
    1176           0 :         else if (!strcasecmp(argv[0], "@stats_list"))
    1177           0 :                 r = message_stats_list(md, argc, argv, result, maxlen);
    1178           0 :         else if (!strcasecmp(argv[0], "@stats_print"))
    1179           0 :                 r = message_stats_print(md, argc, argv, false, result, maxlen);
    1180           0 :         else if (!strcasecmp(argv[0], "@stats_print_clear"))
    1181           0 :                 r = message_stats_print(md, argc, argv, true, result, maxlen);
    1182           0 :         else if (!strcasecmp(argv[0], "@stats_set_aux"))
    1183           0 :                 r = message_stats_set_aux(md, argc, argv);
    1184             :         else
    1185             :                 return 2; /* this wasn't a stats message */
    1186             : 
    1187           0 :         if (r == -EINVAL)
    1188           0 :                 DMWARN("Invalid parameters for message %s", argv[0]);
    1189             : 
    1190             :         return r;
    1191             : }
    1192             : 
    1193           1 : int __init dm_statistics_init(void)
    1194             : {
    1195           1 :         shared_memory_amount = 0;
    1196           1 :         dm_stat_need_rcu_barrier = 0;
    1197           1 :         return 0;
    1198             : }
    1199             : 
    1200           0 : void dm_statistics_exit(void)
    1201             : {
    1202           0 :         if (dm_stat_need_rcu_barrier)
    1203           0 :                 rcu_barrier();
    1204           0 :         if (WARN_ON(shared_memory_amount))
    1205           0 :                 DMCRIT("shared_memory_amount leaked: %lu", shared_memory_amount);
    1206           0 : }
    1207             : 
/*
 * Expose the running total of memory held by statistics areas as a
 * read-only (S_IRUGO) module parameter:
 * /sys/module/dm_mod/parameters/stats_current_allocated_bytes
 */
module_param_named(stats_current_allocated_bytes, shared_memory_amount, ulong, S_IRUGO);
MODULE_PARM_DESC(stats_current_allocated_bytes, "Memory currently used by statistics");

Generated by: LCOV version 1.14