LCOV - code coverage report
Current view: top level - crypto - scompress.c
Test:         landlock.info
Date:         2021-04-22 12:43:58

                      Hit    Total    Coverage
Lines:                 11      144       7.6 %
Functions:              2       16      12.5 %

          Line data    Source code
       1             : // SPDX-License-Identifier: GPL-2.0-or-later
       2             : /*
       3             :  * Synchronous Compression operations
       4             :  *
       5             :  * Copyright 2015 LG Electronics Inc.
       6             :  * Copyright (c) 2016, Intel Corporation
       7             :  * Author: Giovanni Cabiddu <giovanni.cabiddu@intel.com>
       8             :  */
       9             : #include <linux/errno.h>
      10             : #include <linux/kernel.h>
      11             : #include <linux/module.h>
      12             : #include <linux/seq_file.h>
      13             : #include <linux/slab.h>
      14             : #include <linux/string.h>
      15             : #include <linux/crypto.h>
      16             : #include <linux/compiler.h>
      17             : #include <linux/vmalloc.h>
      18             : #include <crypto/algapi.h>
      19             : #include <linux/cryptouser.h>
      20             : #include <net/netlink.h>
      21             : #include <linux/scatterlist.h>
      22             : #include <crypto/scatterwalk.h>
      23             : #include <crypto/internal/acompress.h>
      24             : #include <crypto/internal/scompress.h>
      25             : #include "internal.h"
      26             : 
      27             : struct scomp_scratch {
      28             :         spinlock_t      lock;
      29             :         void            *src;
      30             :         void            *dst;
      31             : };
      32             : 
      33             : static DEFINE_PER_CPU(struct scomp_scratch, scomp_scratch) = {
      34             :         .lock = __SPIN_LOCK_UNLOCKED(scomp_scratch.lock),
      35             : };
      36             : 
      37             : static const struct crypto_type crypto_scomp_type;
      38             : static int scomp_scratch_users;
      39             : static DEFINE_MUTEX(scomp_lock);
      40             : 
      41             : #ifdef CONFIG_NET
      42           0 : static int crypto_scomp_report(struct sk_buff *skb, struct crypto_alg *alg)
      43             : {
      44           0 :         struct crypto_report_comp rscomp;
      45             : 
      46           0 :         memset(&rscomp, 0, sizeof(rscomp));
      47             : 
      48           0 :         strscpy(rscomp.type, "scomp", sizeof(rscomp.type));
      49             : 
      50           0 :         return nla_put(skb, CRYPTOCFGA_REPORT_COMPRESS,
      51             :                        sizeof(rscomp), &rscomp);
      52             : }
      53             : #else
      54             : static int crypto_scomp_report(struct sk_buff *skb, struct crypto_alg *alg)
      55             : {
      56             :         return -ENOSYS;
      57             : }
      58             : #endif
      59             : 
      60             : static void crypto_scomp_show(struct seq_file *m, struct crypto_alg *alg)
      61             :         __maybe_unused;
      62             : 
      63           0 : static void crypto_scomp_show(struct seq_file *m, struct crypto_alg *alg)
      64             : {
      65           0 :         seq_puts(m, "type         : scomp\n");
      66           0 : }
      67             : 
      68           0 : static void crypto_scomp_free_scratches(void)
      69             : {
      70           0 :         struct scomp_scratch *scratch;
      71           0 :         int i;
      72             : 
      73           0 :         for_each_possible_cpu(i) {
      74           0 :                 scratch = per_cpu_ptr(&scomp_scratch, i);
      75             : 
      76           0 :                 vfree(scratch->src);
      77           0 :                 vfree(scratch->dst);
      78           0 :                 scratch->src = NULL;
      79           0 :                 scratch->dst = NULL;
      80             :         }
      81           0 : }
      82             : 
      83           0 : static int crypto_scomp_alloc_scratches(void)
      84             : {
      85           0 :         struct scomp_scratch *scratch;
      86           0 :         int i;
      87             : 
      88           0 :         for_each_possible_cpu(i) {
      89           0 :                 void *mem;
      90             : 
      91           0 :                 scratch = per_cpu_ptr(&scomp_scratch, i);
      92             : 
      93           0 :                 mem = vmalloc_node(SCOMP_SCRATCH_SIZE, cpu_to_node(i));
      94           0 :                 if (!mem)
      95           0 :                         goto error;
      96           0 :                 scratch->src = mem;
      97           0 :                 mem = vmalloc_node(SCOMP_SCRATCH_SIZE, cpu_to_node(i));
      98           0 :                 if (!mem)
      99           0 :                         goto error;
     100           0 :                 scratch->dst = mem;
     101             :         }
     102             :         return 0;
     103           0 : error:
     104           0 :         crypto_scomp_free_scratches();
     105           0 :         return -ENOMEM;
     106             : }
     107             : 
     108           0 : static int crypto_scomp_init_tfm(struct crypto_tfm *tfm)
     109             : {
     110           0 :         int ret = 0;
     111             : 
     112           0 :         mutex_lock(&scomp_lock);
     113           0 :         if (!scomp_scratch_users++)
     114           0 :                 ret = crypto_scomp_alloc_scratches();
     115           0 :         mutex_unlock(&scomp_lock);
     116             : 
     117           0 :         return ret;
     118             : }
     119             : 
     120           0 : static int scomp_acomp_comp_decomp(struct acomp_req *req, int dir)
     121             : {
     122           0 :         struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
     123           0 :         void **tfm_ctx = acomp_tfm_ctx(tfm);
     124           0 :         struct crypto_scomp *scomp = *tfm_ctx;
     125           0 :         void **ctx = acomp_request_ctx(req);
     126           0 :         struct scomp_scratch *scratch;
     127           0 :         int ret;
     128             : 
     129           0 :         if (!req->src || !req->slen || req->slen > SCOMP_SCRATCH_SIZE)
     130             :                 return -EINVAL;
     131             : 
     132           0 :         if (req->dst && !req->dlen)
     133             :                 return -EINVAL;
     134             : 
     135           0 :         if (!req->dlen || req->dlen > SCOMP_SCRATCH_SIZE)
     136           0 :                 req->dlen = SCOMP_SCRATCH_SIZE;
     137             : 
     138           0 :         scratch = raw_cpu_ptr(&scomp_scratch);
     139           0 :         spin_lock(&scratch->lock);
     140             : 
     141           0 :         scatterwalk_map_and_copy(scratch->src, req->src, 0, req->slen, 0);
     142           0 :         if (dir)
     143           0 :                 ret = crypto_scomp_compress(scomp, scratch->src, req->slen,
     144           0 :                                             scratch->dst, &req->dlen, *ctx);
     145             :         else
     146           0 :                 ret = crypto_scomp_decompress(scomp, scratch->src, req->slen,
     147           0 :                                               scratch->dst, &req->dlen, *ctx);
     148           0 :         if (!ret) {
     149           0 :                 if (!req->dst) {
     150           0 :                         req->dst = sgl_alloc(req->dlen, GFP_ATOMIC, NULL);
     151           0 :                         if (!req->dst) {
     152           0 :                                 ret = -ENOMEM;
     153           0 :                                 goto out;
     154             :                         }
     155             :                 }
     156           0 :                 scatterwalk_map_and_copy(scratch->dst, req->dst, 0, req->dlen,
     157             :                                          1);
     158             :         }
     159           0 : out:
     160           0 :         spin_unlock(&scratch->lock);
     161           0 :         return ret;
     162             : }
     163             : 
     164           0 : static int scomp_acomp_compress(struct acomp_req *req)
     165             : {
     166           0 :         return scomp_acomp_comp_decomp(req, 1);
     167             : }
     168             : 
     169           0 : static int scomp_acomp_decompress(struct acomp_req *req)
     170             : {
     171           0 :         return scomp_acomp_comp_decomp(req, 0);
     172             : }
     173             : 
     174           0 : static void crypto_exit_scomp_ops_async(struct crypto_tfm *tfm)
     175             : {
     176           0 :         struct crypto_scomp **ctx = crypto_tfm_ctx(tfm);
     177             : 
     178           0 :         crypto_free_scomp(*ctx);
     179             : 
     180           0 :         mutex_lock(&scomp_lock);
     181           0 :         if (!--scomp_scratch_users)
     182           0 :                 crypto_scomp_free_scratches();
     183           0 :         mutex_unlock(&scomp_lock);
     184           0 : }
     185             : 
     186           0 : int crypto_init_scomp_ops_async(struct crypto_tfm *tfm)
     187             : {
     188           0 :         struct crypto_alg *calg = tfm->__crt_alg;
     189           0 :         struct crypto_acomp *crt = __crypto_acomp_tfm(tfm);
     190           0 :         struct crypto_scomp **ctx = crypto_tfm_ctx(tfm);
     191           0 :         struct crypto_scomp *scomp;
     192             : 
     193           0 :         if (!crypto_mod_get(calg))
     194             :                 return -EAGAIN;
     195             : 
     196           0 :         scomp = crypto_create_tfm(calg, &crypto_scomp_type);
     197           0 :         if (IS_ERR(scomp)) {
     198           0 :                 crypto_mod_put(calg);
     199           0 :                 return PTR_ERR(scomp);
     200             :         }
     201             : 
     202           0 :         *ctx = scomp;
     203           0 :         tfm->exit = crypto_exit_scomp_ops_async;
     204             : 
     205           0 :         crt->compress = scomp_acomp_compress;
     206           0 :         crt->decompress = scomp_acomp_decompress;
     207           0 :         crt->dst_free = sgl_free;
     208           0 :         crt->reqsize = sizeof(void *);
     209             : 
     210           0 :         return 0;
     211             : }
     212             : 
     213           0 : struct acomp_req *crypto_acomp_scomp_alloc_ctx(struct acomp_req *req)
     214             : {
     215           0 :         struct crypto_acomp *acomp = crypto_acomp_reqtfm(req);
     216           0 :         struct crypto_tfm *tfm = crypto_acomp_tfm(acomp);
     217           0 :         struct crypto_scomp **tfm_ctx = crypto_tfm_ctx(tfm);
     218           0 :         struct crypto_scomp *scomp = *tfm_ctx;
     219           0 :         void *ctx;
     220             : 
     221           0 :         ctx = crypto_scomp_alloc_ctx(scomp);
     222           0 :         if (IS_ERR(ctx)) {
     223           0 :                 kfree(req);
     224           0 :                 return NULL;
     225             :         }
     226             : 
     227           0 :         *req->__ctx = ctx;
     228             : 
     229           0 :         return req;
     230             : }
     231             : 
     232           0 : void crypto_acomp_scomp_free_ctx(struct acomp_req *req)
     233             : {
     234           0 :         struct crypto_acomp *acomp = crypto_acomp_reqtfm(req);
     235           0 :         struct crypto_tfm *tfm = crypto_acomp_tfm(acomp);
     236           0 :         struct crypto_scomp **tfm_ctx = crypto_tfm_ctx(tfm);
     237           0 :         struct crypto_scomp *scomp = *tfm_ctx;
     238           0 :         void *ctx = *req->__ctx;
     239             : 
     240           0 :         if (ctx)
     241           0 :                 crypto_scomp_free_ctx(scomp, ctx);
     242           0 : }
     243             : 
     244             : static const struct crypto_type crypto_scomp_type = {
     245             :         .extsize = crypto_alg_extsize,
     246             :         .init_tfm = crypto_scomp_init_tfm,
     247             : #ifdef CONFIG_PROC_FS
     248             :         .show = crypto_scomp_show,
     249             : #endif
     250             :         .report = crypto_scomp_report,
     251             :         .maskclear = ~CRYPTO_ALG_TYPE_MASK,
     252             :         .maskset = CRYPTO_ALG_TYPE_MASK,
     253             :         .type = CRYPTO_ALG_TYPE_SCOMPRESS,
     254             :         .tfmsize = offsetof(struct crypto_scomp, base),
     255             : };
     256             : 
     257           2 : int crypto_register_scomp(struct scomp_alg *alg)
     258             : {
     259           2 :         struct crypto_alg *base = &alg->base;
     260             : 
     261           2 :         base->cra_type = &crypto_scomp_type;
     262           2 :         base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
     263           2 :         base->cra_flags |= CRYPTO_ALG_TYPE_SCOMPRESS;
     264             : 
     265           2 :         return crypto_register_alg(base);
     266             : }
     267             : EXPORT_SYMBOL_GPL(crypto_register_scomp);
     268             : 
     269           0 : void crypto_unregister_scomp(struct scomp_alg *alg)
     270             : {
     271           0 :         crypto_unregister_alg(&alg->base);
     272           0 : }
     273             : EXPORT_SYMBOL_GPL(crypto_unregister_scomp);
     274             : 
     275           1 : int crypto_register_scomps(struct scomp_alg *algs, int count)
     276             : {
     277           1 :         int i, ret;
     278             : 
     279           3 :         for (i = 0; i < count; i++) {
     280           2 :                 ret = crypto_register_scomp(&algs[i]);
     281           2 :                 if (ret)
     282           0 :                         goto err;
     283             :         }
     284             : 
     285             :         return 0;
     286             : 
     287           0 : err:
     288           0 :         for (--i; i >= 0; --i)
     289           0 :                 crypto_unregister_scomp(&algs[i]);
     290             : 
     291             :         return ret;
     292             : }
     293             : EXPORT_SYMBOL_GPL(crypto_register_scomps);
     294             : 
     295           0 : void crypto_unregister_scomps(struct scomp_alg *algs, int count)
     296             : {
     297           0 :         int i;
     298             : 
     299           0 :         for (i = count - 1; i >= 0; --i)
     300           0 :                 crypto_unregister_scomp(&algs[i]);
     301           0 : }
     302             : EXPORT_SYMBOL_GPL(crypto_unregister_scomps);
     303             : 
     304             : MODULE_LICENSE("GPL");
     305             : MODULE_DESCRIPTION("Synchronous compression type");
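The listing above is the only interface a synchronous compression driver has to satisfy: fill in a struct scomp_alg and hand it to crypto_register_scomp(). The sketch below is illustrative and not part of scompress.c; the "copy"/"copy-scomp" algorithm name is hypothetical and the "compression" is a do-nothing memcpy placeholder, kept only so the callback signatures stay self-contained.

/*
 * Illustrative only -- not part of scompress.c.  Minimal scomp driver
 * sketch against the crypto_register_scomp() API exported above.  The
 * "compression" is a plain copy so the example needs no real codec.
 */
#include <linux/module.h>
#include <linux/string.h>
#include <crypto/internal/scompress.h>

static void *copy_scomp_alloc_ctx(struct crypto_scomp *tfm)
{
	/* No per-request state needed for this placeholder. */
	return NULL;
}

static void copy_scomp_free_ctx(struct crypto_scomp *tfm, void *ctx)
{
}

static int copy_scomp_compress(struct crypto_scomp *tfm, const u8 *src,
			       unsigned int slen, u8 *dst, unsigned int *dlen,
			       void *ctx)
{
	/* Placeholder: "compressed" output is a verbatim copy of the input. */
	if (slen > *dlen)
		return -ENOSPC;

	memcpy(dst, src, slen);
	*dlen = slen;
	return 0;
}

static int copy_scomp_decompress(struct crypto_scomp *tfm, const u8 *src,
				 unsigned int slen, u8 *dst, unsigned int *dlen,
				 void *ctx)
{
	if (slen > *dlen)
		return -ENOSPC;

	memcpy(dst, src, slen);
	*dlen = slen;
	return 0;
}

static struct scomp_alg copy_scomp = {
	.alloc_ctx	= copy_scomp_alloc_ctx,
	.free_ctx	= copy_scomp_free_ctx,
	.compress	= copy_scomp_compress,
	.decompress	= copy_scomp_decompress,
	.base		= {
		.cra_name	 = "copy",		/* hypothetical name */
		.cra_driver_name = "copy-scomp",
		.cra_priority	 = 100,
		.cra_module	 = THIS_MODULE,
	},
};

static int __init copy_scomp_mod_init(void)
{
	return crypto_register_scomp(&copy_scomp);
}

static void __exit copy_scomp_mod_exit(void)
{
	crypto_unregister_scomp(&copy_scomp);
}

module_init(copy_scomp_mod_init);
module_exit(copy_scomp_mod_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Illustrative pass-through scomp driver (sketch)");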

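On the consumer side, scomp algorithms are reached through the acompress front end: crypto_init_scomp_ops_async() above wires crypto_acomp_compress()/crypto_acomp_decompress() to scomp_acomp_comp_decomp(). A minimal sketch of that path, assuming the hypothetical "copy" algorithm from the previous example and with terse error handling:

/*
 * Illustrative only -- not part of scompress.c.  How a kernel consumer
 * reaches scomp_acomp_comp_decomp() above through the acomp API.
 * src and dst must be linearly mapped buffers.
 */
#include <linux/crypto.h>
#include <linux/scatterlist.h>
#include <crypto/acompress.h>

static int copy_compress_buffer(const void *src, unsigned int slen,
				void *dst, unsigned int dlen)
{
	struct scatterlist sg_src, sg_dst;
	struct crypto_acomp *tfm;
	struct acomp_req *req;
	DECLARE_CRYPTO_WAIT(wait);
	int ret;

	/* For an scomp-backed algorithm this lands in crypto_init_scomp_ops_async(). */
	tfm = crypto_alloc_acomp("copy", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = acomp_request_alloc(tfm);
	if (!req) {
		ret = -ENOMEM;
		goto out_tfm;
	}

	sg_init_one(&sg_src, src, slen);
	sg_init_one(&sg_dst, dst, dlen);
	acomp_request_set_params(req, &sg_src, &sg_dst, slen, dlen);
	acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   crypto_req_done, &wait);

	/*
	 * scompress.c serves this by copying through its per-CPU scratch
	 * buffers; on success req->dlen holds the number of bytes produced.
	 */
	ret = crypto_wait_req(crypto_acomp_compress(req), &wait);

	acomp_request_free(req);
out_tfm:
	crypto_free_acomp(tfm);
	return ret;
}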
Generated by: LCOV version 1.14