/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Cryptographic scatter and gather helpers.
 *
 * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
 * Copyright (c) 2002 Adam J. Richter <adam@yggdrasil.com>
 * Copyright (c) 2004 Jean-Luc Cooke <jlcooke@certainkey.com>
 * Copyright (c) 2007 Herbert Xu <herbert@gondor.apana.org.au>
 */

#ifndef _CRYPTO_SCATTERWALK_H
#define _CRYPTO_SCATTERWALK_H

#include <crypto/algapi.h>
#include <linux/highmem.h>
#include <linux/kernel.h>
#include <linux/scatterlist.h>

static inline void scatterwalk_crypto_chain(struct scatterlist *head,
					    struct scatterlist *sg, int num)
{
	if (sg)
		sg_chain(head, num, sg);
	else
		sg_mark_end(head);
}

static inline unsigned int scatterwalk_pagelen(struct scatter_walk *walk)
{
	unsigned int len = walk->sg->offset + walk->sg->length - walk->offset;
	unsigned int len_this_page = offset_in_page(~walk->offset) + 1;
	return len_this_page > len ? len : len_this_page;
}

static inline unsigned int scatterwalk_clamp(struct scatter_walk *walk,
					     unsigned int nbytes)
{
	unsigned int len_this_page = scatterwalk_pagelen(walk);
	return nbytes > len_this_page ? len_this_page : nbytes;
}

static inline void scatterwalk_advance(struct scatter_walk *walk,
				       unsigned int nbytes)
{
	walk->offset += nbytes;
}

static inline unsigned int scatterwalk_aligned(struct scatter_walk *walk,
					       unsigned int alignmask)
{
	return !(walk->offset & alignmask);
}

static inline struct page *scatterwalk_page(struct scatter_walk *walk)
{
	return sg_page(walk->sg) + (walk->offset >> PAGE_SHIFT);
}

static inline void scatterwalk_unmap(void *vaddr)
{
	kunmap_atomic(vaddr);
}

static inline void scatterwalk_start(struct scatter_walk *walk,
				     struct scatterlist *sg)
{
	walk->sg = sg;
	walk->offset = sg->offset;
}

static inline void *scatterwalk_map(struct scatter_walk *walk)
{
	return kmap_atomic(scatterwalk_page(walk)) +
	       offset_in_page(walk->offset);
}

static inline void scatterwalk_pagedone(struct scatter_walk *walk, int out,
					unsigned int more)
{
	if (out) {
		struct page *page;

		page = sg_page(walk->sg) + ((walk->offset - 1) >> PAGE_SHIFT);
		/* Test ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE first as
		 * PageSlab cannot be optimised away per se due to
		 * use of volatile pointer.
		 */
		if (ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE && !PageSlab(page))
			flush_dcache_page(page);
	}

	if (more && walk->offset >= walk->sg->offset + walk->sg->length)
		scatterwalk_start(walk, sg_next(walk->sg));
}

static inline void scatterwalk_done(struct scatter_walk *walk, int out,
				    int more)
{
	if (!more || walk->offset >= walk->sg->offset + walk->sg->length ||
	    !(walk->offset & (PAGE_SIZE - 1)))
		scatterwalk_pagedone(walk, out, more);
}

void scatterwalk_copychunks(void *buf, struct scatter_walk *walk,
			    size_t nbytes, int out);
void *scatterwalk_map(struct scatter_walk *walk);

void scatterwalk_map_and_copy(void *buf, struct scatterlist *sg,
			      unsigned int start, unsigned int nbytes, int out);

struct scatterlist *scatterwalk_ffwd(struct scatterlist dst[2],
				     struct scatterlist *src,
				     unsigned int len);

#endif /* _CRYPTO_SCATTERWALK_H */
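
A minimal usage sketch, not part of the header above: it shows how the inline helpers are typically combined to read bytes out of a scatterlist into a linear buffer, one mapped page segment at a time. The function name example_sg_to_buf() is hypothetical; real callers would normally use scatterwalk_map_and_copy() or scatterwalk_copychunks(), which implement this same pattern.

/*
 * Hypothetical example (not from the kernel tree): copy nbytes from the
 * scatterlist sg into the linear buffer buf using the scatter_walk helpers.
 */
#include <crypto/scatterwalk.h>
#include <linux/string.h>
#include <linux/types.h>

static void example_sg_to_buf(u8 *buf, struct scatterlist *sg,
			      unsigned int nbytes)
{
	struct scatter_walk walk;

	scatterwalk_start(&walk, sg);	/* position at the first sg entry */

	while (nbytes) {
		/* Never cross a page or scatterlist-entry boundary. */
		unsigned int len = scatterwalk_clamp(&walk, nbytes);
		/* kmap_atomic() the current page; code until unmap is atomic. */
		void *vaddr = scatterwalk_map(&walk);

		memcpy(buf, vaddr, len);
		scatterwalk_unmap(vaddr);

		scatterwalk_advance(&walk, len);
		buf += len;
		nbytes -= len;

		/*
		 * out = 0: we only read from the scatterlist, so no dcache
		 * flush is needed; advance to the next sg entry when the
		 * current one is exhausted and more data remains.
		 */
		scatterwalk_done(&walk, 0, nbytes);
	}
}

The same walk structure works for writing: pass out = 1 to scatterwalk_done() so that scatterwalk_pagedone() flushes the data cache of any page that was modified.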