/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Asynchronous Compression operations
 *
 * Copyright (c) 2016, Intel Corporation
 * Authors: Weigang Li <weigang.li@intel.com>
 *          Giovanni Cabiddu <giovanni.cabiddu@intel.com>
 */
#ifndef _CRYPTO_ACOMP_INT_H
#define _CRYPTO_ACOMP_INT_H

#include <crypto/acompress.h>
#include <crypto/algapi.h>
#include <crypto/scatterwalk.h>
#include <linux/compiler_types.h>
#include <linux/cpumask_types.h>
#include <linux/spinlock.h>
#include <linux/workqueue_types.h>

#define ACOMP_FBREQ_ON_STACK(name, req) \
	char __##name##_req[sizeof(struct acomp_req) + \
			    MAX_SYNC_COMP_REQSIZE] CRYPTO_MINALIGN_ATTR; \
	struct acomp_req *name = acomp_fbreq_on_stack_init( \
		__##name##_req, (req))
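
/*
 * Example (sketch, not part of this API's own code): a driver handing a
 * request it cannot process to its software fallback transform.  The
 * handler name example_compress() is hypothetical; ACOMP_FBREQ_ON_STACK()
 * and crypto_acomp_compress() are the real helpers.
 *
 *	static int example_compress(struct acomp_req *req)
 *	{
 *		ACOMP_FBREQ_ON_STACK(fbreq, req);
 *		int err;
 *
 *		err = crypto_acomp_compress(fbreq);
 *		req->dlen = fbreq->dlen;
 *		return err;
 *	}
 */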

/**
 * struct acomp_alg - asynchronous compression algorithm
 *
 * @compress:	Function performs a compress operation
 * @decompress:	Function performs a de-compress operation
 * @init:	Initialize the cryptographic transformation object.
 *		This function is used to initialize the cryptographic
 *		transformation object. This function is called only once at
 *		the instantiation time, right after the transformation context
 *		was allocated. In case the cryptographic hardware has some
 *		special requirements which need to be handled by software, this
 *		function shall check for the precise requirement of the
 *		transformation and put any software fallbacks in place.
 * @exit:	Deinitialize the cryptographic transformation object. This is a
 *		counterpart to @init, used to remove various changes set in
 *		@init.
 *
 * @base:	Common crypto API algorithm data structure
 * @calg:	Common algorithm data structure shared with scomp
 */
struct acomp_alg {
	int (*compress)(struct acomp_req *req);
	int (*decompress)(struct acomp_req *req);
	int (*init)(struct crypto_acomp *tfm);
	void (*exit)(struct crypto_acomp *tfm);

	union {
		struct COMP_ALG_COMMON;
		struct comp_alg_common calg;
	};
};
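
/*
 * Example (sketch): a minimal acomp_alg definition.  All identifiers below
 * are hypothetical except the field names, which are those of struct
 * acomp_alg and of the embedded struct crypto_alg @base.
 *
 *	static struct acomp_alg example_alg = {
 *		.compress	= example_compress,
 *		.decompress	= example_decompress,
 *		.init		= example_init,
 *		.exit		= example_exit,
 *		.base		= {
 *			.cra_name		= "deflate",
 *			.cra_driver_name	= "deflate-example",
 *			.cra_priority		= 100,
 *			.cra_module		= THIS_MODULE,
 *		},
 *	};
 */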

struct crypto_acomp_stream {
	spinlock_t lock;
	void *ctx;
};

struct crypto_acomp_streams {
	/* These must come first because of struct scomp_alg. */
	void *(*alloc_ctx)(void);
	void (*free_ctx)(void *);

	struct crypto_acomp_stream __percpu *streams;
	struct work_struct stream_work;
	cpumask_t stream_want;
};

struct acomp_walk {
	union {
		/* Virtual address of the source. */
		struct {
			struct {
				const void *const addr;
			} virt;
		} src;

		/* Private field for the API, do not use. */
		struct scatter_walk in;
	};

	union {
		/* Virtual address of the destination. */
		struct {
			struct {
				void *const addr;
			} virt;
		} dst;

		/* Private field for the API, do not use. */
		struct scatter_walk out;
	};

	unsigned int slen;
	unsigned int dlen;

	int flags;
};

/*
 * Transform internal helpers.
 */
static inline void *acomp_request_ctx(struct acomp_req *req)
{
	return req->__ctx;
}

static inline void *acomp_tfm_ctx(struct crypto_acomp *tfm)
{
	return tfm->base.__crt_ctx;
}

static inline void acomp_request_complete(struct acomp_req *req,
					  int err)
{
	crypto_request_complete(&req->base, err);
}

/**
 * crypto_register_acomp() -- Register asynchronous compression algorithm
 *
 * Function registers an implementation of an asynchronous
 * compression algorithm
 *
 * @alg:	algorithm definition
 *
 * Return:	zero on success; error code in case of error
 */
int crypto_register_acomp(struct acomp_alg *alg);

/**
 * crypto_unregister_acomp() -- Unregister asynchronous compression algorithm
 *
 * Function unregisters an implementation of an asynchronous
 * compression algorithm
 *
 * @alg:	algorithm definition
 */
void crypto_unregister_acomp(struct acomp_alg *alg);

int crypto_register_acomps(struct acomp_alg *algs, int count);
void crypto_unregister_acomps(struct acomp_alg *algs, int count);
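
/*
 * Example (sketch): registering and unregistering an algorithm such as the
 * example_alg above from module init/exit.  The function names are
 * hypothetical; crypto_register_acomp()/crypto_unregister_acomp() are the
 * real entry points.
 *
 *	static int __init example_mod_init(void)
 *	{
 *		return crypto_register_acomp(&example_alg);
 *	}
 *
 *	static void __exit example_mod_exit(void)
 *	{
 *		crypto_unregister_acomp(&example_alg);
 *	}
 */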

static inline bool acomp_request_issg(struct acomp_req *req)
{
	return !(req->base.flags & (CRYPTO_ACOMP_REQ_SRC_VIRT |
				    CRYPTO_ACOMP_REQ_DST_VIRT));
}

static inline bool acomp_request_src_isvirt(struct acomp_req *req)
{
	return req->base.flags & CRYPTO_ACOMP_REQ_SRC_VIRT;
}

static inline bool acomp_request_dst_isvirt(struct acomp_req *req)
{
	return req->base.flags & CRYPTO_ACOMP_REQ_DST_VIRT;
}

static inline bool acomp_request_isvirt(struct acomp_req *req)
{
	return req->base.flags & (CRYPTO_ACOMP_REQ_SRC_VIRT |
				  CRYPTO_ACOMP_REQ_DST_VIRT);
}

static inline bool acomp_request_src_isnondma(struct acomp_req *req)
{
	return req->base.flags & CRYPTO_ACOMP_REQ_SRC_NONDMA;
}

static inline bool acomp_request_dst_isnondma(struct acomp_req *req)
{
	return req->base.flags & CRYPTO_ACOMP_REQ_DST_NONDMA;
}

static inline bool acomp_request_isnondma(struct acomp_req *req)
{
	return req->base.flags & (CRYPTO_ACOMP_REQ_SRC_NONDMA |
				  CRYPTO_ACOMP_REQ_DST_NONDMA);
}
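
/*
 * Example (sketch): choosing a data path based on the request flags.  The
 * two example_*() helpers are hypothetical; a driver that only supports
 * scatterlists might instead route virtual-address requests through its
 * software fallback.
 *
 *	if (acomp_request_isvirt(req))
 *		return example_compress_linear(req);
 *	return example_compress_sg(req);
 */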

static inline bool crypto_acomp_req_virt(struct crypto_acomp *tfm)
{
	return crypto_tfm_req_virt(&tfm->base);
}

void crypto_acomp_free_streams(struct crypto_acomp_streams *s);
int crypto_acomp_alloc_streams(struct crypto_acomp_streams *s);

struct crypto_acomp_stream *crypto_acomp_lock_stream_bh(
	struct crypto_acomp_streams *s) __acquires(stream);

static inline void crypto_acomp_unlock_stream_bh(
	struct crypto_acomp_stream *stream) __releases(stream)
{
	spin_unlock_bh(&stream->lock);
}
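
/*
 * Example (sketch): using a per-CPU stream around one (de)compression
 * call.  The ctx->streams object and example_deflate() are hypothetical;
 * the lock/unlock helpers are the ones declared above.
 *
 *	struct crypto_acomp_stream *s;
 *	int err;
 *
 *	s = crypto_acomp_lock_stream_bh(&ctx->streams);
 *	err = example_deflate(s->ctx, req);
 *	crypto_acomp_unlock_stream_bh(s);
 */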

void acomp_walk_done_src(struct acomp_walk *walk, int used);
void acomp_walk_done_dst(struct acomp_walk *walk, int used);
int acomp_walk_next_src(struct acomp_walk *walk);
int acomp_walk_next_dst(struct acomp_walk *walk);
int acomp_walk_virt(struct acomp_walk *__restrict walk,
		    struct acomp_req *__restrict req, bool atomic);

static inline bool acomp_walk_more_src(const struct acomp_walk *walk, int cur)
{
	return walk->slen != cur;
}
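
/*
 * Example (rough sketch): walking a request with the helpers above, using
 * the virtual-address fields of struct acomp_walk.  example_step() is
 * hypothetical and stands in for the actual (de)compression step; it is
 * assumed to return the number of destination bytes it produced.
 *
 *	struct acomp_walk walk;
 *	int dcur, scur, used;
 *	int err;
 *
 *	err = acomp_walk_virt(&walk, req, false);
 *	while (!err && (dcur = acomp_walk_next_dst(&walk)) != 0) {
 *		scur = acomp_walk_next_src(&walk);
 *		used = example_step(walk.src.virt.addr, scur,
 *				    walk.dst.virt.addr, dcur);
 *		acomp_walk_done_src(&walk, scur);
 *		acomp_walk_done_dst(&walk, used);
 *	}
 */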

static inline u32 acomp_request_flags(struct acomp_req *req)
{
	return crypto_request_flags(&req->base) & ~CRYPTO_ACOMP_REQ_PRIVATE;
}

static inline struct crypto_acomp *crypto_acomp_fb(struct crypto_acomp *tfm)
{
	return __crypto_acomp_tfm(crypto_acomp_tfm(tfm)->fb);
}

static inline struct acomp_req *acomp_fbreq_on_stack_init(
	char *buf, struct acomp_req *old)
{
	struct crypto_acomp *tfm = crypto_acomp_reqtfm(old);
	struct acomp_req *req = (void *)buf;

	crypto_stack_request_init(&req->base,
				  crypto_acomp_tfm(crypto_acomp_fb(tfm)));
	acomp_request_set_callback(req, acomp_request_flags(old), NULL, NULL);
	req->base.flags &= ~CRYPTO_ACOMP_REQ_PRIVATE;
	req->base.flags |= old->base.flags & CRYPTO_ACOMP_REQ_PRIVATE;
	req->src = old->src;
	req->dst = old->dst;
	req->slen = old->slen;
	req->dlen = old->dlen;

	return req;
}

#endif