drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017-2022 Intel Corporation
 */

#include <rte_cryptodev.h>
#include <cryptodev_pmd.h>
#include "qat_sym_session.h"
#include "qat_sym.h"
#include "qat_asym.h"
#include "qat_crypto.h"
#include "qat_crypto_pmd_gens.h"

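/* Symmetric crypto capabilities exposed by GEN4 QAT devices. */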
static struct rte_cryptodev_capabilities qat_sym_crypto_caps_gen4[] = {
        QAT_SYM_CIPHER_CAP(AES_CBC,
                CAP_SET(block_size, 16),
                CAP_RNG(key_size, 16, 32, 8), CAP_RNG(iv_size, 16, 16, 0)),
        QAT_SYM_AUTH_CAP(SHA1_HMAC,
                CAP_SET(block_size, 64),
                CAP_RNG(key_size, 1, 64, 1), CAP_RNG(digest_size, 1, 20, 1),
                CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
        QAT_SYM_AUTH_CAP(SHA224_HMAC,
                CAP_SET(block_size, 64),
                CAP_RNG(key_size, 1, 64, 1), CAP_RNG(digest_size, 1, 28, 1),
                CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
        QAT_SYM_AUTH_CAP(SHA256_HMAC,
                CAP_SET(block_size, 64),
                CAP_RNG(key_size, 1, 64, 1), CAP_RNG(digest_size, 1, 32, 1),
                CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
        QAT_SYM_AUTH_CAP(SHA384_HMAC,
                CAP_SET(block_size, 128),
                CAP_RNG(key_size, 1, 128, 1), CAP_RNG(digest_size, 1, 48, 1),
                CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
        QAT_SYM_AUTH_CAP(SHA512_HMAC,
                CAP_SET(block_size, 128),
                CAP_RNG(key_size, 1, 128, 1), CAP_RNG(digest_size, 1, 64, 1),
                CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
        QAT_SYM_AUTH_CAP(AES_XCBC_MAC,
                CAP_SET(block_size, 16),
                CAP_RNG(key_size, 16, 16, 0), CAP_RNG(digest_size, 12, 12, 0),
                CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
        QAT_SYM_AUTH_CAP(AES_CMAC,
                CAP_SET(block_size, 16),
                CAP_RNG(key_size, 16, 16, 0), CAP_RNG(digest_size, 4, 16, 4),
                CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
        QAT_SYM_CIPHER_CAP(AES_DOCSISBPI,
                CAP_SET(block_size, 16),
                CAP_RNG(key_size, 16, 32, 16), CAP_RNG(iv_size, 16, 16, 0)),
        QAT_SYM_AUTH_CAP(NULL,
                CAP_SET(block_size, 1),
                CAP_RNG_ZERO(key_size), CAP_RNG_ZERO(digest_size),
                CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
        QAT_SYM_CIPHER_CAP(NULL,
                CAP_SET(block_size, 1),
                CAP_RNG_ZERO(key_size), CAP_RNG_ZERO(iv_size)),
        QAT_SYM_PLAIN_AUTH_CAP(SHA1,
                CAP_SET(block_size, 64),
                CAP_RNG(digest_size, 1, 20, 1)),
        QAT_SYM_AUTH_CAP(SHA224,
                CAP_SET(block_size, 64),
                CAP_RNG_ZERO(key_size), CAP_RNG(digest_size, 1, 28, 1),
                CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
        QAT_SYM_AUTH_CAP(SHA256,
                CAP_SET(block_size, 64),
                CAP_RNG_ZERO(key_size), CAP_RNG(digest_size, 1, 32, 1),
                CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
        QAT_SYM_AUTH_CAP(SHA384,
                CAP_SET(block_size, 128),
                CAP_RNG_ZERO(key_size), CAP_RNG(digest_size, 1, 48, 1),
                CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
        QAT_SYM_AUTH_CAP(SHA512,
                CAP_SET(block_size, 128),
                CAP_RNG_ZERO(key_size), CAP_RNG(digest_size, 1, 64, 1),
                CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
        QAT_SYM_CIPHER_CAP(AES_CTR,
                CAP_SET(block_size, 16),
                CAP_RNG(key_size, 16, 32, 8), CAP_RNG(iv_size, 16, 16, 0)),
        QAT_SYM_AEAD_CAP(AES_GCM,
                CAP_SET(block_size, 16),
                CAP_RNG(key_size, 16, 32, 8), CAP_RNG(digest_size, 8, 16, 4),
                CAP_RNG(aad_size, 0, 240, 1), CAP_RNG(iv_size, 0, 12, 12)),
        QAT_SYM_AEAD_CAP(AES_CCM,
                CAP_SET(block_size, 16),
                CAP_RNG(key_size, 16, 16, 0), CAP_RNG(digest_size, 4, 16, 2),
                CAP_RNG(aad_size, 0, 224, 1), CAP_RNG(iv_size, 7, 13, 1)),
        QAT_SYM_AUTH_CAP(AES_GMAC,
                CAP_SET(block_size, 16),
                CAP_RNG(key_size, 16, 32, 8), CAP_RNG(digest_size, 8, 16, 4),
                CAP_RNG_ZERO(aad_size), CAP_RNG(iv_size, 0, 12, 12)),
        QAT_SYM_AEAD_CAP(CHACHA20_POLY1305,
                CAP_SET(block_size, 64),
                CAP_RNG(key_size, 32, 32, 0),
                CAP_RNG(digest_size, 16, 16, 0),
                CAP_RNG(aad_size, 0, 240, 1), CAP_RNG(iv_size, 12, 12, 0)),
        RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
};

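/* Hand the GEN4 capability table and its size back to the common QAT code. */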
static struct qat_capabilities_info
qat_sym_crypto_cap_get_gen4(struct qat_pci_device *qat_dev __rte_unused)
{
        struct qat_capabilities_info capa_info;
        capa_info.data = qat_sym_crypto_caps_gen4;
        capa_info.size = sizeof(qat_sym_crypto_caps_gen4);
        return capa_info;
}

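/* Fill the AEAD fields of a firmware request. For single-pass UCS sessions
 * the operation is described through the GEN4 cipher request parameters,
 * with the AAD and digest addresses set directly; otherwise fall back to
 * the GEN1 AEAD path.
 */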
static __rte_always_inline void
enqueue_one_aead_job_gen4(struct qat_sym_session *ctx,
        struct icp_qat_fw_la_bulk_req *req,
        struct rte_crypto_va_iova_ptr *iv,
        struct rte_crypto_va_iova_ptr *digest,
        struct rte_crypto_va_iova_ptr *aad,
        union rte_crypto_sym_ofs ofs, uint32_t data_len)
{
        if (ctx->is_single_pass && ctx->is_ucs) {
                struct icp_qat_fw_la_cipher_20_req_params *cipher_param_20 =
                        (void *)&req->serv_specif_rqpars;
                struct icp_qat_fw_la_cipher_req_params *cipher_param =
                        (void *)&req->serv_specif_rqpars;

                /* QAT GEN4 uses single-pass mode to treat AEAD as a cipher
                 * operation
                 */
                qat_set_cipher_iv(cipher_param, iv, ctx->cipher_iv.length,
                                req);
                cipher_param->cipher_offset = ofs.ofs.cipher.head;
                cipher_param->cipher_length = data_len -
                                ofs.ofs.cipher.head - ofs.ofs.cipher.tail;

                cipher_param_20->spc_aad_addr = aad->iova;
                cipher_param_20->spc_auth_res_addr = digest->iova;

                return;
        }

        enqueue_one_aead_job_gen1(ctx, req, iv, digest, aad, ofs, data_len);
}

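/* Build one AEAD firmware request from an rte_crypto_op: convert the op to
 * vectors, set the data/SGL fields and complete the request via
 * enqueue_one_aead_job_gen4().
 */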
static int
qat_sym_build_op_aead_gen4(void *in_op, struct qat_sym_session *ctx,
                uint8_t *out_msg, void *op_cookie)
{
        register struct icp_qat_fw_la_bulk_req *qat_req;
        struct rte_crypto_op *op = in_op;
        struct qat_sym_op_cookie *cookie = op_cookie;
        struct rte_crypto_sgl in_sgl, out_sgl;
        struct rte_crypto_vec in_vec[QAT_SYM_SGL_MAX_NUMBER],
                        out_vec[QAT_SYM_SGL_MAX_NUMBER];
        struct rte_crypto_va_iova_ptr cipher_iv;
        struct rte_crypto_va_iova_ptr aad;
        struct rte_crypto_va_iova_ptr digest;
        union rte_crypto_sym_ofs ofs;
        int32_t total_len;

        in_sgl.vec = in_vec;
        out_sgl.vec = out_vec;

        qat_req = (struct icp_qat_fw_la_bulk_req *)out_msg;
        rte_mov128((uint8_t *)qat_req, (const uint8_t *)&(ctx->fw_req));

        ofs.raw = qat_sym_convert_op_to_vec_aead(op, ctx, &in_sgl, &out_sgl,
                        &cipher_iv, &aad, &digest);
        if (unlikely(ofs.raw == UINT64_MAX)) {
                op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
                return -EINVAL;
        }

        total_len = qat_sym_build_req_set_data(qat_req, in_op, cookie,
                        in_sgl.vec, in_sgl.num, out_sgl.vec, out_sgl.num);
        if (unlikely(total_len < 0)) {
                op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
                return -EINVAL;
        }

        enqueue_one_aead_job_gen4(ctx, qat_req, &cipher_iv, &digest, &aad, ofs,
                total_len);

#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
        qat_sym_debug_log_dump(qat_req, ctx, in_sgl.vec, in_sgl.num, &cipher_iv,
                        NULL, &aad, &digest);
#endif

        return 0;
}

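/* GEN4 session setup: run the GEN1 setup first, then override the request
 * builder for single-pass UCS AEAD sessions and handle the mixed-algorithm
 * cases that GEN1 rejects.
 */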
static int
qat_sym_crypto_set_session_gen4(void *cdev, void *session)
{
        struct qat_sym_session *ctx = session;
        enum rte_proc_type_t proc_type = rte_eal_process_type();
        int ret;

        if (proc_type == RTE_PROC_AUTO || proc_type == RTE_PROC_INVALID)
                return -EINVAL;

        ret = qat_sym_crypto_set_session_gen1(cdev, session);
        /* special single-pass build request for GEN4 */
        if (ctx->is_single_pass && ctx->is_ucs)
                ctx->build_request[proc_type] = qat_sym_build_op_aead_gen4;

        if (ret == -ENOTSUP) {
                /* GEN1 returns -ENOTSUP because it cannot handle some mixed
                 * algorithm combinations; GEN4 supports them, so set the
                 * extended hash flags and clear the error here.
                 */
                if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3 &&
                                ctx->qat_cipher_alg !=
                                ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
                        qat_sym_session_set_ext_hash_flags_gen2(ctx,
                                1 << ICP_QAT_FW_AUTH_HDR_FLAG_ZUC_EIA3_BITPOS);
                } else if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 &&
                                ctx->qat_cipher_alg !=
                                ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
                        qat_sym_session_set_ext_hash_flags_gen2(ctx,
                                1 << ICP_QAT_FW_AUTH_HDR_FLAG_SNOW3G_UIA2_BITPOS);
                } else if ((ctx->aes_cmac ||
                                ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL) &&
                                (ctx->qat_cipher_alg ==
                                ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
                                ctx->qat_cipher_alg ==
                                ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3)) {
                        qat_sym_session_set_ext_hash_flags_gen2(ctx, 0);
                }

                ret = 0;
        }

        return ret;
}

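/* Raw data-path handler: enqueue a single AEAD job at the current tail of
 * the TX queue.
 */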
static int
qat_sym_dp_enqueue_single_aead_gen4(void *qp_data, uint8_t *drv_ctx,
        struct rte_crypto_vec *data, uint16_t n_data_vecs,
        union rte_crypto_sym_ofs ofs,
        struct rte_crypto_va_iova_ptr *iv,
        struct rte_crypto_va_iova_ptr *digest,
        struct rte_crypto_va_iova_ptr *aad,
        void *user_data)
{
        struct qat_qp *qp = qp_data;
        struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
        struct qat_queue *tx_queue = &qp->tx_q;
        struct qat_sym_op_cookie *cookie;
        struct qat_sym_session *ctx = dp_ctx->session;
        struct icp_qat_fw_la_bulk_req *req;

        int32_t data_len;
        uint32_t tail = dp_ctx->tail;

        req = (struct icp_qat_fw_la_bulk_req *)(
                (uint8_t *)tx_queue->base_addr + tail);
        cookie = qp->op_cookies[tail >> tx_queue->trailz];
        tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
        rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
        rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
        data_len = qat_sym_build_req_set_data(req, user_data, cookie,
                        data, n_data_vecs, NULL, 0);
        if (unlikely(data_len < 0))
                return -1;

        enqueue_one_aead_job_gen4(ctx, req, iv, digest, aad, ofs,
                (uint32_t)data_len);

        dp_ctx->tail = tail;
        dp_ctx->cached_enqueue++;

#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
        qat_sym_debug_log_dump(req, ctx, data, n_data_vecs, iv,
                        NULL, aad, digest);
#endif
        return 0;
}

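/* Raw data-path handler: enqueue a burst of AEAD jobs described by 'vec';
 * returns the number of jobs actually placed on the queue.
 */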
static uint32_t
qat_sym_dp_enqueue_aead_jobs_gen4(void *qp_data, uint8_t *drv_ctx,
        struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
        void *user_data[], int *status)
{
        struct qat_qp *qp = qp_data;
        struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
        struct qat_queue *tx_queue = &qp->tx_q;
        struct qat_sym_session *ctx = dp_ctx->session;
        uint32_t i, n;
        uint32_t tail;
        struct icp_qat_fw_la_bulk_req *req;
        int32_t data_len;

        n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
        if (unlikely(n == 0)) {
                qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
                *status = 0;
                return 0;
        }

        tail = dp_ctx->tail;

        for (i = 0; i < n; i++) {
                struct qat_sym_op_cookie *cookie =
                        qp->op_cookies[tail >> tx_queue->trailz];

                req = (struct icp_qat_fw_la_bulk_req *)(
                        (uint8_t *)tx_queue->base_addr + tail);
                rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));

                if (vec->dest_sgl) {
                        data_len = qat_sym_build_req_set_data(req,
                                user_data[i], cookie,
                                vec->src_sgl[i].vec, vec->src_sgl[i].num,
                                vec->dest_sgl[i].vec, vec->dest_sgl[i].num);
                } else {
                        data_len = qat_sym_build_req_set_data(req,
                                user_data[i], cookie,
                                vec->src_sgl[i].vec,
                                vec->src_sgl[i].num, NULL, 0);
                }

                if (unlikely(data_len < 0))
                        break;

                enqueue_one_aead_job_gen4(ctx, req, &vec->iv[i],
                                &vec->digest[i], &vec->aad[i], ofs,
                                (uint32_t)data_len);

                tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;

#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
                qat_sym_debug_log_dump(req, ctx, vec->src_sgl[i].vec,
                                vec->src_sgl[i].num, &vec->iv[i], NULL,
                                &vec->aad[i], &vec->digest[i]);
#endif
        }

        if (unlikely(i < n))
                qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);

        dp_ctx->tail = tail;
        dp_ctx->cached_enqueue += i;
        *status = 0;
        return i;
}

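/* Configure the raw data-path context: start from the GEN1 setup and
 * override the AEAD enqueue callbacks for single-pass UCS sessions.
 */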
static int
qat_sym_configure_raw_dp_ctx_gen4(void *_raw_dp_ctx, void *_ctx)
{
        struct rte_crypto_raw_dp_ctx *raw_dp_ctx = _raw_dp_ctx;
        struct qat_sym_session *ctx = _ctx;
        int ret;

        ret = qat_sym_configure_raw_dp_ctx_gen1(_raw_dp_ctx, _ctx);
        if (ret < 0)
                return ret;

        if (ctx->is_single_pass && ctx->is_ucs) {
                raw_dp_ctx->enqueue_burst = qat_sym_dp_enqueue_aead_jobs_gen4;
                raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_aead_gen4;
        }

        return 0;
}

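/* Register the GEN4 symmetric crypto device ops, reusing GEN1 handlers
 * where no GEN4-specific behaviour is needed.
 */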
RTE_INIT(qat_sym_crypto_gen4_init)
{
        qat_sym_gen_dev_ops[QAT_GEN4].cryptodev_ops = &qat_sym_crypto_ops_gen1;
        qat_sym_gen_dev_ops[QAT_GEN4].get_capabilities =
                        qat_sym_crypto_cap_get_gen4;
        qat_sym_gen_dev_ops[QAT_GEN4].set_session =
                        qat_sym_crypto_set_session_gen4;
        qat_sym_gen_dev_ops[QAT_GEN4].set_raw_dp_ctx =
                        qat_sym_configure_raw_dp_ctx_gen4;
        qat_sym_gen_dev_ops[QAT_GEN4].get_feature_flags =
                        qat_sym_crypto_feature_flags_get_gen1;
#ifdef RTE_LIB_SECURITY
        qat_sym_gen_dev_ops[QAT_GEN4].create_security_ctx =
                        qat_sym_create_security_gen1;
#endif
}

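/* Register the GEN4 asymmetric crypto device ops, all reused from GEN1. */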
RTE_INIT(qat_asym_crypto_gen4_init)
{
        qat_asym_gen_dev_ops[QAT_GEN4].cryptodev_ops =
                        &qat_asym_crypto_ops_gen1;
        qat_asym_gen_dev_ops[QAT_GEN4].get_capabilities =
                        qat_asym_crypto_cap_get_gen1;
        qat_asym_gen_dev_ops[QAT_GEN4].get_feature_flags =
                        qat_asym_crypto_feature_flags_get_gen1;
        qat_asym_gen_dev_ops[QAT_GEN4].set_session =
                        qat_asym_crypto_set_session_gen1;
}