drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017-2022 Intel Corporation
 */

#include <rte_cryptodev.h>
#ifdef RTE_LIB_SECURITY
#include <rte_security_driver.h>
#endif

#include "adf_transport_access_macros.h"
#include "icp_qat_fw.h"
#include "icp_qat_fw_la.h"

#include "qat_sym.h"
#include "qat_sym_session.h"
#include "qat_crypto.h"
#include "qat_crypto_pmd_gens.h"

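/*
 * Symmetric crypto capabilities advertised by GEN1 QAT devices: the
 * supported AEAD, authentication and cipher algorithms together with their
 * key, digest, AAD and IV size ranges.
 */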
static struct rte_cryptodev_capabilities qat_sym_crypto_caps_gen1[] = {
        QAT_SYM_PLAIN_AUTH_CAP(SHA1,
                CAP_SET(block_size, 64),
                CAP_RNG(digest_size, 1, 20, 1)),
        QAT_SYM_AEAD_CAP(AES_GCM,
                CAP_SET(block_size, 16),
                CAP_RNG(key_size, 16, 32, 8), CAP_RNG(digest_size, 8, 16, 4),
                CAP_RNG(aad_size, 0, 240, 1), CAP_RNG(iv_size, 0, 12, 12)),
        QAT_SYM_AEAD_CAP(AES_CCM,
                CAP_SET(block_size, 16),
                CAP_RNG(key_size, 16, 16, 0), CAP_RNG(digest_size, 4, 16, 2),
                CAP_RNG(aad_size, 0, 224, 1), CAP_RNG(iv_size, 7, 13, 1)),
        QAT_SYM_AUTH_CAP(AES_GMAC,
                CAP_SET(block_size, 16),
                CAP_RNG(key_size, 16, 32, 8), CAP_RNG(digest_size, 8, 16, 4),
                CAP_RNG_ZERO(aad_size), CAP_RNG(iv_size, 0, 12, 12)),
        QAT_SYM_AUTH_CAP(AES_CMAC,
                CAP_SET(block_size, 16),
                CAP_RNG(key_size, 16, 16, 0), CAP_RNG(digest_size, 4, 16, 4),
                CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
        QAT_SYM_AUTH_CAP(SHA224,
                CAP_SET(block_size, 64),
                CAP_RNG_ZERO(key_size), CAP_RNG(digest_size, 1, 28, 1),
                CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
        QAT_SYM_AUTH_CAP(SHA256,
                CAP_SET(block_size, 64),
                CAP_RNG_ZERO(key_size), CAP_RNG(digest_size, 1, 32, 1),
                CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
        QAT_SYM_AUTH_CAP(SHA384,
                CAP_SET(block_size, 128),
                CAP_RNG_ZERO(key_size), CAP_RNG(digest_size, 1, 48, 1),
                CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
        QAT_SYM_AUTH_CAP(SHA512,
                CAP_SET(block_size, 128),
                CAP_RNG_ZERO(key_size), CAP_RNG(digest_size, 1, 64, 1),
                CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
        QAT_SYM_AUTH_CAP(SHA1_HMAC,
                CAP_SET(block_size, 64),
                CAP_RNG(key_size, 1, 64, 1), CAP_RNG(digest_size, 1, 20, 1),
                CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
        QAT_SYM_AUTH_CAP(SHA224_HMAC,
                CAP_SET(block_size, 64),
                CAP_RNG(key_size, 1, 64, 1), CAP_RNG(digest_size, 1, 28, 1),
                CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
        QAT_SYM_AUTH_CAP(SHA256_HMAC,
                CAP_SET(block_size, 64),
                CAP_RNG(key_size, 1, 64, 1), CAP_RNG(digest_size, 1, 32, 1),
                CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
        QAT_SYM_AUTH_CAP(SHA384_HMAC,
                CAP_SET(block_size, 128),
                CAP_RNG(key_size, 1, 128, 1), CAP_RNG(digest_size, 1, 48, 1),
                CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
        QAT_SYM_AUTH_CAP(SHA512_HMAC,
                CAP_SET(block_size, 128),
                CAP_RNG(key_size, 1, 128, 1), CAP_RNG(digest_size, 1, 64, 1),
                CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
        QAT_SYM_AUTH_CAP(MD5_HMAC,
                CAP_SET(block_size, 64),
                CAP_RNG(key_size, 1, 64, 1), CAP_RNG(digest_size, 1, 16, 1),
                CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
        QAT_SYM_AUTH_CAP(AES_XCBC_MAC,
                CAP_SET(block_size, 16),
                CAP_RNG(key_size, 16, 16, 0), CAP_RNG(digest_size, 12, 12, 0),
                CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
        QAT_SYM_AUTH_CAP(SNOW3G_UIA2,
                CAP_SET(block_size, 16),
                CAP_RNG(key_size, 16, 16, 0), CAP_RNG(digest_size, 4, 4, 0),
                CAP_RNG_ZERO(aad_size), CAP_RNG(iv_size, 16, 16, 0)),
        QAT_SYM_AUTH_CAP(KASUMI_F9,
                CAP_SET(block_size, 8),
                CAP_RNG(key_size, 16, 16, 0), CAP_RNG(digest_size, 4, 4, 0),
                CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
        QAT_SYM_AUTH_CAP(NULL,
                CAP_SET(block_size, 1),
                CAP_RNG_ZERO(key_size), CAP_RNG_ZERO(digest_size),
                CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
        QAT_SYM_CIPHER_CAP(AES_CBC,
                CAP_SET(block_size, 16),
                CAP_RNG(key_size, 16, 32, 8), CAP_RNG(iv_size, 16, 16, 0)),
        QAT_SYM_CIPHER_CAP(AES_CTR,
                CAP_SET(block_size, 16),
                CAP_RNG(key_size, 16, 32, 8), CAP_RNG(iv_size, 16, 16, 0)),
        QAT_SYM_CIPHER_CAP(AES_XTS,
                CAP_SET(block_size, 16),
                CAP_RNG(key_size, 32, 64, 32), CAP_RNG(iv_size, 16, 16, 0)),
        QAT_SYM_CIPHER_CAP(AES_DOCSISBPI,
                CAP_SET(block_size, 16),
                CAP_RNG(key_size, 16, 32, 16), CAP_RNG(iv_size, 16, 16, 0)),
        QAT_SYM_CIPHER_CAP(SNOW3G_UEA2,
                CAP_SET(block_size, 16),
                CAP_RNG(key_size, 16, 16, 0), CAP_RNG(iv_size, 16, 16, 0)),
        QAT_SYM_CIPHER_CAP(KASUMI_F8,
                CAP_SET(block_size, 8),
                CAP_RNG(key_size, 16, 16, 0), CAP_RNG(iv_size, 8, 8, 0)),
        QAT_SYM_CIPHER_CAP(NULL,
                CAP_SET(block_size, 1),
                CAP_RNG_ZERO(key_size), CAP_RNG_ZERO(iv_size)),
        QAT_SYM_CIPHER_CAP(3DES_CBC,
                CAP_SET(block_size, 8),
                CAP_RNG(key_size, 8, 24, 8), CAP_RNG(iv_size, 8, 8, 0)),
        QAT_SYM_CIPHER_CAP(3DES_CTR,
                CAP_SET(block_size, 8),
                CAP_RNG(key_size, 16, 24, 8), CAP_RNG(iv_size, 8, 8, 0)),
        QAT_SYM_CIPHER_CAP(DES_CBC,
                CAP_SET(block_size, 8),
                CAP_RNG(key_size, 8, 24, 8), CAP_RNG(iv_size, 8, 8, 0)),
        QAT_SYM_CIPHER_CAP(DES_DOCSISBPI,
                CAP_SET(block_size, 8),
                CAP_RNG(key_size, 8, 8, 0), CAP_RNG(iv_size, 8, 8, 0)),
        RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
};

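/*
 * Cryptodev operations exported by the GEN1 symmetric crypto PMD. Device,
 * queue pair, statistics and session handling are delegated to the common
 * QAT cryptodev layer.
 */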
struct rte_cryptodev_ops qat_sym_crypto_ops_gen1 = {

        /* Device related operations */
        .dev_configure          = qat_cryptodev_config,
        .dev_start              = qat_cryptodev_start,
        .dev_stop               = qat_cryptodev_stop,
        .dev_close              = qat_cryptodev_close,
        .dev_infos_get          = qat_cryptodev_info_get,

        .stats_get              = qat_cryptodev_stats_get,
        .stats_reset            = qat_cryptodev_stats_reset,
        .queue_pair_setup       = qat_cryptodev_qp_setup,
        .queue_pair_release     = qat_cryptodev_qp_release,

        /* Crypto related operations */
        .sym_session_get_size   = qat_sym_session_get_private_size,
        .sym_session_configure  = qat_sym_session_configure,
        .sym_session_clear      = qat_sym_session_clear,

        /* Raw data-path API related operations */
        .sym_get_raw_dp_ctx_size = qat_sym_get_dp_ctx_size,
        .sym_configure_raw_dp_ctx = qat_sym_configure_dp_ctx,
};

static struct qat_capabilities_info
qat_sym_crypto_cap_get_gen1(struct qat_pci_device *qat_dev __rte_unused)
{
        struct qat_capabilities_info capa_info;
        capa_info.data = qat_sym_crypto_caps_gen1;
        capa_info.size = sizeof(qat_sym_crypto_caps_gen1);
        return capa_info;
}

uint64_t
qat_sym_crypto_feature_flags_get_gen1(
        struct qat_pci_device *qat_dev __rte_unused)
{
        uint64_t feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
                        RTE_CRYPTODEV_FF_HW_ACCELERATED |
                        RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
                        RTE_CRYPTODEV_FF_IN_PLACE_SGL |
                        RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT |
                        RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
                        RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT |
                        RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT |
                        RTE_CRYPTODEV_FF_DIGEST_ENCRYPTED |
                        RTE_CRYPTODEV_FF_SYM_RAW_DP;

        return feature_flags;
}

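/*
 * Build a cipher-only firmware request for one rte_crypto_op: copy the
 * session's request template into the TX ring slot, convert the op into
 * SGL/IV vectors and fill in the cipher job fields.
 */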
int
qat_sym_build_op_cipher_gen1(void *in_op, struct qat_sym_session *ctx,
                uint8_t *out_msg, void *op_cookie)
{
        register struct icp_qat_fw_la_bulk_req *req;
        struct rte_crypto_op *op = in_op;
        struct qat_sym_op_cookie *cookie = op_cookie;
        struct rte_crypto_sgl in_sgl, out_sgl;
        struct rte_crypto_vec in_vec[QAT_SYM_SGL_MAX_NUMBER],
                        out_vec[QAT_SYM_SGL_MAX_NUMBER];
        struct rte_crypto_va_iova_ptr cipher_iv;
        union rte_crypto_sym_ofs ofs;
        int32_t total_len;

        in_sgl.vec = in_vec;
        out_sgl.vec = out_vec;

        req = (struct icp_qat_fw_la_bulk_req *)out_msg;
        rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));

        ofs.raw = qat_sym_convert_op_to_vec_cipher(op, ctx, &in_sgl, &out_sgl,
                        &cipher_iv, NULL, NULL);
        if (unlikely(ofs.raw == UINT64_MAX)) {
                op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
                return -EINVAL;
        }

        total_len = qat_sym_build_req_set_data(req, in_op, cookie,
                        in_sgl.vec, in_sgl.num, out_sgl.vec, out_sgl.num);
        if (unlikely(total_len < 0)) {
                op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
                return -EINVAL;
        }

        enqueue_one_cipher_job_gen1(ctx, req, &cipher_iv, ofs, total_len);

#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
        qat_sym_debug_log_dump(req, ctx, in_sgl.vec, in_sgl.num, &cipher_iv,
                        NULL, NULL, NULL);
#endif

        return 0;
}

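/* Build an authentication-only firmware request for one rte_crypto_op. */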
int
qat_sym_build_op_auth_gen1(void *in_op, struct qat_sym_session *ctx,
                uint8_t *out_msg, void *op_cookie)
{
        register struct icp_qat_fw_la_bulk_req *req;
        struct rte_crypto_op *op = in_op;
        struct qat_sym_op_cookie *cookie = op_cookie;
        struct rte_crypto_sgl in_sgl, out_sgl;
        struct rte_crypto_vec in_vec[QAT_SYM_SGL_MAX_NUMBER],
                        out_vec[QAT_SYM_SGL_MAX_NUMBER];
        struct rte_crypto_va_iova_ptr auth_iv;
        struct rte_crypto_va_iova_ptr digest;
        union rte_crypto_sym_ofs ofs;
        int32_t total_len;

        in_sgl.vec = in_vec;
        out_sgl.vec = out_vec;

        req = (struct icp_qat_fw_la_bulk_req *)out_msg;
        rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));

        ofs.raw = qat_sym_convert_op_to_vec_auth(op, ctx, &in_sgl, &out_sgl,
                        NULL, &auth_iv, &digest);
        if (unlikely(ofs.raw == UINT64_MAX)) {
                op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
                return -EINVAL;
        }

        total_len = qat_sym_build_req_set_data(req, in_op, cookie,
                        in_sgl.vec, in_sgl.num, out_sgl.vec, out_sgl.num);
        if (unlikely(total_len < 0)) {
                op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
                return -EINVAL;
        }

        enqueue_one_auth_job_gen1(ctx, req, &digest, &auth_iv, ofs,
                        total_len);

#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
        qat_sym_debug_log_dump(req, ctx, in_sgl.vec, in_sgl.num, NULL,
                        &auth_iv, NULL, &digest);
#endif

        return 0;
}

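/* Build an AEAD (e.g. AES-GCM/CCM) firmware request for one rte_crypto_op. */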
int
qat_sym_build_op_aead_gen1(void *in_op, struct qat_sym_session *ctx,
                uint8_t *out_msg, void *op_cookie)
{
        register struct icp_qat_fw_la_bulk_req *req;
        struct rte_crypto_op *op = in_op;
        struct qat_sym_op_cookie *cookie = op_cookie;
        struct rte_crypto_sgl in_sgl, out_sgl;
        struct rte_crypto_vec in_vec[QAT_SYM_SGL_MAX_NUMBER],
                        out_vec[QAT_SYM_SGL_MAX_NUMBER];
        struct rte_crypto_va_iova_ptr cipher_iv;
        struct rte_crypto_va_iova_ptr aad;
        struct rte_crypto_va_iova_ptr digest;
        union rte_crypto_sym_ofs ofs;
        int32_t total_len;

        in_sgl.vec = in_vec;
        out_sgl.vec = out_vec;

        req = (struct icp_qat_fw_la_bulk_req *)out_msg;
        rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));

        ofs.raw = qat_sym_convert_op_to_vec_aead(op, ctx, &in_sgl, &out_sgl,
                        &cipher_iv, &aad, &digest);
        if (unlikely(ofs.raw == UINT64_MAX)) {
                op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
                return -EINVAL;
        }

        total_len = qat_sym_build_req_set_data(req, in_op, cookie,
                        in_sgl.vec, in_sgl.num, out_sgl.vec, out_sgl.num);
        if (unlikely(total_len < 0)) {
                op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
                return -EINVAL;
        }

        enqueue_one_aead_job_gen1(ctx, req, &cipher_iv, &digest, &aad, ofs,
                total_len);

#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
        qat_sym_debug_log_dump(req, ctx, in_sgl.vec, in_sgl.num, &cipher_iv,
                        NULL, &aad, &digest);
#endif

        return 0;
}

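/* Build a chained cipher+auth firmware request for one rte_crypto_op. */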
int
qat_sym_build_op_chain_gen1(void *in_op, struct qat_sym_session *ctx,
                uint8_t *out_msg, void *op_cookie)
{
        register struct icp_qat_fw_la_bulk_req *req;
        struct rte_crypto_op *op = in_op;
        struct qat_sym_op_cookie *cookie = op_cookie;
        struct rte_crypto_sgl in_sgl = {0}, out_sgl = {0};
        struct rte_crypto_vec in_vec[QAT_SYM_SGL_MAX_NUMBER],
                        out_vec[QAT_SYM_SGL_MAX_NUMBER];
        struct rte_crypto_va_iova_ptr cipher_iv;
        struct rte_crypto_va_iova_ptr auth_iv;
        struct rte_crypto_va_iova_ptr digest;
        union rte_crypto_sym_ofs ofs;
        int32_t total_len;

        in_sgl.vec = in_vec;
        out_sgl.vec = out_vec;

        req = (struct icp_qat_fw_la_bulk_req *)out_msg;
        rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));

        ofs.raw = qat_sym_convert_op_to_vec_chain(op, ctx, &in_sgl, &out_sgl,
                        &cipher_iv, &auth_iv, &digest);
        if (unlikely(ofs.raw == UINT64_MAX)) {
                op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
                return -EINVAL;
        }

        total_len = qat_sym_build_req_set_data(req, in_op, cookie,
                        in_sgl.vec, in_sgl.num, out_sgl.vec, out_sgl.num);
        if (unlikely(total_len < 0)) {
                op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
                return -EINVAL;
        }

        enqueue_one_chain_job_gen1(ctx, req, in_sgl.vec, in_sgl.num,
                        out_sgl.vec, out_sgl.num, &cipher_iv, &digest, &auth_iv,
                        ofs, total_len);

#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
        qat_sym_debug_log_dump(req, ctx, in_sgl.vec, in_sgl.num, &cipher_iv,
                        &auth_iv, NULL, &digest);
#endif

        return 0;
}

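/*
 * rte_security support for GEN1 devices: DOCSIS lookaside-protocol
 * capabilities, security ops and security context creation.
 */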
#ifdef RTE_LIB_SECURITY

#define QAT_SECURITY_SYM_CAPABILITIES                                   \
        {       /* AES DOCSIS BPI */                                    \
                .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,                     \
                {.sym = {                                               \
                        .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,      \
                        {.cipher = {                                    \
                                .algo = RTE_CRYPTO_CIPHER_AES_DOCSISBPI,\
                                .block_size = 16,                       \
                                .key_size = {                           \
                                        .min = 16,                      \
                                        .max = 32,                      \
                                        .increment = 16                 \
                                },                                      \
                                .iv_size = {                            \
                                        .min = 16,                      \
                                        .max = 16,                      \
                                        .increment = 0                  \
                                }                                       \
                        }, }                                            \
                }, }                                                    \
        }

#define QAT_SECURITY_CAPABILITIES(sym)                                  \
        [0] = { /* DOCSIS Uplink */                                     \
                .action = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,  \
                .protocol = RTE_SECURITY_PROTOCOL_DOCSIS,               \
                .docsis = {                                             \
                        .direction = RTE_SECURITY_DOCSIS_UPLINK         \
                },                                                      \
                .crypto_capabilities = (sym)                            \
        },                                                              \
        [1] = { /* DOCSIS Downlink */                                   \
                .action = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,  \
                .protocol = RTE_SECURITY_PROTOCOL_DOCSIS,               \
                .docsis = {                                             \
                        .direction = RTE_SECURITY_DOCSIS_DOWNLINK       \
                },                                                      \
                .crypto_capabilities = (sym)                            \
        }

static const struct rte_cryptodev_capabilities
                                        qat_security_sym_capabilities[] = {
        QAT_SECURITY_SYM_CAPABILITIES,
        RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
};

static const struct rte_security_capability qat_security_capabilities_gen1[] = {
        QAT_SECURITY_CAPABILITIES(qat_security_sym_capabilities),
        {
                .action = RTE_SECURITY_ACTION_TYPE_NONE
        }
};

static const struct rte_security_capability *
qat_security_cap_get_gen1(void *dev __rte_unused)
{
        return qat_security_capabilities_gen1;
}

struct rte_security_ops security_qat_ops_gen1 = {
                .session_create = qat_security_session_create,
                .session_update = NULL,
                .session_stats_get = NULL,
                .session_destroy = qat_security_session_destroy,
                .set_pkt_metadata = NULL,
                .capabilities_get = qat_security_cap_get_gen1
};

void *
qat_sym_create_security_gen1(void *cryptodev)
{
        struct rte_security_ctx *security_instance;

        security_instance = rte_malloc(NULL, sizeof(struct rte_security_ctx),
                        RTE_CACHE_LINE_SIZE);
        if (security_instance == NULL)
                return NULL;

        security_instance->device = cryptodev;
        security_instance->ops = &security_qat_ops_gen1;
        security_instance->sess_cnt = 0;

        return (void *)security_instance;
}

#endif
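
/*
 * Raw data-path (rte_cryptodev raw API) enqueue helpers. Each helper copies
 * the session's request template into the next TX ring slot, attaches the
 * source/destination data and per-op parameters, and advances the cached
 * ring tail; the tail CSR is only written later, in
 * qat_sym_dp_enqueue_done_gen1().
 */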
int
qat_sym_dp_enqueue_single_cipher_gen1(void *qp_data, uint8_t *drv_ctx,
        struct rte_crypto_vec *data, uint16_t n_data_vecs,
        union rte_crypto_sym_ofs ofs,
        struct rte_crypto_va_iova_ptr *iv,
        struct rte_crypto_va_iova_ptr *digest __rte_unused,
        struct rte_crypto_va_iova_ptr *aad __rte_unused,
        void *user_data)
{
        struct qat_qp *qp = qp_data;
        struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
        struct qat_queue *tx_queue = &qp->tx_q;
        struct qat_sym_session *ctx = dp_ctx->session;
        struct qat_sym_op_cookie *cookie;
        struct icp_qat_fw_la_bulk_req *req;
        int32_t data_len;
        uint32_t tail = dp_ctx->tail;

        req = (struct icp_qat_fw_la_bulk_req *)(
                (uint8_t *)tx_queue->base_addr + tail);
        cookie = qp->op_cookies[tail >> tx_queue->trailz];
        tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
        rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
        rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);

        data_len = qat_sym_build_req_set_data(req, user_data, cookie,
                        data, n_data_vecs, NULL, 0);
        if (unlikely(data_len < 0))
                return -1;

        enqueue_one_cipher_job_gen1(ctx, req, iv, ofs, (uint32_t)data_len);

#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
        qat_sym_debug_log_dump(req, ctx, data, n_data_vecs, iv,
                        NULL, NULL, NULL);
#endif

        dp_ctx->tail = tail;
        dp_ctx->cached_enqueue++;

        return 0;
}

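/*
 * Enqueue a burst of cipher-only jobs described by an rte_crypto_sym_vec.
 * The auth, chain and AEAD burst variants below follow the same pattern.
 */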
uint32_t
qat_sym_dp_enqueue_cipher_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
        struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
        void *user_data[], int *status)
{
        struct qat_qp *qp = qp_data;
        struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
        struct qat_queue *tx_queue = &qp->tx_q;
        struct qat_sym_session *ctx = dp_ctx->session;
        uint32_t i, n;
        uint32_t tail;
        struct icp_qat_fw_la_bulk_req *req;
        int32_t data_len;

        n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
        if (unlikely(n == 0)) {
                qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
                *status = 0;
                return 0;
        }

        tail = dp_ctx->tail;

        for (i = 0; i < n; i++) {
                struct qat_sym_op_cookie *cookie =
                        qp->op_cookies[tail >> tx_queue->trailz];

                req  = (struct icp_qat_fw_la_bulk_req *)(
                        (uint8_t *)tx_queue->base_addr + tail);
                rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));

                if (vec->dest_sgl) {
                        data_len = qat_sym_build_req_set_data(req,
                                user_data[i], cookie,
                                vec->src_sgl[i].vec, vec->src_sgl[i].num,
                                vec->dest_sgl[i].vec, vec->dest_sgl[i].num);
                } else {
                        data_len = qat_sym_build_req_set_data(req,
                                user_data[i], cookie,
                                vec->src_sgl[i].vec,
                                vec->src_sgl[i].num, NULL, 0);
                }

                if (unlikely(data_len < 0))
                        break;
                enqueue_one_cipher_job_gen1(ctx, req, &vec->iv[i], ofs,
                        (uint32_t)data_len);
                tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;

#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
                qat_sym_debug_log_dump(req, ctx, vec->src_sgl[i].vec,
                                vec->src_sgl[i].num, &vec->iv[i],
                                NULL, NULL, NULL);
#endif
        }

        if (unlikely(i < n))
                qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);

        dp_ctx->tail = tail;
        dp_ctx->cached_enqueue += i;
        *status = 0;
        return i;
}

int
qat_sym_dp_enqueue_single_auth_gen1(void *qp_data, uint8_t *drv_ctx,
        struct rte_crypto_vec *data, uint16_t n_data_vecs,
        union rte_crypto_sym_ofs ofs,
        struct rte_crypto_va_iova_ptr *iv __rte_unused,
        struct rte_crypto_va_iova_ptr *digest,
        struct rte_crypto_va_iova_ptr *auth_iv,
        void *user_data)
{
        struct qat_qp *qp = qp_data;
        struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
        struct qat_queue *tx_queue = &qp->tx_q;
        struct qat_sym_op_cookie *cookie;
        struct qat_sym_session *ctx = dp_ctx->session;
        struct icp_qat_fw_la_bulk_req *req;
        int32_t data_len;
        uint32_t tail = dp_ctx->tail;

        req = (struct icp_qat_fw_la_bulk_req *)(
                (uint8_t *)tx_queue->base_addr + tail);
        cookie = qp->op_cookies[tail >> tx_queue->trailz];
        tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;

        rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
        rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
        data_len = qat_sym_build_req_set_data(req, user_data, cookie,
                        data, n_data_vecs, NULL, 0);
        if (unlikely(data_len < 0))
                return -1;

        enqueue_one_auth_job_gen1(ctx, req, digest, auth_iv, ofs,
                        (uint32_t)data_len);

        dp_ctx->tail = tail;
        dp_ctx->cached_enqueue++;

#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
        qat_sym_debug_log_dump(req, ctx, data, n_data_vecs, NULL,
                        auth_iv, NULL, digest);
#endif
        return 0;
}

uint32_t
qat_sym_dp_enqueue_auth_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
        struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
        void *user_data[], int *status)
{
        struct qat_qp *qp = qp_data;
        struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
        struct qat_queue *tx_queue = &qp->tx_q;
        struct qat_sym_session *ctx = dp_ctx->session;
        uint32_t i, n;
        uint32_t tail;
        struct icp_qat_fw_la_bulk_req *req;
        int32_t data_len;

        n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
        if (unlikely(n == 0)) {
                qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
                *status = 0;
                return 0;
        }

        tail = dp_ctx->tail;

        for (i = 0; i < n; i++) {
                struct qat_sym_op_cookie *cookie =
                        qp->op_cookies[tail >> tx_queue->trailz];

                req  = (struct icp_qat_fw_la_bulk_req *)(
                        (uint8_t *)tx_queue->base_addr + tail);
                rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));

                if (vec->dest_sgl) {
                        data_len = qat_sym_build_req_set_data(req,
                                user_data[i], cookie,
                                vec->src_sgl[i].vec, vec->src_sgl[i].num,
                                vec->dest_sgl[i].vec, vec->dest_sgl[i].num);
                } else {
                        data_len = qat_sym_build_req_set_data(req,
                                user_data[i], cookie,
                                vec->src_sgl[i].vec,
                                vec->src_sgl[i].num, NULL, 0);
                }

                if (unlikely(data_len < 0))
                        break;
                enqueue_one_auth_job_gen1(ctx, req, &vec->digest[i],
                        &vec->auth_iv[i], ofs, (uint32_t)data_len);
                tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;

#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
                qat_sym_debug_log_dump(req, ctx, vec->src_sgl[i].vec,
                                vec->src_sgl[i].num, NULL, &vec->auth_iv[i],
                                NULL, &vec->digest[i]);
#endif
        }

        if (unlikely(i < n))
                qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);

        dp_ctx->tail = tail;
        dp_ctx->cached_enqueue += i;
        *status = 0;
        return i;
}

int
qat_sym_dp_enqueue_single_chain_gen1(void *qp_data, uint8_t *drv_ctx,
        struct rte_crypto_vec *data, uint16_t n_data_vecs,
        union rte_crypto_sym_ofs ofs,
        struct rte_crypto_va_iova_ptr *cipher_iv,
        struct rte_crypto_va_iova_ptr *digest,
        struct rte_crypto_va_iova_ptr *auth_iv,
        void *user_data)
{
        struct qat_qp *qp = qp_data;
        struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
        struct qat_queue *tx_queue = &qp->tx_q;
        struct qat_sym_op_cookie *cookie;
        struct qat_sym_session *ctx = dp_ctx->session;
        struct icp_qat_fw_la_bulk_req *req;
        int32_t data_len;
        uint32_t tail = dp_ctx->tail;

        req = (struct icp_qat_fw_la_bulk_req *)(
                (uint8_t *)tx_queue->base_addr + tail);
        cookie = qp->op_cookies[tail >> tx_queue->trailz];
        tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
        rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
        rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
        data_len = qat_sym_build_req_set_data(req, user_data, cookie,
                        data, n_data_vecs, NULL, 0);
        if (unlikely(data_len < 0))
                return -1;

        if (unlikely(enqueue_one_chain_job_gen1(ctx, req, data, n_data_vecs,
                        NULL, 0, cipher_iv, digest, auth_iv, ofs,
                        (uint32_t)data_len)))
                return -1;

        dp_ctx->tail = tail;
        dp_ctx->cached_enqueue++;

#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
        qat_sym_debug_log_dump(req, ctx, data, n_data_vecs, cipher_iv,
                        auth_iv, NULL, digest);
#endif
        return 0;
}

uint32_t
qat_sym_dp_enqueue_chain_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
        struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
        void *user_data[], int *status)
{
        struct qat_qp *qp = qp_data;
        struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
        struct qat_queue *tx_queue = &qp->tx_q;
        struct qat_sym_session *ctx = dp_ctx->session;
        uint32_t i, n;
        uint32_t tail;
        struct icp_qat_fw_la_bulk_req *req;
        int32_t data_len;

        n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
        if (unlikely(n == 0)) {
                qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
                *status = 0;
                return 0;
        }

        tail = dp_ctx->tail;

        for (i = 0; i < n; i++) {
                struct qat_sym_op_cookie *cookie =
                        qp->op_cookies[tail >> tx_queue->trailz];

                req  = (struct icp_qat_fw_la_bulk_req *)(
                        (uint8_t *)tx_queue->base_addr + tail);
                rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));

                if (vec->dest_sgl) {
                        data_len = qat_sym_build_req_set_data(req,
                                user_data[i], cookie,
                                vec->src_sgl[i].vec, vec->src_sgl[i].num,
                                vec->dest_sgl[i].vec, vec->dest_sgl[i].num);
                } else {
                        data_len = qat_sym_build_req_set_data(req,
                                user_data[i], cookie,
                                vec->src_sgl[i].vec,
                                vec->src_sgl[i].num, NULL, 0);
                }

                if (unlikely(data_len < 0))
                        break;

                if (unlikely(enqueue_one_chain_job_gen1(ctx, req,
                                vec->src_sgl[i].vec, vec->src_sgl[i].num,
                                NULL, 0,
                                &vec->iv[i], &vec->digest[i],
                                &vec->auth_iv[i], ofs, (uint32_t)data_len)))
                        break;

                tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;

#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
                qat_sym_debug_log_dump(req, ctx, vec->src_sgl[i].vec,
                                vec->src_sgl[i].num, &vec->iv[i],
                                &vec->auth_iv[i],
                                NULL, &vec->digest[i]);
#endif
        }

        if (unlikely(i < n))
                qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);

        dp_ctx->tail = tail;
        dp_ctx->cached_enqueue += i;
        *status = 0;
        return i;
}

int
qat_sym_dp_enqueue_single_aead_gen1(void *qp_data, uint8_t *drv_ctx,
        struct rte_crypto_vec *data, uint16_t n_data_vecs,
        union rte_crypto_sym_ofs ofs,
        struct rte_crypto_va_iova_ptr *iv,
        struct rte_crypto_va_iova_ptr *digest,
        struct rte_crypto_va_iova_ptr *aad,
        void *user_data)
{
        struct qat_qp *qp = qp_data;
        struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
        struct qat_queue *tx_queue = &qp->tx_q;
        struct qat_sym_op_cookie *cookie;
        struct qat_sym_session *ctx = dp_ctx->session;
        struct icp_qat_fw_la_bulk_req *req;

        int32_t data_len;
        uint32_t tail = dp_ctx->tail;

        req = (struct icp_qat_fw_la_bulk_req *)(
                (uint8_t *)tx_queue->base_addr + tail);
        cookie = qp->op_cookies[tail >> tx_queue->trailz];
        tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
        rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
        rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
        data_len = qat_sym_build_req_set_data(req, user_data, cookie,
                        data, n_data_vecs, NULL, 0);
        if (unlikely(data_len < 0))
                return -1;

        enqueue_one_aead_job_gen1(ctx, req, iv, digest, aad, ofs,
                (uint32_t)data_len);

        dp_ctx->tail = tail;
        dp_ctx->cached_enqueue++;

#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
        qat_sym_debug_log_dump(req, ctx, data, n_data_vecs, iv,
                        NULL, aad, digest);
#endif
        return 0;
}

uint32_t
qat_sym_dp_enqueue_aead_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
        struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
        void *user_data[], int *status)
{
        struct qat_qp *qp = qp_data;
        struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
        struct qat_queue *tx_queue = &qp->tx_q;
        struct qat_sym_session *ctx = dp_ctx->session;
        uint32_t i, n;
        uint32_t tail;
        struct icp_qat_fw_la_bulk_req *req;
        int32_t data_len;

        n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
        if (unlikely(n == 0)) {
                qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
                *status = 0;
                return 0;
        }

        tail = dp_ctx->tail;

        for (i = 0; i < n; i++) {
                struct qat_sym_op_cookie *cookie =
                        qp->op_cookies[tail >> tx_queue->trailz];

                req  = (struct icp_qat_fw_la_bulk_req *)(
                        (uint8_t *)tx_queue->base_addr + tail);
                rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));

                if (vec->dest_sgl) {
                        data_len = qat_sym_build_req_set_data(req,
                                user_data[i], cookie,
                                vec->src_sgl[i].vec, vec->src_sgl[i].num,
                                vec->dest_sgl[i].vec, vec->dest_sgl[i].num);
                } else {
                        data_len = qat_sym_build_req_set_data(req,
                                user_data[i], cookie,
                                vec->src_sgl[i].vec,
                                vec->src_sgl[i].num, NULL, 0);
                }

                if (unlikely(data_len < 0))
                        break;

                enqueue_one_aead_job_gen1(ctx, req, &vec->iv[i],
                                &vec->digest[i], &vec->aad[i], ofs,
                                (uint32_t)data_len);

                tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;

#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
                qat_sym_debug_log_dump(req, ctx, vec->src_sgl[i].vec,
                                vec->src_sgl[i].num, &vec->iv[i], NULL,
                                &vec->aad[i], &vec->digest[i]);
#endif
        }

        if (unlikely(i < n))
                qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);

        dp_ctx->tail = tail;
        dp_ctx->cached_enqueue += i;
        *status = 0;
        return i;
}

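/*
 * Dequeue up to max_nb_to_dequeue (or get_dequeue_count()) responses from
 * the RX ring, invoking post_dequeue() for each one and counting the
 * successful jobs.
 */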
uint32_t
qat_sym_dp_dequeue_burst_gen1(void *qp_data, uint8_t *drv_ctx,
        rte_cryptodev_raw_get_dequeue_count_t get_dequeue_count,
        uint32_t max_nb_to_dequeue,
        rte_cryptodev_raw_post_dequeue_t post_dequeue,
        void **out_user_data, uint8_t is_user_data_array,
        uint32_t *n_success_jobs, int *return_status)
{
        struct qat_qp *qp = qp_data;
        struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
        struct qat_queue *rx_queue = &qp->rx_q;
        struct icp_qat_fw_comn_resp *resp;
        void *resp_opaque;
        uint32_t i, n, inflight;
        uint32_t head;
        uint8_t status;

        *n_success_jobs = 0;
        *return_status = 0;
        head = dp_ctx->head;

        inflight = qp->enqueued - qp->dequeued;
        if (unlikely(inflight == 0))
                return 0;

        resp = (struct icp_qat_fw_comn_resp *)((uint8_t *)rx_queue->base_addr +
                        head);
        /* no operation ready */
        if (unlikely(*(uint32_t *)resp == ADF_RING_EMPTY_SIG))
                return 0;

        resp_opaque = (void *)(uintptr_t)resp->opaque_data;
        /* get the dequeue count */
        if (get_dequeue_count) {
                n = get_dequeue_count(resp_opaque);
                if (unlikely(n == 0))
                        return 0;
        } else {
                if (unlikely(max_nb_to_dequeue == 0))
                        return 0;
                n = max_nb_to_dequeue;
        }

        out_user_data[0] = resp_opaque;
        status = QAT_SYM_DP_IS_RESP_SUCCESS(resp);
        post_dequeue(resp_opaque, 0, status);
        *n_success_jobs += status;

        head = (head + rx_queue->msg_size) & rx_queue->modulo_mask;

        /* we already finished dequeue when n == 1 */
        if (unlikely(n == 1)) {
                i = 1;
                goto end_deq;
        }

        if (is_user_data_array) {
                for (i = 1; i < n; i++) {
                        resp = (struct icp_qat_fw_comn_resp *)(
                                (uint8_t *)rx_queue->base_addr + head);
                        if (unlikely(*(uint32_t *)resp ==
                                        ADF_RING_EMPTY_SIG))
                                goto end_deq;
                        out_user_data[i] = (void *)(uintptr_t)resp->opaque_data;
                        status = QAT_SYM_DP_IS_RESP_SUCCESS(resp);
                        *n_success_jobs += status;
                        post_dequeue(out_user_data[i], i, status);
                        head = (head + rx_queue->msg_size) &
                                        rx_queue->modulo_mask;
                }

                goto end_deq;
        }

        /* opaque is not array */
        for (i = 1; i < n; i++) {
                resp = (struct icp_qat_fw_comn_resp *)(
                        (uint8_t *)rx_queue->base_addr + head);
                status = QAT_SYM_DP_IS_RESP_SUCCESS(resp);
                if (unlikely(*(uint32_t *)resp == ADF_RING_EMPTY_SIG))
                        goto end_deq;
                head = (head + rx_queue->msg_size) &
                                rx_queue->modulo_mask;
                post_dequeue(resp_opaque, i, status);
                *n_success_jobs += status;
        }

end_deq:
        dp_ctx->head = head;
        dp_ctx->cached_dequeue += i;
        return i;
}

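/*
 * Dequeue a single response; returns its opaque user data, or NULL if the
 * RX ring is empty.
 */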
void *
qat_sym_dp_dequeue_single_gen1(void *qp_data, uint8_t *drv_ctx,
        int *dequeue_status, enum rte_crypto_op_status *op_status)
{
        struct qat_qp *qp = qp_data;
        struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
        struct qat_queue *rx_queue = &qp->rx_q;
        register struct icp_qat_fw_comn_resp *resp;

        resp = (struct icp_qat_fw_comn_resp *)((uint8_t *)rx_queue->base_addr +
                        dp_ctx->head);

        if (unlikely(*(uint32_t *)resp == ADF_RING_EMPTY_SIG))
                return NULL;

        dp_ctx->head = (dp_ctx->head + rx_queue->msg_size) &
                        rx_queue->modulo_mask;
        dp_ctx->cached_dequeue++;

        *op_status = QAT_SYM_DP_IS_RESP_SUCCESS(resp) ?
                        RTE_CRYPTO_OP_STATUS_SUCCESS :
                        RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
        *dequeue_status = 0;
        return (void *)(uintptr_t)resp->opaque_data;
}

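/*
 * Commit n previously cached enqueues: update the queue statistics and
 * write the TX ring tail CSR so the hardware starts processing the new
 * requests.
 */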
int
qat_sym_dp_enqueue_done_gen1(void *qp_data, uint8_t *drv_ctx, uint32_t n)
{
        struct qat_qp *qp = qp_data;
        struct qat_queue *tx_queue = &qp->tx_q;
        struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;

        if (unlikely(dp_ctx->cached_enqueue != n))
                return -1;

        qp->enqueued += n;
        qp->stats.enqueued_count += n;

        tx_queue->tail = dp_ctx->tail;

        WRITE_CSR_RING_TAIL(qp->mmap_bar_addr,
                        tx_queue->hw_bundle_number,
                        tx_queue->hw_queue_number, tx_queue->tail);
        tx_queue->csr_tail = tx_queue->tail;
        dp_ctx->cached_enqueue = 0;

        return 0;
}

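/*
 * Commit n previously cached dequeues: mark the consumed response slots as
 * empty and advance the RX head CSR once enough responses have accumulated.
 */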
int
qat_sym_dp_dequeue_done_gen1(void *qp_data, uint8_t *drv_ctx, uint32_t n)
{
        struct qat_qp *qp = qp_data;
        struct qat_queue *rx_queue = &qp->rx_q;
        struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;

        if (unlikely(dp_ctx->cached_dequeue != n))
                return -1;

        rx_queue->head = dp_ctx->head;
        rx_queue->nb_processed_responses += n;
        qp->dequeued += n;
        qp->stats.dequeued_count += n;
        if (rx_queue->nb_processed_responses > QAT_CSR_HEAD_WRITE_THRESH) {
                uint32_t old_head, new_head;
                uint32_t max_head;

                old_head = rx_queue->csr_head;
                new_head = rx_queue->head;
                max_head = qp->nb_descriptors * rx_queue->msg_size;

                /* write out free descriptors */
                void *cur_desc = (uint8_t *)rx_queue->base_addr + old_head;

                if (new_head < old_head) {
                        memset(cur_desc, ADF_RING_EMPTY_SIG_BYTE,
                                        max_head - old_head);
                        memset(rx_queue->base_addr, ADF_RING_EMPTY_SIG_BYTE,
                                        new_head);
                } else {
                        memset(cur_desc, ADF_RING_EMPTY_SIG_BYTE, new_head -
                                        old_head);
                }
                rx_queue->nb_processed_responses = 0;
                rx_queue->csr_head = new_head;

                /* write current head to CSR */
                WRITE_CSR_RING_HEAD(qp->mmap_bar_addr,
                        rx_queue->hw_bundle_number, rx_queue->hw_queue_number,
                        new_head);
        }

        dp_ctx->cached_dequeue = 0;
        return 0;
}

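/*
 * Populate a raw data-path context with the GEN1 enqueue/dequeue callbacks
 * matching the session's command type (AEAD, chained, auth-only or
 * cipher-only).
 */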
int
qat_sym_configure_raw_dp_ctx_gen1(void *_raw_dp_ctx, void *_ctx)
{
        struct rte_crypto_raw_dp_ctx *raw_dp_ctx = _raw_dp_ctx;
        struct qat_sym_session *ctx = _ctx;

        raw_dp_ctx->enqueue_done = qat_sym_dp_enqueue_done_gen1;
        raw_dp_ctx->dequeue_burst = qat_sym_dp_dequeue_burst_gen1;
        raw_dp_ctx->dequeue = qat_sym_dp_dequeue_single_gen1;
        raw_dp_ctx->dequeue_done = qat_sym_dp_dequeue_done_gen1;

        if ((ctx->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER ||
                        ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) &&
                        !ctx->is_gmac) {
                /* AES-GCM or AES-CCM */
                if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
                        ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64 ||
                        (ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_AES128
                        && ctx->qat_mode == ICP_QAT_HW_CIPHER_CTR_MODE
                        && ctx->qat_hash_alg ==
                                        ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC)) {
                        raw_dp_ctx->enqueue_burst =
                                        qat_sym_dp_enqueue_aead_jobs_gen1;
                        raw_dp_ctx->enqueue =
                                        qat_sym_dp_enqueue_single_aead_gen1;
                } else {
                        raw_dp_ctx->enqueue_burst =
                                        qat_sym_dp_enqueue_chain_jobs_gen1;
                        raw_dp_ctx->enqueue =
                                        qat_sym_dp_enqueue_single_chain_gen1;
                }
        } else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH || ctx->is_gmac) {
                raw_dp_ctx->enqueue_burst = qat_sym_dp_enqueue_auth_jobs_gen1;
                raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_auth_gen1;
        } else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) {
                if (ctx->qat_mode == ICP_QAT_HW_CIPHER_AEAD_MODE ||
                        ctx->qat_cipher_alg ==
                                ICP_QAT_HW_CIPHER_ALGO_CHACHA20_POLY1305) {
                        raw_dp_ctx->enqueue_burst =
                                        qat_sym_dp_enqueue_aead_jobs_gen1;
                        raw_dp_ctx->enqueue =
                                        qat_sym_dp_enqueue_single_aead_gen1;
                } else {
                        raw_dp_ctx->enqueue_burst =
                                        qat_sym_dp_enqueue_cipher_jobs_gen1;
                        raw_dp_ctx->enqueue =
                                        qat_sym_dp_enqueue_single_cipher_gen1;
                }
        } else
                return -1;

        return 0;
}

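/*
 * Select the per-process request-build function for a session based on its
 * command type, and reject mixed cipher/hash algorithm combinations that
 * GEN1 hardware does not support.
 */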
int
qat_sym_crypto_set_session_gen1(void *cryptodev __rte_unused, void *session)
{
        struct qat_sym_session *ctx = session;
        qat_sym_build_request_t build_request = NULL;
        enum rte_proc_type_t proc_type = rte_eal_process_type();
        int handle_mixed = 0;

        if (proc_type == RTE_PROC_AUTO || proc_type == RTE_PROC_INVALID)
                return -EINVAL;

        if ((ctx->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER ||
                        ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) &&
                        !ctx->is_gmac) {
                /* AES-GCM or AES-CCM */
                if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
                        ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64 ||
                        (ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_AES128
                        && ctx->qat_mode == ICP_QAT_HW_CIPHER_CTR_MODE
                        && ctx->qat_hash_alg ==
                                        ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC)) {
                        /* do_aead = 1; */
                        build_request = qat_sym_build_op_aead_gen1;
                } else {
                        /* do_auth = 1; do_cipher = 1; */
                        build_request = qat_sym_build_op_chain_gen1;
                        handle_mixed = 1;
                }
        } else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH || ctx->is_gmac) {
                /* do_auth = 1; do_cipher = 0; */
                build_request = qat_sym_build_op_auth_gen1;
        } else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) {
                /* do_auth = 0; do_cipher = 1; */
                build_request = qat_sym_build_op_cipher_gen1;
        }

        if (build_request)
                ctx->build_request[proc_type] = build_request;
        else
                return -EINVAL;

        /* no more work if not mixed op */
        if (!handle_mixed)
                return 0;

        /* Check for unsupported algorithm combinations when mixed */
        if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3 &&
                        ctx->qat_cipher_alg !=
                        ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
                return -ENOTSUP;
        } else if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 &&
                        ctx->qat_cipher_alg !=
                        ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
                return -ENOTSUP;
        } else if ((ctx->aes_cmac ||
                        ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL) &&
                        (ctx->qat_cipher_alg ==
                        ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
                        ctx->qat_cipher_alg ==
                        ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3)) {
                return -ENOTSUP;
        }

        return 0;
}

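/* Register the GEN1 implementation in the per-generation dispatch table. */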
RTE_INIT(qat_sym_crypto_gen1_init)
{
        qat_sym_gen_dev_ops[QAT_GEN1].cryptodev_ops = &qat_sym_crypto_ops_gen1;
        qat_sym_gen_dev_ops[QAT_GEN1].get_capabilities =
                        qat_sym_crypto_cap_get_gen1;
        qat_sym_gen_dev_ops[QAT_GEN1].set_session =
                        qat_sym_crypto_set_session_gen1;
        qat_sym_gen_dev_ops[QAT_GEN1].set_raw_dp_ctx =
                        qat_sym_configure_raw_dp_ctx_gen1;
        qat_sym_gen_dev_ops[QAT_GEN1].get_feature_flags =
                        qat_sym_crypto_feature_flags_get_gen1;
#ifdef RTE_LIB_SECURITY
        qat_sym_gen_dev_ops[QAT_GEN1].create_security_ctx =
                        qat_sym_create_security_gen1;
#endif
}