/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2020 Intel Corporation
 */

#include <rte_cryptodev_pmd.h>

#include "adf_transport_access_macros.h"
#include "icp_qat_fw.h"
#include "icp_qat_fw_la.h"

#include "qat_sym.h"
#include "qat_sym_pmd.h"
#include "qat_sym_session.h"
#include "qat_qp.h"

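/*
 * Driver-private context stored in the rte_crypto_raw_dp_ctx drv_ctx_data
 * area. tail/head shadow the TX/RX ring pointers so a burst of raw
 * enqueues/dequeues can be built up locally before being committed to
 * hardware; cached_enqueue/cached_dequeue count the jobs accumulated since
 * the last enqueue_done/dequeue_done call.
 */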
struct qat_sym_dp_ctx {
	struct qat_sym_session *session;
	uint32_t tail;
	uint32_t head;
	uint16_t cached_enqueue;
	uint16_t cached_dequeue;
};

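/*
 * Set up the request's data pointers from the caller-supplied vectors.
 * A single flat buffer is referenced by address and length directly;
 * multiple vectors are translated into the QAT SGL kept in the per-slot
 * operation cookie. Returns the total data length, or -1 on invalid input.
 */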
static __rte_always_inline int32_t
qat_sym_dp_parse_data_vec(struct qat_qp *qp, struct icp_qat_fw_la_bulk_req *req,
		struct rte_crypto_vec *data, uint16_t n_data_vecs)
{
	struct qat_queue *tx_queue;
	struct qat_sym_op_cookie *cookie;
	struct qat_sgl *list;
	uint32_t i;
	uint32_t total_len;

	if (likely(n_data_vecs == 1)) {
		req->comn_mid.src_data_addr = req->comn_mid.dest_data_addr =
			data[0].iova;
		req->comn_mid.src_length = req->comn_mid.dst_length =
			data[0].len;
		return data[0].len;
	}

	if (n_data_vecs == 0 || n_data_vecs > QAT_SYM_SGL_MAX_NUMBER)
		return -1;

	total_len = 0;
	tx_queue = &qp->tx_q;

	ICP_QAT_FW_COMN_PTR_TYPE_SET(req->comn_hdr.comn_req_flags,
			QAT_COMN_PTR_TYPE_SGL);
	cookie = qp->op_cookies[tx_queue->tail >> tx_queue->trailz];
	list = (struct qat_sgl *)&cookie->qat_sgl_src;

	for (i = 0; i < n_data_vecs; i++) {
		list->buffers[i].len = data[i].len;
		list->buffers[i].resrvd = 0;
		list->buffers[i].addr = data[i].iova;
		if (total_len + data[i].len > UINT32_MAX) {
			QAT_DP_LOG(ERR, "Message too long");
			return -1;
		}
		total_len += data[i].len;
	}

	list->num_bufs = i;
	req->comn_mid.src_data_addr = req->comn_mid.dest_data_addr =
			cookie->qat_sgl_src_phys_addr;
	req->comn_mid.src_length = req->comn_mid.dst_length = 0;

	return total_len;
}

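/*
 * Set the cipher IV: a small IV is embedded into the request descriptor
 * itself, a larger one is referenced by IOVA with the 64-bit pointer flag
 * set in the request header.
 */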
static __rte_always_inline void
set_cipher_iv(struct icp_qat_fw_la_cipher_req_params *cipher_param,
		struct rte_crypto_va_iova_ptr *iv_ptr, uint32_t iv_len,
		struct icp_qat_fw_la_bulk_req *qat_req)
{
	/* copy IV into request if it fits */
	if (iv_len <= sizeof(cipher_param->u.cipher_IV_array))
		rte_memcpy(cipher_param->u.cipher_IV_array, iv_ptr->va,
				iv_len);
	else {
		ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(
				qat_req->comn_hdr.serv_specif_flags,
				ICP_QAT_FW_CIPH_IV_64BIT_PTR);
		cipher_param->u.s.cipher_IV_ptr = iv_ptr->iova;
	}
}

#define QAT_SYM_DP_IS_RESP_SUCCESS(resp) \
	(ICP_QAT_FW_COMN_STATUS_FLAG_OK == \
	ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(resp->comn_hdr.comn_status))

static __rte_always_inline void
qat_sym_dp_fill_vec_status(int32_t *sta, int status, uint32_t n)
{
	uint32_t i;

	for (i = 0; i < n; i++)
		sta[i] = status;
}

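/*
 * Number of jobs that can still be enqueued on queue pair q given c jobs
 * already cached locally, capped at the n jobs requested.
 */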
#define QAT_SYM_DP_GET_MAX_ENQ(q, c, n) \
	RTE_MIN((q->max_inflights - q->enqueued + q->dequeued - c), n)

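/*
 * Fill one AEAD request. GCM needs only the IV copied in and the AAD
 * address set; CCM additionally builds the B0 block and the AAD length
 * field inside the AAD buffer, padding it to the block size as the spec
 * requires.
 */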
static __rte_always_inline void
enqueue_one_aead_job(struct qat_sym_session *ctx,
	struct icp_qat_fw_la_bulk_req *req,
	struct rte_crypto_va_iova_ptr *iv,
	struct rte_crypto_va_iova_ptr *digest,
	struct rte_crypto_va_iova_ptr *aad,
	union rte_crypto_sym_ofs ofs, uint32_t data_len)
{
	struct icp_qat_fw_la_cipher_req_params *cipher_param =
		(void *)&req->serv_specif_rqpars;
	struct icp_qat_fw_la_auth_req_params *auth_param =
		(void *)((uint8_t *)&req->serv_specif_rqpars +
		ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
	uint8_t *aad_data;
	uint8_t aad_ccm_real_len;
	uint8_t aad_len_field_sz;
	uint32_t msg_len_be;
	rte_iova_t aad_iova = 0;
	uint8_t q;

	switch (ctx->qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
	case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
		ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
			req->comn_hdr.serv_specif_flags,
			ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
		rte_memcpy(cipher_param->u.cipher_IV_array, iv->va,
				ctx->cipher_iv.length);
		aad_iova = aad->iova;
		break;
	case ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC:
		aad_data = aad->va;
		aad_iova = aad->iova;
		aad_ccm_real_len = 0;
		aad_len_field_sz = 0;
		msg_len_be = rte_bswap32((uint32_t)data_len -
				ofs.ofs.cipher.head);

		if (ctx->aad_len > ICP_QAT_HW_CCM_AAD_DATA_OFFSET) {
			aad_len_field_sz = ICP_QAT_HW_CCM_AAD_LEN_INFO;
			aad_ccm_real_len = ctx->aad_len -
				ICP_QAT_HW_CCM_AAD_B0_LEN -
				ICP_QAT_HW_CCM_AAD_LEN_INFO;
		} else {
			aad_data = iv->va;
			aad_iova = iv->iova;
		}

		q = ICP_QAT_HW_CCM_NQ_CONST - ctx->cipher_iv.length;
		aad_data[0] = ICP_QAT_HW_CCM_BUILD_B0_FLAGS(
			aad_len_field_sz, ctx->digest_length, q);
		if (q > ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE) {
			memcpy(aad_data + ctx->cipher_iv.length +
				ICP_QAT_HW_CCM_NONCE_OFFSET + (q -
				ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE),
				(uint8_t *)&msg_len_be,
				ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE);
		} else {
			memcpy(aad_data + ctx->cipher_iv.length +
				ICP_QAT_HW_CCM_NONCE_OFFSET,
				(uint8_t *)&msg_len_be +
				(ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE
				- q), q);
		}

		if (aad_len_field_sz > 0) {
			*(uint16_t *)&aad_data[ICP_QAT_HW_CCM_AAD_B0_LEN] =
				rte_bswap16(aad_ccm_real_len);

			if ((aad_ccm_real_len + aad_len_field_sz)
					% ICP_QAT_HW_CCM_AAD_B0_LEN) {
				uint8_t pad_len = 0;
				uint8_t pad_idx = 0;

				pad_len = ICP_QAT_HW_CCM_AAD_B0_LEN -
					((aad_ccm_real_len +
					aad_len_field_sz) %
					ICP_QAT_HW_CCM_AAD_B0_LEN);
				pad_idx = ICP_QAT_HW_CCM_AAD_B0_LEN +
					aad_ccm_real_len +
					aad_len_field_sz;
				memset(&aad_data[pad_idx], 0, pad_len);
			}
		}

		rte_memcpy(((uint8_t *)cipher_param->u.cipher_IV_array)
			+ ICP_QAT_HW_CCM_NONCE_OFFSET,
			(uint8_t *)iv->va + ICP_QAT_HW_CCM_NONCE_OFFSET,
			ctx->cipher_iv.length);
		*(uint8_t *)&cipher_param->u.cipher_IV_array[0] =
			q - ICP_QAT_HW_CCM_NONCE_OFFSET;

		rte_memcpy((uint8_t *)aad->va +
				ICP_QAT_HW_CCM_NONCE_OFFSET,
			(uint8_t *)iv->va + ICP_QAT_HW_CCM_NONCE_OFFSET,
			ctx->cipher_iv.length);
		break;
	default:
		break;
	}

	cipher_param->cipher_offset = ofs.ofs.cipher.head;
	cipher_param->cipher_length = data_len - ofs.ofs.cipher.head -
			ofs.ofs.cipher.tail;
	auth_param->auth_off = ofs.ofs.cipher.head;
	auth_param->auth_len = cipher_param->cipher_length;
	auth_param->auth_res_addr = digest->iova;
	auth_param->u1.aad_adr = aad_iova;

	if (ctx->is_single_pass) {
		cipher_param->spc_aad_addr = aad_iova;
		cipher_param->spc_auth_res_addr = digest->iova;
	}
}

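/*
 * Single-job AEAD enqueue: claim the next TX ring slot, copy the session's
 * 128-byte request template into it (rte_mov128), then fill the per-job
 * fields. The job only becomes visible to hardware once the tail is kicked
 * via qat_sym_dp_kick_tail().
 */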
static __rte_always_inline int
qat_sym_dp_enqueue_single_aead(void *qp_data, uint8_t *drv_ctx,
	struct rte_crypto_vec *data, uint16_t n_data_vecs,
	union rte_crypto_sym_ofs ofs,
	struct rte_crypto_va_iova_ptr *iv,
	struct rte_crypto_va_iova_ptr *digest,
	struct rte_crypto_va_iova_ptr *aad,
	void *user_data)
{
	struct qat_qp *qp = qp_data;
	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
	struct qat_queue *tx_queue = &qp->tx_q;
	struct qat_sym_session *ctx = dp_ctx->session;
	struct icp_qat_fw_la_bulk_req *req;
	int32_t data_len;
	uint32_t tail = dp_ctx->tail;

	req = (struct icp_qat_fw_la_bulk_req *)(
		(uint8_t *)tx_queue->base_addr + tail);
	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
	data_len = qat_sym_dp_parse_data_vec(qp, req, data, n_data_vecs);
	if (unlikely(data_len < 0))
		return -1;
	req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data;

	enqueue_one_aead_job(ctx, req, iv, digest, aad, ofs,
		(uint32_t)data_len);

	dp_ctx->tail = tail;
	dp_ctx->cached_enqueue++;

	return 0;
}

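/*
 * Burst AEAD enqueue. If a job fails to parse, the loop stops and the
 * remaining entries are marked failed in vec->status; the number of jobs
 * actually accepted is returned and added to the cached enqueue count.
 */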
static __rte_always_inline uint32_t
qat_sym_dp_enqueue_aead_jobs(void *qp_data, uint8_t *drv_ctx,
	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
	void *user_data[], int *status)
{
	struct qat_qp *qp = qp_data;
	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
	struct qat_queue *tx_queue = &qp->tx_q;
	struct qat_sym_session *ctx = dp_ctx->session;
	uint32_t i, n;
	uint32_t tail;
	struct icp_qat_fw_la_bulk_req *req;
	int32_t data_len;

	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
	if (unlikely(n == 0)) {
		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
		*status = 0;
		return 0;
	}

	tail = dp_ctx->tail;

	for (i = 0; i < n; i++) {
		req = (struct icp_qat_fw_la_bulk_req *)(
			(uint8_t *)tx_queue->base_addr + tail);
		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));

		data_len = qat_sym_dp_parse_data_vec(qp, req, vec->sgl[i].vec,
			vec->sgl[i].num);
		if (unlikely(data_len < 0))
			break;
		req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data[i];
		enqueue_one_aead_job(ctx, req, &vec->iv[i], &vec->digest[i],
			&vec->aad[i], ofs, (uint32_t)data_len);
		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
	}

	if (unlikely(i < n))
		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);

	dp_ctx->tail = tail;
	dp_ctx->cached_enqueue += i;
	*status = 0;
	return i;
}

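/* Fill the cipher-only fields of a request: IV, offset and length. */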
static __rte_always_inline void
enqueue_one_cipher_job(struct qat_sym_session *ctx,
	struct icp_qat_fw_la_bulk_req *req,
	struct rte_crypto_va_iova_ptr *iv,
	union rte_crypto_sym_ofs ofs, uint32_t data_len)
{
	struct icp_qat_fw_la_cipher_req_params *cipher_param;

	cipher_param = (void *)&req->serv_specif_rqpars;

	/* cipher IV */
	set_cipher_iv(cipher_param, iv, ctx->cipher_iv.length, req);
	cipher_param->cipher_offset = ofs.ofs.cipher.head;
	cipher_param->cipher_length = data_len - ofs.ofs.cipher.head -
			ofs.ofs.cipher.tail;
}

static __rte_always_inline int
qat_sym_dp_enqueue_single_cipher(void *qp_data, uint8_t *drv_ctx,
	struct rte_crypto_vec *data, uint16_t n_data_vecs,
	union rte_crypto_sym_ofs ofs,
	struct rte_crypto_va_iova_ptr *iv,
	struct rte_crypto_va_iova_ptr *digest __rte_unused,
	struct rte_crypto_va_iova_ptr *aad __rte_unused,
	void *user_data)
{
	struct qat_qp *qp = qp_data;
	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
	struct qat_queue *tx_queue = &qp->tx_q;
	struct qat_sym_session *ctx = dp_ctx->session;
	struct icp_qat_fw_la_bulk_req *req;
	int32_t data_len;
	uint32_t tail = dp_ctx->tail;

	req = (struct icp_qat_fw_la_bulk_req *)(
		(uint8_t *)tx_queue->base_addr + tail);
	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
	data_len = qat_sym_dp_parse_data_vec(qp, req, data, n_data_vecs);
	if (unlikely(data_len < 0))
		return -1;
	req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data;

	enqueue_one_cipher_job(ctx, req, iv, ofs, (uint32_t)data_len);

	dp_ctx->tail = tail;
	dp_ctx->cached_enqueue++;

	return 0;
}

static __rte_always_inline uint32_t
qat_sym_dp_enqueue_cipher_jobs(void *qp_data, uint8_t *drv_ctx,
	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
	void *user_data[], int *status)
{
	struct qat_qp *qp = qp_data;
	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
	struct qat_queue *tx_queue = &qp->tx_q;
	struct qat_sym_session *ctx = dp_ctx->session;
	uint32_t i, n;
	uint32_t tail;
	struct icp_qat_fw_la_bulk_req *req;
	int32_t data_len;

	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
	if (unlikely(n == 0)) {
		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
		*status = 0;
		return 0;
	}

	tail = dp_ctx->tail;

	for (i = 0; i < n; i++) {
		req = (struct icp_qat_fw_la_bulk_req *)(
			(uint8_t *)tx_queue->base_addr + tail);
		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));

		data_len = qat_sym_dp_parse_data_vec(qp, req, vec->sgl[i].vec,
			vec->sgl[i].num);
		if (unlikely(data_len < 0))
			break;
		req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data[i];
		enqueue_one_cipher_job(ctx, req, &vec->iv[i], ofs,
			(uint32_t)data_len);
		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
	}

	if (unlikely(i < n))
		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);

	dp_ctx->tail = tail;
	dp_ctx->cached_enqueue += i;
	*status = 0;
	return i;
}

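/*
 * Fill the auth-only fields of a request. Wireless algorithms (SNOW 3G,
 * KASUMI, ZUC) pass the auth IV through the AAD pointer; GMAC reuses the
 * cipher IV array for its IV.
 */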
static __rte_always_inline void
enqueue_one_auth_job(struct qat_sym_session *ctx,
	struct icp_qat_fw_la_bulk_req *req,
	struct rte_crypto_va_iova_ptr *digest,
	struct rte_crypto_va_iova_ptr *auth_iv,
	union rte_crypto_sym_ofs ofs, uint32_t data_len)
{
	struct icp_qat_fw_la_cipher_req_params *cipher_param;
	struct icp_qat_fw_la_auth_req_params *auth_param;

	cipher_param = (void *)&req->serv_specif_rqpars;
	auth_param = (void *)((uint8_t *)cipher_param +
			ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);

	auth_param->auth_off = ofs.ofs.auth.head;
	auth_param->auth_len = data_len - ofs.ofs.auth.head -
			ofs.ofs.auth.tail;
	auth_param->auth_res_addr = digest->iova;

	switch (ctx->qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
	case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
	case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3:
		auth_param->u1.aad_adr = auth_iv->iova;
		break;
	case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
	case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
		ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
			req->comn_hdr.serv_specif_flags,
			ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
		rte_memcpy(cipher_param->u.cipher_IV_array, auth_iv->va,
				ctx->auth_iv.length);
		break;
	default:
		break;
	}
}

static __rte_always_inline int
qat_sym_dp_enqueue_single_auth(void *qp_data, uint8_t *drv_ctx,
	struct rte_crypto_vec *data, uint16_t n_data_vecs,
	union rte_crypto_sym_ofs ofs,
	struct rte_crypto_va_iova_ptr *iv __rte_unused,
	struct rte_crypto_va_iova_ptr *digest,
	struct rte_crypto_va_iova_ptr *auth_iv,
	void *user_data)
{
	struct qat_qp *qp = qp_data;
	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
	struct qat_queue *tx_queue = &qp->tx_q;
	struct qat_sym_session *ctx = dp_ctx->session;
	struct icp_qat_fw_la_bulk_req *req;
	int32_t data_len;
	uint32_t tail = dp_ctx->tail;

	req = (struct icp_qat_fw_la_bulk_req *)(
		(uint8_t *)tx_queue->base_addr + tail);
	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
	data_len = qat_sym_dp_parse_data_vec(qp, req, data, n_data_vecs);
	if (unlikely(data_len < 0))
		return -1;
	req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data;

	enqueue_one_auth_job(ctx, req, digest, auth_iv, ofs,
		(uint32_t)data_len);

	dp_ctx->tail = tail;
	dp_ctx->cached_enqueue++;

	return 0;
}

static __rte_always_inline uint32_t
qat_sym_dp_enqueue_auth_jobs(void *qp_data, uint8_t *drv_ctx,
	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
	void *user_data[], int *status)
{
	struct qat_qp *qp = qp_data;
	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
	struct qat_queue *tx_queue = &qp->tx_q;
	struct qat_sym_session *ctx = dp_ctx->session;
	uint32_t i, n;
	uint32_t tail;
	struct icp_qat_fw_la_bulk_req *req;
	int32_t data_len;

	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
	if (unlikely(n == 0)) {
		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
		*status = 0;
		return 0;
	}

	tail = dp_ctx->tail;

	for (i = 0; i < n; i++) {
		req = (struct icp_qat_fw_la_bulk_req *)(
			(uint8_t *)tx_queue->base_addr + tail);
		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));

		data_len = qat_sym_dp_parse_data_vec(qp, req, vec->sgl[i].vec,
			vec->sgl[i].num);
		if (unlikely(data_len < 0))
			break;
		req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data[i];
		enqueue_one_auth_job(ctx, req, &vec->digest[i],
			&vec->auth_iv[i], ofs, (uint32_t)data_len);
		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
	}

	if (unlikely(i < n))
		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);

	dp_ctx->tail = tail;
	dp_ctx->cached_enqueue += i;
	*status = 0;
	return i;
}

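/*
 * Fill a chained cipher + hash request. Besides the usual offsets and
 * lengths, this computes the IOVA of the end of the authenticated region
 * (walking the SGL when needed) to detect the digest-encrypted case, in
 * which the digest lies inside the ciphered region and the buffer lengths
 * must be extended to cover it.
 */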
static __rte_always_inline int
enqueue_one_chain_job(struct qat_sym_session *ctx,
	struct icp_qat_fw_la_bulk_req *req,
	struct rte_crypto_vec *data,
	uint16_t n_data_vecs,
	struct rte_crypto_va_iova_ptr *cipher_iv,
	struct rte_crypto_va_iova_ptr *digest,
	struct rte_crypto_va_iova_ptr *auth_iv,
	union rte_crypto_sym_ofs ofs, uint32_t data_len)
{
	struct icp_qat_fw_la_cipher_req_params *cipher_param;
	struct icp_qat_fw_la_auth_req_params *auth_param;
	rte_iova_t auth_iova_end;
	int32_t cipher_len, auth_len;

	cipher_param = (void *)&req->serv_specif_rqpars;
	auth_param = (void *)((uint8_t *)cipher_param +
			ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);

	cipher_len = data_len - ofs.ofs.cipher.head -
			ofs.ofs.cipher.tail;
	auth_len = data_len - ofs.ofs.auth.head - ofs.ofs.auth.tail;

	if (unlikely(cipher_len < 0 || auth_len < 0))
		return -1;

	cipher_param->cipher_offset = ofs.ofs.cipher.head;
	cipher_param->cipher_length = cipher_len;
	set_cipher_iv(cipher_param, cipher_iv, ctx->cipher_iv.length, req);

	auth_param->auth_off = ofs.ofs.auth.head;
	auth_param->auth_len = auth_len;
	auth_param->auth_res_addr = digest->iova;

	switch (ctx->qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
	case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
	case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3:
		auth_param->u1.aad_adr = auth_iv->iova;

		if (unlikely(n_data_vecs > 1)) {
			int auth_end_get = 0, i = n_data_vecs - 1;
			struct rte_crypto_vec *cvec = &data[0];
			uint32_t len;

			len = data_len - ofs.ofs.auth.tail;

			while (i >= 0 && len > 0) {
				if (cvec->len >= len) {
					auth_iova_end = cvec->iova + len;
					len = 0;
					auth_end_get = 1;
					break;
				}
				len -= cvec->len;
				i--;
				cvec++;
			}

			if (unlikely(auth_end_get == 0))
				return -1;
		} else
			auth_iova_end = data[0].iova + auth_param->auth_off +
				auth_param->auth_len;

		/* Then check if digest-encrypted conditions are met */
		if ((auth_param->auth_off + auth_param->auth_len <
			cipher_param->cipher_offset +
			cipher_param->cipher_length) &&
			(digest->iova == auth_iova_end)) {
			/* Handle partial digest encryption */
			if (cipher_param->cipher_offset +
					cipher_param->cipher_length <
					auth_param->auth_off +
					auth_param->auth_len +
					ctx->digest_length)
				req->comn_mid.dst_length =
					req->comn_mid.src_length =
					auth_param->auth_off +
					auth_param->auth_len +
					ctx->digest_length;
			struct icp_qat_fw_comn_req_hdr *header =
				&req->comn_hdr;
			ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(
				header->serv_specif_flags,
				ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
		}
		break;
	case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
	case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
		/* GMAC is not supported as the auth algorithm of a chain */
		return -1;
	default:
		break;
	}

	return 0;
}

static __rte_always_inline int
qat_sym_dp_enqueue_single_chain(void *qp_data, uint8_t *drv_ctx,
	struct rte_crypto_vec *data, uint16_t n_data_vecs,
	union rte_crypto_sym_ofs ofs,
	struct rte_crypto_va_iova_ptr *cipher_iv,
	struct rte_crypto_va_iova_ptr *digest,
	struct rte_crypto_va_iova_ptr *auth_iv,
	void *user_data)
{
	struct qat_qp *qp = qp_data;
	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
	struct qat_queue *tx_queue = &qp->tx_q;
	struct qat_sym_session *ctx = dp_ctx->session;
	struct icp_qat_fw_la_bulk_req *req;
	int32_t data_len;
	uint32_t tail = dp_ctx->tail;

	req = (struct icp_qat_fw_la_bulk_req *)(
		(uint8_t *)tx_queue->base_addr + tail);
	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
	data_len = qat_sym_dp_parse_data_vec(qp, req, data, n_data_vecs);
	if (unlikely(data_len < 0))
		return -1;
	req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data;

	if (unlikely(enqueue_one_chain_job(ctx, req, data, n_data_vecs,
			cipher_iv, digest, auth_iv, ofs, (uint32_t)data_len)))
		return -1;

	dp_ctx->tail = tail;
	dp_ctx->cached_enqueue++;

	return 0;
}

static __rte_always_inline uint32_t
qat_sym_dp_enqueue_chain_jobs(void *qp_data, uint8_t *drv_ctx,
	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
	void *user_data[], int *status)
{
	struct qat_qp *qp = qp_data;
	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
	struct qat_queue *tx_queue = &qp->tx_q;
	struct qat_sym_session *ctx = dp_ctx->session;
	uint32_t i, n;
	uint32_t tail;
	struct icp_qat_fw_la_bulk_req *req;
	int32_t data_len;

	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
	if (unlikely(n == 0)) {
		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
		*status = 0;
		return 0;
	}

	tail = dp_ctx->tail;

	for (i = 0; i < n; i++) {
		req = (struct icp_qat_fw_la_bulk_req *)(
			(uint8_t *)tx_queue->base_addr + tail);
		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));

		data_len = qat_sym_dp_parse_data_vec(qp, req, vec->sgl[i].vec,
			vec->sgl[i].num);
		if (unlikely(data_len < 0))
			break;
		req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data[i];
		if (unlikely(enqueue_one_chain_job(ctx, req, vec->sgl[i].vec,
			vec->sgl[i].num, &vec->iv[i], &vec->digest[i],
				&vec->auth_iv[i], ofs, (uint32_t)data_len)))
			break;

		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
	}

	if (unlikely(i < n))
		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);

	dp_ctx->tail = tail;
	dp_ctx->cached_enqueue += i;
	*status = 0;
	return i;
}

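/*
 * Burst dequeue. The first valid response determines, via the caller's
 * get_dequeue_count callback, how many responses to drain; each one is
 * reported through post_dequeue. A slot still holding ADF_RING_EMPTY_SIG
 * has not been written back by hardware yet and ends the scan early.
 */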
static __rte_always_inline uint32_t
qat_sym_dp_dequeue_burst(void *qp_data, uint8_t *drv_ctx,
	rte_cryptodev_raw_get_dequeue_count_t get_dequeue_count,
	rte_cryptodev_raw_post_dequeue_t post_dequeue,
	void **out_user_data, uint8_t is_user_data_array,
	uint32_t *n_success_jobs, int *return_status)
{
	struct qat_qp *qp = qp_data;
	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
	struct qat_queue *rx_queue = &qp->rx_q;
	struct icp_qat_fw_comn_resp *resp;
	void *resp_opaque;
	uint32_t i, n, inflight;
	uint32_t head;
	uint8_t status;

	*n_success_jobs = 0;
	*return_status = 0;
	head = dp_ctx->head;

	inflight = qp->enqueued - qp->dequeued;
	if (unlikely(inflight == 0))
		return 0;

	resp = (struct icp_qat_fw_comn_resp *)((uint8_t *)rx_queue->base_addr +
			head);
	/* no operation ready */
	if (unlikely(*(uint32_t *)resp == ADF_RING_EMPTY_SIG))
		return 0;

	resp_opaque = (void *)(uintptr_t)resp->opaque_data;
	/* get the dequeue count */
	n = get_dequeue_count(resp_opaque);
	if (unlikely(n == 0))
		return 0;

	out_user_data[0] = resp_opaque;
	status = QAT_SYM_DP_IS_RESP_SUCCESS(resp);
	post_dequeue(resp_opaque, 0, status);
	*n_success_jobs += status;

	head = (head + rx_queue->msg_size) & rx_queue->modulo_mask;

	/* we already finished dequeue when n == 1 */
	if (unlikely(n == 1)) {
		i = 1;
		goto end_deq;
	}

	if (is_user_data_array) {
		for (i = 1; i < n; i++) {
			resp = (struct icp_qat_fw_comn_resp *)(
				(uint8_t *)rx_queue->base_addr + head);
			if (unlikely(*(uint32_t *)resp ==
					ADF_RING_EMPTY_SIG))
				goto end_deq;
			out_user_data[i] = (void *)(uintptr_t)resp->opaque_data;
			status = QAT_SYM_DP_IS_RESP_SUCCESS(resp);
			*n_success_jobs += status;
			post_dequeue(out_user_data[i], i, status);
			head = (head + rx_queue->msg_size) &
					rx_queue->modulo_mask;
		}

		goto end_deq;
	}

	/* opaque is not array */
	for (i = 1; i < n; i++) {
		resp = (struct icp_qat_fw_comn_resp *)(
			(uint8_t *)rx_queue->base_addr + head);
		status = QAT_SYM_DP_IS_RESP_SUCCESS(resp);
		if (unlikely(*(uint32_t *)resp == ADF_RING_EMPTY_SIG))
			goto end_deq;
		head = (head + rx_queue->msg_size) &
				rx_queue->modulo_mask;
		post_dequeue(resp_opaque, i, status);
		*n_success_jobs += status;
	}

end_deq:
	dp_ctx->head = head;
	dp_ctx->cached_dequeue += i;
	return i;
}

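/* Dequeue a single response, or NULL if the ring slot is still empty. */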
static __rte_always_inline void *
qat_sym_dp_dequeue(void *qp_data, uint8_t *drv_ctx, int *dequeue_status,
		enum rte_crypto_op_status *op_status)
{
	struct qat_qp *qp = qp_data;
	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
	struct qat_queue *rx_queue = &qp->rx_q;
	register struct icp_qat_fw_comn_resp *resp;

	resp = (struct icp_qat_fw_comn_resp *)((uint8_t *)rx_queue->base_addr +
			dp_ctx->head);

	if (unlikely(*(uint32_t *)resp == ADF_RING_EMPTY_SIG))
		return NULL;

	dp_ctx->head = (dp_ctx->head + rx_queue->msg_size) &
			rx_queue->modulo_mask;
	dp_ctx->cached_dequeue++;

	*op_status = QAT_SYM_DP_IS_RESP_SUCCESS(resp) ?
			RTE_CRYPTO_OP_STATUS_SUCCESS :
			RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
	*dequeue_status = 0;
	return (void *)(uintptr_t)resp->opaque_data;
}

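/*
 * Commit cached enqueues to hardware. The caller must pass the exact
 * number of jobs enqueued since the last commit; the TX tail CSR is then
 * written once for the whole burst.
 */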
static __rte_always_inline int
qat_sym_dp_kick_tail(void *qp_data, uint8_t *drv_ctx, uint32_t n)
{
	struct qat_qp *qp = qp_data;
	struct qat_queue *tx_queue = &qp->tx_q;
	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;

	if (unlikely(dp_ctx->cached_enqueue != n))
		return -1;

	qp->enqueued += n;
	qp->stats.enqueued_count += n;

	tx_queue->tail = dp_ctx->tail;

	WRITE_CSR_RING_TAIL(qp->mmap_bar_addr,
			tx_queue->hw_bundle_number,
			tx_queue->hw_queue_number, tx_queue->tail);
	tx_queue->csr_tail = tx_queue->tail;
	dp_ctx->cached_enqueue = 0;

	return 0;
}

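/*
 * Commit cached dequeues: advance the RX ring head and, once enough
 * responses have accumulated, wipe the consumed descriptors back to the
 * empty-ring signature (handling wrap-around) and write the head CSR.
 */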
static __rte_always_inline int
qat_sym_dp_update_head(void *qp_data, uint8_t *drv_ctx, uint32_t n)
{
	struct qat_qp *qp = qp_data;
	struct qat_queue *rx_queue = &qp->rx_q;
	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;

	if (unlikely(dp_ctx->cached_dequeue != n))
		return -1;

	rx_queue->head = dp_ctx->head;
	rx_queue->nb_processed_responses += n;
	qp->dequeued += n;
	qp->stats.dequeued_count += n;
	if (rx_queue->nb_processed_responses > QAT_CSR_HEAD_WRITE_THRESH) {
		uint32_t old_head, new_head;
		uint32_t max_head;

		old_head = rx_queue->csr_head;
		new_head = rx_queue->head;
		max_head = qp->nb_descriptors * rx_queue->msg_size;

		/* write out free descriptors */
		void *cur_desc = (uint8_t *)rx_queue->base_addr + old_head;

		if (new_head < old_head) {
			memset(cur_desc, ADF_RING_EMPTY_SIG_BYTE,
					max_head - old_head);
			memset(rx_queue->base_addr, ADF_RING_EMPTY_SIG_BYTE,
					new_head);
		} else {
			memset(cur_desc, ADF_RING_EMPTY_SIG_BYTE, new_head -
					old_head);
		}
		rx_queue->nb_processed_responses = 0;
		rx_queue->csr_head = new_head;

		/* write current head to CSR */
		WRITE_CSR_RING_HEAD(qp->mmap_bar_addr,
			rx_queue->hw_bundle_number, rx_queue->hw_queue_number,
			new_head);
	}

	dp_ctx->cached_dequeue = 0;
	return n;
}

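/*
 * Install the raw datapath handlers for a queue pair. Only session-based
 * operation (RTE_CRYPTO_OP_WITH_SESSION) is supported; the enqueue handlers
 * are selected from the session's command type (AEAD, chain, auth-only or
 * cipher-only).
 *
 * An illustrative application-side sketch (not part of this driver; error
 * handling and the setup of vec, ofs and user_data are omitted):
 *
 *	int size = rte_cryptodev_get_raw_dp_ctx_size(dev_id);
 *	struct rte_crypto_raw_dp_ctx *dp_ctx = malloc(size);
 *	union rte_cryptodev_session_ctx sess_ctx = { .crypto_sess = sess };
 *
 *	rte_cryptodev_configure_raw_dp_ctx(dev_id, qp_id, dp_ctx,
 *		RTE_CRYPTO_OP_WITH_SESSION, sess_ctx, 0);
 *	n = rte_cryptodev_raw_enqueue_burst(dp_ctx, &vec, ofs, user_data,
 *		&enq_status);
 *	rte_cryptodev_raw_enqueue_done(dp_ctx, n);
 *
 * where the final call lands in qat_sym_dp_kick_tail() above.
 */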
int
qat_sym_configure_dp_ctx(struct rte_cryptodev *dev, uint16_t qp_id,
	struct rte_crypto_raw_dp_ctx *raw_dp_ctx,
	enum rte_crypto_op_sess_type sess_type,
	union rte_cryptodev_session_ctx session_ctx, uint8_t is_update)
{
	struct qat_qp *qp;
	struct qat_sym_session *ctx;
	struct qat_sym_dp_ctx *dp_ctx;

	qp = dev->data->queue_pairs[qp_id];
	dp_ctx = (struct qat_sym_dp_ctx *)raw_dp_ctx->drv_ctx_data;

	if (!is_update) {
		memset(raw_dp_ctx, 0, sizeof(*raw_dp_ctx) +
				sizeof(struct qat_sym_dp_ctx));
		raw_dp_ctx->qp_data = dev->data->queue_pairs[qp_id];
		dp_ctx->tail = qp->tx_q.tail;
		dp_ctx->head = qp->rx_q.head;
		dp_ctx->cached_enqueue = dp_ctx->cached_dequeue = 0;
	}

	if (sess_type != RTE_CRYPTO_OP_WITH_SESSION)
		return -EINVAL;

	ctx = (struct qat_sym_session *)get_sym_session_private_data(
			session_ctx.crypto_sess, qat_sym_driver_id);

	dp_ctx->session = ctx;

	raw_dp_ctx->enqueue_done = qat_sym_dp_kick_tail;
	raw_dp_ctx->dequeue_burst = qat_sym_dp_dequeue_burst;
	raw_dp_ctx->dequeue = qat_sym_dp_dequeue;
	raw_dp_ctx->dequeue_done = qat_sym_dp_update_head;

	if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER ||
			ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) {
		/* AES-GCM or AES-CCM */
		if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
			ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64 ||
			(ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_AES128
			&& ctx->qat_mode == ICP_QAT_HW_CIPHER_CTR_MODE
			&& ctx->qat_hash_alg ==
				ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC)) {
			raw_dp_ctx->enqueue_burst =
					qat_sym_dp_enqueue_aead_jobs;
			raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_aead;
		} else {
			raw_dp_ctx->enqueue_burst =
					qat_sym_dp_enqueue_chain_jobs;
			raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_chain;
		}
	} else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH) {
		raw_dp_ctx->enqueue_burst = qat_sym_dp_enqueue_auth_jobs;
		raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_auth;
	} else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) {
		raw_dp_ctx->enqueue_burst = qat_sym_dp_enqueue_cipher_jobs;
		raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_cipher;
	} else
		return -1;

	return 0;
}

int
qat_sym_get_dp_ctx_size(__rte_unused struct rte_cryptodev *dev)
{
	return sizeof(struct qat_sym_dp_ctx);
}