/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2020 Intel Corporation
 */

#include <rte_cryptodev_pmd.h>

#include "adf_transport_access_macros.h"
#include "icp_qat_fw.h"
#include "icp_qat_fw_la.h"

#include "qat_sym.h"
#include "qat_sym_pmd.h"
#include "qat_sym_session.h"
#include "qat_qp.h"
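/*
 * Per queue pair raw data-path context. It shadows the TX/RX ring positions
 * and counts the requests/responses cached locally until the application
 * commits them through the enqueue_done/dequeue_done callbacks.
 */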
struct qat_sym_dp_ctx {
	struct qat_sym_session *session;
	uint32_t tail;
	uint32_t head;
	uint16_t cached_enqueue;
	uint16_t cached_dequeue;
};
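/*
 * Fill the request source/destination addresses from the data vectors.
 * A single vector is passed as a flat buffer; multiple vectors are built
 * into the SGL stored in the op cookie. Returns the total data length,
 * or a negative value on invalid input.
 */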
static __rte_always_inline int32_t
qat_sym_dp_parse_data_vec(struct qat_qp *qp, struct icp_qat_fw_la_bulk_req *req,
		struct rte_crypto_vec *data, uint16_t n_data_vecs)
{
	struct qat_queue *tx_queue;
	struct qat_sym_op_cookie *cookie;
	struct qat_sgl *list;
	uint32_t i;
	uint32_t total_len;

	if (likely(n_data_vecs == 1)) {
		req->comn_mid.src_data_addr = req->comn_mid.dest_data_addr =
			data[0].iova;
		req->comn_mid.src_length = req->comn_mid.dst_length =
			data[0].len;
		return data[0].len;
	}

	if (n_data_vecs == 0 || n_data_vecs > QAT_SYM_SGL_MAX_NUMBER)
		return -1;

	total_len = 0;
	tx_queue = &qp->tx_q;

	ICP_QAT_FW_COMN_PTR_TYPE_SET(req->comn_hdr.comn_req_flags,
			QAT_COMN_PTR_TYPE_SGL);
	cookie = qp->op_cookies[tx_queue->tail >> tx_queue->trailz];
	list = (struct qat_sgl *)&cookie->qat_sgl_src;

	for (i = 0; i < n_data_vecs; i++) {
		list->buffers[i].len = data[i].len;
		list->buffers[i].resrvd = 0;
		list->buffers[i].addr = data[i].iova;
		if (total_len + data[i].len > UINT32_MAX) {
			QAT_DP_LOG(ERR, "Message too long");
			return -1;
		}
		total_len += data[i].len;
	}

	list->num_bufs = i;
	req->comn_mid.src_data_addr = req->comn_mid.dest_data_addr =
			cookie->qat_sgl_src_phys_addr;
	req->comn_mid.src_length = req->comn_mid.dst_length = 0;

	return total_len;
}
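/*
 * Set the cipher IV either inline in the request (when it fits in the
 * IV array) or as a 64-bit pointer to the IV buffer.
 */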
static __rte_always_inline void
set_cipher_iv(struct icp_qat_fw_la_cipher_req_params *cipher_param,
		struct rte_crypto_va_iova_ptr *iv_ptr, uint32_t iv_len,
		struct icp_qat_fw_la_bulk_req *qat_req)
{
	/* copy IV into request if it fits */
	if (iv_len <= sizeof(cipher_param->u.cipher_IV_array))
		rte_memcpy(cipher_param->u.cipher_IV_array, iv_ptr->va,
				iv_len);
	else {
		ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(
				qat_req->comn_hdr.serv_specif_flags,
				ICP_QAT_FW_CIPH_IV_64BIT_PTR);
		cipher_param->u.s.cipher_IV_ptr = iv_ptr->iova;
	}
}
#define QAT_SYM_DP_IS_RESP_SUCCESS(resp) \
	(ICP_QAT_FW_COMN_STATUS_FLAG_OK == \
	ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(resp->comn_hdr.comn_status))

static __rte_always_inline void
qat_sym_dp_fill_vec_status(int32_t *sta, int status, uint32_t n)
{
	uint32_t i;

	for (i = 0; i < n; i++)
		sta[i] = status;
}

#define QAT_SYM_DP_GET_MAX_ENQ(q, c, n) \
	RTE_MIN((q->max_inflights - q->enqueued + q->dequeued - c), n)
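/*
 * Fill the cipher and auth request parameters for an AEAD job (AES-GCM or
 * AES-CCM, including the CCM B0 block and AAD length encoding), and the
 * single-pass fields when the session uses single-pass AEAD.
 */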
static __rte_always_inline void
enqueue_one_aead_job(struct qat_sym_session *ctx,
	struct icp_qat_fw_la_bulk_req *req,
	struct rte_crypto_va_iova_ptr *iv,
	struct rte_crypto_va_iova_ptr *digest,
	struct rte_crypto_va_iova_ptr *aad,
	union rte_crypto_sym_ofs ofs, uint32_t data_len)
{
	struct icp_qat_fw_la_cipher_req_params *cipher_param =
		(void *)&req->serv_specif_rqpars;
	struct icp_qat_fw_la_auth_req_params *auth_param =
		(void *)((uint8_t *)&req->serv_specif_rqpars +
		ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
	uint8_t *aad_data;
	uint8_t aad_ccm_real_len;
	uint8_t aad_len_field_sz;
	uint32_t msg_len_be;
	rte_iova_t aad_iova = 0;
	uint8_t q;

	switch (ctx->qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
	case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
		ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
			req->comn_hdr.serv_specif_flags,
			ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
		rte_memcpy(cipher_param->u.cipher_IV_array, iv->va,
				ctx->cipher_iv.length);
		aad_iova = aad->iova;
		break;
	case ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC:
		aad_data = aad->va;
		aad_iova = aad->iova;
		aad_ccm_real_len = 0;
		aad_len_field_sz = 0;
		msg_len_be = rte_bswap32((uint32_t)data_len -
				ofs.ofs.cipher.head);

		if (ctx->aad_len > ICP_QAT_HW_CCM_AAD_DATA_OFFSET) {
			aad_len_field_sz = ICP_QAT_HW_CCM_AAD_LEN_INFO;
			aad_ccm_real_len = ctx->aad_len -
				ICP_QAT_HW_CCM_AAD_B0_LEN -
				ICP_QAT_HW_CCM_AAD_LEN_INFO;
		} else {
			aad_data = iv->va;
			aad_iova = iv->iova;
		}

		q = ICP_QAT_HW_CCM_NQ_CONST - ctx->cipher_iv.length;
		aad_data[0] = ICP_QAT_HW_CCM_BUILD_B0_FLAGS(
			aad_len_field_sz, ctx->digest_length, q);
		if (q > ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE) {
			memcpy(aad_data + ctx->cipher_iv.length +
				ICP_QAT_HW_CCM_NONCE_OFFSET + (q -
				ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE),
				(uint8_t *)&msg_len_be,
				ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE);
		} else {
			memcpy(aad_data + ctx->cipher_iv.length +
				ICP_QAT_HW_CCM_NONCE_OFFSET,
				(uint8_t *)&msg_len_be +
				(ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE
				- q), q);
		}

		if (aad_len_field_sz > 0) {
			*(uint16_t *)&aad_data[ICP_QAT_HW_CCM_AAD_B0_LEN] =
				rte_bswap16(aad_ccm_real_len);

			if ((aad_ccm_real_len + aad_len_field_sz)
				% ICP_QAT_HW_CCM_AAD_B0_LEN) {
				uint8_t pad_len = 0;
				uint8_t pad_idx = 0;

				pad_len = ICP_QAT_HW_CCM_AAD_B0_LEN -
					((aad_ccm_real_len +
					aad_len_field_sz) %
					ICP_QAT_HW_CCM_AAD_B0_LEN);
				pad_idx = ICP_QAT_HW_CCM_AAD_B0_LEN +
					aad_ccm_real_len +
					aad_len_field_sz;
				memset(&aad_data[pad_idx], 0, pad_len);
			}
		}

		rte_memcpy(((uint8_t *)cipher_param->u.cipher_IV_array)
			+ ICP_QAT_HW_CCM_NONCE_OFFSET,
			(uint8_t *)iv->va +
			ICP_QAT_HW_CCM_NONCE_OFFSET, ctx->cipher_iv.length);
		*(uint8_t *)&cipher_param->u.cipher_IV_array[0] =
			q - ICP_QAT_HW_CCM_NONCE_OFFSET;

		rte_memcpy((uint8_t *)aad->va +
				ICP_QAT_HW_CCM_NONCE_OFFSET,
			(uint8_t *)iv->va + ICP_QAT_HW_CCM_NONCE_OFFSET,
			ctx->cipher_iv.length);
		break;
	default:
		break;
	}

	cipher_param->cipher_offset = ofs.ofs.cipher.head;
	cipher_param->cipher_length = data_len - ofs.ofs.cipher.head -
			ofs.ofs.cipher.tail;
	auth_param->auth_off = ofs.ofs.cipher.head;
	auth_param->auth_len = cipher_param->cipher_length;
	auth_param->auth_res_addr = digest->iova;
	auth_param->u1.aad_adr = aad_iova;

	if (ctx->is_single_pass) {
		cipher_param->spc_aad_addr = aad_iova;
		cipher_param->spc_auth_res_addr = digest->iova;
	}
}
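/*
 * The enqueue helpers below all follow the same pattern: reserve the next
 * request slot on the TX ring, copy the per-session request template
 * (fw_req), parse the data vectors, fill the algorithm-specific parameters
 * and advance the cached tail. Nothing is written to hardware until the
 * enqueue_done (kick tail) callback runs.
 */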
static __rte_always_inline int
qat_sym_dp_enqueue_single_aead(void *qp_data, uint8_t *drv_ctx,
	struct rte_crypto_vec *data, uint16_t n_data_vecs,
	union rte_crypto_sym_ofs ofs,
	struct rte_crypto_va_iova_ptr *iv,
	struct rte_crypto_va_iova_ptr *digest,
	struct rte_crypto_va_iova_ptr *aad,
	void *user_data)
{
	struct qat_qp *qp = qp_data;
	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
	struct qat_queue *tx_queue = &qp->tx_q;
	struct qat_sym_session *ctx = dp_ctx->session;
	struct icp_qat_fw_la_bulk_req *req;
	int32_t data_len;
	uint32_t tail = dp_ctx->tail;

	req = (struct icp_qat_fw_la_bulk_req *)(
		(uint8_t *)tx_queue->base_addr + tail);
	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
	data_len = qat_sym_dp_parse_data_vec(qp, req, data, n_data_vecs);
	if (unlikely(data_len < 0))
		return -1;
	req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data;

	enqueue_one_aead_job(ctx, req, iv, digest, aad, ofs,
		(uint32_t)data_len);

	dp_ctx->tail = tail;
	dp_ctx->cached_enqueue++;

	return 0;
}
static __rte_always_inline uint32_t
qat_sym_dp_enqueue_aead_jobs(void *qp_data, uint8_t *drv_ctx,
	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
	void *user_data[], int *status)
{
	struct qat_qp *qp = qp_data;
	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
	struct qat_queue *tx_queue = &qp->tx_q;
	struct qat_sym_session *ctx = dp_ctx->session;
	uint32_t i, n;
	uint32_t tail;
	struct icp_qat_fw_la_bulk_req *req;
	int32_t data_len;

	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
	if (unlikely(n == 0)) {
		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
		*status = 0;
		return 0;
	}

	tail = dp_ctx->tail;

	for (i = 0; i < n; i++) {
		req = (struct icp_qat_fw_la_bulk_req *)(
			(uint8_t *)tx_queue->base_addr + tail);
		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));

		data_len = qat_sym_dp_parse_data_vec(qp, req, vec->sgl[i].vec,
			vec->sgl[i].num);
		if (unlikely(data_len < 0))
			break;
		req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data[i];
		enqueue_one_aead_job(ctx, req, &vec->iv[i], &vec->digest[i],
			&vec->aad[i], ofs, (uint32_t)data_len);
		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
	}

	if (unlikely(i < n))
		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);

	dp_ctx->tail = tail;
	dp_ctx->cached_enqueue += i;
	*status = 0;
	return i;
}
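/* Fill the cipher-only request parameters (offset, length and IV). */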
static __rte_always_inline void
enqueue_one_cipher_job(struct qat_sym_session *ctx,
	struct icp_qat_fw_la_bulk_req *req,
	struct rte_crypto_va_iova_ptr *iv,
	union rte_crypto_sym_ofs ofs, uint32_t data_len)
{
	struct icp_qat_fw_la_cipher_req_params *cipher_param;

	cipher_param = (void *)&req->serv_specif_rqpars;

	/* cipher IV */
	set_cipher_iv(cipher_param, iv, ctx->cipher_iv.length, req);
	cipher_param->cipher_offset = ofs.ofs.cipher.head;
	cipher_param->cipher_length = data_len - ofs.ofs.cipher.head -
			ofs.ofs.cipher.tail;
}
static __rte_always_inline int
qat_sym_dp_enqueue_single_cipher(void *qp_data, uint8_t *drv_ctx,
	struct rte_crypto_vec *data, uint16_t n_data_vecs,
	union rte_crypto_sym_ofs ofs,
	struct rte_crypto_va_iova_ptr *iv,
	struct rte_crypto_va_iova_ptr *digest __rte_unused,
	struct rte_crypto_va_iova_ptr *aad __rte_unused,
	void *user_data)
{
	struct qat_qp *qp = qp_data;
	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
	struct qat_queue *tx_queue = &qp->tx_q;
	struct qat_sym_session *ctx = dp_ctx->session;
	struct icp_qat_fw_la_bulk_req *req;
	int32_t data_len;
	uint32_t tail = dp_ctx->tail;

	req = (struct icp_qat_fw_la_bulk_req *)(
		(uint8_t *)tx_queue->base_addr + tail);
	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
	data_len = qat_sym_dp_parse_data_vec(qp, req, data, n_data_vecs);
	if (unlikely(data_len < 0))
		return -1;
	req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data;

	enqueue_one_cipher_job(ctx, req, iv, ofs, (uint32_t)data_len);

	dp_ctx->tail = tail;
	dp_ctx->cached_enqueue++;

	return 0;
}
static __rte_always_inline uint32_t
qat_sym_dp_enqueue_cipher_jobs(void *qp_data, uint8_t *drv_ctx,
	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
	void *user_data[], int *status)
{
	struct qat_qp *qp = qp_data;
	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
	struct qat_queue *tx_queue = &qp->tx_q;
	struct qat_sym_session *ctx = dp_ctx->session;
	uint32_t i, n;
	uint32_t tail;
	struct icp_qat_fw_la_bulk_req *req;
	int32_t data_len;

	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
	if (unlikely(n == 0)) {
		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
		*status = 0;
		return 0;
	}

	tail = dp_ctx->tail;

	for (i = 0; i < n; i++) {
		req = (struct icp_qat_fw_la_bulk_req *)(
			(uint8_t *)tx_queue->base_addr + tail);
		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));

		data_len = qat_sym_dp_parse_data_vec(qp, req, vec->sgl[i].vec,
			vec->sgl[i].num);
		if (unlikely(data_len < 0))
			break;
		req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data[i];
		enqueue_one_cipher_job(ctx, req, &vec->iv[i], ofs,
			(uint32_t)data_len);
		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
	}

	if (unlikely(i < n))
		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);

	dp_ctx->tail = tail;
	dp_ctx->cached_enqueue += i;
	*status = 0;
	return i;
}
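/*
 * Fill the auth-only request parameters. Wireless algorithms pass the auth
 * IV through the AAD address field; GCM/GMAC places it in the cipher IV
 * array of the request.
 */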
static __rte_always_inline void
enqueue_one_auth_job(struct qat_sym_session *ctx,
	struct icp_qat_fw_la_bulk_req *req,
	struct rte_crypto_va_iova_ptr *digest,
	struct rte_crypto_va_iova_ptr *auth_iv,
	union rte_crypto_sym_ofs ofs, uint32_t data_len)
{
	struct icp_qat_fw_la_cipher_req_params *cipher_param;
	struct icp_qat_fw_la_auth_req_params *auth_param;

	cipher_param = (void *)&req->serv_specif_rqpars;
	auth_param = (void *)((uint8_t *)cipher_param +
			ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);

	auth_param->auth_off = ofs.ofs.auth.head;
	auth_param->auth_len = data_len - ofs.ofs.auth.head -
			ofs.ofs.auth.tail;
	auth_param->auth_res_addr = digest->iova;

	switch (ctx->qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
	case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
	case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3:
		auth_param->u1.aad_adr = auth_iv->iova;
		break;
	case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
	case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
		ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
			req->comn_hdr.serv_specif_flags,
			ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
		rte_memcpy(cipher_param->u.cipher_IV_array, auth_iv->va,
				ctx->auth_iv.length);
		break;
	default:
		break;
	}
}
static __rte_always_inline int
qat_sym_dp_enqueue_single_auth(void *qp_data, uint8_t *drv_ctx,
	struct rte_crypto_vec *data, uint16_t n_data_vecs,
	union rte_crypto_sym_ofs ofs,
	struct rte_crypto_va_iova_ptr *iv __rte_unused,
	struct rte_crypto_va_iova_ptr *digest,
	struct rte_crypto_va_iova_ptr *auth_iv,
	void *user_data)
{
	struct qat_qp *qp = qp_data;
	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
	struct qat_queue *tx_queue = &qp->tx_q;
	struct qat_sym_session *ctx = dp_ctx->session;
	struct icp_qat_fw_la_bulk_req *req;
	int32_t data_len;
	uint32_t tail = dp_ctx->tail;

	req = (struct icp_qat_fw_la_bulk_req *)(
		(uint8_t *)tx_queue->base_addr + tail);
	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
	data_len = qat_sym_dp_parse_data_vec(qp, req, data, n_data_vecs);
	if (unlikely(data_len < 0))
		return -1;
	req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data;

	enqueue_one_auth_job(ctx, req, digest, auth_iv, ofs,
		(uint32_t)data_len);

	dp_ctx->tail = tail;
	dp_ctx->cached_enqueue++;

	return 0;
}
static __rte_always_inline uint32_t
qat_sym_dp_enqueue_auth_jobs(void *qp_data, uint8_t *drv_ctx,
	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
	void *user_data[], int *status)
{
	struct qat_qp *qp = qp_data;
	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
	struct qat_queue *tx_queue = &qp->tx_q;
	struct qat_sym_session *ctx = dp_ctx->session;
	uint32_t i, n;
	uint32_t tail;
	struct icp_qat_fw_la_bulk_req *req;
	int32_t data_len;

	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
	if (unlikely(n == 0)) {
		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
		*status = 0;
		return 0;
	}

	tail = dp_ctx->tail;

	for (i = 0; i < n; i++) {
		req = (struct icp_qat_fw_la_bulk_req *)(
			(uint8_t *)tx_queue->base_addr + tail);
		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));

		data_len = qat_sym_dp_parse_data_vec(qp, req, vec->sgl[i].vec,
			vec->sgl[i].num);
		if (unlikely(data_len < 0))
			break;
		req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data[i];
		enqueue_one_auth_job(ctx, req, &vec->digest[i],
			&vec->auth_iv[i], ofs, (uint32_t)data_len);
		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
	}

	if (unlikely(i < n))
		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);

	dp_ctx->tail = tail;
	dp_ctx->cached_enqueue += i;
	*status = 0;
	return i;
}
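/*
 * Fill a cipher + auth chained request. For multi-segment input the end of
 * the auth region is located so that the digest-encrypted case can be
 * detected: when the digest sits right after the auth data and inside the
 * cipher region, the request is flagged as digest-in-buffer and the
 * source/destination lengths are extended to cover the digest.
 */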
static __rte_always_inline int
enqueue_one_chain_job(struct qat_sym_session *ctx,
	struct icp_qat_fw_la_bulk_req *req,
	struct rte_crypto_vec *data,
	uint16_t n_data_vecs,
	struct rte_crypto_va_iova_ptr *cipher_iv,
	struct rte_crypto_va_iova_ptr *digest,
	struct rte_crypto_va_iova_ptr *auth_iv,
	union rte_crypto_sym_ofs ofs, uint32_t data_len)
{
	struct icp_qat_fw_la_cipher_req_params *cipher_param;
	struct icp_qat_fw_la_auth_req_params *auth_param;
	rte_iova_t auth_iova_end;
	int32_t cipher_len, auth_len;

	cipher_param = (void *)&req->serv_specif_rqpars;
	auth_param = (void *)((uint8_t *)cipher_param +
			ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);

	cipher_len = data_len - ofs.ofs.cipher.head -
			ofs.ofs.cipher.tail;
	auth_len = data_len - ofs.ofs.auth.head - ofs.ofs.auth.tail;

	if (unlikely(cipher_len < 0 || auth_len < 0))
		return -1;

	cipher_param->cipher_offset = ofs.ofs.cipher.head;
	cipher_param->cipher_length = cipher_len;
	set_cipher_iv(cipher_param, cipher_iv, ctx->cipher_iv.length, req);

	auth_param->auth_off = ofs.ofs.auth.head;
	auth_param->auth_len = auth_len;
	auth_param->auth_res_addr = digest->iova;

	switch (ctx->qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
	case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
	case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3:
		auth_param->u1.aad_adr = auth_iv->iova;
		break;
	case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
	case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
		break;
	default:
		break;
	}

	if (unlikely(n_data_vecs > 1)) {
		int auth_end_get = 0, i = n_data_vecs - 1;
		struct rte_crypto_vec *cvec = &data[0];
		uint32_t len;

		len = data_len - ofs.ofs.auth.tail;

		while (i >= 0 && len > 0) {
			if (cvec->len >= len) {
				auth_iova_end = cvec->iova + len;
				len = 0;
				auth_end_get = 1;
				break;
			}
			len -= cvec->len;
			i--;
			cvec++;
		}

		if (unlikely(auth_end_get == 0))
			return -1;
	} else
		auth_iova_end = data[0].iova + auth_param->auth_off +
			auth_param->auth_len;

	/* Then check if digest-encrypted conditions are met */
	if ((auth_param->auth_off + auth_param->auth_len <
		cipher_param->cipher_offset +
		cipher_param->cipher_length) &&
		(digest->iova == auth_iova_end)) {
		/* Handle partial digest encryption */
		if (cipher_param->cipher_offset +
				cipher_param->cipher_length <
				auth_param->auth_off +
				auth_param->auth_len +
				ctx->digest_length)
			req->comn_mid.dst_length =
				req->comn_mid.src_length =
				auth_param->auth_off +
				auth_param->auth_len +
				ctx->digest_length;
		struct icp_qat_fw_comn_req_hdr *header =
			&req->comn_hdr;
		ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(
			header->serv_specif_flags,
			ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
	}

	return 0;
}
static __rte_always_inline int
qat_sym_dp_enqueue_single_chain(void *qp_data, uint8_t *drv_ctx,
	struct rte_crypto_vec *data, uint16_t n_data_vecs,
	union rte_crypto_sym_ofs ofs,
	struct rte_crypto_va_iova_ptr *cipher_iv,
	struct rte_crypto_va_iova_ptr *digest,
	struct rte_crypto_va_iova_ptr *auth_iv,
	void *user_data)
{
	struct qat_qp *qp = qp_data;
	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
	struct qat_queue *tx_queue = &qp->tx_q;
	struct qat_sym_session *ctx = dp_ctx->session;
	struct icp_qat_fw_la_bulk_req *req;
	int32_t data_len;
	uint32_t tail = dp_ctx->tail;

	req = (struct icp_qat_fw_la_bulk_req *)(
		(uint8_t *)tx_queue->base_addr + tail);
	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
	data_len = qat_sym_dp_parse_data_vec(qp, req, data, n_data_vecs);
	if (unlikely(data_len < 0))
		return -1;
	req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data;

	if (unlikely(enqueue_one_chain_job(ctx, req, data, n_data_vecs,
			cipher_iv, digest, auth_iv, ofs, (uint32_t)data_len)))
		return -1;

	dp_ctx->tail = tail;
	dp_ctx->cached_enqueue++;

	return 0;
}
static __rte_always_inline uint32_t
qat_sym_dp_enqueue_chain_jobs(void *qp_data, uint8_t *drv_ctx,
	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
	void *user_data[], int *status)
{
	struct qat_qp *qp = qp_data;
	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
	struct qat_queue *tx_queue = &qp->tx_q;
	struct qat_sym_session *ctx = dp_ctx->session;
	uint32_t i, n;
	uint32_t tail;
	struct icp_qat_fw_la_bulk_req *req;
	int32_t data_len;

	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
	if (unlikely(n == 0)) {
		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
		*status = 0;
		return 0;
	}

	tail = dp_ctx->tail;

	for (i = 0; i < n; i++) {
		req = (struct icp_qat_fw_la_bulk_req *)(
			(uint8_t *)tx_queue->base_addr + tail);
		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));

		data_len = qat_sym_dp_parse_data_vec(qp, req, vec->sgl[i].vec,
			vec->sgl[i].num);
		if (unlikely(data_len < 0))
			break;
		req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data[i];
		if (unlikely(enqueue_one_chain_job(ctx, req, vec->sgl[i].vec,
			vec->sgl[i].num, &vec->iv[i], &vec->digest[i],
				&vec->auth_iv[i], ofs, (uint32_t)data_len)))
			break;

		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
	}

	if (unlikely(i < n))
		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);

	dp_ctx->tail = tail;
	dp_ctx->cached_enqueue += i;
	*status = 0;
	return i;
}
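/*
 * Dequeue a burst of responses. The number of responses to collect is taken
 * from the get_dequeue_count callback (driven by the first ready response's
 * opaque data) or from max_nb_to_dequeue. The ring head is only cached here;
 * it is written back to hardware in the dequeue_done callback.
 */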
static __rte_always_inline uint32_t
qat_sym_dp_dequeue_burst(void *qp_data, uint8_t *drv_ctx,
	rte_cryptodev_raw_get_dequeue_count_t get_dequeue_count,
	uint32_t max_nb_to_dequeue,
	rte_cryptodev_raw_post_dequeue_t post_dequeue,
	void **out_user_data, uint8_t is_user_data_array,
	uint32_t *n_success_jobs, int *return_status)
{
	struct qat_qp *qp = qp_data;
	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
	struct qat_queue *rx_queue = &qp->rx_q;
	struct icp_qat_fw_comn_resp *resp;
	void *resp_opaque;
	uint32_t i, n, inflight;
	uint32_t head;
	uint8_t status;

	*n_success_jobs = 0;
	*return_status = 0;
	head = dp_ctx->head;

	inflight = qp->enqueued - qp->dequeued;
	if (unlikely(inflight == 0))
		return 0;

	resp = (struct icp_qat_fw_comn_resp *)((uint8_t *)rx_queue->base_addr +
			head);
	/* no operation ready */
	if (unlikely(*(uint32_t *)resp == ADF_RING_EMPTY_SIG))
		return 0;

	resp_opaque = (void *)(uintptr_t)resp->opaque_data;
	/* get the dequeue count */
	if (get_dequeue_count) {
		n = get_dequeue_count(resp_opaque);
		if (unlikely(n == 0))
			return 0;
		else if (n > 1) {
			/* check the last of the n responses is ready too */
			head = (head + rx_queue->msg_size * (n - 1)) &
				rx_queue->modulo_mask;
			resp = (struct icp_qat_fw_comn_resp *)(
				(uint8_t *)rx_queue->base_addr + head);
			if (*(uint32_t *)resp == ADF_RING_EMPTY_SIG)
				return 0;
			/* restart from the first ready response */
			head = dp_ctx->head;
			resp = (struct icp_qat_fw_comn_resp *)(
				(uint8_t *)rx_queue->base_addr + head);
		}
	} else {
		if (unlikely(max_nb_to_dequeue == 0))
			return 0;
		n = max_nb_to_dequeue;
	}

	out_user_data[0] = resp_opaque;
	status = QAT_SYM_DP_IS_RESP_SUCCESS(resp);
	post_dequeue(resp_opaque, 0, status);
	*n_success_jobs += status;

	head = (head + rx_queue->msg_size) & rx_queue->modulo_mask;

	/* we already finished dequeue when n == 1 */
	if (unlikely(n == 1)) {
		i = 1;
		goto end_deq;
	}

	if (is_user_data_array) {
		for (i = 1; i < n; i++) {
			resp = (struct icp_qat_fw_comn_resp *)(
				(uint8_t *)rx_queue->base_addr + head);
			if (unlikely(*(uint32_t *)resp ==
					ADF_RING_EMPTY_SIG))
				goto end_deq;
			out_user_data[i] = (void *)(uintptr_t)resp->opaque_data;
			status = QAT_SYM_DP_IS_RESP_SUCCESS(resp);
			*n_success_jobs += status;
			post_dequeue(out_user_data[i], i, status);
			head = (head + rx_queue->msg_size) &
					rx_queue->modulo_mask;
		}

		goto end_deq;
	}

	/* opaque is not array */
	for (i = 1; i < n; i++) {
		resp = (struct icp_qat_fw_comn_resp *)(
			(uint8_t *)rx_queue->base_addr + head);
		status = QAT_SYM_DP_IS_RESP_SUCCESS(resp);
		if (unlikely(*(uint32_t *)resp == ADF_RING_EMPTY_SIG))
			goto end_deq;
		head = (head + rx_queue->msg_size) &
				rx_queue->modulo_mask;
		post_dequeue(resp_opaque, i, status);
		*n_success_jobs += status;
	}

end_deq:
	dp_ctx->head = head;
	dp_ctx->cached_dequeue += i;
	return i;
}
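/* Dequeue a single response and report its operation status. */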
static __rte_always_inline void *
qat_sym_dp_dequeue(void *qp_data, uint8_t *drv_ctx, int *dequeue_status,
		enum rte_crypto_op_status *op_status)
{
	struct qat_qp *qp = qp_data;
	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
	struct qat_queue *rx_queue = &qp->rx_q;
	register struct icp_qat_fw_comn_resp *resp;

	resp = (struct icp_qat_fw_comn_resp *)((uint8_t *)rx_queue->base_addr +
			dp_ctx->head);

	if (unlikely(*(uint32_t *)resp == ADF_RING_EMPTY_SIG))
		return NULL;

	dp_ctx->head = (dp_ctx->head + rx_queue->msg_size) &
			rx_queue->modulo_mask;
	dp_ctx->cached_dequeue++;

	*op_status = QAT_SYM_DP_IS_RESP_SUCCESS(resp) ?
			RTE_CRYPTO_OP_STATUS_SUCCESS :
			RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
	*dequeue_status = 0;
	return (void *)(uintptr_t)resp->opaque_data;
}
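/*
 * enqueue_done callback: ring the TX doorbell for the n requests cached
 * since the last kick. n must match the cached enqueue count.
 */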
static __rte_always_inline int
qat_sym_dp_kick_tail(void *qp_data, uint8_t *drv_ctx, uint32_t n)
{
	struct qat_qp *qp = qp_data;
	struct qat_queue *tx_queue = &qp->tx_q;
	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;

	if (unlikely(dp_ctx->cached_enqueue != n))
		return -1;

	qp->enqueued += n;
	qp->stats.enqueued_count += n;

	tx_queue->tail = dp_ctx->tail;

	WRITE_CSR_RING_TAIL(qp->mmap_bar_addr,
		tx_queue->hw_bundle_number,
		tx_queue->hw_queue_number, tx_queue->tail);
	tx_queue->csr_tail = tx_queue->tail;
	dp_ctx->cached_enqueue = 0;

	return 0;
}
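/*
 * dequeue_done callback: return n processed descriptors to the RX ring
 * (marking them empty) and update the CSR head once enough responses have
 * accumulated.
 */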
static __rte_always_inline int
qat_sym_dp_update_head(void *qp_data, uint8_t *drv_ctx, uint32_t n)
{
	struct qat_qp *qp = qp_data;
	struct qat_queue *rx_queue = &qp->rx_q;
	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;

	if (unlikely(dp_ctx->cached_dequeue != n))
		return -1;

	rx_queue->head = dp_ctx->head;
	rx_queue->nb_processed_responses += n;
	qp->dequeued += n;
	qp->stats.dequeued_count += n;
	if (rx_queue->nb_processed_responses > QAT_CSR_HEAD_WRITE_THRESH) {
		uint32_t old_head, new_head;
		uint32_t max_head;

		old_head = rx_queue->csr_head;
		new_head = rx_queue->head;
		max_head = qp->nb_descriptors * rx_queue->msg_size;

		/* write out free descriptors */
		void *cur_desc = (uint8_t *)rx_queue->base_addr + old_head;

		if (new_head < old_head) {
			memset(cur_desc, ADF_RING_EMPTY_SIG_BYTE,
					max_head - old_head);
			memset(rx_queue->base_addr, ADF_RING_EMPTY_SIG_BYTE,
					new_head);
		} else {
			memset(cur_desc, ADF_RING_EMPTY_SIG_BYTE, new_head -
					old_head);
		}
		rx_queue->nb_processed_responses = 0;
		rx_queue->csr_head = new_head;

		/* write current head to CSR */
		WRITE_CSR_RING_HEAD(qp->mmap_bar_addr,
			rx_queue->hw_bundle_number, rx_queue->hw_queue_number,
			new_head);
	}

	dp_ctx->cached_dequeue = 0;
	return 0;
}
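/*
 * Populate the rte_crypto_raw_dp_ctx callbacks for this queue pair and
 * session. Only session-based operation is supported. A rough outline of
 * the intended call flow from the application side (through the cryptodev
 * raw data-path API, e.g. rte_cryptodev_configure_raw_dp_ctx()) is:
 *
 *   configure ctx -> enqueue/enqueue_burst ... -> enqueue_done(n)
 *                 -> dequeue/dequeue_burst ... -> dequeue_done(n)
 *
 * where enqueue_done/dequeue_done commit the cached tail/head to hardware.
 */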
int
qat_sym_configure_dp_ctx(struct rte_cryptodev *dev, uint16_t qp_id,
	struct rte_crypto_raw_dp_ctx *raw_dp_ctx,
	enum rte_crypto_op_sess_type sess_type,
	union rte_cryptodev_session_ctx session_ctx, uint8_t is_update)
{
	struct qat_qp *qp;
	struct qat_sym_session *ctx;
	struct qat_sym_dp_ctx *dp_ctx;

	qp = dev->data->queue_pairs[qp_id];
	dp_ctx = (struct qat_sym_dp_ctx *)raw_dp_ctx->drv_ctx_data;

	if (!is_update) {
		memset(raw_dp_ctx, 0, sizeof(*raw_dp_ctx) +
				sizeof(struct qat_sym_dp_ctx));
		raw_dp_ctx->qp_data = dev->data->queue_pairs[qp_id];
		dp_ctx->tail = qp->tx_q.tail;
		dp_ctx->head = qp->rx_q.head;
		dp_ctx->cached_enqueue = dp_ctx->cached_dequeue = 0;
	}

	if (sess_type != RTE_CRYPTO_OP_WITH_SESSION)
		return -EINVAL;

	ctx = (struct qat_sym_session *)get_sym_session_private_data(
			session_ctx.crypto_sess, qat_sym_driver_id);

	dp_ctx->session = ctx;

	raw_dp_ctx->enqueue_done = qat_sym_dp_kick_tail;
	raw_dp_ctx->dequeue_burst = qat_sym_dp_dequeue_burst;
	raw_dp_ctx->dequeue = qat_sym_dp_dequeue;
	raw_dp_ctx->dequeue_done = qat_sym_dp_update_head;

	if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER ||
			ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) {
		/* AES-GCM or AES-CCM */
		if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
			ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64 ||
			(ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_AES128
			&& ctx->qat_mode == ICP_QAT_HW_CIPHER_CTR_MODE
			&& ctx->qat_hash_alg ==
					ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC)) {
			raw_dp_ctx->enqueue_burst =
					qat_sym_dp_enqueue_aead_jobs;
			raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_aead;
		} else {
			raw_dp_ctx->enqueue_burst =
					qat_sym_dp_enqueue_chain_jobs;
			raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_chain;
		}
	} else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH) {
		raw_dp_ctx->enqueue_burst = qat_sym_dp_enqueue_auth_jobs;
		raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_auth;
	} else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) {
		raw_dp_ctx->enqueue_burst = qat_sym_dp_enqueue_cipher_jobs;
		raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_cipher;
	} else
		return -1;

	return 0;
}

int
qat_sym_get_dp_ctx_size(__rte_unused struct rte_cryptodev *dev)
{
	return sizeof(struct qat_sym_dp_ctx);
}