/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2022 Intel Corporation
 */

#include <cryptodev_pmd.h>

#include "adf_transport_access_macros.h"
#include "icp_qat_fw.h"
#include "icp_qat_fw_la.h"

#include "qat_sym.h"
#include "qat_sym_session.h"
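
/*
 * Set the request's source/destination buffer fields from the caller's data
 * vectors: a single vector is passed as a flat buffer, multiple vectors are
 * written into the per-request cookie SGL. Returns the total data length,
 * or a negative value on error.
 */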
static __rte_always_inline int32_t
qat_sym_dp_parse_data_vec(struct qat_qp *qp, struct icp_qat_fw_la_bulk_req *req,
	struct rte_crypto_vec *data, uint16_t n_data_vecs)
{
	struct qat_queue *tx_queue;
	struct qat_sym_op_cookie *cookie;
	struct qat_sgl *list;
	uint32_t i;
	uint32_t total_len = 0;

	if (likely(n_data_vecs == 1)) {
		req->comn_mid.src_data_addr = req->comn_mid.dest_data_addr =
			data[0].iova;
		req->comn_mid.src_length = req->comn_mid.dst_length =
			data[0].len;
		return data[0].len;
	}

	if (n_data_vecs == 0 || n_data_vecs > QAT_SYM_SGL_MAX_NUMBER)
		return -1;

	tx_queue = &qp->tx_q;

	ICP_QAT_FW_COMN_PTR_TYPE_SET(req->comn_hdr.comn_req_flags,
			QAT_COMN_PTR_TYPE_SGL);
	cookie = qp->op_cookies[tx_queue->tail >> tx_queue->trailz];
	list = (struct qat_sgl *)&cookie->qat_sgl_src;

	for (i = 0; i < n_data_vecs; i++) {
		list->buffers[i].len = data[i].len;
		list->buffers[i].resrvd = 0;
		list->buffers[i].addr = data[i].iova;
		if (total_len + data[i].len > UINT32_MAX) {
			QAT_DP_LOG(ERR, "Message too long");
			return -1;
		}
		total_len += data[i].len;
	}

	list->num_bufs = i;
	req->comn_mid.src_data_addr = req->comn_mid.dest_data_addr =
			cookie->qat_sgl_src_phys_addr;
	req->comn_mid.src_length = req->comn_mid.dst_length = 0;

	return total_len;
}
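
/*
 * Set the cipher IV: copy it into the request template when it fits the
 * IV array, otherwise pass it by IOVA pointer and flag 64-bit IV pointer
 * mode in the request header.
 */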
static __rte_always_inline void
set_cipher_iv(struct icp_qat_fw_la_cipher_req_params *cipher_param,
	struct rte_crypto_va_iova_ptr *iv_ptr, uint32_t iv_len,
	struct icp_qat_fw_la_bulk_req *qat_req)
{
	/* copy IV into request if it fits */
	if (iv_len <= sizeof(cipher_param->u.cipher_IV_array))
		rte_memcpy(cipher_param->u.cipher_IV_array, iv_ptr->va,
				iv_len);
	else {
		ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(
				qat_req->comn_hdr.serv_specif_flags,
				ICP_QAT_FW_CIPH_IV_64BIT_PTR);
		cipher_param->u.s.cipher_IV_ptr = iv_ptr->iova;
	}
}
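
/* True when the firmware response carries a successful crypto status. */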
#define QAT_SYM_DP_IS_RESP_SUCCESS(resp) \
	(ICP_QAT_FW_COMN_STATUS_FLAG_OK == \
	ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(resp->comn_hdr.comn_status))
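
/* Fill 'n' entries of the per-operation status array with 'status'. */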
static __rte_always_inline void
qat_sym_dp_fill_vec_status(int32_t *sta, int status, uint32_t n)
{
	uint32_t i;

	for (i = 0; i < n; i++)
		sta[i] = status;
}
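
/*
 * Maximum number of requests that may still be enqueued on queue pair 'q'
 * without exceeding its in-flight limit, given 'c' requests already cached
 * locally and 'n' requested.
 */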
#define QAT_SYM_DP_GET_MAX_ENQ(q, c, n) \
	RTE_MIN((q->max_inflights - q->enqueued + q->dequeued - c), n)
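
/* Fill the cipher-only request parameters (IV, cipher offset and length). */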
static __rte_always_inline void
enqueue_one_cipher_job(struct qat_sym_session *ctx,
	struct icp_qat_fw_la_bulk_req *req,
	struct rte_crypto_va_iova_ptr *iv,
	union rte_crypto_sym_ofs ofs, uint32_t data_len)
{
	struct icp_qat_fw_la_cipher_req_params *cipher_param;

	cipher_param = (void *)&req->serv_specif_rqpars;

	/* cipher IV */
	set_cipher_iv(cipher_param, iv, ctx->cipher_iv.length, req);
	cipher_param->cipher_offset = ofs.ofs.cipher.head;
	cipher_param->cipher_length = data_len - ofs.ofs.cipher.head -
			ofs.ofs.cipher.tail;
}
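
/*
 * Raw data-path enqueue of one cipher-only operation. The request is built
 * in place on the TX ring but is not submitted to hardware until the
 * enqueue_done callback (qat_sym_dp_kick_tail) is invoked.
 */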
static __rte_always_inline int
qat_sym_dp_enqueue_single_cipher(void *qp_data, uint8_t *drv_ctx,
	struct rte_crypto_vec *data, uint16_t n_data_vecs,
	union rte_crypto_sym_ofs ofs,
	struct rte_crypto_va_iova_ptr *iv,
	struct rte_crypto_va_iova_ptr *digest __rte_unused,
	struct rte_crypto_va_iova_ptr *aad __rte_unused,
	void *user_data)
{
	struct qat_qp *qp = qp_data;
	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
	struct qat_queue *tx_queue = &qp->tx_q;
	struct qat_sym_session *ctx = dp_ctx->session;
	struct icp_qat_fw_la_bulk_req *req;
	int32_t data_len;
	uint32_t tail = dp_ctx->tail;

	req = (struct icp_qat_fw_la_bulk_req *)(
		(uint8_t *)tx_queue->base_addr + tail);
	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
	data_len = qat_sym_dp_parse_data_vec(qp, req, data, n_data_vecs);
	if (unlikely(data_len < 0))
		return -1;
	req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data;

	enqueue_one_cipher_job(ctx, req, iv, ofs, (uint32_t)data_len);

	dp_ctx->tail = tail;
	dp_ctx->cached_enqueue++;

	return 0;
}
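
/* Raw data-path burst enqueue of cipher-only operations; returns the number
 * of operations actually placed on the TX ring.
 */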
static __rte_always_inline uint32_t
qat_sym_dp_enqueue_cipher_jobs(void *qp_data, uint8_t *drv_ctx,
	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
	void *user_data[], int *status)
{
	struct qat_qp *qp = qp_data;
	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
	struct qat_queue *tx_queue = &qp->tx_q;
	struct qat_sym_session *ctx = dp_ctx->session;
	uint32_t i, n;
	uint32_t tail;
	struct icp_qat_fw_la_bulk_req *req;
	int32_t data_len;

	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
	if (unlikely(n == 0)) {
		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
		*status = 0;
		return 0;
	}

	tail = dp_ctx->tail;

	for (i = 0; i < n; i++) {
		req = (struct icp_qat_fw_la_bulk_req *)(
			(uint8_t *)tx_queue->base_addr + tail);
		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));

		data_len = qat_sym_dp_parse_data_vec(qp, req,
			vec->src_sgl[i].vec,
			vec->src_sgl[i].num);
		if (unlikely(data_len < 0))
			break;
		req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data[i];
		enqueue_one_cipher_job(ctx, req, &vec->iv[i], ofs,
			(uint32_t)data_len);
		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
	}

	if (unlikely(i < n))
		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);

	dp_ctx->tail = tail;
	dp_ctx->cached_enqueue += i;
	*status = 0;
	return i;
}
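
/* Fill the auth-only request parameters (auth offset/length, digest address
 * and, depending on the hash algorithm, the auth IV).
 */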
static __rte_always_inline void
enqueue_one_auth_job(struct qat_sym_session *ctx,
	struct icp_qat_fw_la_bulk_req *req,
	struct rte_crypto_va_iova_ptr *digest,
	struct rte_crypto_va_iova_ptr *auth_iv,
	union rte_crypto_sym_ofs ofs, uint32_t data_len)
{
	struct icp_qat_fw_la_cipher_req_params *cipher_param;
	struct icp_qat_fw_la_auth_req_params *auth_param;

	cipher_param = (void *)&req->serv_specif_rqpars;
	auth_param = (void *)((uint8_t *)cipher_param +
			ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);

	auth_param->auth_off = ofs.ofs.auth.head;
	auth_param->auth_len = data_len - ofs.ofs.auth.head -
			ofs.ofs.auth.tail;
	auth_param->auth_res_addr = digest->iova;

	switch (ctx->qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
	case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
	case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3:
		auth_param->u1.aad_adr = auth_iv->iova;
		break;
	case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
	case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
		ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
			req->comn_hdr.serv_specif_flags,
			ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
		rte_memcpy(cipher_param->u.cipher_IV_array, auth_iv->va,
				ctx->auth_iv.length);
		break;
	default:
		break;
	}
}
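
/* Raw data-path enqueue of one auth-only operation. */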
static __rte_always_inline int
qat_sym_dp_enqueue_single_auth(void *qp_data, uint8_t *drv_ctx,
	struct rte_crypto_vec *data, uint16_t n_data_vecs,
	union rte_crypto_sym_ofs ofs,
	struct rte_crypto_va_iova_ptr *iv __rte_unused,
	struct rte_crypto_va_iova_ptr *digest,
	struct rte_crypto_va_iova_ptr *auth_iv,
	void *user_data)
{
	struct qat_qp *qp = qp_data;
	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
	struct qat_queue *tx_queue = &qp->tx_q;
	struct qat_sym_session *ctx = dp_ctx->session;
	struct icp_qat_fw_la_bulk_req *req;
	int32_t data_len;
	uint32_t tail = dp_ctx->tail;

	req = (struct icp_qat_fw_la_bulk_req *)(
		(uint8_t *)tx_queue->base_addr + tail);
	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
	data_len = qat_sym_dp_parse_data_vec(qp, req, data, n_data_vecs);
	if (unlikely(data_len < 0))
		return -1;
	req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data;

	enqueue_one_auth_job(ctx, req, digest, auth_iv, ofs,
			(uint32_t)data_len);

	dp_ctx->tail = tail;
	dp_ctx->cached_enqueue++;

	return 0;
}
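
/* Raw data-path burst enqueue of auth-only operations. */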
static __rte_always_inline uint32_t
qat_sym_dp_enqueue_auth_jobs(void *qp_data, uint8_t *drv_ctx,
	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
	void *user_data[], int *status)
{
	struct qat_qp *qp = qp_data;
	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
	struct qat_queue *tx_queue = &qp->tx_q;
	struct qat_sym_session *ctx = dp_ctx->session;
	uint32_t i, n;
	uint32_t tail;
	struct icp_qat_fw_la_bulk_req *req;
	int32_t data_len;

	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
	if (unlikely(n == 0)) {
		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
		*status = 0;
		return 0;
	}

	tail = dp_ctx->tail;

	for (i = 0; i < n; i++) {
		req = (struct icp_qat_fw_la_bulk_req *)(
			(uint8_t *)tx_queue->base_addr + tail);
		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));

		data_len = qat_sym_dp_parse_data_vec(qp, req,
			vec->src_sgl[i].vec,
			vec->src_sgl[i].num);
		if (unlikely(data_len < 0))
			break;
		req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data[i];
		enqueue_one_auth_job(ctx, req, &vec->digest[i],
			&vec->auth_iv[i], ofs, (uint32_t)data_len);
		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
	}

	if (unlikely(i < n))
		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);

	dp_ctx->tail = tail;
	dp_ctx->cached_enqueue += i;
	*status = 0;
	return i;
}
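
/*
 * Fill the request parameters for a cipher+auth chain operation. Also
 * detects the digest-encrypted (digest appended to the ciphertext) layout:
 * when found, the buffer length is extended to cover the digest and the
 * digest-in-buffer flag is set. Returns 0 on success, -1 on invalid input.
 */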
static __rte_always_inline int
enqueue_one_chain_job(struct qat_sym_session *ctx,
	struct icp_qat_fw_la_bulk_req *req,
	struct rte_crypto_vec *data,
	uint16_t n_data_vecs,
	struct rte_crypto_va_iova_ptr *cipher_iv,
	struct rte_crypto_va_iova_ptr *digest,
	struct rte_crypto_va_iova_ptr *auth_iv,
	union rte_crypto_sym_ofs ofs, uint32_t data_len)
{
	struct icp_qat_fw_la_cipher_req_params *cipher_param;
	struct icp_qat_fw_la_auth_req_params *auth_param;
	rte_iova_t auth_iova_end;
	int32_t cipher_len, auth_len;

	cipher_param = (void *)&req->serv_specif_rqpars;
	auth_param = (void *)((uint8_t *)cipher_param +
			ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);

	cipher_len = data_len - ofs.ofs.cipher.head -
			ofs.ofs.cipher.tail;
	auth_len = data_len - ofs.ofs.auth.head - ofs.ofs.auth.tail;

	if (unlikely(cipher_len < 0 || auth_len < 0))
		return -1;

	cipher_param->cipher_offset = ofs.ofs.cipher.head;
	cipher_param->cipher_length = cipher_len;
	set_cipher_iv(cipher_param, cipher_iv, ctx->cipher_iv.length, req);

	auth_param->auth_off = ofs.ofs.auth.head;
	auth_param->auth_len = auth_len;
	auth_param->auth_res_addr = digest->iova;

	switch (ctx->qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
	case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
	case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3:
		auth_param->u1.aad_adr = auth_iv->iova;
		break;
	case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
	case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
		break;
	default:
		break;
	}

	if (unlikely(n_data_vecs > 1)) {
		int auth_end_get = 0, i = n_data_vecs - 1;
		struct rte_crypto_vec *cvec = &data[0];
		uint32_t len;

		len = data_len - ofs.ofs.auth.tail;

		while (i >= 0 && len > 0) {
			if (cvec->len >= len) {
				auth_iova_end = cvec->iova + len;
				len = 0;
				auth_end_get = 1;
				break;
			}

			len -= cvec->len;
			i--;
			cvec++;
		}

		if (unlikely(auth_end_get == 0))
			return -1;
	} else
		auth_iova_end = data[0].iova + auth_param->auth_off +
			auth_param->auth_len;

	/* Then check if digest-encrypted conditions are met */
	if ((auth_param->auth_off + auth_param->auth_len <
		cipher_param->cipher_offset +
		cipher_param->cipher_length) &&
		(digest->iova == auth_iova_end)) {
		/* Handle partial digest encryption */
		if (cipher_param->cipher_offset +
				cipher_param->cipher_length <
				auth_param->auth_off +
				auth_param->auth_len +
				ctx->digest_length)
			req->comn_mid.dst_length =
				req->comn_mid.src_length =
				auth_param->auth_off +
				auth_param->auth_len +
				ctx->digest_length;
		struct icp_qat_fw_comn_req_hdr *header =
				&req->comn_hdr;
		ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(
			header->serv_specif_flags,
			ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
	}

	return 0;
}
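
/* Raw data-path enqueue of one cipher+auth chain operation. */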
static __rte_always_inline int
qat_sym_dp_enqueue_single_chain(void *qp_data, uint8_t *drv_ctx,
	struct rte_crypto_vec *data, uint16_t n_data_vecs,
	union rte_crypto_sym_ofs ofs,
	struct rte_crypto_va_iova_ptr *cipher_iv,
	struct rte_crypto_va_iova_ptr *digest,
	struct rte_crypto_va_iova_ptr *auth_iv,
	void *user_data)
{
	struct qat_qp *qp = qp_data;
	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
	struct qat_queue *tx_queue = &qp->tx_q;
	struct qat_sym_session *ctx = dp_ctx->session;
	struct icp_qat_fw_la_bulk_req *req;
	int32_t data_len;
	uint32_t tail = dp_ctx->tail;

	req = (struct icp_qat_fw_la_bulk_req *)(
		(uint8_t *)tx_queue->base_addr + tail);
	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
	data_len = qat_sym_dp_parse_data_vec(qp, req, data, n_data_vecs);
	if (unlikely(data_len < 0))
		return -1;
	req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data;

	if (unlikely(enqueue_one_chain_job(ctx, req, data, n_data_vecs,
			cipher_iv, digest, auth_iv, ofs, (uint32_t)data_len)))
		return -1;

	dp_ctx->tail = tail;
	dp_ctx->cached_enqueue++;

	return 0;
}
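
/* Raw data-path burst enqueue of cipher+auth chain operations. */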
static __rte_always_inline uint32_t
qat_sym_dp_enqueue_chain_jobs(void *qp_data, uint8_t *drv_ctx,
	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
	void *user_data[], int *status)
{
	struct qat_qp *qp = qp_data;
	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
	struct qat_queue *tx_queue = &qp->tx_q;
	struct qat_sym_session *ctx = dp_ctx->session;
	uint32_t i, n;
	uint32_t tail;
	struct icp_qat_fw_la_bulk_req *req;
	int32_t data_len;

	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
	if (unlikely(n == 0)) {
		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
		*status = 0;
		return 0;
	}

	tail = dp_ctx->tail;

	for (i = 0; i < n; i++) {
		req = (struct icp_qat_fw_la_bulk_req *)(
			(uint8_t *)tx_queue->base_addr + tail);
		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));

		data_len = qat_sym_dp_parse_data_vec(qp, req,
			vec->src_sgl[i].vec,
			vec->src_sgl[i].num);
		if (unlikely(data_len < 0))
			break;
		req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data[i];
		if (unlikely(enqueue_one_chain_job(ctx, req,
			vec->src_sgl[i].vec, vec->src_sgl[i].num,
			&vec->iv[i], &vec->digest[i],
			&vec->auth_iv[i], ofs, (uint32_t)data_len)))
			break;

		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
	}

	if (unlikely(i < n))
		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);

	dp_ctx->tail = tail;
	dp_ctx->cached_enqueue += i;
	*status = 0;
	return i;
}
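
/*
 * Fill the request parameters for an AEAD operation (GCM, CCM or
 * ChaCha20-Poly1305). On single-pass capable devices the job is built as a
 * cipher request with SPC AAD/digest pointers; otherwise the AAD (and, for
 * CCM, the B0 block) is prepared according to the hash algorithm.
 */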
static __rte_always_inline void
enqueue_one_aead_job(struct qat_sym_session *ctx,
	struct icp_qat_fw_la_bulk_req *req,
	struct rte_crypto_va_iova_ptr *iv,
	struct rte_crypto_va_iova_ptr *digest,
	struct rte_crypto_va_iova_ptr *aad,
	union rte_crypto_sym_ofs ofs, uint32_t data_len)
{
	struct icp_qat_fw_la_cipher_req_params *cipher_param =
		(void *)&req->serv_specif_rqpars;
	struct icp_qat_fw_la_auth_req_params *auth_param =
		(void *)((uint8_t *)&req->serv_specif_rqpars +
		ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
	uint8_t *aad_data;
	uint8_t aad_ccm_real_len;
	uint8_t aad_len_field_sz;
	uint32_t msg_len_be;
	rte_iova_t aad_iova = 0;
	uint8_t q;

	/* CPM 1.7 uses single pass to treat AEAD as cipher operation */
	if (ctx->is_single_pass) {
		enqueue_one_cipher_job(ctx, req, iv, ofs, data_len);

		if (ctx->is_ucs) {
			/* QAT GEN4 uses single pass to treat AEAD as cipher
			 * operation */
			struct icp_qat_fw_la_cipher_20_req_params *cipher_param_20 =
				(void *)&req->serv_specif_rqpars;
			cipher_param_20->spc_aad_addr = aad->iova;
			cipher_param_20->spc_auth_res_addr = digest->iova;
		} else {
			cipher_param->spc_aad_addr = aad->iova;
			cipher_param->spc_auth_res_addr = digest->iova;
		}

		return;
	}

	switch (ctx->qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
	case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
		ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
			req->comn_hdr.serv_specif_flags,
			ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
		rte_memcpy(cipher_param->u.cipher_IV_array, iv->va,
				ctx->cipher_iv.length);
		aad_iova = aad->iova;
		break;
	case ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC:
		aad_data = aad->va;
		aad_iova = aad->iova;
		aad_ccm_real_len = 0;
		aad_len_field_sz = 0;
		msg_len_be = rte_bswap32((uint32_t)data_len -
				ofs.ofs.cipher.head);

		if (ctx->aad_len > ICP_QAT_HW_CCM_AAD_DATA_OFFSET) {
			aad_len_field_sz = ICP_QAT_HW_CCM_AAD_LEN_INFO;
			aad_ccm_real_len = ctx->aad_len -
				ICP_QAT_HW_CCM_AAD_B0_LEN -
				ICP_QAT_HW_CCM_AAD_LEN_INFO;
		} else {
			aad_data = iv->va;
			aad_iova = iv->iova;
		}

		q = ICP_QAT_HW_CCM_NQ_CONST - ctx->cipher_iv.length;
		aad_data[0] = ICP_QAT_HW_CCM_BUILD_B0_FLAGS(
			aad_len_field_sz, ctx->digest_length, q);
		if (q > ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE) {
			memcpy(aad_data + ctx->cipher_iv.length +
				ICP_QAT_HW_CCM_NONCE_OFFSET + (q -
				ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE),
				(uint8_t *)&msg_len_be,
				ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE);
		} else {
			memcpy(aad_data + ctx->cipher_iv.length +
				ICP_QAT_HW_CCM_NONCE_OFFSET,
				(uint8_t *)&msg_len_be +
				(ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE
				- q), q);
		}

		if (aad_len_field_sz > 0) {
			*(uint16_t *)&aad_data[ICP_QAT_HW_CCM_AAD_B0_LEN] =
				rte_bswap16(aad_ccm_real_len);

			if ((aad_ccm_real_len + aad_len_field_sz)
				% ICP_QAT_HW_CCM_AAD_B0_LEN) {
				uint8_t pad_len = 0;
				uint8_t pad_idx = 0;

				pad_len = ICP_QAT_HW_CCM_AAD_B0_LEN -
					((aad_ccm_real_len +
					aad_len_field_sz) %
					ICP_QAT_HW_CCM_AAD_B0_LEN);
				pad_idx = ICP_QAT_HW_CCM_AAD_B0_LEN +
					aad_ccm_real_len +
					aad_len_field_sz;
				memset(&aad_data[pad_idx], 0, pad_len);
			}
		}

		rte_memcpy(((uint8_t *)cipher_param->u.cipher_IV_array)
			+ ICP_QAT_HW_CCM_NONCE_OFFSET,
			(uint8_t *)iv->va +
			ICP_QAT_HW_CCM_NONCE_OFFSET, ctx->cipher_iv.length);
		*(uint8_t *)&cipher_param->u.cipher_IV_array[0] =
			q - ICP_QAT_HW_CCM_NONCE_OFFSET;

		rte_memcpy((uint8_t *)aad->va +
				ICP_QAT_HW_CCM_NONCE_OFFSET,
			(uint8_t *)iv->va + ICP_QAT_HW_CCM_NONCE_OFFSET,
			ctx->cipher_iv.length);
		break;
	default:
		break;
	}

	cipher_param->cipher_offset = ofs.ofs.cipher.head;
	cipher_param->cipher_length = data_len - ofs.ofs.cipher.head -
			ofs.ofs.cipher.tail;
	auth_param->auth_off = ofs.ofs.cipher.head;
	auth_param->auth_len = cipher_param->cipher_length;
	auth_param->auth_res_addr = digest->iova;
	auth_param->u1.aad_adr = aad_iova;
}
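
/* Raw data-path enqueue of one AEAD operation. */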
static __rte_always_inline int
qat_sym_dp_enqueue_single_aead(void *qp_data, uint8_t *drv_ctx,
	struct rte_crypto_vec *data, uint16_t n_data_vecs,
	union rte_crypto_sym_ofs ofs,
	struct rte_crypto_va_iova_ptr *iv,
	struct rte_crypto_va_iova_ptr *digest,
	struct rte_crypto_va_iova_ptr *aad,
	void *user_data)
{
	struct qat_qp *qp = qp_data;
	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
	struct qat_queue *tx_queue = &qp->tx_q;
	struct qat_sym_session *ctx = dp_ctx->session;
	struct icp_qat_fw_la_bulk_req *req;
	int32_t data_len;
	uint32_t tail = dp_ctx->tail;

	req = (struct icp_qat_fw_la_bulk_req *)(
		(uint8_t *)tx_queue->base_addr + tail);
	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
	data_len = qat_sym_dp_parse_data_vec(qp, req, data, n_data_vecs);
	if (unlikely(data_len < 0))
		return -1;
	req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data;

	enqueue_one_aead_job(ctx, req, iv, digest, aad, ofs,
		(uint32_t)data_len);

	dp_ctx->tail = tail;
	dp_ctx->cached_enqueue++;

	return 0;
}
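
/* Raw data-path burst enqueue of AEAD operations. */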
static __rte_always_inline uint32_t
qat_sym_dp_enqueue_aead_jobs(void *qp_data, uint8_t *drv_ctx,
	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
	void *user_data[], int *status)
{
	struct qat_qp *qp = qp_data;
	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
	struct qat_queue *tx_queue = &qp->tx_q;
	struct qat_sym_session *ctx = dp_ctx->session;
	uint32_t i, n;
	uint32_t tail;
	struct icp_qat_fw_la_bulk_req *req;
	int32_t data_len;

	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
	if (unlikely(n == 0)) {
		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
		*status = 0;
		return 0;
	}

	tail = dp_ctx->tail;

	for (i = 0; i < n; i++) {
		req = (struct icp_qat_fw_la_bulk_req *)(
			(uint8_t *)tx_queue->base_addr + tail);
		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));

		data_len = qat_sym_dp_parse_data_vec(qp, req,
			vec->src_sgl[i].vec,
			vec->src_sgl[i].num);
		if (unlikely(data_len < 0))
			break;
		req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data[i];
		enqueue_one_aead_job(ctx, req, &vec->iv[i], &vec->digest[i],
			&vec->aad[i], ofs, (uint32_t)data_len);
		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
	}

	if (unlikely(i < n))
		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);

	dp_ctx->tail = tail;
	dp_ctx->cached_enqueue += i;
	*status = 0;
	return i;
}
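
/*
 * Raw data-path burst dequeue: walk the RX ring from the cached head,
 * report per-operation status through post_dequeue() and return the number
 * of responses consumed. The ring head is only written back to hardware by
 * the dequeue_done callback (qat_sym_dp_update_head).
 */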
static __rte_always_inline uint32_t
qat_sym_dp_dequeue_burst(void *qp_data, uint8_t *drv_ctx,
	rte_cryptodev_raw_get_dequeue_count_t get_dequeue_count,
	uint32_t max_nb_to_dequeue,
	rte_cryptodev_raw_post_dequeue_t post_dequeue,
	void **out_user_data, uint8_t is_user_data_array,
	uint32_t *n_success_jobs, int *return_status)
{
	struct qat_qp *qp = qp_data;
	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
	struct qat_queue *rx_queue = &qp->rx_q;
	struct icp_qat_fw_comn_resp *resp;
	void *resp_opaque;
	uint32_t i, n, inflight;
	uint32_t head;
	uint8_t status;

	*n_success_jobs = 0;
	*return_status = 0;
	head = dp_ctx->head;

	inflight = qp->enqueued - qp->dequeued;
	if (unlikely(inflight == 0))
		return 0;

	resp = (struct icp_qat_fw_comn_resp *)((uint8_t *)rx_queue->base_addr +
			head);
	/* no operation ready */
	if (unlikely(*(uint32_t *)resp == ADF_RING_EMPTY_SIG))
		return 0;

	resp_opaque = (void *)(uintptr_t)resp->opaque_data;
	/* get the dequeue count */
	if (get_dequeue_count) {
		n = get_dequeue_count(resp_opaque);
		if (unlikely(n == 0))
			return 0;
	} else {
		if (unlikely(max_nb_to_dequeue == 0))
			return 0;
		n = max_nb_to_dequeue;
	}

	out_user_data[0] = resp_opaque;
	status = QAT_SYM_DP_IS_RESP_SUCCESS(resp);
	post_dequeue(resp_opaque, 0, status);
	*n_success_jobs += status;

	head = (head + rx_queue->msg_size) & rx_queue->modulo_mask;

	/* we already finished dequeue when n == 1 */
	if (unlikely(n == 1)) {
		i = 1;
		goto end_deq;
	}

	if (is_user_data_array) {
		for (i = 1; i < n; i++) {
			resp = (struct icp_qat_fw_comn_resp *)(
				(uint8_t *)rx_queue->base_addr + head);
			if (unlikely(*(uint32_t *)resp ==
					ADF_RING_EMPTY_SIG))
				goto end_deq;
			out_user_data[i] = (void *)(uintptr_t)resp->opaque_data;
			status = QAT_SYM_DP_IS_RESP_SUCCESS(resp);
			*n_success_jobs += status;
			post_dequeue(out_user_data[i], i, status);
			head = (head + rx_queue->msg_size) &
				rx_queue->modulo_mask;
		}

		goto end_deq;
	}

	/* opaque is not array */
	for (i = 1; i < n; i++) {
		resp = (struct icp_qat_fw_comn_resp *)(
			(uint8_t *)rx_queue->base_addr + head);
		status = QAT_SYM_DP_IS_RESP_SUCCESS(resp);
		if (unlikely(*(uint32_t *)resp == ADF_RING_EMPTY_SIG))
			goto end_deq;
		head = (head + rx_queue->msg_size) &
			rx_queue->modulo_mask;
		post_dequeue(resp_opaque, i, status);
		*n_success_jobs += status;
	}

end_deq:
	dp_ctx->head = head;
	dp_ctx->cached_dequeue += i;
	return i;
}
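
/* Raw data-path dequeue of a single response; returns its user data pointer
 * or NULL when no response is ready.
 */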
static __rte_always_inline void *
qat_sym_dp_dequeue(void *qp_data, uint8_t *drv_ctx, int *dequeue_status,
	enum rte_crypto_op_status *op_status)
{
	struct qat_qp *qp = qp_data;
	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
	struct qat_queue *rx_queue = &qp->rx_q;
	register struct icp_qat_fw_comn_resp *resp;

	resp = (struct icp_qat_fw_comn_resp *)((uint8_t *)rx_queue->base_addr +
			dp_ctx->head);

	if (unlikely(*(uint32_t *)resp == ADF_RING_EMPTY_SIG))
		return NULL;

	dp_ctx->head = (dp_ctx->head + rx_queue->msg_size) &
			rx_queue->modulo_mask;
	dp_ctx->cached_dequeue++;

	*op_status = QAT_SYM_DP_IS_RESP_SUCCESS(resp) ?
			RTE_CRYPTO_OP_STATUS_SUCCESS :
			RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
	*dequeue_status = 0;
	return (void *)(uintptr_t)resp->opaque_data;
}
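
/* enqueue_done callback: check the cached enqueue count and write the new
 * TX ring tail to the device CSR to submit the cached requests.
 */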
static __rte_always_inline int
qat_sym_dp_kick_tail(void *qp_data, uint8_t *drv_ctx, uint32_t n)
{
	struct qat_qp *qp = qp_data;
	struct qat_queue *tx_queue = &qp->tx_q;
	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;

	if (unlikely(dp_ctx->cached_enqueue != n))
		return -1;

	qp->enqueued += n;
	qp->stats.enqueued_count += n;

	tx_queue->tail = dp_ctx->tail;

	WRITE_CSR_RING_TAIL(qp->mmap_bar_addr,
		tx_queue->hw_bundle_number,
		tx_queue->hw_queue_number, tx_queue->tail);
	tx_queue->csr_tail = tx_queue->tail;
	dp_ctx->cached_enqueue = 0;

	return 0;
}
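
/* dequeue_done callback: check the cached dequeue count, advance the RX
 * ring head and, once a threshold of responses is processed, clear the
 * consumed descriptors and write the new head to the device CSR.
 */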
static __rte_always_inline int
qat_sym_dp_update_head(void *qp_data, uint8_t *drv_ctx, uint32_t n)
{
	struct qat_qp *qp = qp_data;
	struct qat_queue *rx_queue = &qp->rx_q;
	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;

	if (unlikely(dp_ctx->cached_dequeue != n))
		return -1;

	rx_queue->head = dp_ctx->head;
	rx_queue->nb_processed_responses += n;
	qp->dequeued += n;
	qp->stats.dequeued_count += n;
	if (rx_queue->nb_processed_responses > QAT_CSR_HEAD_WRITE_THRESH) {
		uint32_t old_head, new_head;
		uint32_t max_head;

		old_head = rx_queue->csr_head;
		new_head = rx_queue->head;
		max_head = qp->nb_descriptors * rx_queue->msg_size;

		/* write out free descriptors */
		void *cur_desc = (uint8_t *)rx_queue->base_addr + old_head;

		if (new_head < old_head) {
			memset(cur_desc, ADF_RING_EMPTY_SIG_BYTE,
					max_head - old_head);
			memset(rx_queue->base_addr, ADF_RING_EMPTY_SIG_BYTE,
					new_head);
		} else {
			memset(cur_desc, ADF_RING_EMPTY_SIG_BYTE, new_head -
					old_head);
		}
		rx_queue->nb_processed_responses = 0;
		rx_queue->csr_head = new_head;

		/* write current head to CSR */
		WRITE_CSR_RING_HEAD(qp->mmap_bar_addr,
			rx_queue->hw_bundle_number, rx_queue->hw_queue_number,
			new_head);
	}

	dp_ctx->cached_dequeue = 0;

	return 0;
}
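
/*
 * Populate a rte_crypto_raw_dp_ctx for the given queue pair: bind the
 * session and select the enqueue/dequeue handlers matching the session's
 * operation type (cipher-only, auth-only, chain or AEAD).
 */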
int
qat_sym_configure_dp_ctx(struct rte_cryptodev *dev, uint16_t qp_id,
	struct rte_crypto_raw_dp_ctx *raw_dp_ctx,
	enum rte_crypto_op_sess_type sess_type,
	union rte_cryptodev_session_ctx session_ctx, uint8_t is_update)
{
	struct qat_qp *qp;
	struct qat_sym_session *ctx;
	struct qat_sym_dp_ctx *dp_ctx;

	qp = dev->data->queue_pairs[qp_id];
	dp_ctx = (struct qat_sym_dp_ctx *)raw_dp_ctx->drv_ctx_data;

	if (!is_update) {
		memset(raw_dp_ctx, 0, sizeof(*raw_dp_ctx) +
				sizeof(struct qat_sym_dp_ctx));
		raw_dp_ctx->qp_data = dev->data->queue_pairs[qp_id];
		dp_ctx->tail = qp->tx_q.tail;
		dp_ctx->head = qp->rx_q.head;
		dp_ctx->cached_enqueue = dp_ctx->cached_dequeue = 0;
	}

	if (sess_type != RTE_CRYPTO_OP_WITH_SESSION)
		return -EINVAL;

	ctx = (struct qat_sym_session *)get_sym_session_private_data(
			session_ctx.crypto_sess, qat_sym_driver_id);

	dp_ctx->session = ctx;

	raw_dp_ctx->enqueue_done = qat_sym_dp_kick_tail;
	raw_dp_ctx->dequeue_burst = qat_sym_dp_dequeue_burst;
	raw_dp_ctx->dequeue = qat_sym_dp_dequeue;
	raw_dp_ctx->dequeue_done = qat_sym_dp_update_head;

	if ((ctx->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER ||
			ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) &&
			!ctx->is_gmac) {
		/* AES-GCM or AES-CCM */
		if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
			ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64 ||
			(ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_AES128
			&& ctx->qat_mode == ICP_QAT_HW_CIPHER_CTR_MODE
			&& ctx->qat_hash_alg ==
					ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC)) {
			raw_dp_ctx->enqueue_burst =
					qat_sym_dp_enqueue_aead_jobs;
			raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_aead;
		} else {
			raw_dp_ctx->enqueue_burst =
					qat_sym_dp_enqueue_chain_jobs;
			raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_chain;
		}
	} else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH || ctx->is_gmac) {
		raw_dp_ctx->enqueue_burst = qat_sym_dp_enqueue_auth_jobs;
		raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_auth;
	} else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) {
		if (ctx->qat_mode == ICP_QAT_HW_CIPHER_AEAD_MODE ||
			ctx->qat_cipher_alg ==
				ICP_QAT_HW_CIPHER_ALGO_CHACHA20_POLY1305) {
			raw_dp_ctx->enqueue_burst =
					qat_sym_dp_enqueue_aead_jobs;
			raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_aead;
		} else {
			raw_dp_ctx->enqueue_burst =
					qat_sym_dp_enqueue_cipher_jobs;
			raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_cipher;
		}
	} else
		return -1;

	return 0;
}
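
/* Size of the driver-private data appended to rte_crypto_raw_dp_ctx. */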
int
qat_sym_get_dp_ctx_size(__rte_unused struct rte_cryptodev *dev)
{
	return sizeof(struct qat_sym_dp_ctx);
}
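
/*
 * Application-side call flow for the raw data-path API implemented above
 * (a simplified sketch; session creation, vector setup and error handling
 * are omitted):
 *
 *	int sz = rte_cryptodev_get_raw_dp_ctx_size(dev_id);
 *	struct rte_crypto_raw_dp_ctx *ctx = rte_zmalloc(NULL, sz, 8);
 *
 *	rte_cryptodev_configure_raw_dp_ctx(dev_id, qp_id, ctx,
 *			RTE_CRYPTO_OP_WITH_SESSION, sess_ctx, 0);
 *
 *	n = rte_cryptodev_raw_enqueue_burst(ctx, &vec, ofs, user_data,
 *			&enq_status);
 *	rte_cryptodev_raw_enqueue_done(ctx, n);      (-> qat_sym_dp_kick_tail)
 *
 *	n = rte_cryptodev_raw_dequeue_burst(ctx, NULL, max_ops, post_cb,
 *			out_user_data, 1, &n_success, &deq_status);
 *	rte_cryptodev_raw_dequeue_done(ctx, n);      (-> qat_sym_dp_update_head)
 */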