/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2020 Intel Corporation
 */

#include <cryptodev_pmd.h>

#include "adf_transport_access_macros.h"
#include "icp_qat_fw.h"
#include "icp_qat_fw_la.h"

#include "qat_sym.h"
#include "qat_sym_pmd.h"
#include "qat_sym_session.h"
#include "qat_qp.h"

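/*
 * Per-queue-pair driver context for the raw datapath API. The cached
 * enqueue/dequeue counters track jobs submitted or collected since the
 * last doorbell, so tail/head CSR writes can be batched across a burst.
 */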
struct qat_sym_dp_ctx {
	struct qat_sym_session *session;
	uint32_t tail;
	uint32_t head;
	uint16_t cached_enqueue;
	uint16_t cached_dequeue;
};

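/*
 * Fill the request's source/destination descriptors from the data
 * vector: a single vector is passed as a flat buffer, multiple vectors
 * are converted into the op cookie's SGL. Returns the total data
 * length, or -1 on an invalid vector count or length overflow.
 */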
static __rte_always_inline int32_t
qat_sym_dp_parse_data_vec(struct qat_qp *qp, struct icp_qat_fw_la_bulk_req *req,
		struct rte_crypto_vec *data, uint16_t n_data_vecs)
{
	struct qat_queue *tx_queue;
	struct qat_sym_op_cookie *cookie;
	struct qat_sgl *list;
	uint32_t i;
	/* 64-bit accumulator so the UINT32_MAX overflow check works */
	uint64_t total_len;

	if (likely(n_data_vecs == 1)) {
		req->comn_mid.src_data_addr = req->comn_mid.dest_data_addr =
			data[0].iova;
		req->comn_mid.src_length = req->comn_mid.dst_length =
			data[0].len;
		return data[0].len;
	}

	if (n_data_vecs == 0 || n_data_vecs > QAT_SYM_SGL_MAX_NUMBER)
		return -1;

	total_len = 0;
	tx_queue = &qp->tx_q;

	ICP_QAT_FW_COMN_PTR_TYPE_SET(req->comn_hdr.comn_req_flags,
			QAT_COMN_PTR_TYPE_SGL);
	cookie = qp->op_cookies[tx_queue->tail >> tx_queue->trailz];
	list = (struct qat_sgl *)&cookie->qat_sgl_src;

	for (i = 0; i < n_data_vecs; i++) {
		list->buffers[i].len = data[i].len;
		list->buffers[i].resrvd = 0;
		list->buffers[i].addr = data[i].iova;
		if (total_len + data[i].len > UINT32_MAX) {
			QAT_DP_LOG(ERR, "Message too long");
			return -1;
		}
		total_len += data[i].len;
	}

	list->num_bufs = i;
	req->comn_mid.src_data_addr = req->comn_mid.dest_data_addr =
			cookie->qat_sgl_src_phys_addr;
	req->comn_mid.src_length = req->comn_mid.dst_length = 0;
	return total_len;
}

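/*
 * The IV is copied directly into the request when it fits the inline
 * cipher_IV_array field; otherwise only its IOVA is passed and the
 * 64-bit-pointer flag is set so the firmware fetches it by DMA.
 */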
static __rte_always_inline void
set_cipher_iv(struct icp_qat_fw_la_cipher_req_params *cipher_param,
		struct rte_crypto_va_iova_ptr *iv_ptr, uint32_t iv_len,
		struct icp_qat_fw_la_bulk_req *qat_req)
{
	/* copy IV into request if it fits */
	if (iv_len <= sizeof(cipher_param->u.cipher_IV_array))
		rte_memcpy(cipher_param->u.cipher_IV_array, iv_ptr->va,
				iv_len);
	else {
		ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(
				qat_req->comn_hdr.serv_specif_flags,
				ICP_QAT_FW_CIPH_IV_64BIT_PTR);
		cipher_param->u.s.cipher_IV_ptr = iv_ptr->iova;
	}
}

#define QAT_SYM_DP_IS_RESP_SUCCESS(resp) \
	(ICP_QAT_FW_COMN_STATUS_FLAG_OK == \
	ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(resp->comn_hdr.comn_status))

static __rte_always_inline void
qat_sym_dp_fill_vec_status(int32_t *sta, int status, uint32_t n)
{
	uint32_t i;

	for (i = 0; i < n; i++)
		sta[i] = status;
}

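/*
 * Free ring slots available for enqueue: maximum inflight capacity
 * minus requests already in flight, minus jobs cached but not yet
 * kicked (c), capped at the caller's burst size (n).
 */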
#define QAT_SYM_DP_GET_MAX_ENQ(q, c, n) \
	RTE_MIN((q->max_inflights - q->enqueued + q->dequeued - c), n)

static __rte_always_inline void
enqueue_one_cipher_job(struct qat_sym_session *ctx,
	struct icp_qat_fw_la_bulk_req *req,
	struct rte_crypto_va_iova_ptr *iv,
	union rte_crypto_sym_ofs ofs, uint32_t data_len)
{
	struct icp_qat_fw_la_cipher_req_params *cipher_param;

	cipher_param = (void *)&req->serv_specif_rqpars;

	/* cipher IV */
	set_cipher_iv(cipher_param, iv, ctx->cipher_iv.length, req);
	cipher_param->cipher_offset = ofs.ofs.cipher.head;
	cipher_param->cipher_length = data_len - ofs.ofs.cipher.head -
			ofs.ofs.cipher.tail;
}

static __rte_always_inline int
qat_sym_dp_enqueue_single_cipher(void *qp_data, uint8_t *drv_ctx,
	struct rte_crypto_vec *data, uint16_t n_data_vecs,
	union rte_crypto_sym_ofs ofs,
	struct rte_crypto_va_iova_ptr *iv,
	struct rte_crypto_va_iova_ptr *digest __rte_unused,
	struct rte_crypto_va_iova_ptr *aad __rte_unused,
	void *user_data)
{
	struct qat_qp *qp = qp_data;
	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
	struct qat_queue *tx_queue = &qp->tx_q;
	struct qat_sym_session *ctx = dp_ctx->session;
	struct icp_qat_fw_la_bulk_req *req;
	int32_t data_len;
	uint32_t tail = dp_ctx->tail;

	req = (struct icp_qat_fw_la_bulk_req *)(
		(uint8_t *)tx_queue->base_addr + tail);
	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
	data_len = qat_sym_dp_parse_data_vec(qp, req, data, n_data_vecs);
	if (unlikely(data_len < 0))
		return -1;
	req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data;

	enqueue_one_cipher_job(ctx, req, iv, ofs, (uint32_t)data_len);

	dp_ctx->tail = tail;
	dp_ctx->cached_enqueue++;

	return 0;
}

static __rte_always_inline uint32_t
qat_sym_dp_enqueue_cipher_jobs(void *qp_data, uint8_t *drv_ctx,
	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
	void *user_data[], int *status)
{
	struct qat_qp *qp = qp_data;
	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
	struct qat_queue *tx_queue = &qp->tx_q;
	struct qat_sym_session *ctx = dp_ctx->session;
	uint32_t i, n;
	uint32_t tail;
	struct icp_qat_fw_la_bulk_req *req;
	int32_t data_len;

	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
	if (unlikely(n == 0)) {
		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
		*status = 0;
		return 0;
	}

	tail = dp_ctx->tail;

	for (i = 0; i < n; i++) {
		req = (struct icp_qat_fw_la_bulk_req *)(
			(uint8_t *)tx_queue->base_addr + tail);
		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));

		data_len = qat_sym_dp_parse_data_vec(qp, req, vec->sgl[i].vec,
			vec->sgl[i].num);
		if (unlikely(data_len < 0))
			break;
		req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data[i];
		enqueue_one_cipher_job(ctx, req, &vec->iv[i], ofs,
			(uint32_t)data_len);
		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
	}

	if (unlikely(i < n))
		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);

	dp_ctx->tail = tail;
	dp_ctx->cached_enqueue += i;
	*status = 0;
	return i;
}

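/*
 * Auth-only requests carry the digest address and, depending on the
 * algorithm, either the IOVA of the auth IV (SNOW 3G/KASUMI/ZUC) or,
 * for Galois hashes (GMAC), the IV copied inline into the request's
 * cipher IV field.
 */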
static __rte_always_inline void
enqueue_one_auth_job(struct qat_sym_session *ctx,
	struct icp_qat_fw_la_bulk_req *req,
	struct rte_crypto_va_iova_ptr *digest,
	struct rte_crypto_va_iova_ptr *auth_iv,
	union rte_crypto_sym_ofs ofs, uint32_t data_len)
{
	struct icp_qat_fw_la_cipher_req_params *cipher_param;
	struct icp_qat_fw_la_auth_req_params *auth_param;

	cipher_param = (void *)&req->serv_specif_rqpars;
	auth_param = (void *)((uint8_t *)cipher_param +
			ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);

	auth_param->auth_off = ofs.ofs.auth.head;
	auth_param->auth_len = data_len - ofs.ofs.auth.head -
			ofs.ofs.auth.tail;
	auth_param->auth_res_addr = digest->iova;

	switch (ctx->qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
	case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
	case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3:
		auth_param->u1.aad_adr = auth_iv->iova;
		break;
	case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
	case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
		ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
			req->comn_hdr.serv_specif_flags,
			ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
		rte_memcpy(cipher_param->u.cipher_IV_array, auth_iv->va,
				ctx->auth_iv.length);
		break;
	default:
		break;
	}
}

static __rte_always_inline int
qat_sym_dp_enqueue_single_auth(void *qp_data, uint8_t *drv_ctx,
	struct rte_crypto_vec *data, uint16_t n_data_vecs,
	union rte_crypto_sym_ofs ofs,
	struct rte_crypto_va_iova_ptr *iv __rte_unused,
	struct rte_crypto_va_iova_ptr *digest,
	struct rte_crypto_va_iova_ptr *auth_iv,
	void *user_data)
{
	struct qat_qp *qp = qp_data;
	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
	struct qat_queue *tx_queue = &qp->tx_q;
	struct qat_sym_session *ctx = dp_ctx->session;
	struct icp_qat_fw_la_bulk_req *req;
	int32_t data_len;
	uint32_t tail = dp_ctx->tail;

	req = (struct icp_qat_fw_la_bulk_req *)(
		(uint8_t *)tx_queue->base_addr + tail);
	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
	data_len = qat_sym_dp_parse_data_vec(qp, req, data, n_data_vecs);
	if (unlikely(data_len < 0))
		return -1;
	req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data;

	enqueue_one_auth_job(ctx, req, digest, auth_iv, ofs,
			(uint32_t)data_len);

	dp_ctx->tail = tail;
	dp_ctx->cached_enqueue++;

	return 0;
}

static __rte_always_inline uint32_t
qat_sym_dp_enqueue_auth_jobs(void *qp_data, uint8_t *drv_ctx,
	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
	void *user_data[], int *status)
{
	struct qat_qp *qp = qp_data;
	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
	struct qat_queue *tx_queue = &qp->tx_q;
	struct qat_sym_session *ctx = dp_ctx->session;
	uint32_t i, n;
	uint32_t tail;
	struct icp_qat_fw_la_bulk_req *req;
	int32_t data_len;

	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
	if (unlikely(n == 0)) {
		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
		*status = 0;
		return 0;
	}

	tail = dp_ctx->tail;

	for (i = 0; i < n; i++) {
		req = (struct icp_qat_fw_la_bulk_req *)(
			(uint8_t *)tx_queue->base_addr + tail);
		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));

		data_len = qat_sym_dp_parse_data_vec(qp, req, vec->sgl[i].vec,
			vec->sgl[i].num);
		if (unlikely(data_len < 0))
			break;
		req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data[i];
		enqueue_one_auth_job(ctx, req, &vec->digest[i],
			&vec->auth_iv[i], ofs, (uint32_t)data_len);
		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
	}

	if (unlikely(i < n))
		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);

	dp_ctx->tail = tail;
	dp_ctx->cached_enqueue += i;
	*status = 0;
	return i;
}

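/*
 * Cipher + auth chain. Besides the usual offsets, this must locate the
 * IOVA of the end of the authenticated region to detect the
 * digest-encrypted case (digest appended inside the ciphered region),
 * which requires extending src/dst length and setting the
 * DIGEST_IN_BUFFER flag.
 */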
static __rte_always_inline int
enqueue_one_chain_job(struct qat_sym_session *ctx,
	struct icp_qat_fw_la_bulk_req *req,
	struct rte_crypto_vec *data,
	uint16_t n_data_vecs,
	struct rte_crypto_va_iova_ptr *cipher_iv,
	struct rte_crypto_va_iova_ptr *digest,
	struct rte_crypto_va_iova_ptr *auth_iv,
	union rte_crypto_sym_ofs ofs, uint32_t data_len)
{
	struct icp_qat_fw_la_cipher_req_params *cipher_param;
	struct icp_qat_fw_la_auth_req_params *auth_param;
	rte_iova_t auth_iova_end;
	int32_t cipher_len, auth_len;

	cipher_param = (void *)&req->serv_specif_rqpars;
	auth_param = (void *)((uint8_t *)cipher_param +
			ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);

	cipher_len = data_len - ofs.ofs.cipher.head -
			ofs.ofs.cipher.tail;
	auth_len = data_len - ofs.ofs.auth.head - ofs.ofs.auth.tail;

	if (unlikely(cipher_len < 0 || auth_len < 0))
		return -1;

	cipher_param->cipher_offset = ofs.ofs.cipher.head;
	cipher_param->cipher_length = cipher_len;
	set_cipher_iv(cipher_param, cipher_iv, ctx->cipher_iv.length, req);

	auth_param->auth_off = ofs.ofs.auth.head;
	auth_param->auth_len = auth_len;
	auth_param->auth_res_addr = digest->iova;

	switch (ctx->qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
	case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
	case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3:
		auth_param->u1.aad_adr = auth_iv->iova;
		break;
	case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
	case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
		break;
	default:
		break;
	}

	if (unlikely(n_data_vecs > 1)) {
		int auth_end_get = 0, i = n_data_vecs - 1;
		struct rte_crypto_vec *cvec = &data[0];
		uint32_t len;

		len = data_len - ofs.ofs.auth.tail;

		while (i >= 0 && len > 0) {
			if (cvec->len >= len) {
				auth_iova_end = cvec->iova + len;
				len = 0;
				auth_end_get = 1;
				break;
			}
			len -= cvec->len;
			i--;
			cvec++;
		}

		if (unlikely(auth_end_get == 0))
			return -1;
	} else
		auth_iova_end = data[0].iova + auth_param->auth_off +
				auth_param->auth_len;

	/* Then check if digest-encrypted conditions are met */
	if ((auth_param->auth_off + auth_param->auth_len <
			cipher_param->cipher_offset +
			cipher_param->cipher_length) &&
			(digest->iova == auth_iova_end)) {
		/* Handle partial digest encryption */
		if (cipher_param->cipher_offset +
				cipher_param->cipher_length <
				auth_param->auth_off +
				auth_param->auth_len +
				ctx->digest_length)
			req->comn_mid.dst_length =
				req->comn_mid.src_length =
					auth_param->auth_off +
					auth_param->auth_len +
					ctx->digest_length;
		struct icp_qat_fw_comn_req_hdr *header =
			&req->comn_hdr;
		ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(
			header->serv_specif_flags,
			ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
	}

	return 0;
}

static __rte_always_inline int
qat_sym_dp_enqueue_single_chain(void *qp_data, uint8_t *drv_ctx,
	struct rte_crypto_vec *data, uint16_t n_data_vecs,
	union rte_crypto_sym_ofs ofs,
	struct rte_crypto_va_iova_ptr *cipher_iv,
	struct rte_crypto_va_iova_ptr *digest,
	struct rte_crypto_va_iova_ptr *auth_iv,
	void *user_data)
{
	struct qat_qp *qp = qp_data;
	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
	struct qat_queue *tx_queue = &qp->tx_q;
	struct qat_sym_session *ctx = dp_ctx->session;
	struct icp_qat_fw_la_bulk_req *req;
	int32_t data_len;
	uint32_t tail = dp_ctx->tail;

	req = (struct icp_qat_fw_la_bulk_req *)(
		(uint8_t *)tx_queue->base_addr + tail);
	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
	data_len = qat_sym_dp_parse_data_vec(qp, req, data, n_data_vecs);
	if (unlikely(data_len < 0))
		return -1;
	req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data;

	if (unlikely(enqueue_one_chain_job(ctx, req, data, n_data_vecs,
			cipher_iv, digest, auth_iv, ofs, (uint32_t)data_len)))
		return -1;

	dp_ctx->tail = tail;
	dp_ctx->cached_enqueue++;

	return 0;
}

static __rte_always_inline uint32_t
qat_sym_dp_enqueue_chain_jobs(void *qp_data, uint8_t *drv_ctx,
	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
	void *user_data[], int *status)
{
	struct qat_qp *qp = qp_data;
	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
	struct qat_queue *tx_queue = &qp->tx_q;
	struct qat_sym_session *ctx = dp_ctx->session;
	uint32_t i, n;
	uint32_t tail;
	struct icp_qat_fw_la_bulk_req *req;
	int32_t data_len;

	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
	if (unlikely(n == 0)) {
		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
		*status = 0;
		return 0;
	}

	tail = dp_ctx->tail;

	for (i = 0; i < n; i++) {
		req = (struct icp_qat_fw_la_bulk_req *)(
			(uint8_t *)tx_queue->base_addr + tail);
		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));

		data_len = qat_sym_dp_parse_data_vec(qp, req, vec->sgl[i].vec,
			vec->sgl[i].num);
		if (unlikely(data_len < 0))
			break;
		req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data[i];
		if (unlikely(enqueue_one_chain_job(ctx, req, vec->sgl[i].vec,
			vec->sgl[i].num, &vec->iv[i], &vec->digest[i],
				&vec->auth_iv[i], ofs, (uint32_t)data_len)))
			break;

		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
	}

	if (unlikely(i < n))
		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);

	dp_ctx->tail = tail;
	dp_ctx->cached_enqueue += i;
	*status = 0;
	return i;
}

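/*
 * AEAD request setup. On single-pass capable devices the job is issued
 * as a plain cipher request with the AAD/digest addresses added.
 * Otherwise GCM passes the AAD by address, while CCM builds the B0
 * block and the encoded AAD length in place, zero-padding the AAD
 * buffer to a multiple of the block size as the spec requires.
 */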
static __rte_always_inline void
enqueue_one_aead_job(struct qat_sym_session *ctx,
	struct icp_qat_fw_la_bulk_req *req,
	struct rte_crypto_va_iova_ptr *iv,
	struct rte_crypto_va_iova_ptr *digest,
	struct rte_crypto_va_iova_ptr *aad,
	union rte_crypto_sym_ofs ofs, uint32_t data_len)
{
	struct icp_qat_fw_la_cipher_req_params *cipher_param =
		(void *)&req->serv_specif_rqpars;
	struct icp_qat_fw_la_auth_req_params *auth_param =
		(void *)((uint8_t *)&req->serv_specif_rqpars +
		ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
	uint8_t *aad_data;
	uint8_t aad_ccm_real_len;
	uint8_t aad_len_field_sz;
	uint32_t msg_len_be;
	rte_iova_t aad_iova = 0;
	uint8_t q;

	/* CPM 1.7 uses single pass to treat AEAD as cipher operation */
	if (ctx->is_single_pass) {
		enqueue_one_cipher_job(ctx, req, iv, ofs, data_len);
		cipher_param->spc_aad_addr = aad->iova;
		cipher_param->spc_auth_res_addr = digest->iova;
		return;
	}

	switch (ctx->qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
	case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
		ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
			req->comn_hdr.serv_specif_flags,
			ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
		rte_memcpy(cipher_param->u.cipher_IV_array, iv->va,
				ctx->cipher_iv.length);
		aad_iova = aad->iova;
		break;
	case ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC:
		aad_data = aad->va;
		aad_iova = aad->iova;
		aad_ccm_real_len = 0;
		aad_len_field_sz = 0;
		msg_len_be = rte_bswap32((uint32_t)data_len -
			ofs.ofs.cipher.head);

		if (ctx->aad_len > ICP_QAT_HW_CCM_AAD_DATA_OFFSET) {
			aad_len_field_sz = ICP_QAT_HW_CCM_AAD_LEN_INFO;
			aad_ccm_real_len = ctx->aad_len -
				ICP_QAT_HW_CCM_AAD_B0_LEN -
				ICP_QAT_HW_CCM_AAD_LEN_INFO;
		} else {
			aad_data = iv->va;
			aad_iova = iv->iova;
		}

		q = ICP_QAT_HW_CCM_NQ_CONST - ctx->cipher_iv.length;
		aad_data[0] = ICP_QAT_HW_CCM_BUILD_B0_FLAGS(
			aad_len_field_sz, ctx->digest_length, q);
		if (q > ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE) {
			memcpy(aad_data + ctx->cipher_iv.length +
				ICP_QAT_HW_CCM_NONCE_OFFSET + (q -
				ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE),
				(uint8_t *)&msg_len_be,
				ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE);
		} else {
			memcpy(aad_data + ctx->cipher_iv.length +
				ICP_QAT_HW_CCM_NONCE_OFFSET,
				(uint8_t *)&msg_len_be +
				(ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE
				- q), q);
		}

		if (aad_len_field_sz > 0) {
			*(uint16_t *)&aad_data[ICP_QAT_HW_CCM_AAD_B0_LEN] =
				rte_bswap16(aad_ccm_real_len);

			if ((aad_ccm_real_len + aad_len_field_sz)
				% ICP_QAT_HW_CCM_AAD_B0_LEN) {
				uint8_t pad_len = 0;
				uint8_t pad_idx = 0;

				pad_len = ICP_QAT_HW_CCM_AAD_B0_LEN -
					((aad_ccm_real_len +
					aad_len_field_sz) %
					ICP_QAT_HW_CCM_AAD_B0_LEN);
				pad_idx = ICP_QAT_HW_CCM_AAD_B0_LEN +
					aad_ccm_real_len +
					aad_len_field_sz;
				memset(&aad_data[pad_idx], 0, pad_len);
			}
		}

		rte_memcpy(((uint8_t *)cipher_param->u.cipher_IV_array)
			+ ICP_QAT_HW_CCM_NONCE_OFFSET,
			(uint8_t *)iv->va +
			ICP_QAT_HW_CCM_NONCE_OFFSET, ctx->cipher_iv.length);
		*(uint8_t *)&cipher_param->u.cipher_IV_array[0] =
			q - ICP_QAT_HW_CCM_NONCE_OFFSET;

		rte_memcpy((uint8_t *)aad->va +
				ICP_QAT_HW_CCM_NONCE_OFFSET,
			(uint8_t *)iv->va + ICP_QAT_HW_CCM_NONCE_OFFSET,
			ctx->cipher_iv.length);
		break;
	default:
		break;
	}

	cipher_param->cipher_offset = ofs.ofs.cipher.head;
	cipher_param->cipher_length = data_len - ofs.ofs.cipher.head -
			ofs.ofs.cipher.tail;
	auth_param->auth_off = ofs.ofs.cipher.head;
	auth_param->auth_len = cipher_param->cipher_length;
	auth_param->auth_res_addr = digest->iova;
	auth_param->u1.aad_adr = aad_iova;
}

static __rte_always_inline int
qat_sym_dp_enqueue_single_aead(void *qp_data, uint8_t *drv_ctx,
	struct rte_crypto_vec *data, uint16_t n_data_vecs,
	union rte_crypto_sym_ofs ofs,
	struct rte_crypto_va_iova_ptr *iv,
	struct rte_crypto_va_iova_ptr *digest,
	struct rte_crypto_va_iova_ptr *aad,
	void *user_data)
{
	struct qat_qp *qp = qp_data;
	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
	struct qat_queue *tx_queue = &qp->tx_q;
	struct qat_sym_session *ctx = dp_ctx->session;
	struct icp_qat_fw_la_bulk_req *req;
	int32_t data_len;
	uint32_t tail = dp_ctx->tail;

	req = (struct icp_qat_fw_la_bulk_req *)(
		(uint8_t *)tx_queue->base_addr + tail);
	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
	data_len = qat_sym_dp_parse_data_vec(qp, req, data, n_data_vecs);
	if (unlikely(data_len < 0))
		return -1;
	req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data;

	enqueue_one_aead_job(ctx, req, iv, digest, aad, ofs,
			(uint32_t)data_len);

	dp_ctx->tail = tail;
	dp_ctx->cached_enqueue++;

	return 0;
}

static __rte_always_inline uint32_t
qat_sym_dp_enqueue_aead_jobs(void *qp_data, uint8_t *drv_ctx,
	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
	void *user_data[], int *status)
{
	struct qat_qp *qp = qp_data;
	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
	struct qat_queue *tx_queue = &qp->tx_q;
	struct qat_sym_session *ctx = dp_ctx->session;
	uint32_t i, n;
	uint32_t tail;
	struct icp_qat_fw_la_bulk_req *req;
	int32_t data_len;

	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
	if (unlikely(n == 0)) {
		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
		*status = 0;
		return 0;
	}

	tail = dp_ctx->tail;

	for (i = 0; i < n; i++) {
		req = (struct icp_qat_fw_la_bulk_req *)(
			(uint8_t *)tx_queue->base_addr + tail);
		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));

		data_len = qat_sym_dp_parse_data_vec(qp, req, vec->sgl[i].vec,
			vec->sgl[i].num);
		if (unlikely(data_len < 0))
			break;
		req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data[i];
		enqueue_one_aead_job(ctx, req, &vec->iv[i], &vec->digest[i],
			&vec->aad[i], ofs, (uint32_t)data_len);
		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
	}

	if (unlikely(i < n))
		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);

	dp_ctx->tail = tail;
	dp_ctx->cached_enqueue += i;
	*status = 0;
	return i;
}

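/*
 * Burst dequeue: the first response's opaque data is used to query the
 * expected burst size (get_dequeue_count) when the caller did not fix
 * one via max_nb_to_dequeue. Dequeue stops early at the ring-empty
 * signature; per-job success is reported through post_dequeue.
 */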
static __rte_always_inline uint32_t
qat_sym_dp_dequeue_burst(void *qp_data, uint8_t *drv_ctx,
	rte_cryptodev_raw_get_dequeue_count_t get_dequeue_count,
	uint32_t max_nb_to_dequeue,
	rte_cryptodev_raw_post_dequeue_t post_dequeue,
	void **out_user_data, uint8_t is_user_data_array,
	uint32_t *n_success_jobs, int *return_status)
{
	struct qat_qp *qp = qp_data;
	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
	struct qat_queue *rx_queue = &qp->rx_q;
	struct icp_qat_fw_comn_resp *resp;
	void *resp_opaque;
	uint32_t i, n, inflight;
	uint32_t head;
	uint8_t status;

	*n_success_jobs = 0;
	*return_status = 0;
	head = dp_ctx->head;

	inflight = qp->enqueued - qp->dequeued;
	if (unlikely(inflight == 0))
		return 0;

	resp = (struct icp_qat_fw_comn_resp *)((uint8_t *)rx_queue->base_addr +
			head);
	/* no operation ready */
	if (unlikely(*(uint32_t *)resp == ADF_RING_EMPTY_SIG))
		return 0;

	resp_opaque = (void *)(uintptr_t)resp->opaque_data;
	/* get the dequeue count */
	if (get_dequeue_count) {
		n = get_dequeue_count(resp_opaque);
		if (unlikely(n == 0))
			return 0;
	} else {
		if (unlikely(max_nb_to_dequeue == 0))
			return 0;
		n = max_nb_to_dequeue;
	}

	out_user_data[0] = resp_opaque;
	status = QAT_SYM_DP_IS_RESP_SUCCESS(resp);
	post_dequeue(resp_opaque, 0, status);
	*n_success_jobs += status;

	head = (head + rx_queue->msg_size) & rx_queue->modulo_mask;

	/* we already finished dequeue when n == 1 */
	if (unlikely(n == 1)) {
		i = 1;
		goto end_deq;
	}

	if (is_user_data_array) {
		for (i = 1; i < n; i++) {
			resp = (struct icp_qat_fw_comn_resp *)(
				(uint8_t *)rx_queue->base_addr + head);
			if (unlikely(*(uint32_t *)resp ==
					ADF_RING_EMPTY_SIG))
				goto end_deq;
			out_user_data[i] = (void *)(uintptr_t)resp->opaque_data;
			status = QAT_SYM_DP_IS_RESP_SUCCESS(resp);
			*n_success_jobs += status;
			post_dequeue(out_user_data[i], i, status);
			head = (head + rx_queue->msg_size) &
					rx_queue->modulo_mask;
		}

		goto end_deq;
	}

	/* opaque is not array */
	for (i = 1; i < n; i++) {
		resp = (struct icp_qat_fw_comn_resp *)(
			(uint8_t *)rx_queue->base_addr + head);
		status = QAT_SYM_DP_IS_RESP_SUCCESS(resp);
		if (unlikely(*(uint32_t *)resp == ADF_RING_EMPTY_SIG))
			goto end_deq;
		head = (head + rx_queue->msg_size) &
				rx_queue->modulo_mask;
		post_dequeue(resp_opaque, i, status);
		*n_success_jobs += status;
	}

end_deq:
	dp_ctx->head = head;
	dp_ctx->cached_dequeue += i;
	return i;
}

static __rte_always_inline void *
qat_sym_dp_dequeue(void *qp_data, uint8_t *drv_ctx, int *dequeue_status,
		enum rte_crypto_op_status *op_status)
{
	struct qat_qp *qp = qp_data;
	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
	struct qat_queue *rx_queue = &qp->rx_q;
	register struct icp_qat_fw_comn_resp *resp;

	resp = (struct icp_qat_fw_comn_resp *)((uint8_t *)rx_queue->base_addr +
			dp_ctx->head);

	if (unlikely(*(uint32_t *)resp == ADF_RING_EMPTY_SIG))
		return NULL;

	dp_ctx->head = (dp_ctx->head + rx_queue->msg_size) &
			rx_queue->modulo_mask;
	dp_ctx->cached_dequeue++;

	*op_status = QAT_SYM_DP_IS_RESP_SUCCESS(resp) ?
			RTE_CRYPTO_OP_STATUS_SUCCESS :
			RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
	*dequeue_status = 0;

	return (void *)(uintptr_t)resp->opaque_data;
}

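/*
 * Doorbell for cached enqueues: n must match the number of jobs cached
 * since the last kick; the queue tail CSR is then written once for the
 * whole batch.
 */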
static __rte_always_inline int
qat_sym_dp_kick_tail(void *qp_data, uint8_t *drv_ctx, uint32_t n)
{
	struct qat_qp *qp = qp_data;
	struct qat_queue *tx_queue = &qp->tx_q;
	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;

	if (unlikely(dp_ctx->cached_enqueue != n))
		return -1;

	qp->enqueued += n;
	qp->stats.enqueued_count += n;

	tx_queue->tail = dp_ctx->tail;

	WRITE_CSR_RING_TAIL(qp->mmap_bar_addr,
			tx_queue->hw_bundle_number,
			tx_queue->hw_queue_number, tx_queue->tail);
	tx_queue->csr_tail = tx_queue->tail;
	dp_ctx->cached_enqueue = 0;

	return 0;
}

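/*
 * Complete cached dequeues: once enough responses have accumulated to
 * cross QAT_CSR_HEAD_WRITE_THRESH, restore the ring-empty signatures
 * over the consumed descriptors and write the head CSR.
 */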
static __rte_always_inline int
qat_sym_dp_update_head(void *qp_data, uint8_t *drv_ctx, uint32_t n)
{
	struct qat_qp *qp = qp_data;
	struct qat_queue *rx_queue = &qp->rx_q;
	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;

	if (unlikely(dp_ctx->cached_dequeue != n))
		return -1;

	rx_queue->head = dp_ctx->head;
	rx_queue->nb_processed_responses += n;
	qp->dequeued += n;
	qp->stats.dequeued_count += n;
	if (rx_queue->nb_processed_responses > QAT_CSR_HEAD_WRITE_THRESH) {
		uint32_t old_head, new_head;
		uint32_t max_head;

		old_head = rx_queue->csr_head;
		new_head = rx_queue->head;
		max_head = qp->nb_descriptors * rx_queue->msg_size;

		/* write out free descriptors */
		void *cur_desc = (uint8_t *)rx_queue->base_addr + old_head;

		if (new_head < old_head) {
			memset(cur_desc, ADF_RING_EMPTY_SIG_BYTE,
					max_head - old_head);
			memset(rx_queue->base_addr, ADF_RING_EMPTY_SIG_BYTE,
					new_head);
		} else {
			memset(cur_desc, ADF_RING_EMPTY_SIG_BYTE, new_head -
					old_head);
		}
		rx_queue->nb_processed_responses = 0;
		rx_queue->csr_head = new_head;

		/* write current head to CSR */
		WRITE_CSR_RING_HEAD(qp->mmap_bar_addr,
			rx_queue->hw_bundle_number, rx_queue->hw_queue_number,
			new_head);
	}

	dp_ctx->cached_dequeue = 0;
	return 0;
}

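/*
 * Populate the raw DP context for a queue pair and select the enqueue
 * handlers from the session's QAT command and algorithm: AEAD (GCM,
 * CCM, CHACHA20-POLY1305), cipher-auth chain, auth-only (including
 * GMAC) or cipher-only. Only session-based operation is supported.
 */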
int
qat_sym_configure_dp_ctx(struct rte_cryptodev *dev, uint16_t qp_id,
	struct rte_crypto_raw_dp_ctx *raw_dp_ctx,
	enum rte_crypto_op_sess_type sess_type,
	union rte_cryptodev_session_ctx session_ctx, uint8_t is_update)
{
	struct qat_qp *qp;
	struct qat_sym_session *ctx;
	struct qat_sym_dp_ctx *dp_ctx;

	qp = dev->data->queue_pairs[qp_id];
	dp_ctx = (struct qat_sym_dp_ctx *)raw_dp_ctx->drv_ctx_data;

	if (!is_update) {
		memset(raw_dp_ctx, 0, sizeof(*raw_dp_ctx) +
				sizeof(struct qat_sym_dp_ctx));
		raw_dp_ctx->qp_data = dev->data->queue_pairs[qp_id];
		dp_ctx->tail = qp->tx_q.tail;
		dp_ctx->head = qp->rx_q.head;
		dp_ctx->cached_enqueue = dp_ctx->cached_dequeue = 0;
	}

	if (sess_type != RTE_CRYPTO_OP_WITH_SESSION)
		return -EINVAL;

	ctx = (struct qat_sym_session *)get_sym_session_private_data(
			session_ctx.crypto_sess, qat_sym_driver_id);

	dp_ctx->session = ctx;

	raw_dp_ctx->enqueue_done = qat_sym_dp_kick_tail;
	raw_dp_ctx->dequeue_burst = qat_sym_dp_dequeue_burst;
	raw_dp_ctx->dequeue = qat_sym_dp_dequeue;
	raw_dp_ctx->dequeue_done = qat_sym_dp_update_head;

	if ((ctx->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER ||
			ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) &&
			!ctx->is_gmac) {
		/* AES-GCM or AES-CCM */
		if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
			ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64 ||
			(ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_AES128
			&& ctx->qat_mode == ICP_QAT_HW_CIPHER_CTR_MODE
			&& ctx->qat_hash_alg ==
					ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC)) {
			raw_dp_ctx->enqueue_burst =
					qat_sym_dp_enqueue_aead_jobs;
			raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_aead;
		} else {
			raw_dp_ctx->enqueue_burst =
					qat_sym_dp_enqueue_chain_jobs;
			raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_chain;
		}
	} else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH || ctx->is_gmac) {
		raw_dp_ctx->enqueue_burst = qat_sym_dp_enqueue_auth_jobs;
		raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_auth;
	} else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) {
		if (ctx->qat_mode == ICP_QAT_HW_CIPHER_AEAD_MODE ||
			ctx->qat_cipher_alg ==
				ICP_QAT_HW_CIPHER_ALGO_CHACHA20_POLY1305) {
			raw_dp_ctx->enqueue_burst =
					qat_sym_dp_enqueue_aead_jobs;
			raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_aead;
		} else {
			raw_dp_ctx->enqueue_burst =
					qat_sym_dp_enqueue_cipher_jobs;
			raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_cipher;
		}
	} else
		return -1;

	return 0;
}

int
qat_sym_get_dp_ctx_size(__rte_unused struct rte_cryptodev *dev)
{
	return sizeof(struct qat_sym_dp_ctx);
}
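
/*
 * Typical application flow against this backend through the public
 * rte_cryptodev raw API, as a minimal sketch (error handling and
 * vector setup omitted; dev_id, qp_id, sess, data, n_vecs, ofs, iv,
 * digest, aad and user_data are assumed initialized by the caller):
 *
 *	size_t size = rte_cryptodev_get_raw_dp_ctx_size(dev_id);
 *	struct rte_crypto_raw_dp_ctx *dpctx = rte_zmalloc(NULL, size, 0);
 *	union rte_cryptodev_session_ctx sctx = { .crypto_sess = sess };
 *
 *	rte_cryptodev_configure_raw_dp_ctx(dev_id, qp_id, dpctx,
 *			RTE_CRYPTO_OP_WITH_SESSION, sctx, 0);
 *	rte_cryptodev_raw_enqueue(dpctx, data, n_vecs, ofs, &iv, &digest,
 *			&aad, user_data);
 *	rte_cryptodev_raw_enqueue_done(dpctx, 1);
 *	while (rte_cryptodev_raw_dequeue(dpctx, &deq_status,
 *			&op_status) == NULL)
 *		rte_pause();
 *	rte_cryptodev_raw_dequeue_done(dpctx, 1);
 */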