/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2020 Intel Corporation
 */

#include <cryptodev_pmd.h>

#include "adf_transport_access_macros.h"
#include "icp_qat_fw.h"
#include "icp_qat_fw_la.h"

#include "qat_sym.h"
#include "qat_sym_pmd.h"
#include "qat_sym_session.h"
#include "qat_qp.h"

struct qat_sym_dp_ctx {
	struct qat_sym_session *session;
	uint32_t tail;
	uint32_t head;
	uint16_t cached_enqueue;
	uint16_t cached_dequeue;
};

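/*
 * Convert the raw-API data vectors into the request's source/destination
 * descriptors: a flat buffer for a single segment, or the op cookie's SGL
 * for multi-segment input. Returns the total data length, or -1 on error.
 */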
static __rte_always_inline int32_t
qat_sym_dp_parse_data_vec(struct qat_qp *qp, struct icp_qat_fw_la_bulk_req *req,
	struct rte_crypto_vec *data, uint16_t n_data_vecs)
{
	struct qat_queue *tx_queue;
	struct qat_sym_op_cookie *cookie;
	struct qat_sgl *list;
	uint32_t i;
	/* accumulate in 64 bits so the UINT32_MAX overflow check is meaningful */
	uint64_t total_len;

	if (likely(n_data_vecs == 1)) {
		req->comn_mid.src_data_addr = req->comn_mid.dest_data_addr =
			data[0].iova;
		req->comn_mid.src_length = req->comn_mid.dst_length =
			data[0].len;
		return data[0].len;
	}

	if (n_data_vecs == 0 || n_data_vecs > QAT_SYM_SGL_MAX_NUMBER)
		return -1;

	total_len = 0;
	tx_queue = &qp->tx_q;

	ICP_QAT_FW_COMN_PTR_TYPE_SET(req->comn_hdr.comn_req_flags,
			QAT_COMN_PTR_TYPE_SGL);
	cookie = qp->op_cookies[tx_queue->tail >> tx_queue->trailz];
	list = (struct qat_sgl *)&cookie->qat_sgl_src;

	for (i = 0; i < n_data_vecs; i++) {
		list->buffers[i].len = data[i].len;
		list->buffers[i].resrvd = 0;
		list->buffers[i].addr = data[i].iova;
		if (total_len + data[i].len > UINT32_MAX) {
			QAT_DP_LOG(ERR, "Message too long");
			return -1;
		}
		total_len += data[i].len;
	}

	list->num_bufs = i;
	req->comn_mid.src_data_addr = req->comn_mid.dest_data_addr =
			cookie->qat_sgl_src_phys_addr;
	req->comn_mid.src_length = req->comn_mid.dst_length = 0;

	return total_len;
}

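/*
 * Set the cipher IV in the request: copy it inline when it fits the request's
 * IV array, otherwise pass it by physical address (64-bit IV pointer flag).
 */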
static __rte_always_inline void
set_cipher_iv(struct icp_qat_fw_la_cipher_req_params *cipher_param,
		struct rte_crypto_va_iova_ptr *iv_ptr, uint32_t iv_len,
		struct icp_qat_fw_la_bulk_req *qat_req)
{
	/* copy IV into request if it fits */
	if (iv_len <= sizeof(cipher_param->u.cipher_IV_array))
		rte_memcpy(cipher_param->u.cipher_IV_array, iv_ptr->va,
				iv_len);
	else {
		ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(
				qat_req->comn_hdr.serv_specif_flags,
				ICP_QAT_FW_CIPH_IV_64BIT_PTR);
		cipher_param->u.s.cipher_IV_ptr = iv_ptr->iova;
	}
}

#define QAT_SYM_DP_IS_RESP_SUCCESS(resp) \
	(ICP_QAT_FW_COMN_STATUS_FLAG_OK == \
	ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(resp->comn_hdr.comn_status))

static __rte_always_inline void
qat_sym_dp_fill_vec_status(int32_t *sta, int status, uint32_t n)
{
	uint32_t i;

	for (i = 0; i < n; i++)
		sta[i] = status;
}

#define QAT_SYM_DP_GET_MAX_ENQ(q, c, n) \
	RTE_MIN((q->max_inflights - q->enqueued + q->dequeued - c), n)

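/* Fill the cipher-only request parameters: IV, cipher offset and length. */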
static __rte_always_inline void
enqueue_one_cipher_job(struct qat_sym_session *ctx,
	struct icp_qat_fw_la_bulk_req *req,
	struct rte_crypto_va_iova_ptr *iv,
	union rte_crypto_sym_ofs ofs, uint32_t data_len)
{
	struct icp_qat_fw_la_cipher_req_params *cipher_param;

	cipher_param = (void *)&req->serv_specif_rqpars;

	/* cipher IV */
	set_cipher_iv(cipher_param, iv, ctx->cipher_iv.length, req);
	cipher_param->cipher_offset = ofs.ofs.cipher.head;
	cipher_param->cipher_length = data_len - ofs.ofs.cipher.head -
			ofs.ofs.cipher.tail;
}

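/*
 * Single-operation enqueue: copy the session's request template into the next
 * TX ring slot, fill it and cache the enqueue until enqueue_done is called.
 */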
static __rte_always_inline int
qat_sym_dp_enqueue_single_cipher(void *qp_data, uint8_t *drv_ctx,
	struct rte_crypto_vec *data, uint16_t n_data_vecs,
	union rte_crypto_sym_ofs ofs,
	struct rte_crypto_va_iova_ptr *iv,
	struct rte_crypto_va_iova_ptr *digest __rte_unused,
	struct rte_crypto_va_iova_ptr *aad __rte_unused,
	void *user_data)
{
	struct qat_qp *qp = qp_data;
	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
	struct qat_queue *tx_queue = &qp->tx_q;
	struct qat_sym_session *ctx = dp_ctx->session;
	struct icp_qat_fw_la_bulk_req *req;
	int32_t data_len;
	uint32_t tail = dp_ctx->tail;

	req = (struct icp_qat_fw_la_bulk_req *)(
		(uint8_t *)tx_queue->base_addr + tail);
	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
	data_len = qat_sym_dp_parse_data_vec(qp, req, data, n_data_vecs);
	if (unlikely(data_len < 0))
		return -1;
	req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data;

	enqueue_one_cipher_job(ctx, req, iv, ofs, (uint32_t)data_len);

	dp_ctx->tail = tail;
	dp_ctx->cached_enqueue++;

	return 0;
}

static __rte_always_inline uint32_t
qat_sym_dp_enqueue_cipher_jobs(void *qp_data, uint8_t *drv_ctx,
	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
	void *user_data[], int *status)
{
	struct qat_qp *qp = qp_data;
	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
	struct qat_queue *tx_queue = &qp->tx_q;
	struct qat_sym_session *ctx = dp_ctx->session;
	uint32_t i, n;
	uint32_t tail;
	struct icp_qat_fw_la_bulk_req *req;
	int32_t data_len;

	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
	if (unlikely(n == 0)) {
		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
		*status = 0;
		return 0;
	}

	tail = dp_ctx->tail;

	for (i = 0; i < n; i++) {
		req = (struct icp_qat_fw_la_bulk_req *)(
			(uint8_t *)tx_queue->base_addr + tail);
		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));

		data_len = qat_sym_dp_parse_data_vec(qp, req,
			vec->src_sgl[i].vec,
			vec->src_sgl[i].num);
		if (unlikely(data_len < 0))
			break;
		req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data[i];
		enqueue_one_cipher_job(ctx, req, &vec->iv[i], ofs,
			(uint32_t)data_len);
		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
	}

	if (unlikely(i < n))
		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);

	dp_ctx->tail = tail;
	dp_ctx->cached_enqueue += i;
	*status = 0;
	return i;
}

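/*
 * Fill the auth-only request parameters. SNOW3G/KASUMI/ZUC pass the auth IV
 * by address; GMAC copies it into the request's cipher IV array.
 */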
static __rte_always_inline void
enqueue_one_auth_job(struct qat_sym_session *ctx,
	struct icp_qat_fw_la_bulk_req *req,
	struct rte_crypto_va_iova_ptr *digest,
	struct rte_crypto_va_iova_ptr *auth_iv,
	union rte_crypto_sym_ofs ofs, uint32_t data_len)
{
	struct icp_qat_fw_la_cipher_req_params *cipher_param;
	struct icp_qat_fw_la_auth_req_params *auth_param;

	cipher_param = (void *)&req->serv_specif_rqpars;
	auth_param = (void *)((uint8_t *)cipher_param +
			ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);

	auth_param->auth_off = ofs.ofs.auth.head;
	auth_param->auth_len = data_len - ofs.ofs.auth.head -
			ofs.ofs.auth.tail;
	auth_param->auth_res_addr = digest->iova;

	switch (ctx->qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
	case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
	case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3:
		auth_param->u1.aad_adr = auth_iv->iova;
		break;
	case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
	case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
		ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
			req->comn_hdr.serv_specif_flags,
			ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
		rte_memcpy(cipher_param->u.cipher_IV_array, auth_iv->va,
				ctx->auth_iv.length);
		break;
	default:
		break;
	}
}

static __rte_always_inline int
qat_sym_dp_enqueue_single_auth(void *qp_data, uint8_t *drv_ctx,
	struct rte_crypto_vec *data, uint16_t n_data_vecs,
	union rte_crypto_sym_ofs ofs,
	struct rte_crypto_va_iova_ptr *iv __rte_unused,
	struct rte_crypto_va_iova_ptr *digest,
	struct rte_crypto_va_iova_ptr *auth_iv,
	void *user_data)
{
	struct qat_qp *qp = qp_data;
	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
	struct qat_queue *tx_queue = &qp->tx_q;
	struct qat_sym_session *ctx = dp_ctx->session;
	struct icp_qat_fw_la_bulk_req *req;
	int32_t data_len;
	uint32_t tail = dp_ctx->tail;

	req = (struct icp_qat_fw_la_bulk_req *)(
		(uint8_t *)tx_queue->base_addr + tail);
	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
	data_len = qat_sym_dp_parse_data_vec(qp, req, data, n_data_vecs);
	if (unlikely(data_len < 0))
		return -1;
	req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data;

	enqueue_one_auth_job(ctx, req, digest, auth_iv, ofs,
			(uint32_t)data_len);

	dp_ctx->tail = tail;
	dp_ctx->cached_enqueue++;

	return 0;
}

static __rte_always_inline uint32_t
qat_sym_dp_enqueue_auth_jobs(void *qp_data, uint8_t *drv_ctx,
	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
	void *user_data[], int *status)
{
	struct qat_qp *qp = qp_data;
	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
	struct qat_queue *tx_queue = &qp->tx_q;
	struct qat_sym_session *ctx = dp_ctx->session;
	uint32_t i, n;
	uint32_t tail;
	struct icp_qat_fw_la_bulk_req *req;
	int32_t data_len;

	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
	if (unlikely(n == 0)) {
		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
		*status = 0;
		return 0;
	}

	tail = dp_ctx->tail;

	for (i = 0; i < n; i++) {
		req = (struct icp_qat_fw_la_bulk_req *)(
			(uint8_t *)tx_queue->base_addr + tail);
		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));

		data_len = qat_sym_dp_parse_data_vec(qp, req,
			vec->src_sgl[i].vec,
			vec->src_sgl[i].num);
		if (unlikely(data_len < 0))
			break;
		req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data[i];
		enqueue_one_auth_job(ctx, req, &vec->digest[i],
			&vec->auth_iv[i], ofs, (uint32_t)data_len);
		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
	}

	if (unlikely(i < n))
		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);

	dp_ctx->tail = tail;
	dp_ctx->cached_enqueue += i;
	*status = 0;
	return i;
}

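/*
 * Fill a cipher+hash chained request. Besides the cipher and auth regions,
 * this locates the end of the authenticated data so the digest-encrypted case
 * can be detected and the digest-in-buffer flag set accordingly.
 */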
static __rte_always_inline int
enqueue_one_chain_job(struct qat_sym_session *ctx,
	struct icp_qat_fw_la_bulk_req *req,
	struct rte_crypto_vec *data,
	uint16_t n_data_vecs,
	struct rte_crypto_va_iova_ptr *cipher_iv,
	struct rte_crypto_va_iova_ptr *digest,
	struct rte_crypto_va_iova_ptr *auth_iv,
	union rte_crypto_sym_ofs ofs, uint32_t data_len)
{
	struct icp_qat_fw_la_cipher_req_params *cipher_param;
	struct icp_qat_fw_la_auth_req_params *auth_param;
	rte_iova_t auth_iova_end;
	int32_t cipher_len, auth_len;

	cipher_param = (void *)&req->serv_specif_rqpars;
	auth_param = (void *)((uint8_t *)cipher_param +
			ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);

	cipher_len = data_len - ofs.ofs.cipher.head -
			ofs.ofs.cipher.tail;
	auth_len = data_len - ofs.ofs.auth.head - ofs.ofs.auth.tail;

	if (unlikely(cipher_len < 0 || auth_len < 0))
		return -1;

	cipher_param->cipher_offset = ofs.ofs.cipher.head;
	cipher_param->cipher_length = cipher_len;
	set_cipher_iv(cipher_param, cipher_iv, ctx->cipher_iv.length, req);

	auth_param->auth_off = ofs.ofs.auth.head;
	auth_param->auth_len = auth_len;
	auth_param->auth_res_addr = digest->iova;

	switch (ctx->qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
	case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
	case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3:
		auth_param->u1.aad_adr = auth_iv->iova;
		break;
	case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
	case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
		break;
	default:
		break;
	}

	if (unlikely(n_data_vecs > 1)) {
		int auth_end_get = 0, i = n_data_vecs - 1;
		struct rte_crypto_vec *cvec = &data[0];
		uint32_t len;

		len = data_len - ofs.ofs.auth.tail;

		/* walk the segments to find the IOVA of the last auth byte */
		while (i >= 0 && len > 0) {
			if (cvec->len >= len) {
				auth_iova_end = cvec->iova + len;
				len = 0;
				auth_end_get = 1;
				break;
			}
			len -= cvec->len;
			i--;
			cvec++;
		}

		if (unlikely(auth_end_get == 0))
			return -1;
	} else
		auth_iova_end = data[0].iova + auth_param->auth_off +
			auth_param->auth_len;

	/* Then check if digest-encrypted conditions are met */
	if ((auth_param->auth_off + auth_param->auth_len <
		cipher_param->cipher_offset +
		cipher_param->cipher_length) &&
			(digest->iova == auth_iova_end)) {
		/* Handle partial digest encryption */
		if (cipher_param->cipher_offset +
				cipher_param->cipher_length <
				auth_param->auth_off +
				auth_param->auth_len +
				ctx->digest_length)
			req->comn_mid.dst_length =
				req->comn_mid.src_length =
				auth_param->auth_off +
				auth_param->auth_len +
				ctx->digest_length;
		struct icp_qat_fw_comn_req_hdr *header =
			&req->comn_hdr;
		ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(
			header->serv_specif_flags,
			ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
	}

	return 0;
}

static __rte_always_inline int
qat_sym_dp_enqueue_single_chain(void *qp_data, uint8_t *drv_ctx,
	struct rte_crypto_vec *data, uint16_t n_data_vecs,
	union rte_crypto_sym_ofs ofs,
	struct rte_crypto_va_iova_ptr *cipher_iv,
	struct rte_crypto_va_iova_ptr *digest,
	struct rte_crypto_va_iova_ptr *auth_iv,
	void *user_data)
{
	struct qat_qp *qp = qp_data;
	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
	struct qat_queue *tx_queue = &qp->tx_q;
	struct qat_sym_session *ctx = dp_ctx->session;
	struct icp_qat_fw_la_bulk_req *req;
	int32_t data_len;
	uint32_t tail = dp_ctx->tail;

	req = (struct icp_qat_fw_la_bulk_req *)(
		(uint8_t *)tx_queue->base_addr + tail);
	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
	data_len = qat_sym_dp_parse_data_vec(qp, req, data, n_data_vecs);
	if (unlikely(data_len < 0))
		return -1;
	req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data;

	if (unlikely(enqueue_one_chain_job(ctx, req, data, n_data_vecs,
			cipher_iv, digest, auth_iv, ofs, (uint32_t)data_len)))
		return -1;

	dp_ctx->tail = tail;
	dp_ctx->cached_enqueue++;

	return 0;
}

static __rte_always_inline uint32_t
qat_sym_dp_enqueue_chain_jobs(void *qp_data, uint8_t *drv_ctx,
	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
	void *user_data[], int *status)
{
	struct qat_qp *qp = qp_data;
	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
	struct qat_queue *tx_queue = &qp->tx_q;
	struct qat_sym_session *ctx = dp_ctx->session;
	uint32_t i, n;
	uint32_t tail;
	struct icp_qat_fw_la_bulk_req *req;
	int32_t data_len;

	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
	if (unlikely(n == 0)) {
		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
		*status = 0;
		return 0;
	}

	tail = dp_ctx->tail;

	for (i = 0; i < n; i++) {
		req = (struct icp_qat_fw_la_bulk_req *)(
			(uint8_t *)tx_queue->base_addr + tail);
		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));

		data_len = qat_sym_dp_parse_data_vec(qp, req,
			vec->src_sgl[i].vec,
			vec->src_sgl[i].num);
		if (unlikely(data_len < 0))
			break;
		req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data[i];
		if (unlikely(enqueue_one_chain_job(ctx, req,
			vec->src_sgl[i].vec, vec->src_sgl[i].num,
			&vec->iv[i], &vec->digest[i],
			&vec->auth_iv[i], ofs, (uint32_t)data_len)))
			break;

		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
	}

	if (unlikely(i < n))
		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);

	dp_ctx->tail = tail;
	dp_ctx->cached_enqueue += i;
	*status = 0;
	return i;
}

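/*
 * Fill an AEAD request. Single-pass capable devices reuse the cipher path and
 * only add the SPC AAD/digest addresses; otherwise GCM sets the 12-byte IV
 * flag and CCM builds the B0 block and padded AAD in place.
 */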
static __rte_always_inline void
enqueue_one_aead_job(struct qat_sym_session *ctx,
	struct icp_qat_fw_la_bulk_req *req,
	struct rte_crypto_va_iova_ptr *iv,
	struct rte_crypto_va_iova_ptr *digest,
	struct rte_crypto_va_iova_ptr *aad,
	union rte_crypto_sym_ofs ofs, uint32_t data_len)
{
	struct icp_qat_fw_la_cipher_req_params *cipher_param =
		(void *)&req->serv_specif_rqpars;
	struct icp_qat_fw_la_auth_req_params *auth_param =
		(void *)((uint8_t *)&req->serv_specif_rqpars +
		ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
	uint8_t *aad_data;
	uint8_t aad_ccm_real_len;
	uint8_t aad_len_field_sz;
	uint32_t msg_len_be;
	rte_iova_t aad_iova = 0;
	uint8_t q;

	/* CPM 1.7 uses single pass to treat AEAD as cipher operation */
	if (ctx->is_single_pass) {
		enqueue_one_cipher_job(ctx, req, iv, ofs, data_len);
		cipher_param->spc_aad_addr = aad->iova;
		cipher_param->spc_auth_res_addr = digest->iova;
		return;
	}

	switch (ctx->qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
	case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
		ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
			req->comn_hdr.serv_specif_flags,
			ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
		rte_memcpy(cipher_param->u.cipher_IV_array, iv->va,
				ctx->cipher_iv.length);
		aad_iova = aad->iova;
		break;
	case ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC:
		aad_data = aad->va;
		aad_iova = aad->iova;
		aad_ccm_real_len = 0;
		aad_len_field_sz = 0;
		msg_len_be = rte_bswap32((uint32_t)data_len -
				ofs.ofs.cipher.head);

		if (ctx->aad_len > ICP_QAT_HW_CCM_AAD_DATA_OFFSET) {
			aad_len_field_sz = ICP_QAT_HW_CCM_AAD_LEN_INFO;
			aad_ccm_real_len = ctx->aad_len -
				ICP_QAT_HW_CCM_AAD_B0_LEN -
				ICP_QAT_HW_CCM_AAD_LEN_INFO;
		} else {
			aad_data = iv->va;
			aad_iova = iv->iova;
		}

		q = ICP_QAT_HW_CCM_NQ_CONST - ctx->cipher_iv.length;
		aad_data[0] = ICP_QAT_HW_CCM_BUILD_B0_FLAGS(
			aad_len_field_sz, ctx->digest_length, q);
		if (q > ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE) {
			memcpy(aad_data + ctx->cipher_iv.length +
				ICP_QAT_HW_CCM_NONCE_OFFSET + (q -
				ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE),
				(uint8_t *)&msg_len_be,
				ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE);
		} else {
			memcpy(aad_data + ctx->cipher_iv.length +
				ICP_QAT_HW_CCM_NONCE_OFFSET,
				(uint8_t *)&msg_len_be +
				(ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE
				- q), q);
		}

		if (aad_len_field_sz > 0) {
			*(uint16_t *)&aad_data[ICP_QAT_HW_CCM_AAD_B0_LEN] =
				rte_bswap16(aad_ccm_real_len);

			if ((aad_ccm_real_len + aad_len_field_sz)
				% ICP_QAT_HW_CCM_AAD_B0_LEN) {
				uint8_t pad_len = 0;
				uint8_t pad_idx = 0;

				pad_len = ICP_QAT_HW_CCM_AAD_B0_LEN -
					((aad_ccm_real_len +
					aad_len_field_sz) %
					ICP_QAT_HW_CCM_AAD_B0_LEN);
				pad_idx = ICP_QAT_HW_CCM_AAD_B0_LEN +
					aad_ccm_real_len +
					aad_len_field_sz;
				memset(&aad_data[pad_idx], 0, pad_len);
			}
		}

		rte_memcpy(((uint8_t *)cipher_param->u.cipher_IV_array)
			+ ICP_QAT_HW_CCM_NONCE_OFFSET,
			(uint8_t *)iv->va +
			ICP_QAT_HW_CCM_NONCE_OFFSET, ctx->cipher_iv.length);
		*(uint8_t *)&cipher_param->u.cipher_IV_array[0] =
			q - ICP_QAT_HW_CCM_NONCE_OFFSET;

		rte_memcpy((uint8_t *)aad->va +
				ICP_QAT_HW_CCM_NONCE_OFFSET,
			(uint8_t *)iv->va + ICP_QAT_HW_CCM_NONCE_OFFSET,
			ctx->cipher_iv.length);
		break;
	default:
		break;
	}

	cipher_param->cipher_offset = ofs.ofs.cipher.head;
	cipher_param->cipher_length = data_len - ofs.ofs.cipher.head -
			ofs.ofs.cipher.tail;
	auth_param->auth_off = ofs.ofs.cipher.head;
	auth_param->auth_len = cipher_param->cipher_length;
	auth_param->auth_res_addr = digest->iova;
	auth_param->u1.aad_adr = aad_iova;
}

static __rte_always_inline int
qat_sym_dp_enqueue_single_aead(void *qp_data, uint8_t *drv_ctx,
	struct rte_crypto_vec *data, uint16_t n_data_vecs,
	union rte_crypto_sym_ofs ofs,
	struct rte_crypto_va_iova_ptr *iv,
	struct rte_crypto_va_iova_ptr *digest,
	struct rte_crypto_va_iova_ptr *aad,
	void *user_data)
{
	struct qat_qp *qp = qp_data;
	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
	struct qat_queue *tx_queue = &qp->tx_q;
	struct qat_sym_session *ctx = dp_ctx->session;
	struct icp_qat_fw_la_bulk_req *req;
	int32_t data_len;
	uint32_t tail = dp_ctx->tail;

	req = (struct icp_qat_fw_la_bulk_req *)(
		(uint8_t *)tx_queue->base_addr + tail);
	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
	data_len = qat_sym_dp_parse_data_vec(qp, req, data, n_data_vecs);
	if (unlikely(data_len < 0))
		return -1;
	req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data;

	enqueue_one_aead_job(ctx, req, iv, digest, aad, ofs,
		(uint32_t)data_len);

	dp_ctx->tail = tail;
	dp_ctx->cached_enqueue++;

	return 0;
}

static __rte_always_inline uint32_t
qat_sym_dp_enqueue_aead_jobs(void *qp_data, uint8_t *drv_ctx,
	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
	void *user_data[], int *status)
{
	struct qat_qp *qp = qp_data;
	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
	struct qat_queue *tx_queue = &qp->tx_q;
	struct qat_sym_session *ctx = dp_ctx->session;
	uint32_t i, n;
	uint32_t tail;
	struct icp_qat_fw_la_bulk_req *req;
	int32_t data_len;

	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
	if (unlikely(n == 0)) {
		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
		*status = 0;
		return 0;
	}

	tail = dp_ctx->tail;

	for (i = 0; i < n; i++) {
		req = (struct icp_qat_fw_la_bulk_req *)(
			(uint8_t *)tx_queue->base_addr + tail);
		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));

		data_len = qat_sym_dp_parse_data_vec(qp, req,
			vec->src_sgl[i].vec,
			vec->src_sgl[i].num);
		if (unlikely(data_len < 0))
			break;
		req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data[i];
		enqueue_one_aead_job(ctx, req, &vec->iv[i], &vec->digest[i],
			&vec->aad[i], ofs, (uint32_t)data_len);
		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
	}

	if (unlikely(i < n))
		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);

	dp_ctx->tail = tail;
	dp_ctx->cached_enqueue += i;
	*status = 0;
	return i;
}

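/*
 * Dequeue a burst of responses. The first response supplies the opaque data
 * used to query the expected count; each completed job is reported through
 * the post_dequeue callback and counted in n_success_jobs.
 */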
static __rte_always_inline uint32_t
qat_sym_dp_dequeue_burst(void *qp_data, uint8_t *drv_ctx,
	rte_cryptodev_raw_get_dequeue_count_t get_dequeue_count,
	uint32_t max_nb_to_dequeue,
	rte_cryptodev_raw_post_dequeue_t post_dequeue,
	void **out_user_data, uint8_t is_user_data_array,
	uint32_t *n_success_jobs, int *return_status)
{
	struct qat_qp *qp = qp_data;
	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
	struct qat_queue *rx_queue = &qp->rx_q;
	struct icp_qat_fw_comn_resp *resp;
	void *resp_opaque;
	uint32_t i, n, inflight;
	uint32_t head;
	uint8_t status;

	*n_success_jobs = 0;
	*return_status = 0;
	head = dp_ctx->head;

	inflight = qp->enqueued - qp->dequeued;
	if (unlikely(inflight == 0))
		return 0;

	resp = (struct icp_qat_fw_comn_resp *)((uint8_t *)rx_queue->base_addr +
			head);
	/* no operation ready */
	if (unlikely(*(uint32_t *)resp == ADF_RING_EMPTY_SIG))
		return 0;

	resp_opaque = (void *)(uintptr_t)resp->opaque_data;
	/* get the dequeue count */
	if (get_dequeue_count) {
		n = get_dequeue_count(resp_opaque);
		if (unlikely(n == 0))
			return 0;
	} else {
		if (unlikely(max_nb_to_dequeue == 0))
			return 0;
		n = max_nb_to_dequeue;
	}

	out_user_data[0] = resp_opaque;
	status = QAT_SYM_DP_IS_RESP_SUCCESS(resp);
	post_dequeue(resp_opaque, 0, status);
	*n_success_jobs += status;

	head = (head + rx_queue->msg_size) & rx_queue->modulo_mask;

	/* we already finished dequeue when n == 1 */
	if (unlikely(n == 1)) {
		i = 1;
		goto end_deq;
	}

	if (is_user_data_array) {
		for (i = 1; i < n; i++) {
			resp = (struct icp_qat_fw_comn_resp *)(
				(uint8_t *)rx_queue->base_addr + head);
			if (unlikely(*(uint32_t *)resp ==
					ADF_RING_EMPTY_SIG))
				goto end_deq;
			out_user_data[i] = (void *)(uintptr_t)resp->opaque_data;
			status = QAT_SYM_DP_IS_RESP_SUCCESS(resp);
			*n_success_jobs += status;
			post_dequeue(out_user_data[i], i, status);
			head = (head + rx_queue->msg_size) &
				rx_queue->modulo_mask;
		}

		goto end_deq;
	}

	/* opaque is not array */
	for (i = 1; i < n; i++) {
		resp = (struct icp_qat_fw_comn_resp *)(
			(uint8_t *)rx_queue->base_addr + head);
		status = QAT_SYM_DP_IS_RESP_SUCCESS(resp);
		if (unlikely(*(uint32_t *)resp == ADF_RING_EMPTY_SIG))
			goto end_deq;
		head = (head + rx_queue->msg_size) &
				rx_queue->modulo_mask;
		post_dequeue(resp_opaque, i, status);
		*n_success_jobs += status;
	}

end_deq:
	dp_ctx->head = head;
	dp_ctx->cached_dequeue += i;
	return i;
}

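/* Dequeue a single response and report its status; NULL if none is ready. */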
static __rte_always_inline void *
qat_sym_dp_dequeue(void *qp_data, uint8_t *drv_ctx, int *dequeue_status,
		enum rte_crypto_op_status *op_status)
{
	struct qat_qp *qp = qp_data;
	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
	struct qat_queue *rx_queue = &qp->rx_q;
	register struct icp_qat_fw_comn_resp *resp;

	resp = (struct icp_qat_fw_comn_resp *)((uint8_t *)rx_queue->base_addr +
			dp_ctx->head);

	if (unlikely(*(uint32_t *)resp == ADF_RING_EMPTY_SIG))
		return NULL;

	dp_ctx->head = (dp_ctx->head + rx_queue->msg_size) &
			rx_queue->modulo_mask;
	dp_ctx->cached_dequeue++;

	*op_status = QAT_SYM_DP_IS_RESP_SUCCESS(resp) ?
			RTE_CRYPTO_OP_STATUS_SUCCESS :
			RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
	*dequeue_status = 0;
	return (void *)(uintptr_t)resp->opaque_data;
}

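/*
 * Commit n cached enqueues: advance the TX queue tail and write it to the
 * ring CSR so the hardware starts processing the new requests.
 */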
static __rte_always_inline int
qat_sym_dp_kick_tail(void *qp_data, uint8_t *drv_ctx, uint32_t n)
{
	struct qat_qp *qp = qp_data;
	struct qat_queue *tx_queue = &qp->tx_q;
	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;

	if (unlikely(dp_ctx->cached_enqueue != n))
		return -1;

	qp->enqueued += n;
	qp->stats.enqueued_count += n;

	tx_queue->tail = dp_ctx->tail;

	WRITE_CSR_RING_TAIL(qp->mmap_bar_addr,
		tx_queue->hw_bundle_number,
		tx_queue->hw_queue_number, tx_queue->tail);
	tx_queue->csr_tail = tx_queue->tail;
	dp_ctx->cached_enqueue = 0;

	return 0;
}

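/*
 * Acknowledge n cached dequeues: advance the RX head, clear the consumed
 * descriptors once the threshold is reached and write the head CSR.
 */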
static __rte_always_inline int
qat_sym_dp_update_head(void *qp_data, uint8_t *drv_ctx, uint32_t n)
{
	struct qat_qp *qp = qp_data;
	struct qat_queue *rx_queue = &qp->rx_q;
	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;

	if (unlikely(dp_ctx->cached_dequeue != n))
		return -1;

	rx_queue->head = dp_ctx->head;
	rx_queue->nb_processed_responses += n;
	qp->dequeued += n;
	qp->stats.dequeued_count += n;
	if (rx_queue->nb_processed_responses > QAT_CSR_HEAD_WRITE_THRESH) {
		uint32_t old_head, new_head;
		uint32_t max_head;

		old_head = rx_queue->csr_head;
		new_head = rx_queue->head;
		max_head = qp->nb_descriptors * rx_queue->msg_size;

		/* write out free descriptors */
		void *cur_desc = (uint8_t *)rx_queue->base_addr + old_head;

		if (new_head < old_head) {
			memset(cur_desc, ADF_RING_EMPTY_SIG_BYTE,
					max_head - old_head);
			memset(rx_queue->base_addr, ADF_RING_EMPTY_SIG_BYTE,
					new_head);
		} else {
			memset(cur_desc, ADF_RING_EMPTY_SIG_BYTE, new_head -
					old_head);
		}
		rx_queue->nb_processed_responses = 0;
		rx_queue->csr_head = new_head;

		/* write current head to CSR */
		WRITE_CSR_RING_HEAD(qp->mmap_bar_addr,
			rx_queue->hw_bundle_number, rx_queue->hw_queue_number,
			new_head);
	}

	dp_ctx->cached_dequeue = 0;
	return 0;
}

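/*
 * Populate the raw data-path context for a queue pair: record the session and
 * select the enqueue/dequeue handlers matching the session's command
 * (cipher-only, auth-only, chained or AEAD).
 */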
int
qat_sym_configure_dp_ctx(struct rte_cryptodev *dev, uint16_t qp_id,
	struct rte_crypto_raw_dp_ctx *raw_dp_ctx,
	enum rte_crypto_op_sess_type sess_type,
	union rte_cryptodev_session_ctx session_ctx, uint8_t is_update)
{
	struct qat_qp *qp;
	struct qat_sym_session *ctx;
	struct qat_sym_dp_ctx *dp_ctx;

	qp = dev->data->queue_pairs[qp_id];
	dp_ctx = (struct qat_sym_dp_ctx *)raw_dp_ctx->drv_ctx_data;

	if (!is_update) {
		memset(raw_dp_ctx, 0, sizeof(*raw_dp_ctx) +
				sizeof(struct qat_sym_dp_ctx));
		raw_dp_ctx->qp_data = dev->data->queue_pairs[qp_id];
		dp_ctx->tail = qp->tx_q.tail;
		dp_ctx->head = qp->rx_q.head;
		dp_ctx->cached_enqueue = dp_ctx->cached_dequeue = 0;
	}

	if (sess_type != RTE_CRYPTO_OP_WITH_SESSION)
		return -EINVAL;

	ctx = (struct qat_sym_session *)get_sym_session_private_data(
			session_ctx.crypto_sess, qat_sym_driver_id);

	dp_ctx->session = ctx;

	raw_dp_ctx->enqueue_done = qat_sym_dp_kick_tail;
	raw_dp_ctx->dequeue_burst = qat_sym_dp_dequeue_burst;
	raw_dp_ctx->dequeue = qat_sym_dp_dequeue;
	raw_dp_ctx->dequeue_done = qat_sym_dp_update_head;

	if ((ctx->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER ||
			ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) &&
			!ctx->is_gmac) {
		/* AES-GCM or AES-CCM */
		if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
			ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64 ||
			(ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_AES128
			&& ctx->qat_mode == ICP_QAT_HW_CIPHER_CTR_MODE
			&& ctx->qat_hash_alg ==
					ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC)) {
			raw_dp_ctx->enqueue_burst =
					qat_sym_dp_enqueue_aead_jobs;
			raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_aead;
		} else {
			raw_dp_ctx->enqueue_burst =
					qat_sym_dp_enqueue_chain_jobs;
			raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_chain;
		}
	} else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH || ctx->is_gmac) {
		raw_dp_ctx->enqueue_burst = qat_sym_dp_enqueue_auth_jobs;
		raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_auth;
	} else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) {
		if (ctx->qat_mode == ICP_QAT_HW_CIPHER_AEAD_MODE ||
			ctx->qat_cipher_alg ==
				ICP_QAT_HW_CIPHER_ALGO_CHACHA20_POLY1305) {
			raw_dp_ctx->enqueue_burst =
					qat_sym_dp_enqueue_aead_jobs;
			raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_aead;
		} else {
			raw_dp_ctx->enqueue_burst =
					qat_sym_dp_enqueue_cipher_jobs;
			raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_cipher;
		}
	} else
		return -1;

	return 0;
}

int
qat_sym_get_dp_ctx_size(__rte_unused struct rte_cryptodev *dev)
{
	return sizeof(struct qat_sym_dp_ctx);
}