/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */

#ifndef _CNXK_SE_H_
#define _CNXK_SE_H_

#include <rte_cryptodev.h>

#include "cnxk_cryptodev.h"
#include "cnxk_cryptodev_ops.h"
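/*
 * Scratch sizing for the gather/scatter IOVs built on the datapath: one
 * roc_se_iov_ptr header followed by up to ROC_SE_MAX_SG_CNT roc_se_buf_ptr
 * entries, i.e. enough for a worst-case mbuf chain.
 */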
#define SRC_IOV_SIZE \
	(sizeof(struct roc_se_iov_ptr) + \
	 (sizeof(struct roc_se_buf_ptr) * ROC_SE_MAX_SG_CNT))
#define DST_IOV_SIZE \
	(sizeof(struct roc_se_iov_ptr) + \
	 (sizeof(struct roc_se_buf_ptr) * ROC_SE_MAX_SG_CNT))
struct cnxk_se_sess {
	uint16_t zsk_flag : 4;
	uint16_t chacha_poly : 1;
	uint8_t auth_iv_length;
	uint16_t auth_iv_offset;
	struct roc_se_ctx roc_se_ctx;
} __rte_cache_aligned;
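/*
 * Check that the digest length requested in the auth xform matches the
 * size the selected hash actually produces (e.g. 20 bytes for SHA1, 32
 * for SHA256, 64 for SHA512). Returns 0 on match, -1 otherwise.
 */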
static __rte_always_inline int
cpt_mac_len_verify(struct rte_crypto_auth_xform *auth)
{
	uint16_t mac_len = auth->digest_length;
	int ret;

	switch (auth->algo) {
	case RTE_CRYPTO_AUTH_MD5:
	case RTE_CRYPTO_AUTH_MD5_HMAC:
		ret = (mac_len == 16) ? 0 : -1;
		break;
	case RTE_CRYPTO_AUTH_SHA1:
	case RTE_CRYPTO_AUTH_SHA1_HMAC:
		ret = (mac_len == 20) ? 0 : -1;
		break;
	case RTE_CRYPTO_AUTH_SHA224:
	case RTE_CRYPTO_AUTH_SHA224_HMAC:
		ret = (mac_len == 28) ? 0 : -1;
		break;
	case RTE_CRYPTO_AUTH_SHA256:
	case RTE_CRYPTO_AUTH_SHA256_HMAC:
		ret = (mac_len == 32) ? 0 : -1;
		break;
	case RTE_CRYPTO_AUTH_SHA384:
	case RTE_CRYPTO_AUTH_SHA384_HMAC:
		ret = (mac_len == 48) ? 0 : -1;
		break;
	case RTE_CRYPTO_AUTH_SHA512:
	case RTE_CRYPTO_AUTH_SHA512_HMAC:
		ret = (mac_len == 64) ? 0 : -1;
		break;
	case RTE_CRYPTO_AUTH_NULL:
		ret = 0;
		break;
	default:
		ret = -1;
	}

	return ret;
}
static __rte_always_inline void
cpt_fc_salt_update(struct roc_se_ctx *se_ctx, uint8_t *salt)
{
	struct roc_se_context *fctx = &se_ctx->se_ctx.fctx;

	memcpy(fctx->enc.encr_iv, salt, 4);
}
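/*
 * SG list helpers: each roc_se_sglist_comp packs four {len, ptr} pairs,
 * so logical entry 'i' lands in component i >> 2, slot i % 4. Lengths
 * and addresses are stored big-endian, as the microcode expects. All
 * helpers return the incremented entry index.
 */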
static __rte_always_inline uint32_t
fill_sg_comp(struct roc_se_sglist_comp *list, uint32_t i, phys_addr_t dma_addr,
	     uint32_t size)
{
	struct roc_se_sglist_comp *to = &list[i >> 2];

	to->u.s.len[i % 4] = rte_cpu_to_be_16(size);
	to->ptr[i % 4] = rte_cpu_to_be_64(dma_addr);
	return ++i;
}
static __rte_always_inline uint32_t
fill_sg_comp_from_buf(struct roc_se_sglist_comp *list, uint32_t i,
		      struct roc_se_buf_ptr *from)
{
	struct roc_se_sglist_comp *to = &list[i >> 2];

	to->u.s.len[i % 4] = rte_cpu_to_be_16(from->size);
	to->ptr[i % 4] = rte_cpu_to_be_64((uint64_t)from->vaddr);
	return ++i;
}
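/*
 * Like fill_sg_comp_from_buf(), but adds at most *psize bytes and
 * decrements *psize by what was consumed; callers detect an undersized
 * buffer by checking for a non-zero remainder afterwards.
 */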
static __rte_always_inline uint32_t
fill_sg_comp_from_buf_min(struct roc_se_sglist_comp *list, uint32_t i,
			  struct roc_se_buf_ptr *from, uint32_t *psize)
{
	struct roc_se_sglist_comp *to = &list[i >> 2];
	uint32_t size = *psize;
	uint32_t e_len;

	e_len = (size > from->size) ? from->size : size;
	to->u.s.len[i % 4] = rte_cpu_to_be_16(e_len);
	to->ptr[i % 4] = rte_cpu_to_be_64((uint64_t)from->vaddr);
	*psize -= e_len;
	return ++i;
}
/*
 * This fills the MC expected SGIO list
 * from the IOV given by the user.
 */
static __rte_always_inline uint32_t
fill_sg_comp_from_iov(struct roc_se_sglist_comp *list, uint32_t i,
		      struct roc_se_iov_ptr *from, uint32_t from_offset,
		      uint32_t *psize, struct roc_se_buf_ptr *extra_buf,
		      uint32_t extra_offset)
{
	uint32_t extra_len = extra_buf ? extra_buf->size : 0;
	uint32_t size = *psize;
	struct roc_se_buf_ptr *bufs;
	uint32_t j;

	bufs = from->bufs;
	for (j = 0; (j < from->buf_cnt) && size; j++) {
		uint64_t e_vaddr;
		uint32_t e_len;
		struct roc_se_sglist_comp *to = &list[i >> 2];

		if (unlikely(from_offset)) {
			if (from_offset >= bufs[j].size) {
				from_offset -= bufs[j].size;
				continue;
			}
			e_vaddr = (uint64_t)bufs[j].vaddr + from_offset;
			e_len = (size > (bufs[j].size - from_offset)) ?
					(bufs[j].size - from_offset) :
					size;
			from_offset = 0;
		} else {
			e_vaddr = (uint64_t)bufs[j].vaddr;
			e_len = (size > bufs[j].size) ? bufs[j].size : size;
		}

		to->u.s.len[i % 4] = rte_cpu_to_be_16(e_len);
		to->ptr[i % 4] = rte_cpu_to_be_64(e_vaddr);

		if (extra_len && (e_len >= extra_offset)) {
			/* Break the data at given offset */
			uint32_t next_len = e_len - extra_offset;
			uint64_t next_vaddr = e_vaddr + extra_offset;

			e_len = extra_offset;
			size -= e_len;
			to->u.s.len[i % 4] = rte_cpu_to_be_16(e_len);

			extra_len = RTE_MIN(extra_len, size);
			/* Insert extra data ptr */
			if (extra_len) {
				i++;
				to = &list[i >> 2];
				to->u.s.len[i % 4] =
					rte_cpu_to_be_16(extra_len);
				to->ptr[i % 4] = rte_cpu_to_be_64(
					(uint64_t)extra_buf->vaddr);
				size -= extra_len;
			}

			next_len = RTE_MIN(next_len, size);
			/* insert the rest of the data */
			if (next_len) {
				i++;
				to = &list[i >> 2];
				to->u.s.len[i % 4] = rte_cpu_to_be_16(next_len);
				to->ptr[i % 4] = rte_cpu_to_be_64(next_vaddr);
				size -= next_len;
			}

			extra_len = 0;
		} else {
			size -= e_len;
		}

		if (extra_offset)
			extra_offset -= size;

		i++;
	}

	*psize = size;
	return (uint32_t)i;
}
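/*
 * Flexi-crypto (FC_GEN) encrypt path. d_offs packs the cipher and auth
 * offsets as (cipher_offset << 16) | auth_offset, and d_lens packs the
 * lengths as (cipher_len << 32) | auth_len; see fill_fc_params(). As an
 * illustrative example, ciphering 64 bytes at offset 16 while
 * authenticating 80 bytes from offset 0 would use:
 *
 *	d_offs = ((uint64_t)16 << 16) | 0;
 *	d_lens = ((uint64_t)64 << 32) | 80;
 */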
static __rte_always_inline int
cpt_enc_hmac_prep(uint32_t flags, uint64_t d_offs, uint64_t d_lens,
		  struct roc_se_fc_params *fc_params, struct cpt_inst_s *inst)
{
	uint32_t iv_offset = 0;
	int32_t inputlen, outputlen, enc_dlen, auth_dlen;
	struct roc_se_ctx *se_ctx;
	uint32_t cipher_type, hash_type;
	uint32_t mac_len, size;
	uint8_t iv_len = 16;
	struct roc_se_buf_ptr *aad_buf = NULL;
	uint32_t encr_offset, auth_offset;
	uint32_t encr_data_len, auth_data_len, aad_len = 0;
	uint32_t passthrough_len = 0;
	union cpt_inst_w4 cpt_inst_w4;
	void *offset_vaddr;
	uint8_t op_minor;

	encr_offset = ROC_SE_ENCR_OFFSET(d_offs);
	auth_offset = ROC_SE_AUTH_OFFSET(d_offs);
	encr_data_len = ROC_SE_ENCR_DLEN(d_lens);
	auth_data_len = ROC_SE_AUTH_DLEN(d_lens);

	if (unlikely(flags & ROC_SE_VALID_AAD_BUF)) {
		/* We don't support both AAD and auth data separately */
		auth_data_len = 0;
		auth_offset = 0;
		aad_len = fc_params->aad_buf.size;
		aad_buf = &fc_params->aad_buf;
	}

	se_ctx = fc_params->ctx_buf.vaddr;
	cipher_type = se_ctx->enc_cipher;
	hash_type = se_ctx->hash_type;
	mac_len = se_ctx->mac_len;
	op_minor = se_ctx->template_w4.s.opcode_minor;

	if (unlikely(!(flags & ROC_SE_VALID_IV_BUF))) {
		iv_len = 0;
		iv_offset = ROC_SE_ENCR_IV_OFFSET(d_offs);
	}

	if (unlikely(flags & ROC_SE_VALID_AAD_BUF)) {
		/*
		 * When AAD is given, data above encr_offset is passed
		 * through. Since AAD is given as a separate pointer and not
		 * as an offset, this is a special case: the input data must
		 * be fragmented into passthrough + encr_data, with the AAD
		 * inserted in between.
		 */
		if (hash_type != ROC_SE_GMAC_TYPE) {
			passthrough_len = encr_offset;
			auth_offset = passthrough_len + iv_len;
			encr_offset = passthrough_len + aad_len + iv_len;
			auth_data_len = aad_len + encr_data_len;
		} else {
			passthrough_len = 16 + aad_len;
			auth_offset = passthrough_len + iv_len;
			auth_data_len = aad_len;
		}
	} else {
		encr_offset += iv_len;
		auth_offset += iv_len;
	}
	/* Encryption */
	cpt_inst_w4.s.opcode_major = ROC_SE_MAJOR_OP_FC;
	cpt_inst_w4.s.opcode_minor = ROC_SE_FC_MINOR_OP_ENCRYPT;
	cpt_inst_w4.s.opcode_minor |= (uint64_t)op_minor;

	if (hash_type == ROC_SE_GMAC_TYPE) {
		encr_offset = 0;
		encr_data_len = 0;
	}

	auth_dlen = auth_offset + auth_data_len;
	enc_dlen = encr_data_len + encr_offset;
	if (unlikely(encr_data_len & 0xf)) {
		if ((cipher_type == ROC_SE_DES3_CBC) ||
		    (cipher_type == ROC_SE_DES3_ECB))
			enc_dlen =
				RTE_ALIGN_CEIL(encr_data_len, 8) + encr_offset;
		else if (likely((cipher_type == ROC_SE_AES_CBC) ||
				(cipher_type == ROC_SE_AES_ECB)))
			/* AES operates on 16-byte blocks */
			enc_dlen = RTE_ALIGN_CEIL(encr_data_len, 16) +
				   encr_offset;
	}

	if (unlikely(auth_dlen > enc_dlen)) {
		inputlen = auth_dlen;
		outputlen = auth_dlen + mac_len;
	} else {
		inputlen = enc_dlen;
		outputlen = enc_dlen + mac_len;
	}

	if (op_minor & ROC_SE_FC_MINOR_OP_HMAC_FIRST)
		outputlen = enc_dlen;

	/* GP op header */
	cpt_inst_w4.s.param1 = encr_data_len;
	cpt_inst_w4.s.param2 = auth_data_len;
	/*
	 * On cn9k and cn10k, the IV and offset control word cannot be part
	 * of the instruction and must instead be placed in the data buffer.
	 * Direct mode is therefore used only when the in-place buffer has
	 * enough headroom for them; otherwise fall back to SG mode.
	 */
	if (likely((flags & ROC_SE_SINGLE_BUF_INPLACE) &&
		   (flags & ROC_SE_SINGLE_BUF_HEADROOM))) {
		void *dm_vaddr = fc_params->bufs[0].vaddr;

		/* Use Direct mode */

		offset_vaddr =
			(uint8_t *)dm_vaddr - ROC_SE_OFF_CTRL_LEN - iv_len;

		inst->dptr = (uint64_t)offset_vaddr;

		/* RPTR should just exclude offset control word */
		inst->rptr = (uint64_t)dm_vaddr - iv_len;

		cpt_inst_w4.s.dlen = inputlen + ROC_SE_OFF_CTRL_LEN;

		if (likely(iv_len)) {
			uint64_t *dest = (uint64_t *)((uint8_t *)offset_vaddr +
						      ROC_SE_OFF_CTRL_LEN);
			uint64_t *src = fc_params->iv_buf;

			dest[0] = src[0];
			dest[1] = src[1];
		}
	} else {
		void *m_vaddr = fc_params->meta_buf.vaddr;
		uint32_t i, g_size_bytes, s_size_bytes;
		struct roc_se_sglist_comp *gather_comp;
		struct roc_se_sglist_comp *scatter_comp;
		uint8_t *in_buffer;

		/* This falls under strict SG mode */
		offset_vaddr = m_vaddr;
		size = ROC_SE_OFF_CTRL_LEN + iv_len;

		m_vaddr = (uint8_t *)m_vaddr + size;

		cpt_inst_w4.s.opcode_major |= (uint64_t)ROC_SE_DMA_MODE;

		if (likely(iv_len)) {
			uint64_t *dest = (uint64_t *)((uint8_t *)offset_vaddr +
						      ROC_SE_OFF_CTRL_LEN);
			uint64_t *src = fc_params->iv_buf;

			dest[0] = src[0];
			dest[1] = src[1];
		}

		/* DPTR has SG list */
		in_buffer = m_vaddr;

		((uint16_t *)in_buffer)[0] = 0;
		((uint16_t *)in_buffer)[1] = 0;

		/* TODO Add error check if space will be sufficient */
		gather_comp =
			(struct roc_se_sglist_comp *)((uint8_t *)m_vaddr + 8);

		/*
		 * Input Gather List
		 */
		i = 0;

		/* Offset control word that includes iv */
		i = fill_sg_comp(gather_comp, i, (uint64_t)offset_vaddr,
				 ROC_SE_OFF_CTRL_LEN + iv_len);

		/* Add input data */
		size = inputlen - iv_len;
		if (likely(size)) {
			uint32_t aad_offset = aad_len ? passthrough_len : 0;

			if (unlikely(flags & ROC_SE_SINGLE_BUF_INPLACE)) {
				i = fill_sg_comp_from_buf_min(
					gather_comp, i, fc_params->bufs, &size);
			} else {
				i = fill_sg_comp_from_iov(
					gather_comp, i, fc_params->src_iov, 0,
					&size, aad_buf, aad_offset);
			}

			if (unlikely(size)) {
				plt_dp_err("Insufficient buffer space,"
					   " size %d needed",
					   size);
				return -1;
			}
		}
		((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
		g_size_bytes =
			((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);
		/*
		 * Output Scatter list
		 */
		i = 0;
		scatter_comp =
			(struct roc_se_sglist_comp *)((uint8_t *)gather_comp +
						      g_size_bytes);

		/* Add IV */
		if (likely(iv_len)) {
			i = fill_sg_comp(scatter_comp, i,
					 (uint64_t)offset_vaddr +
						 ROC_SE_OFF_CTRL_LEN,
					 iv_len);
		}

		/* output data or output data + digest*/
		if (unlikely(flags & ROC_SE_VALID_MAC_BUF)) {
			size = outputlen - iv_len - mac_len;
			if (size) {
				uint32_t aad_offset =
					aad_len ? passthrough_len : 0;

				if (unlikely(flags &
					     ROC_SE_SINGLE_BUF_INPLACE)) {
					i = fill_sg_comp_from_buf_min(
						scatter_comp, i,
						fc_params->bufs, &size);
				} else {
					i = fill_sg_comp_from_iov(
						scatter_comp, i,
						fc_params->dst_iov, 0, &size,
						aad_buf, aad_offset);
				}
				if (unlikely(size)) {
					plt_dp_err("Insufficient buffer"
						   " space, size %d needed",
						   size);
					return -1;
				}
			}
			/* mac data */
			if (mac_len) {
				i = fill_sg_comp_from_buf(scatter_comp, i,
							  &fc_params->mac_buf);
			}
		} else {
			/* Output including mac */
			size = outputlen - iv_len;
			if (likely(size)) {
				uint32_t aad_offset =
					aad_len ? passthrough_len : 0;

				if (unlikely(flags &
					     ROC_SE_SINGLE_BUF_INPLACE)) {
					i = fill_sg_comp_from_buf_min(
						scatter_comp, i,
						fc_params->bufs, &size);
				} else {
					i = fill_sg_comp_from_iov(
						scatter_comp, i,
						fc_params->dst_iov, 0, &size,
						aad_buf, aad_offset);
				}
				if (unlikely(size)) {
					plt_dp_err("Insufficient buffer"
						   " space, size %d needed",
						   size);
					return -1;
				}
			}
		}
		((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
		s_size_bytes =
			((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);

		size = g_size_bytes + s_size_bytes + ROC_SE_SG_LIST_HDR_SIZE;

		/* This is DPTR len in case of SG mode */
		cpt_inst_w4.s.dlen = size;

		inst->dptr = (uint64_t)in_buffer;
	}
	if (unlikely((encr_offset >> 16) || (iv_offset >> 8) ||
		     (auth_offset >> 8))) {
		plt_dp_err("Offset not supported");
		plt_dp_err("enc_offset: %d", encr_offset);
		plt_dp_err("iv_offset : %d", iv_offset);
		plt_dp_err("auth_offset: %d", auth_offset);
		return -1;
	}

	*(uint64_t *)offset_vaddr = rte_cpu_to_be_64(
		((uint64_t)encr_offset << 16) | ((uint64_t)iv_offset << 8) |
		((uint64_t)auth_offset));

	inst->w4.u64 = cpt_inst_w4.u64;

	return 0;
}
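/*
 * Decrypt counterpart of cpt_enc_hmac_prep(). The MAC is consumed from
 * the input here (inputlen includes mac_len) rather than produced, so
 * outputlen excludes it.
 */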
static __rte_always_inline int
cpt_dec_hmac_prep(uint32_t flags, uint64_t d_offs, uint64_t d_lens,
		  struct roc_se_fc_params *fc_params, struct cpt_inst_s *inst)
{
	uint32_t iv_offset = 0, size;
	int32_t inputlen, outputlen, enc_dlen, auth_dlen;
	struct roc_se_ctx *se_ctx;
	int32_t hash_type, mac_len;
	uint8_t iv_len = 16;
	struct roc_se_buf_ptr *aad_buf = NULL;
	uint32_t encr_offset, auth_offset;
	uint32_t encr_data_len, auth_data_len, aad_len = 0;
	uint32_t passthrough_len = 0;
	union cpt_inst_w4 cpt_inst_w4;
	void *offset_vaddr;
	uint8_t op_minor;

	encr_offset = ROC_SE_ENCR_OFFSET(d_offs);
	auth_offset = ROC_SE_AUTH_OFFSET(d_offs);
	encr_data_len = ROC_SE_ENCR_DLEN(d_lens);
	auth_data_len = ROC_SE_AUTH_DLEN(d_lens);

	if (unlikely(flags & ROC_SE_VALID_AAD_BUF)) {
		/* We don't support both AAD and auth data separately */
		auth_data_len = 0;
		auth_offset = 0;
		aad_len = fc_params->aad_buf.size;
		aad_buf = &fc_params->aad_buf;
	}

	se_ctx = fc_params->ctx_buf.vaddr;
	hash_type = se_ctx->hash_type;
	mac_len = se_ctx->mac_len;
	op_minor = se_ctx->template_w4.s.opcode_minor;

	if (unlikely(!(flags & ROC_SE_VALID_IV_BUF))) {
		iv_len = 0;
		iv_offset = ROC_SE_ENCR_IV_OFFSET(d_offs);
	}

	if (unlikely(flags & ROC_SE_VALID_AAD_BUF)) {
		/*
		 * When AAD is given, data above encr_offset is passed
		 * through. Since AAD is given as a separate pointer and not
		 * as an offset, this is a special case: the input data must
		 * be fragmented into passthrough + encr_data, with the AAD
		 * inserted in between.
		 */
		if (hash_type != ROC_SE_GMAC_TYPE) {
			passthrough_len = encr_offset;
			auth_offset = passthrough_len + iv_len;
			encr_offset = passthrough_len + aad_len + iv_len;
			auth_data_len = aad_len + encr_data_len;
		} else {
			passthrough_len = 16 + aad_len;
			auth_offset = passthrough_len + iv_len;
			auth_data_len = aad_len;
		}
	} else {
		encr_offset += iv_len;
		auth_offset += iv_len;
	}
	/* Decryption */
	cpt_inst_w4.s.opcode_major = ROC_SE_MAJOR_OP_FC;
	cpt_inst_w4.s.opcode_minor = ROC_SE_FC_MINOR_OP_DECRYPT;
	cpt_inst_w4.s.opcode_minor |= (uint64_t)op_minor;

	if (hash_type == ROC_SE_GMAC_TYPE) {
		encr_offset = 0;
		encr_data_len = 0;
	}

	enc_dlen = encr_offset + encr_data_len;
	auth_dlen = auth_offset + auth_data_len;

	if (auth_dlen > enc_dlen) {
		inputlen = auth_dlen + mac_len;
		outputlen = auth_dlen;
	} else {
		inputlen = enc_dlen + mac_len;
		outputlen = enc_dlen;
	}

	if (op_minor & ROC_SE_FC_MINOR_OP_HMAC_FIRST)
		outputlen = inputlen = enc_dlen;

	cpt_inst_w4.s.param1 = encr_data_len;
	cpt_inst_w4.s.param2 = auth_data_len;
	/*
	 * On cn9k and cn10k, the IV and offset control word cannot be part
	 * of the instruction and must instead be placed in the data buffer.
	 * Direct mode is therefore used only when the in-place buffer has
	 * enough headroom for them; otherwise fall back to SG mode.
	 */
	if (likely((flags & ROC_SE_SINGLE_BUF_INPLACE) &&
		   (flags & ROC_SE_SINGLE_BUF_HEADROOM))) {
		void *dm_vaddr = fc_params->bufs[0].vaddr;

		/* Use Direct mode */

		offset_vaddr =
			(uint8_t *)dm_vaddr - ROC_SE_OFF_CTRL_LEN - iv_len;
		inst->dptr = (uint64_t)offset_vaddr;

		/* RPTR should just exclude offset control word */
		inst->rptr = (uint64_t)dm_vaddr - iv_len;

		cpt_inst_w4.s.dlen = inputlen + ROC_SE_OFF_CTRL_LEN;

		if (likely(iv_len)) {
			uint64_t *dest = (uint64_t *)((uint8_t *)offset_vaddr +
						      ROC_SE_OFF_CTRL_LEN);
			uint64_t *src = fc_params->iv_buf;

			dest[0] = src[0];
			dest[1] = src[1];
		}
	} else {
		void *m_vaddr = fc_params->meta_buf.vaddr;
		uint32_t g_size_bytes, s_size_bytes;
		struct roc_se_sglist_comp *gather_comp;
		struct roc_se_sglist_comp *scatter_comp;
		uint8_t *in_buffer;
		uint32_t i;

		/* This falls under strict SG mode */
		offset_vaddr = m_vaddr;
		size = ROC_SE_OFF_CTRL_LEN + iv_len;

		m_vaddr = (uint8_t *)m_vaddr + size;

		cpt_inst_w4.s.opcode_major |= (uint64_t)ROC_SE_DMA_MODE;

		if (likely(iv_len)) {
			uint64_t *dest = (uint64_t *)((uint8_t *)offset_vaddr +
						      ROC_SE_OFF_CTRL_LEN);
			uint64_t *src = fc_params->iv_buf;

			dest[0] = src[0];
			dest[1] = src[1];
		}

		/* DPTR has SG list */
		in_buffer = m_vaddr;

		((uint16_t *)in_buffer)[0] = 0;
		((uint16_t *)in_buffer)[1] = 0;

		/* TODO Add error check if space will be sufficient */
		gather_comp =
			(struct roc_se_sglist_comp *)((uint8_t *)m_vaddr + 8);

		/*
		 * Input Gather List
		 */
		i = 0;

		/* Offset control word that includes iv */
		i = fill_sg_comp(gather_comp, i, (uint64_t)offset_vaddr,
				 ROC_SE_OFF_CTRL_LEN + iv_len);

		/* Add input data */
		if (flags & ROC_SE_VALID_MAC_BUF) {
			size = inputlen - iv_len - mac_len;
			if (size) {
				/* input data only */
				if (unlikely(flags &
					     ROC_SE_SINGLE_BUF_INPLACE)) {
					i = fill_sg_comp_from_buf_min(
						gather_comp, i, fc_params->bufs,
						&size);
				} else {
					uint32_t aad_offset =
						aad_len ? passthrough_len : 0;

					i = fill_sg_comp_from_iov(
						gather_comp, i,
						fc_params->src_iov, 0, &size,
						aad_buf, aad_offset);
				}
				if (unlikely(size)) {
					plt_dp_err("Insufficient buffer"
						   " space, size %d needed",
						   size);
					return -1;
				}
			}

			/* mac data */
			if (mac_len) {
				i = fill_sg_comp_from_buf(gather_comp, i,
							  &fc_params->mac_buf);
			}
		} else {
			/* input data + mac */
			size = inputlen - iv_len;
			if (size) {
				if (unlikely(flags &
					     ROC_SE_SINGLE_BUF_INPLACE)) {
					i = fill_sg_comp_from_buf_min(
						gather_comp, i, fc_params->bufs,
						&size);
				} else {
					uint32_t aad_offset =
						aad_len ? passthrough_len : 0;

					if (unlikely(!fc_params->src_iov)) {
						plt_dp_err("Bad input args");
						return -1;
					}

					i = fill_sg_comp_from_iov(
						gather_comp, i,
						fc_params->src_iov, 0, &size,
						aad_buf, aad_offset);
				}

				if (unlikely(size)) {
					plt_dp_err("Insufficient buffer"
						   " space, size %d needed",
						   size);
					return -1;
				}
			}
		}
		((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
		g_size_bytes =
			((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);
		/*
		 * Output Scatter List
		 */
		i = 0;
		scatter_comp =
			(struct roc_se_sglist_comp *)((uint8_t *)gather_comp +
						      g_size_bytes);

		/* Add iv */
		if (likely(iv_len)) {
			i = fill_sg_comp(scatter_comp, i,
					 (uint64_t)offset_vaddr +
						 ROC_SE_OFF_CTRL_LEN,
					 iv_len);
		}

		/* Add output data */
		size = outputlen - iv_len;
		if (size) {
			if (unlikely(flags & ROC_SE_SINGLE_BUF_INPLACE)) {
				/* handle single buffer here */
				i = fill_sg_comp_from_buf_min(scatter_comp, i,
							      fc_params->bufs,
							      &size);
			} else {
				uint32_t aad_offset =
					aad_len ? passthrough_len : 0;

				if (unlikely(!fc_params->dst_iov)) {
					plt_dp_err("Bad input args");
					return -1;
				}

				i = fill_sg_comp_from_iov(
					scatter_comp, i, fc_params->dst_iov, 0,
					&size, aad_buf, aad_offset);
			}

			if (unlikely(size)) {
				plt_dp_err("Insufficient buffer space,"
					   " size %d needed",
					   size);
				return -1;
			}
		}

		((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
		s_size_bytes =
			((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);

		size = g_size_bytes + s_size_bytes + ROC_SE_SG_LIST_HDR_SIZE;

		/* This is DPTR len in case of SG mode */
		cpt_inst_w4.s.dlen = size;

		inst->dptr = (uint64_t)in_buffer;
	}
	if (unlikely((encr_offset >> 16) || (iv_offset >> 8) ||
		     (auth_offset >> 8))) {
		plt_dp_err("Offset not supported");
		plt_dp_err("enc_offset: %d", encr_offset);
		plt_dp_err("iv_offset : %d", iv_offset);
		plt_dp_err("auth_offset: %d", auth_offset);
		return -1;
	}

	*(uint64_t *)offset_vaddr = rte_cpu_to_be_64(
		((uint64_t)encr_offset << 16) | ((uint64_t)iv_offset << 8) |
		((uint64_t)auth_offset));

	inst->w4.u64 = cpt_inst_w4.u64;

	return 0;
}
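/*
 * PDCP (SNOW3G UEA2/UIA2 and ZUC EEA3/EIA3) path. Unlike the FC_GEN
 * path, d_offs carries offsets in bits here, hence the divisions by 8
 * below, while param1/param2 are programmed with lengths in bits.
 */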
static __rte_always_inline int
cpt_zuc_snow3g_enc_prep(uint32_t req_flags, uint64_t d_offs, uint64_t d_lens,
			struct roc_se_fc_params *params,
			struct cpt_inst_s *inst)
{
	uint32_t size;
	int32_t inputlen, outputlen;
	struct roc_se_ctx *se_ctx;
	uint32_t mac_len = 0;
	uint8_t pdcp_alg_type, j;
	uint32_t encr_offset = 0, auth_offset = 0;
	uint32_t encr_data_len = 0, auth_data_len = 0;
	int flags, iv_len = 16;
	uint64_t offset_ctrl;
	uint64_t *offset_vaddr;
	uint32_t *iv_s, iv[4];
	union cpt_inst_w4 cpt_inst_w4;

	se_ctx = params->ctx_buf.vaddr;
	flags = se_ctx->zsk_flags;
	mac_len = se_ctx->mac_len;
	pdcp_alg_type = se_ctx->pdcp_alg_type;

	cpt_inst_w4.u64 = 0;
	cpt_inst_w4.s.opcode_major = ROC_SE_MAJOR_OP_ZUC_SNOW3G;

	/* indicates CPTR ctx, operation type, KEY & IV mode from DPTR */

	cpt_inst_w4.s.opcode_minor = ((1 << 7) | (pdcp_alg_type << 5) |
				      (0 << 4) | (0 << 3) | (flags & 0x7));
	if (flags == 0x1) {
		/*
		 * Microcode expects offsets in bytes
		 * TODO: Rounding off
		 */
		auth_data_len = ROC_SE_AUTH_DLEN(d_lens);

		auth_offset = ROC_SE_AUTH_OFFSET(d_offs);
		auth_offset = auth_offset / 8;

		/* consider iv len */
		auth_offset += iv_len;

		inputlen = auth_offset + (RTE_ALIGN(auth_data_len, 8) / 8);
		outputlen = mac_len;

		/* iv offset is 0 */
		offset_ctrl = rte_cpu_to_be_64((uint64_t)auth_offset);
	} else {
		/*
		 * Microcode expects offsets in bytes
		 * TODO: Rounding off
		 */
		encr_data_len = ROC_SE_ENCR_DLEN(d_lens);

		encr_offset = ROC_SE_ENCR_OFFSET(d_offs);
		encr_offset = encr_offset / 8;
		/* consider iv len */
		encr_offset += iv_len;

		inputlen = encr_offset + (RTE_ALIGN(encr_data_len, 8) / 8);
		outputlen = inputlen;

		/* iv offset is 0 */
		offset_ctrl = rte_cpu_to_be_64((uint64_t)encr_offset << 16);
	}
	if (unlikely((encr_offset >> 16) || (auth_offset >> 8))) {
		plt_dp_err("Offset not supported");
		plt_dp_err("enc_offset: %d", encr_offset);
		plt_dp_err("auth_offset: %d", auth_offset);
		return -1;
	}

	iv_s = (flags == 0x1) ? params->auth_iv_buf : params->iv_buf;

	if (pdcp_alg_type == ROC_SE_PDCP_ALG_TYPE_SNOW3G) {
		/*
		 * DPDK seems to provide it in form of IV3 IV2 IV1 IV0 and
		 * big-endian; the microcode needs it as IV0 IV1 IV2 IV3.
		 */
		for (j = 0; j < 4; j++)
			iv[j] = iv_s[3 - j];
	} else {
		/* ZUC doesn't need a swap */
		for (j = 0; j < 4; j++)
			iv[j] = iv_s[j];
	}

	/*
	 * GP op header, lengths are expected in bits.
	 */
	cpt_inst_w4.s.param1 = encr_data_len;
	cpt_inst_w4.s.param2 = auth_data_len;
	/*
	 * On cn9k and cn10k, the IV and offset control word cannot be part
	 * of the instruction and must instead be placed in the data buffer.
	 * Direct mode is therefore used only when the in-place buffer has
	 * enough headroom for them; otherwise fall back to SG mode.
	 */
	if (likely((req_flags & ROC_SE_SINGLE_BUF_INPLACE) &&
		   (req_flags & ROC_SE_SINGLE_BUF_HEADROOM))) {
		void *dm_vaddr = params->bufs[0].vaddr;

		/* Use Direct mode */

		offset_vaddr = (uint64_t *)((uint8_t *)dm_vaddr -
					    ROC_SE_OFF_CTRL_LEN - iv_len);

		inst->dptr = (uint64_t)offset_vaddr;
		/* RPTR should just exclude offset control word */
		inst->rptr = (uint64_t)dm_vaddr - iv_len;

		cpt_inst_w4.s.dlen = inputlen + ROC_SE_OFF_CTRL_LEN;

		if (likely(iv_len)) {
			uint32_t *iv_d = (uint32_t *)((uint8_t *)offset_vaddr +
						      ROC_SE_OFF_CTRL_LEN);
			memcpy(iv_d, iv, 16);
		}

		*offset_vaddr = offset_ctrl;
	} else {
		void *m_vaddr = params->meta_buf.vaddr;
		uint32_t i, g_size_bytes, s_size_bytes;
		struct roc_se_sglist_comp *gather_comp;
		struct roc_se_sglist_comp *scatter_comp;
		uint8_t *in_buffer;
		uint32_t *iv_d;

		/* save space for iv */
		offset_vaddr = m_vaddr;

		m_vaddr = (uint8_t *)m_vaddr + ROC_SE_OFF_CTRL_LEN + iv_len;

		cpt_inst_w4.s.opcode_major |= (uint64_t)ROC_SE_DMA_MODE;

		/* DPTR has SG list */
		in_buffer = m_vaddr;

		((uint16_t *)in_buffer)[0] = 0;
		((uint16_t *)in_buffer)[1] = 0;

		/* TODO Add error check if space will be sufficient */
		gather_comp =
			(struct roc_se_sglist_comp *)((uint8_t *)m_vaddr + 8);

		/*
		 * Input Gather List
		 */
		i = 0;

		/* Offset control word followed by iv */

		i = fill_sg_comp(gather_comp, i, (uint64_t)offset_vaddr,
				 ROC_SE_OFF_CTRL_LEN + iv_len);

		*offset_vaddr = offset_ctrl;

		iv_d = (uint32_t *)((uint8_t *)offset_vaddr +
				    ROC_SE_OFF_CTRL_LEN);
		memcpy(iv_d, iv, 16);

		/* Add input data */
		size = inputlen - iv_len;
		if (likely(size)) {
			i = fill_sg_comp_from_iov(gather_comp, i,
						  params->src_iov, 0, &size,
						  NULL, 0);
			if (unlikely(size)) {
				plt_dp_err("Insufficient buffer space,"
					   " size %d needed",
					   size);
				return -1;
			}
		}
		((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
		g_size_bytes =
			((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);
		/*
		 * Output Scatter List
		 */
		i = 0;
		scatter_comp =
			(struct roc_se_sglist_comp *)((uint8_t *)gather_comp +
						      g_size_bytes);

		/* IV in SLIST only for EEA3 & UEA2 */
		if (flags == 0x1)
			iv_len = 0;

		if (iv_len) {
			i = fill_sg_comp(scatter_comp, i,
					 (uint64_t)offset_vaddr +
						 ROC_SE_OFF_CTRL_LEN,
					 iv_len);
		}

		/* Add output data */
		if (req_flags & ROC_SE_VALID_MAC_BUF) {
			size = outputlen - iv_len - mac_len;
			if (size) {
				i = fill_sg_comp_from_iov(scatter_comp, i,
							  params->dst_iov, 0,
							  &size, NULL, 0);
				if (unlikely(size)) {
					plt_dp_err("Insufficient buffer space,"
						   " size %d needed",
						   size);
					return -1;
				}
			}

			/* mac data */
			if (mac_len) {
				i = fill_sg_comp_from_buf(scatter_comp, i,
							  &params->mac_buf);
			}
		} else {
			/* Output including mac */
			size = outputlen - iv_len;
			if (size) {
				i = fill_sg_comp_from_iov(scatter_comp, i,
							  params->dst_iov, 0,
							  &size, NULL, 0);
				if (unlikely(size)) {
					plt_dp_err("Insufficient buffer space,"
						   " size %d needed",
						   size);
					return -1;
				}
			}
		}
		((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
		s_size_bytes =
			((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);

		size = g_size_bytes + s_size_bytes + ROC_SE_SG_LIST_HDR_SIZE;

		/* This is DPTR len in case of SG mode */
		cpt_inst_w4.s.dlen = size;

		inst->dptr = (uint64_t)in_buffer;
	}

	inst->w4.u64 = cpt_inst_w4.u64;

	return 0;
}
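/*
 * Cipher-only PDCP decrypt: no auth handling, so only the encrypt
 * offset/length halves of d_offs/d_lens are consumed and the IV offset
 * in the control word is always 0.
 */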
static __rte_always_inline int
cpt_zuc_snow3g_dec_prep(uint32_t req_flags, uint64_t d_offs, uint64_t d_lens,
			struct roc_se_fc_params *params,
			struct cpt_inst_s *inst)
{
	int32_t inputlen = 0, outputlen;
	struct roc_se_ctx *se_ctx;
	uint8_t pdcp_alg_type, iv_len = 16;
	uint32_t encr_offset;
	uint32_t encr_data_len;
	int flags;
	uint64_t *offset_vaddr;
	uint32_t *iv_s, iv[4], j;
	union cpt_inst_w4 cpt_inst_w4;

	/*
	 * Microcode expects offsets in bytes
	 * TODO: Rounding off
	 */
	encr_offset = ROC_SE_ENCR_OFFSET(d_offs) / 8;
	encr_data_len = ROC_SE_ENCR_DLEN(d_lens);

	se_ctx = params->ctx_buf.vaddr;
	flags = se_ctx->zsk_flags;
	pdcp_alg_type = se_ctx->pdcp_alg_type;

	cpt_inst_w4.u64 = 0;
	cpt_inst_w4.s.opcode_major = ROC_SE_MAJOR_OP_ZUC_SNOW3G;

	/* indicates CPTR ctx, operation type, KEY & IV mode from DPTR */

	cpt_inst_w4.s.opcode_minor = ((1 << 7) | (pdcp_alg_type << 5) |
				      (0 << 4) | (0 << 3) | (flags & 0x7));

	/* consider iv len */
	encr_offset += iv_len;

	inputlen = encr_offset + (RTE_ALIGN(encr_data_len, 8) / 8);
	outputlen = inputlen;
	iv_s = params->iv_buf;
	if (pdcp_alg_type == ROC_SE_PDCP_ALG_TYPE_SNOW3G) {
		/*
		 * DPDK seems to provide it in form of IV3 IV2 IV1 IV0 and
		 * big-endian; the microcode needs it as IV0 IV1 IV2 IV3.
		 */
		for (j = 0; j < 4; j++)
			iv[j] = iv_s[3 - j];
	} else {
		/* ZUC doesn't need a swap */
		for (j = 0; j < 4; j++)
			iv[j] = iv_s[j];
	}

	/*
	 * GP op header, lengths are expected in bits.
	 */
	cpt_inst_w4.s.param1 = encr_data_len;
	/*
	 * On cn9k and cn10k, the IV and offset control word cannot be part
	 * of the instruction and must instead be placed in the data buffer.
	 * Direct mode is therefore used only when the in-place buffer has
	 * enough headroom for them; otherwise fall back to SG mode.
	 */
	if (likely((req_flags & ROC_SE_SINGLE_BUF_INPLACE) &&
		   (req_flags & ROC_SE_SINGLE_BUF_HEADROOM))) {
		void *dm_vaddr = params->bufs[0].vaddr;

		/* Use Direct mode */

		offset_vaddr = (uint64_t *)((uint8_t *)dm_vaddr -
					    ROC_SE_OFF_CTRL_LEN - iv_len);

		inst->dptr = (uint64_t)offset_vaddr;

		/* RPTR should just exclude offset control word */
		inst->rptr = (uint64_t)dm_vaddr - iv_len;

		cpt_inst_w4.s.dlen = inputlen + ROC_SE_OFF_CTRL_LEN;

		if (likely(iv_len)) {
			uint32_t *iv_d = (uint32_t *)((uint8_t *)offset_vaddr +
						      ROC_SE_OFF_CTRL_LEN);
			memcpy(iv_d, iv, 16);
		}

		/* iv offset is 0 */
		*offset_vaddr = rte_cpu_to_be_64((uint64_t)encr_offset << 16);
	} else {
		void *m_vaddr = params->meta_buf.vaddr;
		uint32_t i, g_size_bytes, s_size_bytes;
		struct roc_se_sglist_comp *gather_comp;
		struct roc_se_sglist_comp *scatter_comp;
		uint8_t *in_buffer;
		uint32_t *iv_d;
		uint32_t size;

		/* save space for offset and iv... */
		offset_vaddr = m_vaddr;

		m_vaddr = (uint8_t *)m_vaddr + ROC_SE_OFF_CTRL_LEN + iv_len;

		cpt_inst_w4.s.opcode_major |= (uint64_t)ROC_SE_DMA_MODE;

		/* DPTR has SG list */
		in_buffer = m_vaddr;

		((uint16_t *)in_buffer)[0] = 0;
		((uint16_t *)in_buffer)[1] = 0;

		/* TODO Add error check if space will be sufficient */
		gather_comp =
			(struct roc_se_sglist_comp *)((uint8_t *)m_vaddr + 8);

		/*
		 * Input Gather List
		 */
		i = 0;

		/* Offset control word */

		/* iv offset is 0 */
		*offset_vaddr = rte_cpu_to_be_64((uint64_t)encr_offset << 16);

		i = fill_sg_comp(gather_comp, i, (uint64_t)offset_vaddr,
				 ROC_SE_OFF_CTRL_LEN + iv_len);

		iv_d = (uint32_t *)((uint8_t *)offset_vaddr +
				    ROC_SE_OFF_CTRL_LEN);
		memcpy(iv_d, iv, 16);

		/* Add input data */
		size = inputlen - iv_len;
		if (likely(size)) {
			i = fill_sg_comp_from_iov(gather_comp, i,
						  params->src_iov, 0, &size,
						  NULL, 0);
			if (unlikely(size)) {
				plt_dp_err("Insufficient buffer space,"
					   " size %d needed",
					   size);
				return -1;
			}
		}
		((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
		g_size_bytes =
			((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);

		/*
		 * Output Scatter List
		 */
		i = 0;
		scatter_comp =
			(struct roc_se_sglist_comp *)((uint8_t *)gather_comp +
						      g_size_bytes);

		/* IV */
		i = fill_sg_comp(scatter_comp, i,
				 (uint64_t)offset_vaddr + ROC_SE_OFF_CTRL_LEN,
				 iv_len);

		/* Add output data */
		size = outputlen - iv_len;
		if (likely(size)) {
			i = fill_sg_comp_from_iov(scatter_comp, i,
						  params->dst_iov, 0, &size,
						  NULL, 0);
			if (unlikely(size)) {
				plt_dp_err("Insufficient buffer space,"
					   " size %d needed",
					   size);
				return -1;
			}
		}
		((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
		s_size_bytes =
			((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);

		size = g_size_bytes + s_size_bytes + ROC_SE_SG_LIST_HDR_SIZE;

		/* This is DPTR len in case of SG mode */
		cpt_inst_w4.s.dlen = size;

		inst->dptr = (uint64_t)in_buffer;
	}

	if (unlikely((encr_offset >> 16))) {
		plt_dp_err("Offset not supported");
		plt_dp_err("enc_offset: %d", encr_offset);
		return -1;
	}

	inst->w4.u64 = cpt_inst_w4.u64;

	return 0;
}
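/*
 * KASUMI F8/F9 path. This always uses SG (DMA) mode, and the direction
 * bit fed into opcode_minor is taken from the IV (iv_s[8] & 0x1).
 */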
static __rte_always_inline int
cpt_kasumi_enc_prep(uint32_t req_flags, uint64_t d_offs, uint64_t d_lens,
		    struct roc_se_fc_params *params, struct cpt_inst_s *inst)
{
	void *m_vaddr = params->meta_buf.vaddr;
	uint32_t size;
	int32_t inputlen = 0, outputlen = 0;
	struct roc_se_ctx *se_ctx;
	uint32_t mac_len = 0;
	uint32_t i = 0;
	uint32_t encr_offset, auth_offset;
	uint32_t encr_data_len, auth_data_len;
	int flags;
	uint8_t *iv_s, *iv_d, iv_len = 8;
	uint8_t dir = 0;
	uint64_t *offset_vaddr;
	union cpt_inst_w4 cpt_inst_w4;
	uint8_t *in_buffer;
	uint32_t g_size_bytes, s_size_bytes;
	struct roc_se_sglist_comp *gather_comp;
	struct roc_se_sglist_comp *scatter_comp;

	encr_offset = ROC_SE_ENCR_OFFSET(d_offs) / 8;
	auth_offset = ROC_SE_AUTH_OFFSET(d_offs) / 8;
	encr_data_len = ROC_SE_ENCR_DLEN(d_lens);
	auth_data_len = ROC_SE_AUTH_DLEN(d_lens);

	se_ctx = params->ctx_buf.vaddr;
	flags = se_ctx->zsk_flags;
	mac_len = se_ctx->mac_len;

	if (flags == 0x0)
		iv_s = params->iv_buf;
	else
		iv_s = params->auth_iv_buf;

	dir = iv_s[8] & 0x1;

	cpt_inst_w4.u64 = 0;
	cpt_inst_w4.s.opcode_major = ROC_SE_MAJOR_OP_KASUMI | ROC_SE_DMA_MODE;

	/* indicates ECB/CBC, direction, ctx from cptr, iv from dptr */
	cpt_inst_w4.s.opcode_minor = ((1 << 6) | (se_ctx->k_ecb << 5) |
				      (dir << 4) | (0 << 3) | (flags & 0x7));

	/*
	 * GP op header, lengths are expected in bits.
	 */
	cpt_inst_w4.s.param1 = encr_data_len;
	cpt_inst_w4.s.param2 = auth_data_len;

	/* consider iv len */
	if (flags == 0x0) {
		encr_offset += iv_len;
		auth_offset += iv_len;
	}
	/* save space for offset ctrl and iv */
	offset_vaddr = m_vaddr;

	m_vaddr = (uint8_t *)m_vaddr + ROC_SE_OFF_CTRL_LEN + iv_len;

	/* DPTR has SG list */
	in_buffer = m_vaddr;

	((uint16_t *)in_buffer)[0] = 0;
	((uint16_t *)in_buffer)[1] = 0;

	/* TODO Add error check if space will be sufficient */
	gather_comp = (struct roc_se_sglist_comp *)((uint8_t *)m_vaddr + 8);

	/*
	 * Input Gather List
	 */
	i = 0;

	/* Offset control word followed by iv */

	if (flags == 0x0) {
		inputlen = encr_offset + (RTE_ALIGN(encr_data_len, 8) / 8);
		outputlen = inputlen;
		/* iv offset is 0 */
		*offset_vaddr = rte_cpu_to_be_64((uint64_t)encr_offset << 16);
		if (unlikely((encr_offset >> 16))) {
			plt_dp_err("Offset not supported");
			plt_dp_err("enc_offset: %d", encr_offset);
			return -1;
		}
	} else {
		inputlen = auth_offset + (RTE_ALIGN(auth_data_len, 8) / 8);
		outputlen = mac_len;
		/* iv offset is 0 */
		*offset_vaddr = rte_cpu_to_be_64((uint64_t)auth_offset);
		if (unlikely((auth_offset >> 8))) {
			plt_dp_err("Offset not supported");
			plt_dp_err("auth_offset: %d", auth_offset);
			return -1;
		}
	}

	i = fill_sg_comp(gather_comp, i, (uint64_t)offset_vaddr,
			 ROC_SE_OFF_CTRL_LEN + iv_len);

	/* IV */
	iv_d = (uint8_t *)offset_vaddr + ROC_SE_OFF_CTRL_LEN;
	memcpy(iv_d, iv_s, iv_len);

	/* Add data */
	size = inputlen - iv_len;
	if (size) {
		i = fill_sg_comp_from_iov(gather_comp, i, params->src_iov, 0,
					  &size, NULL, 0);

		if (unlikely(size)) {
			plt_dp_err("Insufficient buffer space,"
				   " size %d needed",
				   size);
			return -1;
		}
	}
	((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
	g_size_bytes = ((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);
	/*
	 * Output Scatter List
	 */
	i = 0;
	scatter_comp = (struct roc_se_sglist_comp *)((uint8_t *)gather_comp +
						     g_size_bytes);

	/* IV in SLIST only for F8 */
	if (flags == 0x0) {
		i = fill_sg_comp(scatter_comp, i,
				 (uint64_t)offset_vaddr + ROC_SE_OFF_CTRL_LEN,
				 iv_len);
	}

	/* Add output data */
	if (req_flags & ROC_SE_VALID_MAC_BUF) {
		size = outputlen - iv_len - mac_len;
		if (size) {
			i = fill_sg_comp_from_iov(scatter_comp, i,
						  params->dst_iov, 0, &size,
						  NULL, 0);
			if (unlikely(size)) {
				plt_dp_err("Insufficient buffer space,"
					   " size %d needed",
					   size);
				return -1;
			}
		}

		/* mac data */
		if (mac_len) {
			i = fill_sg_comp_from_buf(scatter_comp, i,
						  &params->mac_buf);
		}
	} else {
		/* Output including mac */
		size = outputlen - iv_len;
		if (size) {
			i = fill_sg_comp_from_iov(scatter_comp, i,
						  params->dst_iov, 0, &size,
						  NULL, 0);
			if (unlikely(size)) {
				plt_dp_err("Insufficient buffer space,"
					   " size %d needed",
					   size);
				return -1;
			}
		}
	}
	((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
	s_size_bytes = ((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);

	size = g_size_bytes + s_size_bytes + ROC_SE_SG_LIST_HDR_SIZE;

	/* This is DPTR len in case of SG mode */
	cpt_inst_w4.s.dlen = size;

	inst->dptr = (uint64_t)in_buffer;
	inst->w4.u64 = cpt_inst_w4.u64;

	return 0;
}
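/*
 * KASUMI decrypt is cipher-only (F8): the direction bit is fixed to 0
 * and no auth offsets/lengths are programmed.
 */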
static __rte_always_inline int
cpt_kasumi_dec_prep(uint64_t d_offs, uint64_t d_lens,
		    struct roc_se_fc_params *params, struct cpt_inst_s *inst)
{
	void *m_vaddr = params->meta_buf.vaddr;
	uint32_t size;
	int32_t inputlen = 0, outputlen;
	struct roc_se_ctx *se_ctx;
	uint8_t i = 0, iv_len = 8;
	uint32_t encr_offset;
	uint32_t encr_data_len;
	int flags;
	uint8_t dir = 0;
	uint64_t *offset_vaddr;
	union cpt_inst_w4 cpt_inst_w4;
	uint8_t *in_buffer;
	uint32_t g_size_bytes, s_size_bytes;
	struct roc_se_sglist_comp *gather_comp;
	struct roc_se_sglist_comp *scatter_comp;

	encr_offset = ROC_SE_ENCR_OFFSET(d_offs) / 8;
	encr_data_len = ROC_SE_ENCR_DLEN(d_lens);

	se_ctx = params->ctx_buf.vaddr;
	flags = se_ctx->zsk_flags;

	cpt_inst_w4.u64 = 0;
	cpt_inst_w4.s.opcode_major = ROC_SE_MAJOR_OP_KASUMI | ROC_SE_DMA_MODE;

	/* indicates ECB/CBC, direction, ctx from cptr, iv from dptr */
	cpt_inst_w4.s.opcode_minor = ((1 << 6) | (se_ctx->k_ecb << 5) |
				      (dir << 4) | (0 << 3) | (flags & 0x7));

	/*
	 * GP op header, lengths are expected in bits.
	 */
	cpt_inst_w4.s.param1 = encr_data_len;

	/* consider iv len */
	encr_offset += iv_len;

	inputlen = encr_offset + (RTE_ALIGN(encr_data_len, 8) / 8);
	outputlen = inputlen;
	/* save space for offset ctrl & iv */
	offset_vaddr = m_vaddr;

	m_vaddr = (uint8_t *)m_vaddr + ROC_SE_OFF_CTRL_LEN + iv_len;

	/* DPTR has SG list */
	in_buffer = m_vaddr;

	((uint16_t *)in_buffer)[0] = 0;
	((uint16_t *)in_buffer)[1] = 0;

	/* TODO Add error check if space will be sufficient */
	gather_comp = (struct roc_se_sglist_comp *)((uint8_t *)m_vaddr + 8);

	/*
	 * Input Gather List
	 */
	i = 0;

	/* Offset control word followed by iv */
	*offset_vaddr = rte_cpu_to_be_64((uint64_t)encr_offset << 16);
	if (unlikely((encr_offset >> 16))) {
		plt_dp_err("Offset not supported");
		plt_dp_err("enc_offset: %d", encr_offset);
		return -1;
	}

	i = fill_sg_comp(gather_comp, i, (uint64_t)offset_vaddr,
			 ROC_SE_OFF_CTRL_LEN + iv_len);

	/* IV */
	memcpy((uint8_t *)offset_vaddr + ROC_SE_OFF_CTRL_LEN, params->iv_buf,
	       iv_len);

	/* Add input data */
	size = inputlen - iv_len;
	if (size) {
		i = fill_sg_comp_from_iov(gather_comp, i, params->src_iov, 0,
					  &size, NULL, 0);
		if (unlikely(size)) {
			plt_dp_err("Insufficient buffer space,"
				   " size %d needed",
				   size);
			return -1;
		}
	}
	((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
	g_size_bytes = ((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);
	/*
	 * Output Scatter List
	 */
	i = 0;
	scatter_comp = (struct roc_se_sglist_comp *)((uint8_t *)gather_comp +
						     g_size_bytes);

	/* IV */
	i = fill_sg_comp(scatter_comp, i,
			 (uint64_t)offset_vaddr + ROC_SE_OFF_CTRL_LEN, iv_len);

	/* Add output data */
	size = outputlen - iv_len;
	if (size) {
		i = fill_sg_comp_from_iov(scatter_comp, i, params->dst_iov, 0,
					  &size, NULL, 0);
		if (unlikely(size)) {
			plt_dp_err("Insufficient buffer space,"
				   " size %d needed",
				   size);
			return -1;
		}
	}
	((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
	s_size_bytes = ((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);

	size = g_size_bytes + s_size_bytes + ROC_SE_SG_LIST_HDR_SIZE;

	/* This is DPTR len in case of SG mode */
	cpt_inst_w4.s.dlen = size;

	inst->dptr = (uint64_t)in_buffer;
	inst->w4.u64 = cpt_inst_w4.u64;

	return 0;
}
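/*
 * Dispatch on the session context's fc_type: ROC_SE_FC_GEN (block
 * ciphers/HMAC/AEAD), ROC_SE_PDCP (SNOW3G/ZUC) or ROC_SE_KASUMI.
 */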
static __rte_always_inline int
cpt_fc_dec_hmac_prep(uint32_t flags, uint64_t d_offs, uint64_t d_lens,
		     struct roc_se_fc_params *fc_params,
		     struct cpt_inst_s *inst)
{
	struct roc_se_ctx *ctx = fc_params->ctx_buf.vaddr;
	uint8_t fc_type;
	int ret = -1;

	fc_type = ctx->fc_type;

	if (likely(fc_type == ROC_SE_FC_GEN)) {
		ret = cpt_dec_hmac_prep(flags, d_offs, d_lens, fc_params, inst);
	} else if (fc_type == ROC_SE_PDCP) {
		ret = cpt_zuc_snow3g_dec_prep(flags, d_offs, d_lens, fc_params,
					      inst);
	} else if (fc_type == ROC_SE_KASUMI) {
		ret = cpt_kasumi_dec_prep(d_offs, d_lens, fc_params, inst);
	}

	return ret;
}
static __rte_always_inline int
cpt_fc_enc_hmac_prep(uint32_t flags, uint64_t d_offs, uint64_t d_lens,
		     struct roc_se_fc_params *fc_params,
		     struct cpt_inst_s *inst)
{
	struct roc_se_ctx *ctx = fc_params->ctx_buf.vaddr;
	uint8_t fc_type;
	int ret = -1;

	fc_type = ctx->fc_type;

	if (likely(fc_type == ROC_SE_FC_GEN)) {
		ret = cpt_enc_hmac_prep(flags, d_offs, d_lens, fc_params, inst);
	} else if (fc_type == ROC_SE_PDCP) {
		ret = cpt_zuc_snow3g_enc_prep(flags, d_offs, d_lens, fc_params,
					      inst);
	} else if (fc_type == ROC_SE_KASUMI) {
		ret = cpt_kasumi_enc_prep(flags, d_offs, d_lens, fc_params,
					  inst);
	}

	return ret;
}
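/*
 * The fill_sess_*() helpers below run at session creation time and
 * translate rte_crypto_sym_xform fields into the session's ROC SE
 * context: cipher/auth types, keys, IV offsets/lengths and digest
 * length.
 */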
static __rte_always_inline int
fill_sess_aead(struct rte_crypto_sym_xform *xform, struct cnxk_se_sess *sess)
{
	struct rte_crypto_aead_xform *aead_form;
	roc_se_cipher_type enc_type = 0; /* NULL Cipher type */
	roc_se_auth_type auth_type = 0; /* NULL Auth type */
	uint32_t cipher_key_len = 0;
	uint8_t aes_gcm = 0;

	aead_form = &xform->aead;

	if (aead_form->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) {
		sess->cpt_op |= ROC_SE_OP_CIPHER_ENCRYPT;
		sess->cpt_op |= ROC_SE_OP_AUTH_GENERATE;
	} else if (aead_form->op == RTE_CRYPTO_AEAD_OP_DECRYPT) {
		sess->cpt_op |= ROC_SE_OP_CIPHER_DECRYPT;
		sess->cpt_op |= ROC_SE_OP_AUTH_VERIFY;
	} else {
		plt_dp_err("Unknown aead operation");
		return -1;
	}

	switch (aead_form->algo) {
	case RTE_CRYPTO_AEAD_AES_GCM:
		enc_type = ROC_SE_AES_GCM;
		cipher_key_len = 16;
		aes_gcm = 1;
		break;
	case RTE_CRYPTO_AEAD_AES_CCM:
		plt_dp_err("Crypto: Unsupported cipher algo %u",
			   aead_form->algo);
		return -1;
	case RTE_CRYPTO_AEAD_CHACHA20_POLY1305:
		enc_type = ROC_SE_CHACHA20;
		auth_type = ROC_SE_POLY1305;
		cipher_key_len = 32;
		sess->chacha_poly = 1;
		break;
	default:
		plt_dp_err("Crypto: Undefined cipher algo %u specified",
			   aead_form->algo);
		return -1;
	}

	if (aead_form->key.length < cipher_key_len) {
		plt_dp_err("Invalid cipher params keylen %u",
			   aead_form->key.length);
		return -1;
	}

	sess->zsk_flag = 0;
	sess->aes_gcm = aes_gcm;
	sess->mac_len = aead_form->digest_length;
	sess->iv_offset = aead_form->iv.offset;
	sess->iv_length = aead_form->iv.length;
	sess->aad_length = aead_form->aad_length;

	if (unlikely(roc_se_ciph_key_set(&sess->roc_se_ctx, enc_type,
					 aead_form->key.data,
					 aead_form->key.length, NULL)))
		return -1;

	if (unlikely(roc_se_auth_key_set(&sess->roc_se_ctx, auth_type, NULL, 0,
					 aead_form->digest_length)))
		return -1;

	return 0;
}
static __rte_always_inline int
fill_sess_cipher(struct rte_crypto_sym_xform *xform, struct cnxk_se_sess *sess)
{
	struct rte_crypto_cipher_xform *c_form;
	roc_se_cipher_type enc_type = 0; /* NULL Cipher type */
	uint32_t cipher_key_len = 0;
	uint8_t zsk_flag = 0, aes_ctr = 0, is_null = 0;

	c_form = &xform->cipher;

	if (c_form->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
		sess->cpt_op |= ROC_SE_OP_CIPHER_ENCRYPT;
	else if (c_form->op == RTE_CRYPTO_CIPHER_OP_DECRYPT) {
		sess->cpt_op |= ROC_SE_OP_CIPHER_DECRYPT;
		if (xform->next != NULL &&
		    xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
			/* Perform decryption followed by auth verify */
			sess->roc_se_ctx.template_w4.s.opcode_minor =
				ROC_SE_FC_MINOR_OP_HMAC_FIRST;
		}
	} else {
		plt_dp_err("Unknown cipher operation");
		return -1;
	}

	switch (c_form->algo) {
	case RTE_CRYPTO_CIPHER_AES_CBC:
		enc_type = ROC_SE_AES_CBC;
		cipher_key_len = 16;
		break;
	case RTE_CRYPTO_CIPHER_3DES_CBC:
		enc_type = ROC_SE_DES3_CBC;
		cipher_key_len = 24;
		break;
	case RTE_CRYPTO_CIPHER_DES_CBC:
		/* DES is implemented using 3DES in hardware */
		enc_type = ROC_SE_DES3_CBC;
		cipher_key_len = 8;
		break;
	case RTE_CRYPTO_CIPHER_AES_CTR:
		enc_type = ROC_SE_AES_CTR;
		cipher_key_len = 16;
		aes_ctr = 1;
		break;
	case RTE_CRYPTO_CIPHER_NULL:
		enc_type = 0;
		is_null = 1;
		break;
	case RTE_CRYPTO_CIPHER_KASUMI_F8:
		enc_type = ROC_SE_KASUMI_F8_ECB;
		cipher_key_len = 16;
		zsk_flag = ROC_SE_K_F8;
		break;
	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
		enc_type = ROC_SE_SNOW3G_UEA2;
		cipher_key_len = 16;
		zsk_flag = ROC_SE_ZS_EA;
		break;
	case RTE_CRYPTO_CIPHER_ZUC_EEA3:
		enc_type = ROC_SE_ZUC_EEA3;
		cipher_key_len = 16;
		zsk_flag = ROC_SE_ZS_EA;
		break;
	case RTE_CRYPTO_CIPHER_AES_XTS:
		enc_type = ROC_SE_AES_XTS;
		cipher_key_len = 16;
		break;
	case RTE_CRYPTO_CIPHER_3DES_ECB:
		enc_type = ROC_SE_DES3_ECB;
		cipher_key_len = 24;
		break;
	case RTE_CRYPTO_CIPHER_AES_ECB:
		enc_type = ROC_SE_AES_ECB;
		cipher_key_len = 16;
		break;
	case RTE_CRYPTO_CIPHER_3DES_CTR:
	case RTE_CRYPTO_CIPHER_AES_F8:
	case RTE_CRYPTO_CIPHER_ARC4:
		plt_dp_err("Crypto: Unsupported cipher algo %u", c_form->algo);
		return -1;
	default:
		plt_dp_err("Crypto: Undefined cipher algo %u specified",
			   c_form->algo);
		return -1;
	}

	if (c_form->key.length < cipher_key_len) {
		plt_dp_err("Invalid cipher params keylen %u",
			   c_form->key.length);
		return -1;
	}

	sess->zsk_flag = zsk_flag;
	sess->aes_gcm = 0;
	sess->aes_ctr = aes_ctr;
	sess->iv_offset = c_form->iv.offset;
	sess->iv_length = c_form->iv.length;
	sess->is_null = is_null;

	if (unlikely(roc_se_ciph_key_set(&sess->roc_se_ctx, enc_type,
					 c_form->key.data, c_form->key.length,
					 NULL)))
		return -1;

	return 0;
}
static __rte_always_inline int
fill_sess_auth(struct rte_crypto_sym_xform *xform, struct cnxk_se_sess *sess)
{
	struct rte_crypto_auth_xform *a_form;
	roc_se_auth_type auth_type = 0; /* NULL Auth type */
	uint8_t zsk_flag = 0, aes_gcm = 0, is_null = 0;

	if (xform->next != NULL &&
	    xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
	    xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
		/* Perform auth followed by encryption */
		sess->roc_se_ctx.template_w4.s.opcode_minor =
			ROC_SE_FC_MINOR_OP_HMAC_FIRST;
	}

	a_form = &xform->auth;

	if (a_form->op == RTE_CRYPTO_AUTH_OP_VERIFY)
		sess->cpt_op |= ROC_SE_OP_AUTH_VERIFY;
	else if (a_form->op == RTE_CRYPTO_AUTH_OP_GENERATE)
		sess->cpt_op |= ROC_SE_OP_AUTH_GENERATE;
	else {
		plt_dp_err("Unknown auth operation");
		return -1;
	}

	switch (a_form->algo) {
	case RTE_CRYPTO_AUTH_SHA1_HMAC:
	case RTE_CRYPTO_AUTH_SHA1:
		auth_type = ROC_SE_SHA1_TYPE;
		break;
	case RTE_CRYPTO_AUTH_SHA256_HMAC:
	case RTE_CRYPTO_AUTH_SHA256:
		auth_type = ROC_SE_SHA2_SHA256;
		break;
	case RTE_CRYPTO_AUTH_SHA512_HMAC:
	case RTE_CRYPTO_AUTH_SHA512:
		auth_type = ROC_SE_SHA2_SHA512;
		break;
	case RTE_CRYPTO_AUTH_AES_GMAC:
		auth_type = ROC_SE_GMAC_TYPE;
		aes_gcm = 1;
		break;
	case RTE_CRYPTO_AUTH_SHA224_HMAC:
	case RTE_CRYPTO_AUTH_SHA224:
		auth_type = ROC_SE_SHA2_SHA224;
		break;
	case RTE_CRYPTO_AUTH_SHA384_HMAC:
	case RTE_CRYPTO_AUTH_SHA384:
		auth_type = ROC_SE_SHA2_SHA384;
		break;
	case RTE_CRYPTO_AUTH_MD5_HMAC:
	case RTE_CRYPTO_AUTH_MD5:
		auth_type = ROC_SE_MD5_TYPE;
		break;
	case RTE_CRYPTO_AUTH_KASUMI_F9:
		auth_type = ROC_SE_KASUMI_F9_ECB;
		/*
		 * Indicate that direction needs to be taken out
		 * from end of src
		 */
		zsk_flag = ROC_SE_K_F9;
		break;
	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
		auth_type = ROC_SE_SNOW3G_UIA2;
		zsk_flag = ROC_SE_ZS_IA;
		break;
	case RTE_CRYPTO_AUTH_ZUC_EIA3:
		auth_type = ROC_SE_ZUC_EIA3;
		zsk_flag = ROC_SE_ZS_IA;
		break;
	case RTE_CRYPTO_AUTH_NULL:
		auth_type = 0;
		is_null = 1;
		break;
	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
	case RTE_CRYPTO_AUTH_AES_CMAC:
	case RTE_CRYPTO_AUTH_AES_CBC_MAC:
		plt_dp_err("Crypto: Unsupported hash algo %u", a_form->algo);
		return -1;
	default:
		plt_dp_err("Crypto: Undefined Hash algo %u specified",
			   a_form->algo);
		return -1;
	}

	sess->zsk_flag = zsk_flag;
	sess->aes_gcm = aes_gcm;
	sess->mac_len = a_form->digest_length;
	sess->is_null = is_null;

	sess->auth_iv_offset = a_form->iv.offset;
	sess->auth_iv_length = a_form->iv.length;

	if (unlikely(roc_se_auth_key_set(&sess->roc_se_ctx, auth_type,
					 a_form->key.data, a_form->key.length,
					 a_form->digest_length)))
		return -1;

	return 0;
}
static __rte_always_inline int
fill_sess_gmac(struct rte_crypto_sym_xform *xform, struct cnxk_se_sess *sess)
{
	struct rte_crypto_auth_xform *a_form;
	roc_se_cipher_type enc_type = 0; /* NULL Cipher type */
	roc_se_auth_type auth_type = 0; /* NULL Auth type */

	a_form = &xform->auth;

	if (a_form->op == RTE_CRYPTO_AUTH_OP_GENERATE)
		sess->cpt_op |= ROC_SE_OP_ENCODE;
	else if (a_form->op == RTE_CRYPTO_AUTH_OP_VERIFY)
		sess->cpt_op |= ROC_SE_OP_DECODE;
	else {
		plt_dp_err("Unknown auth operation");
		return -1;
	}

	switch (a_form->algo) {
	case RTE_CRYPTO_AUTH_AES_GMAC:
		enc_type = ROC_SE_AES_GCM;
		auth_type = ROC_SE_GMAC_TYPE;
		break;
	default:
		plt_dp_err("Crypto: Undefined cipher algo %u specified",
			   a_form->algo);
		return -1;
	}

	sess->zsk_flag = 0;
	sess->aes_gcm = 0;
	sess->is_gmac = 1;
	sess->iv_offset = a_form->iv.offset;
	sess->iv_length = a_form->iv.length;
	sess->mac_len = a_form->digest_length;

	if (unlikely(roc_se_ciph_key_set(&sess->roc_se_ctx, enc_type,
					 a_form->key.data, a_form->key.length,
					 NULL)))
		return -1;

	if (unlikely(roc_se_auth_key_set(&sess->roc_se_ctx, auth_type, NULL, 0,
					 a_form->digest_length)))
		return -1;

	return 0;
}
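/*
 * Allocate a meta buffer (offset control word, SG lists and scratch)
 * from the queue pair's mempool and record it in the inflight request
 * so that it can be returned to the pool on completion.
 */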
static __rte_always_inline void *
alloc_op_meta(struct roc_se_buf_ptr *buf, int32_t len,
	      struct rte_mempool *cpt_meta_pool,
	      struct cpt_inflight_req *infl_req)
{
	uint8_t *mdata;

	if (unlikely(rte_mempool_get(cpt_meta_pool, (void **)&mdata) < 0))
		return NULL;

	buf->vaddr = mdata;
	buf->size = len;
	infl_req->mdata = mdata;
	infl_req->op_flags |= CPT_OP_FLAGS_METABUF;

	return mdata;
}
static __rte_always_inline uint32_t
prepare_iov_from_pkt(struct rte_mbuf *pkt, struct roc_se_iov_ptr *iovec,
		     uint32_t start_offset)
{
	uint16_t index = 0;
	void *seg_data = NULL;
	int32_t seg_size = 0;

	if (!start_offset) {
		seg_data = rte_pktmbuf_mtod(pkt, void *);
		seg_size = pkt->data_len;
	} else {
		while (start_offset >= pkt->data_len) {
			start_offset -= pkt->data_len;
			pkt = pkt->next;
		}

		seg_data = rte_pktmbuf_mtod_offset(pkt, void *, start_offset);
		seg_size = pkt->data_len - start_offset;
	}

	/* first seg */
	iovec->bufs[index].vaddr = seg_data;
	iovec->bufs[index].size = seg_size;
	index++;
	pkt = pkt->next;

	while (unlikely(pkt != NULL)) {
		seg_data = rte_pktmbuf_mtod(pkt, void *);
		seg_size = pkt->data_len;

		iovec->bufs[index].vaddr = seg_data;
		iovec->bufs[index].size = seg_size;
		index++;
		pkt = pkt->next;
	}

	iovec->buf_cnt = index;
	return 0;
}
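/*
 * In-place variant: a single-segment mbuf qualifies for direct mode
 * when it has at least 24 bytes of headroom (8-byte offset control
 * word plus a worst-case 16-byte IV); multi-segment mbufs degenerate
 * to an IOV for SG mode.
 */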
static __rte_always_inline uint32_t
prepare_iov_from_pkt_inplace(struct rte_mbuf *pkt,
			     struct roc_se_fc_params *param, uint32_t *flags)
{
	uint16_t index = 0;
	void *seg_data = NULL;
	uint32_t seg_size = 0;
	struct roc_se_iov_ptr *iovec;

	seg_data = rte_pktmbuf_mtod(pkt, void *);
	seg_size = pkt->data_len;

	/* first seg */
	if (likely(!pkt->next)) {
		uint32_t headroom;

		*flags |= ROC_SE_SINGLE_BUF_INPLACE;
		headroom = rte_pktmbuf_headroom(pkt);
		if (likely(headroom >= 24))
			*flags |= ROC_SE_SINGLE_BUF_HEADROOM;

		param->bufs[0].vaddr = seg_data;
		param->bufs[0].size = seg_size;
		return 0;
	}

	iovec = param->src_iov;
	iovec->bufs[index].vaddr = seg_data;
	iovec->bufs[index].size = seg_size;
	index++;
	pkt = pkt->next;

	while (unlikely(pkt != NULL)) {
		seg_data = rte_pktmbuf_mtod(pkt, void *);
		seg_size = pkt->data_len;

		iovec->bufs[index].vaddr = seg_data;
		iovec->bufs[index].size = seg_size;
		index++;
		pkt = pkt->next;
	}

	iovec->buf_cnt = index;
	return 0;
}
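/*
 * Build the roc_se_fc_params for a symmetric crypto op: resolve the
 * IV/AAD/digest pointers, pack d_offs/d_lens from the op's offsets and
 * lengths, pick in-place direct mode versus SG, and allocate meta
 * scratch whenever SG lists are required.
 */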
static __rte_always_inline int
fill_fc_params(struct rte_crypto_op *cop, struct cnxk_se_sess *sess,
	       struct cpt_qp_meta_info *m_info,
	       struct cpt_inflight_req *infl_req, struct cpt_inst_s *inst)
{
	struct roc_se_ctx *ctx = &sess->roc_se_ctx;
	uint8_t op_minor = ctx->template_w4.s.opcode_minor;
	struct rte_crypto_sym_op *sym_op = cop->sym;
	void *mdata = NULL;
	uint32_t mc_hash_off;
	uint32_t flags = 0;
	uint64_t d_offs, d_lens;
	struct rte_mbuf *m_src, *m_dst;
	uint8_t cpt_op = sess->cpt_op;
#ifdef CPT_ALWAYS_USE_SG_MODE
	uint8_t inplace = 0;
#else
	uint8_t inplace = 1;
#endif
	struct roc_se_fc_params fc_params;
	char src[SRC_IOV_SIZE];
	char dst[SRC_IOV_SIZE];
	uint32_t iv_buf[4];
	int ret;

	if (likely(sess->iv_length)) {
		flags |= ROC_SE_VALID_IV_BUF;
		fc_params.iv_buf = rte_crypto_op_ctod_offset(cop, uint8_t *,
							     sess->iv_offset);
		if (sess->aes_ctr && unlikely(sess->iv_length != 16)) {
			memcpy((uint8_t *)iv_buf,
			       rte_crypto_op_ctod_offset(cop, uint8_t *,
							 sess->iv_offset),
			       12);
			iv_buf[3] = rte_cpu_to_be_32(0x1);
			fc_params.iv_buf = iv_buf;
		}
	}

	if (sess->zsk_flag) {
		fc_params.auth_iv_buf = rte_crypto_op_ctod_offset(
			cop, uint8_t *, sess->auth_iv_offset);
		if (sess->zsk_flag != ROC_SE_ZS_EA)
			inplace = 0;
	}

	m_src = sym_op->m_src;
	m_dst = sym_op->m_dst;
	if (sess->aes_gcm || sess->chacha_poly) {
		/* AEAD */
		uint8_t *salt;
		uint8_t *aad_data;
		uint16_t aad_len;

		d_offs = sym_op->aead.data.offset;
		d_lens = sym_op->aead.data.length;
		mc_hash_off =
			sym_op->aead.data.offset + sym_op->aead.data.length;

		aad_data = sym_op->aead.aad.data;
		aad_len = sess->aad_length;
		if (likely((aad_data + aad_len) ==
			   rte_pktmbuf_mtod_offset(m_src, uint8_t *,
						   sym_op->aead.data.offset))) {
			d_offs = (d_offs - aad_len) | (d_offs << 16);
			d_lens = (d_lens + aad_len) | (d_lens << 32);
		} else {
			fc_params.aad_buf.vaddr = sym_op->aead.aad.data;
			fc_params.aad_buf.size = aad_len;
			flags |= ROC_SE_VALID_AAD_BUF;
			inplace = 0;
			d_offs = d_offs << 16;
			d_lens = d_lens << 32;
		}

		salt = fc_params.iv_buf;
		if (unlikely(*(uint32_t *)salt != sess->salt)) {
			cpt_fc_salt_update(&sess->roc_se_ctx, salt);
			sess->salt = *(uint32_t *)salt;
		}
		fc_params.iv_buf = salt + 4;
		if (likely(sess->mac_len)) {
			struct rte_mbuf *m =
				(cpt_op & ROC_SE_OP_ENCODE) ? m_dst : m_src;

			if (!m)
				m = m_src;

			/* hmac immediately following data is best case */
			if (unlikely(rte_pktmbuf_mtod(m, uint8_t *) +
					     mc_hash_off !=
				     (uint8_t *)sym_op->aead.digest.data)) {
				flags |= ROC_SE_VALID_MAC_BUF;
				fc_params.mac_buf.size = sess->mac_len;
				fc_params.mac_buf.vaddr =
					sym_op->aead.digest.data;
				inplace = 0;
			}
		}
	} else {
		d_offs = sym_op->cipher.data.offset;
		d_lens = sym_op->cipher.data.length;
		mc_hash_off =
			sym_op->cipher.data.offset + sym_op->cipher.data.length;
		d_offs = (d_offs << 16) | sym_op->auth.data.offset;
		d_lens = (d_lens << 32) | sym_op->auth.data.length;

		if (mc_hash_off <
		    (sym_op->auth.data.offset + sym_op->auth.data.length)) {
			mc_hash_off = (sym_op->auth.data.offset +
				       sym_op->auth.data.length);
		}

		/* for gmac, salt should be updated like in gcm */
		if (unlikely(sess->is_gmac)) {
			uint8_t *salt;

			salt = fc_params.iv_buf;
			if (unlikely(*(uint32_t *)salt != sess->salt)) {
				cpt_fc_salt_update(&sess->roc_se_ctx, salt);
				sess->salt = *(uint32_t *)salt;
			}
			fc_params.iv_buf = salt + 4;
		}
		if (likely(sess->mac_len)) {
			struct rte_mbuf *m;

			m = (cpt_op & ROC_SE_OP_ENCODE) ? m_dst : m_src;
			if (!m)
				m = m_src;

			/* hmac immediately following data is best case */
			if (!(op_minor & ROC_SE_FC_MINOR_OP_HMAC_FIRST) &&
			    (unlikely(rte_pktmbuf_mtod(m, uint8_t *) +
					      mc_hash_off !=
				      (uint8_t *)sym_op->auth.digest.data))) {
				flags |= ROC_SE_VALID_MAC_BUF;
				fc_params.mac_buf.size = sess->mac_len;
				fc_params.mac_buf.vaddr =
					sym_op->auth.digest.data;
				inplace = 0;
			}
		}
	}
	fc_params.ctx_buf.vaddr = &sess->roc_se_ctx;

	if (!(op_minor & ROC_SE_FC_MINOR_OP_HMAC_FIRST) &&
	    unlikely(sess->is_null || sess->cpt_op == ROC_SE_OP_DECODE))
		inplace = 0;

	if (likely(!m_dst && inplace)) {
		/*
		 * Case of single buffer without AAD buf or
		 * separate mac buf in place.
		 */
		fc_params.dst_iov = fc_params.src_iov = (void *)src;

		if (unlikely(prepare_iov_from_pkt_inplace(m_src, &fc_params,
							  &flags))) {
			plt_dp_err("Prepare inplace src iov failed");
			ret = -EINVAL;
			goto err_exit;
		}
	} else {
		/* Out of place processing */
		fc_params.src_iov = (void *)src;
		fc_params.dst_iov = (void *)dst;

		/* Store SG I/O in the api for reuse */
		if (prepare_iov_from_pkt(m_src, fc_params.src_iov, 0)) {
			plt_dp_err("Prepare src iov failed");
			ret = -EINVAL;
			goto err_exit;
		}

		if (unlikely(m_dst != NULL)) {
			uint32_t pkt_len;

			/* Try to make room as much as src has */
			pkt_len = rte_pktmbuf_pkt_len(m_dst);

			if (unlikely(pkt_len < rte_pktmbuf_pkt_len(m_src))) {
				pkt_len = rte_pktmbuf_pkt_len(m_src) - pkt_len;
				if (!rte_pktmbuf_append(m_dst, pkt_len)) {
					plt_dp_err("Not enough space in "
						   "m_dst %p, need %u"
						   " more",
						   m_dst, pkt_len);
					ret = -EINVAL;
					goto err_exit;
				}
			}

			if (prepare_iov_from_pkt(m_dst, fc_params.dst_iov, 0)) {
				plt_dp_err("Prepare dst iov failed for "
					   "m_dst %p",
					   m_dst);
				ret = -EINVAL;
				goto err_exit;
			}
		} else {
			fc_params.dst_iov = (void *)src;
		}
	}

	if (unlikely(!((flags & ROC_SE_SINGLE_BUF_INPLACE) &&
		       (flags & ROC_SE_SINGLE_BUF_HEADROOM) &&
		       ((ctx->fc_type == ROC_SE_FC_GEN) ||
			(ctx->fc_type == ROC_SE_PDCP))))) {
		mdata = alloc_op_meta(&fc_params.meta_buf, m_info->mlen,
				      m_info->pool, infl_req);
		if (mdata == NULL) {
			plt_dp_err("Error allocating meta buffer for request");
			return -ENOMEM;
		}
	}
	/* Finally prepare the instruction */
	if (cpt_op & ROC_SE_OP_ENCODE)
		ret = cpt_fc_enc_hmac_prep(flags, d_offs, d_lens, &fc_params,
					   inst);
	else
		ret = cpt_fc_dec_hmac_prep(flags, d_offs, d_lens, &fc_params,
					   inst);

	if (unlikely(ret)) {
		plt_dp_err("Preparing request failed due to bad input arg");
		goto free_mdata_and_exit;
	}

	return 0;

free_mdata_and_exit:
	if (infl_req->op_flags & CPT_OP_FLAGS_METABUF)
		rte_mempool_put(m_info->pool, infl_req->mdata);
err_exit:
	return ret;
}

#endif /* _CNXK_SE_H_ */