/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */

#ifndef _CNXK_SE_H_
#define _CNXK_SE_H_

#include "cnxk_cryptodev.h"
#include "cnxk_cryptodev_ops.h"

#define SRC_IOV_SIZE                                                           \
	(sizeof(struct roc_se_iov_ptr) +                                       \
	 (sizeof(struct roc_se_buf_ptr) * ROC_SE_MAX_SG_CNT))
#define DST_IOV_SIZE                                                           \
	(sizeof(struct roc_se_iov_ptr) +                                       \
	 (sizeof(struct roc_se_buf_ptr) * ROC_SE_MAX_SG_CNT))

struct cnxk_se_sess {
	uint16_t cpt_op : 4;
	uint16_t zsk_flag : 4;
	uint16_t aes_gcm : 1;
	uint16_t aes_ctr : 1;
	uint16_t chacha_poly : 1;
	uint16_t is_null : 1;
	uint16_t is_gmac : 1;
	uint16_t rsvd1 : 3;
	uint16_t aad_length;
	uint8_t mac_len;
	uint8_t iv_length;
	uint8_t auth_iv_length;
	uint16_t iv_offset;
	uint16_t auth_iv_offset;
	uint32_t salt;
	uint64_t cpt_inst_w7;
	struct cnxk_cpt_qp *qp;
	struct roc_se_ctx roc_se_ctx;
} __rte_cache_aligned;

static __rte_always_inline int
fill_sess_gmac(struct rte_crypto_sym_xform *xform, struct cnxk_se_sess *sess);

static inline void
cpt_pack_iv(uint8_t *iv_src, uint8_t *iv_dst)
{
	iv_dst[16] = iv_src[16];
	/* Pack the last 8 bytes of the IV into 6 bytes,
	 * discarding the 2 MSBs of each byte.
	 */
	iv_dst[17] = (((iv_src[17] & 0x3f) << 2) | ((iv_src[18] >> 4) & 0x3));
	iv_dst[18] = (((iv_src[18] & 0xf) << 4) | ((iv_src[19] >> 2) & 0xf));
	iv_dst[19] = (((iv_src[19] & 0x3) << 6) | (iv_src[20] & 0x3f));

	iv_dst[20] = (((iv_src[21] & 0x3f) << 2) | ((iv_src[22] >> 4) & 0x3));
	iv_dst[21] = (((iv_src[22] & 0xf) << 4) | ((iv_src[23] >> 2) & 0xf));
	iv_dst[22] = (((iv_src[23] & 0x3) << 6) | (iv_src[24] & 0x3f));
}

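/*
 * Illustrative sketch (not part of the driver): cpt_pack_iv() folds a 25-byte
 * ZUC IV into 23 bytes. Byte 16 is copied as-is; bytes 17..24, which carry
 * only 6 valid bits each, are packed into bytes 17..22 by dropping the two
 * MSBs of each source byte. The guard macro CNXK_SE_IV_PACK_DEMO is
 * hypothetical and never defined by the driver.
 */
#ifdef CNXK_SE_IV_PACK_DEMO
static inline void
cnxk_se_iv_pack_demo(void)
{
	uint8_t iv_s[25] = {0};
	uint8_t iv_d[25] = {0};

	iv_s[17] = 0x3f; /* 6 LSBs set; the 2 MSBs would be discarded */
	iv_s[18] = 0x30; /* bits 5..4 set */
	cpt_pack_iv(iv_s, iv_d);
	/* iv_d[17] == (0x3f << 2) | (0x30 >> 4) == 0xff */
}
#endif
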
static __rte_always_inline void
pdcp_iv_copy(uint8_t *iv_d, uint8_t *iv_s, const uint8_t pdcp_alg_type,
	     uint8_t pack_iv)
{
	uint32_t *iv_s_temp, iv_temp[4];
	int j;

	if (pdcp_alg_type == ROC_SE_PDCP_ALG_TYPE_SNOW3G) {
		/*
		 * DPDK provides the IV in the form IV3 IV2 IV1 IV0, in
		 * big-endian order; the microcode needs IV0 IV1 IV2 IV3.
		 */

		iv_s_temp = (uint32_t *)iv_s;

		for (j = 0; j < 4; j++)
			iv_temp[j] = iv_s_temp[3 - j];
		memcpy(iv_d, iv_temp, 16);
	} else if (pdcp_alg_type == ROC_SE_PDCP_ALG_TYPE_ZUC) {
		/* ZUC doesn't need a swap */
		memcpy(iv_d, iv_s, 16);
		if (pack_iv)
			cpt_pack_iv(iv_s, iv_d);
	} else {
		/* AES-CMAC EIA2, microcode expects 16B zeroized IV */
		for (j = 0; j < 4; j++)
			((uint32_t *)iv_d)[j] = 0; /* 4B per iteration */
	}
}

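/*
 * Illustrative sketch (not part of the driver): the SNOW3G IV word swap done
 * by pdcp_iv_copy(). An IV supplied as words IV3 IV2 IV1 IV0 is stored for
 * the microcode as IV0 IV1 IV2 IV3. The guard macro is hypothetical.
 */
#ifdef CNXK_SE_IV_SWAP_DEMO
static inline void
cnxk_se_iv_swap_demo(void)
{
	uint32_t iv_words[4] = {0x33333333, 0x22222222, 0x11111111, 0x00000000};
	uint8_t iv_d[16];

	pdcp_iv_copy(iv_d, (uint8_t *)iv_words, ROC_SE_PDCP_ALG_TYPE_SNOW3G, 0);
	/* iv_d now begins with the bytes of iv_words[3] */
}
#endif
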
static __rte_always_inline int
cpt_mac_len_verify(struct rte_crypto_auth_xform *auth)
{
	uint16_t mac_len = auth->digest_length;
	int ret;

	switch (auth->algo) {
	case RTE_CRYPTO_AUTH_MD5:
	case RTE_CRYPTO_AUTH_MD5_HMAC:
		ret = (mac_len == 16) ? 0 : -1;
		break;
	case RTE_CRYPTO_AUTH_SHA1:
	case RTE_CRYPTO_AUTH_SHA1_HMAC:
		ret = (mac_len == 20) ? 0 : -1;
		break;
	case RTE_CRYPTO_AUTH_SHA224:
	case RTE_CRYPTO_AUTH_SHA224_HMAC:
		ret = (mac_len == 28) ? 0 : -1;
		break;
	case RTE_CRYPTO_AUTH_SHA256:
	case RTE_CRYPTO_AUTH_SHA256_HMAC:
		ret = (mac_len == 32) ? 0 : -1;
		break;
	case RTE_CRYPTO_AUTH_SHA384:
	case RTE_CRYPTO_AUTH_SHA384_HMAC:
		ret = (mac_len == 48) ? 0 : -1;
		break;
	case RTE_CRYPTO_AUTH_SHA512:
	case RTE_CRYPTO_AUTH_SHA512_HMAC:
		ret = (mac_len == 64) ? 0 : -1;
		break;
	case RTE_CRYPTO_AUTH_NULL:
		ret = 0;
		break;
	default:
		ret = -1;
	}

	return ret;
}

static __rte_always_inline void
cpt_fc_salt_update(struct roc_se_ctx *se_ctx, uint8_t *salt)
{
	struct roc_se_context *fctx = &se_ctx->se_ctx.fctx;

	memcpy(fctx->enc.encr_iv, salt, 4);
}

static __rte_always_inline uint32_t
fill_sg_comp(struct roc_se_sglist_comp *list, uint32_t i, phys_addr_t dma_addr,
	     uint32_t size)
{
	struct roc_se_sglist_comp *to = &list[i >> 2];

	to->u.s.len[i % 4] = rte_cpu_to_be_16(size);
	to->ptr[i % 4] = rte_cpu_to_be_64(dma_addr);
	i++;
	return i;
}

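/*
 * Illustrative sketch (not part of the driver): each roc_se_sglist_comp packs
 * four {length, pointer} entries, which is why fill_sg_comp() addresses the
 * component with i >> 2 and the slot within it with i % 4. Lengths and
 * pointers are stored big-endian for the microcode. Hypothetical guard macro.
 */
#ifdef CNXK_SE_SG_DEMO
static inline uint32_t
cnxk_se_sg_demo(struct roc_se_sglist_comp *list, uint64_t buf1, uint64_t buf2)
{
	uint32_t i = 0;

	i = fill_sg_comp(list, i, buf1, 64);  /* lands in list[0], slot 0 */
	i = fill_sg_comp(list, i, buf2, 128); /* lands in list[0], slot 1 */
	return i;                             /* i == 2 */
}
#endif
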
static __rte_always_inline uint32_t
fill_sg_comp_from_buf(struct roc_se_sglist_comp *list, uint32_t i,
		      struct roc_se_buf_ptr *from)
{
	struct roc_se_sglist_comp *to = &list[i >> 2];

	to->u.s.len[i % 4] = rte_cpu_to_be_16(from->size);
	to->ptr[i % 4] = rte_cpu_to_be_64((uint64_t)from->vaddr);
	i++;
	return i;
}

static __rte_always_inline uint32_t
fill_sg_comp_from_buf_min(struct roc_se_sglist_comp *list, uint32_t i,
			  struct roc_se_buf_ptr *from, uint32_t *psize)
{
	struct roc_se_sglist_comp *to = &list[i >> 2];
	uint32_t size = *psize;
	uint32_t e_len;

	e_len = (size > from->size) ? from->size : size;
	to->u.s.len[i % 4] = rte_cpu_to_be_16(e_len);
	to->ptr[i % 4] = rte_cpu_to_be_64((uint64_t)from->vaddr);
	*psize -= e_len;
	i++;
	return i;
}

/*
 * This fills the MC expected SGIO list
 * from the IOV given by the user.
 */
static __rte_always_inline uint32_t
fill_sg_comp_from_iov(struct roc_se_sglist_comp *list, uint32_t i,
		      struct roc_se_iov_ptr *from, uint32_t from_offset,
		      uint32_t *psize, struct roc_se_buf_ptr *extra_buf,
		      uint32_t extra_offset)
{
	int32_t j;
	uint32_t extra_len = extra_buf ? extra_buf->size : 0;
	uint32_t size = *psize;
	struct roc_se_buf_ptr *bufs;

	bufs = from->bufs;
	for (j = 0; (j < from->buf_cnt) && size; j++) {
		uint64_t e_vaddr;
		uint32_t e_len;
		struct roc_se_sglist_comp *to = &list[i >> 2];

		if (unlikely(from_offset)) {
			if (from_offset >= bufs[j].size) {
				from_offset -= bufs[j].size;
				continue;
			}
			e_vaddr = (uint64_t)bufs[j].vaddr + from_offset;
			e_len = (size > (bufs[j].size - from_offset)) ?
					(bufs[j].size - from_offset) :
					size;
			from_offset = 0;
		} else {
			e_vaddr = (uint64_t)bufs[j].vaddr;
			e_len = (size > bufs[j].size) ? bufs[j].size : size;
		}

		to->u.s.len[i % 4] = rte_cpu_to_be_16(e_len);
		to->ptr[i % 4] = rte_cpu_to_be_64(e_vaddr);

		if (extra_len && (e_len >= extra_offset)) {
			/* Break the data at given offset */
			uint32_t next_len = e_len - extra_offset;
			uint64_t next_vaddr = e_vaddr + extra_offset;

			if (!extra_offset) {
				i--;
			} else {
				e_len = extra_offset;
				size -= e_len;
				to->u.s.len[i % 4] = rte_cpu_to_be_16(e_len);
			}

			extra_len = RTE_MIN(extra_len, size);
			/* Insert extra data ptr */
			if (extra_len) {
				i++;
				to = &list[i >> 2];
				to->u.s.len[i % 4] =
					rte_cpu_to_be_16(extra_len);
				to->ptr[i % 4] = rte_cpu_to_be_64(
					(uint64_t)extra_buf->vaddr);
				size -= extra_len;
			}

			next_len = RTE_MIN(next_len, size);
			/* insert the rest of the data */
			if (next_len) {
				i++;
				to = &list[i >> 2];
				to->u.s.len[i % 4] = rte_cpu_to_be_16(next_len);
				to->ptr[i % 4] = rte_cpu_to_be_64(next_vaddr);
				size -= next_len;
			}

			extra_len = 0;
		} else {
			size -= e_len;
		}
		if (extra_offset)
			extra_offset -= size;
		i++;
	}

	*psize = size;
	return (uint32_t)i;
}

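/*
 * Illustrative sketch (not part of the driver): fill_sg_comp_from_iov() can
 * splice an extra buffer (e.g. out-of-place AAD) into the gather list at
 * extra_offset, splitting the IOV entry that covers that offset into
 * [data before offset] [extra buffer] [rest of data]. Hypothetical guard
 * macro; the size value is a placeholder.
 */
#ifdef CNXK_SE_IOV_SPLICE_DEMO
static inline uint32_t
cnxk_se_iov_splice_demo(struct roc_se_sglist_comp *list,
			struct roc_se_iov_ptr *iov,
			struct roc_se_buf_ptr *aad, uint32_t passthrough_len)
{
	uint32_t size = 256; /* total gather length expected by microcode */

	/* AAD is inserted after passthrough_len bytes of the source IOV */
	return fill_sg_comp_from_iov(list, 0, iov, 0, &size, aad,
				     passthrough_len);
}
#endif
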
static __rte_always_inline int
cpt_digest_gen_prep(uint32_t flags, uint64_t d_lens,
		    struct roc_se_fc_params *params, struct cpt_inst_s *inst)
{
	void *m_vaddr = params->meta_buf.vaddr;
	uint32_t size, i;
	uint16_t data_len, mac_len, key_len;
	roc_se_auth_type hash_type;
	struct roc_se_ctx *ctx;
	struct roc_se_sglist_comp *gather_comp;
	struct roc_se_sglist_comp *scatter_comp;
	uint8_t *in_buffer;
	uint32_t g_size_bytes, s_size_bytes;
	union cpt_inst_w4 cpt_inst_w4;

	ctx = params->ctx_buf.vaddr;

	hash_type = ctx->hash_type;
	mac_len = ctx->mac_len;
	key_len = ctx->auth_key_len;
	data_len = ROC_SE_AUTH_DLEN(d_lens);

	cpt_inst_w4.s.opcode_minor = 0;
	cpt_inst_w4.s.param2 = ((uint16_t)hash_type << 8);
	if (ctx->hmac) {
		cpt_inst_w4.s.opcode_major =
			ROC_SE_MAJOR_OP_HMAC | ROC_SE_DMA_MODE;
		cpt_inst_w4.s.param1 = key_len;
		cpt_inst_w4.s.dlen = data_len + RTE_ALIGN_CEIL(key_len, 8);
	} else {
		cpt_inst_w4.s.opcode_major =
			ROC_SE_MAJOR_OP_HASH | ROC_SE_DMA_MODE;
		cpt_inst_w4.s.param1 = 0;
		cpt_inst_w4.s.dlen = data_len;
	}

	/* Only the NULL-auth, NULL-cipher case enters this branch */
	if (unlikely(!hash_type && !ctx->enc_cipher)) {
		cpt_inst_w4.s.opcode_major = ROC_SE_MAJOR_OP_MISC;
		/* Minor op is passthrough */
		cpt_inst_w4.s.opcode_minor = 0x03;
		/* Send out completion code only */
		cpt_inst_w4.s.param2 = 0x1;
	}

	/* DPTR has SG list */
	in_buffer = m_vaddr;

	((uint16_t *)in_buffer)[0] = 0;
	((uint16_t *)in_buffer)[1] = 0;

	/* TODO Add error check if space will be sufficient */
	gather_comp = (struct roc_se_sglist_comp *)((uint8_t *)m_vaddr + 8);

	/*
	 * Input gather list
	 */

	i = 0;

	if (ctx->hmac) {
		uint64_t k_vaddr = (uint64_t)ctx->auth_key;
		/* Key */
		i = fill_sg_comp(gather_comp, i, k_vaddr,
				 RTE_ALIGN_CEIL(key_len, 8));
	}

	/* input data */
	size = data_len;
	if (size) {
		i = fill_sg_comp_from_iov(gather_comp, i, params->src_iov, 0,
					  &size, NULL, 0);
		if (unlikely(size)) {
			plt_dp_err("Insufficient src IOV size, short by %dB",
				   size);
			return -1;
		}
	} else {
		/*
		 * Looks like we need to support zero data
		 * gather ptr in case of hash & hmac
		 */
		size = 0;
	}
	((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
	g_size_bytes = ((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);

	/*
	 * Output scatter list
	 */

	i = 0;
	scatter_comp = (struct roc_se_sglist_comp *)((uint8_t *)gather_comp +
						     g_size_bytes);

	if (flags & ROC_SE_VALID_MAC_BUF) {
		if (unlikely(params->mac_buf.size < mac_len)) {
			plt_dp_err("Insufficient MAC size");
			return -1;
		}

		size = mac_len;
		i = fill_sg_comp_from_buf_min(scatter_comp, i, &params->mac_buf,
					      &size);
	} else {
		size = mac_len;
		i = fill_sg_comp_from_iov(scatter_comp, i, params->src_iov,
					  data_len, &size, NULL, 0);
		if (unlikely(size)) {
			plt_dp_err("Insufficient dst IOV size, short by %dB",
				   size);
			return -1;
		}
	}

	((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
	s_size_bytes = ((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);

	size = g_size_bytes + s_size_bytes + ROC_SE_SG_LIST_HDR_SIZE;

	/* This is DPTR len in case of SG mode */
	cpt_inst_w4.s.dlen = size;

	inst->dptr = (uint64_t)in_buffer;
	inst->w4.u64 = cpt_inst_w4.u64;

	return 0;
}

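/*
 * Illustrative sketch (not part of the driver): layout of the 8-byte SG list
 * header the routines here build in front of the gather/scatter components.
 * Half-word [2] carries the gather entry count and [3] the scatter entry
 * count, both big-endian; [0] and [1] stay zero. Hypothetical guard macro.
 */
#ifdef CNXK_SE_SG_HDR_DEMO
static inline void
cnxk_se_sg_hdr_demo(uint8_t *in_buffer, uint16_t g_entries, uint16_t s_entries)
{
	((uint16_t *)in_buffer)[0] = 0;
	((uint16_t *)in_buffer)[1] = 0;
	((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(g_entries);
	((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(s_entries);
}
#endif
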
static __rte_always_inline int
cpt_enc_hmac_prep(uint32_t flags, uint64_t d_offs, uint64_t d_lens,
		  struct roc_se_fc_params *fc_params, struct cpt_inst_s *inst)
{
	uint32_t iv_offset = 0;
	int32_t inputlen, outputlen, enc_dlen, auth_dlen;
	struct roc_se_ctx *se_ctx;
	uint32_t cipher_type, hash_type;
	uint32_t mac_len, size;
	uint8_t iv_len = 16;
	struct roc_se_buf_ptr *aad_buf = NULL;
	uint32_t encr_offset, auth_offset;
	uint32_t encr_data_len, auth_data_len, aad_len = 0;
	uint32_t passthrough_len = 0;
	union cpt_inst_w4 cpt_inst_w4;
	void *offset_vaddr;
	uint8_t op_minor;

	encr_offset = ROC_SE_ENCR_OFFSET(d_offs);
	auth_offset = ROC_SE_AUTH_OFFSET(d_offs);
	encr_data_len = ROC_SE_ENCR_DLEN(d_lens);
	auth_data_len = ROC_SE_AUTH_DLEN(d_lens);
	if (unlikely(flags & ROC_SE_VALID_AAD_BUF)) {
		/* We don't support both AAD and auth data separately */
		auth_data_len = 0;
		auth_offset = 0;
		aad_len = fc_params->aad_buf.size;
		aad_buf = &fc_params->aad_buf;
	}
	se_ctx = fc_params->ctx_buf.vaddr;
	cipher_type = se_ctx->enc_cipher;
	hash_type = se_ctx->hash_type;
	mac_len = se_ctx->mac_len;
	op_minor = se_ctx->template_w4.s.opcode_minor;

	if (unlikely(!(flags & ROC_SE_VALID_IV_BUF))) {
		iv_len = 0;
		iv_offset = ROC_SE_ENCR_IV_OFFSET(d_offs);
	}

	if (unlikely(flags & ROC_SE_VALID_AAD_BUF)) {
		/*
		 * When AAD is given, data above encr_offset is passed through.
		 * Since AAD is given as a separate pointer and not as an
		 * offset, this is a special case: the input data has to be
		 * fragmented into passthrough + encr_data with the AAD
		 * inserted in between.
		 */
		if (hash_type != ROC_SE_GMAC_TYPE) {
			passthrough_len = encr_offset;
			auth_offset = passthrough_len + iv_len;
			encr_offset = passthrough_len + aad_len + iv_len;
			auth_data_len = aad_len + encr_data_len;
		} else {
			passthrough_len = 16 + aad_len;
			auth_offset = passthrough_len + iv_len;
			auth_data_len = aad_len;
		}
	} else {
		encr_offset += iv_len;
		auth_offset += iv_len;
	}

	/* Encryption */
	cpt_inst_w4.s.opcode_major = ROC_SE_MAJOR_OP_FC;
	cpt_inst_w4.s.opcode_minor = ROC_SE_FC_MINOR_OP_ENCRYPT;
	cpt_inst_w4.s.opcode_minor |= (uint64_t)op_minor;

	if (hash_type == ROC_SE_GMAC_TYPE) {
		encr_offset = 0;
		encr_data_len = 0;
	}

	auth_dlen = auth_offset + auth_data_len;
	enc_dlen = encr_data_len + encr_offset;
	if (unlikely(encr_data_len & 0xf)) {
		if ((cipher_type == ROC_SE_DES3_CBC) ||
		    (cipher_type == ROC_SE_DES3_ECB))
			enc_dlen =
				RTE_ALIGN_CEIL(encr_data_len, 8) + encr_offset;
		else if (likely((cipher_type == ROC_SE_AES_CBC) ||
				(cipher_type == ROC_SE_AES_ECB)))
			enc_dlen =
				RTE_ALIGN_CEIL(encr_data_len, 16) + encr_offset;
	}

	if (unlikely(auth_dlen > enc_dlen)) {
		inputlen = auth_dlen;
		outputlen = auth_dlen + mac_len;
	} else {
		inputlen = enc_dlen;
		outputlen = enc_dlen + mac_len;
	}

	if (op_minor & ROC_SE_FC_MINOR_OP_HMAC_FIRST)
		outputlen = enc_dlen;

	/* GP op header */
	cpt_inst_w4.s.param1 = encr_data_len;
	cpt_inst_w4.s.param2 = auth_data_len;

	/*
	 * On cn9k and cn10k the IV and offset control word cannot be part of
	 * the instruction and must be carried in the data buffer, so use
	 * direct mode only when the buffer has enough headroom for them.
	 */
	if (likely((flags & ROC_SE_SINGLE_BUF_INPLACE) &&
		   (flags & ROC_SE_SINGLE_BUF_HEADROOM))) {
		void *dm_vaddr = fc_params->bufs[0].vaddr;

		/* Use Direct mode */

		offset_vaddr =
			(uint8_t *)dm_vaddr - ROC_SE_OFF_CTRL_LEN - iv_len;

		/* DPTR */
		inst->dptr = (uint64_t)offset_vaddr;

		/* RPTR should just exclude offset control word */
		inst->rptr = (uint64_t)dm_vaddr - iv_len;

		cpt_inst_w4.s.dlen = inputlen + ROC_SE_OFF_CTRL_LEN;

		if (likely(iv_len)) {
			uint64_t *dest = (uint64_t *)((uint8_t *)offset_vaddr +
						      ROC_SE_OFF_CTRL_LEN);
			uint64_t *src = fc_params->iv_buf;

			dest[0] = src[0];
			dest[1] = src[1];
		}
	} else {
		void *m_vaddr = fc_params->meta_buf.vaddr;
		uint32_t i, g_size_bytes, s_size_bytes;
		struct roc_se_sglist_comp *gather_comp;
		struct roc_se_sglist_comp *scatter_comp;
		uint8_t *in_buffer;

		/* This falls under strict SG mode */
		offset_vaddr = m_vaddr;
		size = ROC_SE_OFF_CTRL_LEN + iv_len;

		m_vaddr = (uint8_t *)m_vaddr + size;

		cpt_inst_w4.s.opcode_major |= (uint64_t)ROC_SE_DMA_MODE;

		if (likely(iv_len)) {
			uint64_t *dest = (uint64_t *)((uint8_t *)offset_vaddr +
						      ROC_SE_OFF_CTRL_LEN);
			uint64_t *src = fc_params->iv_buf;

			dest[0] = src[0];
			dest[1] = src[1];
		}

		/* DPTR has SG list */
		in_buffer = m_vaddr;

		((uint16_t *)in_buffer)[0] = 0;
		((uint16_t *)in_buffer)[1] = 0;

		/* TODO Add error check if space will be sufficient */
		gather_comp =
			(struct roc_se_sglist_comp *)((uint8_t *)m_vaddr + 8);

		/*
		 * Input Gather List
		 */
		i = 0;

		/* Offset control word that includes iv */
		i = fill_sg_comp(gather_comp, i, (uint64_t)offset_vaddr,
				 ROC_SE_OFF_CTRL_LEN + iv_len);

		/* Add input data */
		size = inputlen - iv_len;
		if (size) {
			uint32_t aad_offset = aad_len ? passthrough_len : 0;

			if (unlikely(flags & ROC_SE_SINGLE_BUF_INPLACE)) {
				i = fill_sg_comp_from_buf_min(
					gather_comp, i, fc_params->bufs, &size);
			} else {
				i = fill_sg_comp_from_iov(
					gather_comp, i, fc_params->src_iov, 0,
					&size, aad_buf, aad_offset);
			}

			if (unlikely(size)) {
				plt_dp_err("Insufficient buffer space,"
					   " size %d needed",
					   size);
				return -1;
			}
		}
		((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
		g_size_bytes =
			((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);

		/*
		 * Output Scatter list
		 */
		i = 0;
		scatter_comp =
			(struct roc_se_sglist_comp *)((uint8_t *)gather_comp +
						      g_size_bytes);

		/* Add IV */
		if (likely(iv_len)) {
			i = fill_sg_comp(scatter_comp, i,
					 (uint64_t)offset_vaddr +
						 ROC_SE_OFF_CTRL_LEN,
					 iv_len);
		}

		/* output data or output data + digest */
		if (unlikely(flags & ROC_SE_VALID_MAC_BUF)) {
			size = outputlen - iv_len - mac_len;
			if (size) {
				uint32_t aad_offset =
					aad_len ? passthrough_len : 0;

				if (unlikely(flags &
					     ROC_SE_SINGLE_BUF_INPLACE)) {
					i = fill_sg_comp_from_buf_min(
						scatter_comp, i,
						fc_params->bufs, &size);
				} else {
					i = fill_sg_comp_from_iov(
						scatter_comp, i,
						fc_params->dst_iov, 0, &size,
						aad_buf, aad_offset);
				}
				if (unlikely(size)) {
					plt_dp_err("Insufficient buffer"
						   " space, size %d needed",
						   size);
					return -1;
				}
			}
			/* mac data */
			if (mac_len) {
				i = fill_sg_comp_from_buf(scatter_comp, i,
							  &fc_params->mac_buf);
			}
		} else {
			/* Output including mac */
			size = outputlen - iv_len;
			if (likely(size)) {
				uint32_t aad_offset =
					aad_len ? passthrough_len : 0;

				if (unlikely(flags &
					     ROC_SE_SINGLE_BUF_INPLACE)) {
					i = fill_sg_comp_from_buf_min(
						scatter_comp, i,
						fc_params->bufs, &size);
				} else {
					i = fill_sg_comp_from_iov(
						scatter_comp, i,
						fc_params->dst_iov, 0, &size,
						aad_buf, aad_offset);
				}
				if (unlikely(size)) {
					plt_dp_err("Insufficient buffer"
						   " space, size %d needed",
						   size);
					return -1;
				}
			}
		}
		((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
		s_size_bytes =
			((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);

		size = g_size_bytes + s_size_bytes + ROC_SE_SG_LIST_HDR_SIZE;

		/* This is DPTR len in case of SG mode */
		cpt_inst_w4.s.dlen = size;

		inst->dptr = (uint64_t)in_buffer;
	}

	if (unlikely((encr_offset >> 16) || (iv_offset >> 8) ||
		     (auth_offset >> 8))) {
		plt_dp_err("Offset not supported");
		plt_dp_err("enc_offset: %d", encr_offset);
		plt_dp_err("iv_offset : %d", iv_offset);
		plt_dp_err("auth_offset: %d", auth_offset);
		return -1;
	}

	*(uint64_t *)offset_vaddr = rte_cpu_to_be_64(
		((uint64_t)encr_offset << 16) | ((uint64_t)iv_offset << 8) |
		((uint64_t)auth_offset));

	inst->w4.u64 = cpt_inst_w4.u64;

	return 0;
}

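/*
 * Illustrative sketch (not part of the driver): the 8-byte offset control
 * word placed ahead of the IV. After the big-endian store, encr_offset sits
 * in bits 31..16, iv_offset in bits 15..8 and auth_offset in bits 7..0,
 * which is why the offsets are range-checked against 16/8/8 bits above.
 * Hypothetical guard macro.
 */
#ifdef CNXK_SE_OFF_CTRL_DEMO
static inline uint64_t
cnxk_se_off_ctrl_demo(uint32_t encr_offset, uint32_t iv_offset,
		      uint32_t auth_offset)
{
	return rte_cpu_to_be_64(((uint64_t)encr_offset << 16) |
				((uint64_t)iv_offset << 8) |
				((uint64_t)auth_offset));
}
#endif
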
static __rte_always_inline int
cpt_dec_hmac_prep(uint32_t flags, uint64_t d_offs, uint64_t d_lens,
		  struct roc_se_fc_params *fc_params, struct cpt_inst_s *inst)
{
	uint32_t iv_offset = 0, size;
	int32_t inputlen, outputlen, enc_dlen, auth_dlen;
	struct roc_se_ctx *se_ctx;
	int32_t hash_type, mac_len;
	uint8_t iv_len = 16;
	struct roc_se_buf_ptr *aad_buf = NULL;
	uint32_t encr_offset, auth_offset;
	uint32_t encr_data_len, auth_data_len, aad_len = 0;
	uint32_t passthrough_len = 0;
	union cpt_inst_w4 cpt_inst_w4;
	void *offset_vaddr;
	uint8_t op_minor;

	encr_offset = ROC_SE_ENCR_OFFSET(d_offs);
	auth_offset = ROC_SE_AUTH_OFFSET(d_offs);
	encr_data_len = ROC_SE_ENCR_DLEN(d_lens);
	auth_data_len = ROC_SE_AUTH_DLEN(d_lens);

	if (unlikely(flags & ROC_SE_VALID_AAD_BUF)) {
		/* We don't support both AAD and auth data separately */
		auth_data_len = 0;
		auth_offset = 0;
		aad_len = fc_params->aad_buf.size;
		aad_buf = &fc_params->aad_buf;
	}

	se_ctx = fc_params->ctx_buf.vaddr;
	hash_type = se_ctx->hash_type;
	mac_len = se_ctx->mac_len;
	op_minor = se_ctx->template_w4.s.opcode_minor;

	if (unlikely(!(flags & ROC_SE_VALID_IV_BUF))) {
		iv_len = 0;
		iv_offset = ROC_SE_ENCR_IV_OFFSET(d_offs);
	}

	if (unlikely(flags & ROC_SE_VALID_AAD_BUF)) {
		/*
		 * When AAD is given, data above encr_offset is passed through.
		 * Since AAD is given as a separate pointer and not as an
		 * offset, this is a special case: the input data has to be
		 * fragmented into passthrough + encr_data with the AAD
		 * inserted in between.
		 */
		if (hash_type != ROC_SE_GMAC_TYPE) {
			passthrough_len = encr_offset;
			auth_offset = passthrough_len + iv_len;
			encr_offset = passthrough_len + aad_len + iv_len;
			auth_data_len = aad_len + encr_data_len;
		} else {
			passthrough_len = 16 + aad_len;
			auth_offset = passthrough_len + iv_len;
			auth_data_len = aad_len;
		}
	} else {
		encr_offset += iv_len;
		auth_offset += iv_len;
	}

	/* Decryption */
	cpt_inst_w4.s.opcode_major = ROC_SE_MAJOR_OP_FC;
	cpt_inst_w4.s.opcode_minor = ROC_SE_FC_MINOR_OP_DECRYPT;
	cpt_inst_w4.s.opcode_minor |= (uint64_t)op_minor;

	if (hash_type == ROC_SE_GMAC_TYPE) {
		encr_offset = 0;
		encr_data_len = 0;
	}

	enc_dlen = encr_offset + encr_data_len;
	auth_dlen = auth_offset + auth_data_len;

	if (auth_dlen > enc_dlen) {
		inputlen = auth_dlen + mac_len;
		outputlen = auth_dlen;
	} else {
		inputlen = enc_dlen + mac_len;
		outputlen = enc_dlen;
	}

	if (op_minor & ROC_SE_FC_MINOR_OP_HMAC_FIRST)
		outputlen = inputlen = enc_dlen;

	cpt_inst_w4.s.param1 = encr_data_len;
	cpt_inst_w4.s.param2 = auth_data_len;

	/*
	 * On cn9k and cn10k the IV and offset control word cannot be part of
	 * the instruction and must be carried in the data buffer, so use
	 * direct mode only when the buffer has enough headroom for them.
	 */
	if (likely((flags & ROC_SE_SINGLE_BUF_INPLACE) &&
		   (flags & ROC_SE_SINGLE_BUF_HEADROOM))) {
		void *dm_vaddr = fc_params->bufs[0].vaddr;

		/* Use Direct mode */

		offset_vaddr =
			(uint8_t *)dm_vaddr - ROC_SE_OFF_CTRL_LEN - iv_len;
		inst->dptr = (uint64_t)offset_vaddr;

		/* RPTR should just exclude offset control word */
		inst->rptr = (uint64_t)dm_vaddr - iv_len;

		cpt_inst_w4.s.dlen = inputlen + ROC_SE_OFF_CTRL_LEN;

		if (likely(iv_len)) {
			uint64_t *dest = (uint64_t *)((uint8_t *)offset_vaddr +
						      ROC_SE_OFF_CTRL_LEN);
			uint64_t *src = fc_params->iv_buf;

			dest[0] = src[0];
			dest[1] = src[1];
		}
	} else {
		void *m_vaddr = fc_params->meta_buf.vaddr;
		uint32_t g_size_bytes, s_size_bytes;
		struct roc_se_sglist_comp *gather_comp;
		struct roc_se_sglist_comp *scatter_comp;
		uint8_t *in_buffer;
		uint32_t i;

		/* This falls under strict SG mode */
		offset_vaddr = m_vaddr;
		size = ROC_SE_OFF_CTRL_LEN + iv_len;

		m_vaddr = (uint8_t *)m_vaddr + size;

		cpt_inst_w4.s.opcode_major |= (uint64_t)ROC_SE_DMA_MODE;

		if (likely(iv_len)) {
			uint64_t *dest = (uint64_t *)((uint8_t *)offset_vaddr +
						      ROC_SE_OFF_CTRL_LEN);
			uint64_t *src = fc_params->iv_buf;

			dest[0] = src[0];
			dest[1] = src[1];
		}

		/* DPTR has SG list */
		in_buffer = m_vaddr;

		((uint16_t *)in_buffer)[0] = 0;
		((uint16_t *)in_buffer)[1] = 0;

		/* TODO Add error check if space will be sufficient */
		gather_comp =
			(struct roc_se_sglist_comp *)((uint8_t *)m_vaddr + 8);

		/*
		 * Input Gather List
		 */
		i = 0;

		/* Offset control word that includes iv */
		i = fill_sg_comp(gather_comp, i, (uint64_t)offset_vaddr,
				 ROC_SE_OFF_CTRL_LEN + iv_len);

		/* Add input data */
		if (flags & ROC_SE_VALID_MAC_BUF) {
			size = inputlen - iv_len - mac_len;
			if (size) {
				/* input data only */
				if (unlikely(flags &
					     ROC_SE_SINGLE_BUF_INPLACE)) {
					i = fill_sg_comp_from_buf_min(
						gather_comp, i, fc_params->bufs,
						&size);
				} else {
					uint32_t aad_offset =
						aad_len ? passthrough_len : 0;

					i = fill_sg_comp_from_iov(
						gather_comp, i,
						fc_params->src_iov, 0, &size,
						aad_buf, aad_offset);
				}
				if (unlikely(size)) {
					plt_dp_err("Insufficient buffer"
						   " space, size %d needed",
						   size);
					return -1;
				}
			}

			/* mac data */
			if (mac_len) {
				i = fill_sg_comp_from_buf(gather_comp, i,
							  &fc_params->mac_buf);
			}
		} else {
			/* input data + mac */
			size = inputlen - iv_len;
			if (size) {
				if (unlikely(flags &
					     ROC_SE_SINGLE_BUF_INPLACE)) {
					i = fill_sg_comp_from_buf_min(
						gather_comp, i, fc_params->bufs,
						&size);
				} else {
					uint32_t aad_offset =
						aad_len ? passthrough_len : 0;

					if (unlikely(!fc_params->src_iov)) {
						plt_dp_err("Bad input args");
						return -1;
					}

					i = fill_sg_comp_from_iov(
						gather_comp, i,
						fc_params->src_iov, 0, &size,
						aad_buf, aad_offset);
				}

				if (unlikely(size)) {
					plt_dp_err("Insufficient buffer"
						   " space, size %d needed",
						   size);
					return -1;
				}
			}
		}
		((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
		g_size_bytes =
			((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);

		/*
		 * Output Scatter List
		 */
		i = 0;
		scatter_comp =
			(struct roc_se_sglist_comp *)((uint8_t *)gather_comp +
						      g_size_bytes);

		/* Add iv */
		if (iv_len) {
			i = fill_sg_comp(scatter_comp, i,
					 (uint64_t)offset_vaddr +
						 ROC_SE_OFF_CTRL_LEN,
					 iv_len);
		}

		/* Add output data */
		size = outputlen - iv_len;
		if (size) {
			if (unlikely(flags & ROC_SE_SINGLE_BUF_INPLACE)) {
				/* handle single buffer here */
				i = fill_sg_comp_from_buf_min(scatter_comp, i,
							      fc_params->bufs,
							      &size);
			} else {
				uint32_t aad_offset =
					aad_len ? passthrough_len : 0;

				if (unlikely(!fc_params->dst_iov)) {
					plt_dp_err("Bad input args");
					return -1;
				}

				i = fill_sg_comp_from_iov(
					scatter_comp, i, fc_params->dst_iov, 0,
					&size, aad_buf, aad_offset);
			}

			if (unlikely(size)) {
				plt_dp_err("Insufficient buffer space,"
					   " size %d needed",
					   size);
				return -1;
			}
		}

		((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
		s_size_bytes =
			((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);

		size = g_size_bytes + s_size_bytes + ROC_SE_SG_LIST_HDR_SIZE;

		/* This is DPTR len in case of SG mode */
		cpt_inst_w4.s.dlen = size;

		inst->dptr = (uint64_t)in_buffer;
	}

	if (unlikely((encr_offset >> 16) || (iv_offset >> 8) ||
		     (auth_offset >> 8))) {
		plt_dp_err("Offset not supported");
		plt_dp_err("enc_offset: %d", encr_offset);
		plt_dp_err("iv_offset : %d", iv_offset);
		plt_dp_err("auth_offset: %d", auth_offset);
		return -1;
	}

	*(uint64_t *)offset_vaddr = rte_cpu_to_be_64(
		((uint64_t)encr_offset << 16) | ((uint64_t)iv_offset << 8) |
		((uint64_t)auth_offset));

	inst->w4.u64 = cpt_inst_w4.u64;

	return 0;
}

static __rte_always_inline int
cpt_pdcp_alg_prep(uint32_t req_flags, uint64_t d_offs, uint64_t d_lens,
		  struct roc_se_fc_params *params, struct cpt_inst_s *inst)
{
	uint32_t size;
	int32_t inputlen, outputlen;
	struct roc_se_ctx *se_ctx;
	uint32_t mac_len = 0;
	uint8_t pdcp_alg_type;
	uint32_t encr_offset, auth_offset;
	uint32_t encr_data_len, auth_data_len;
	int flags, iv_len;
	uint64_t offset_ctrl;
	uint64_t *offset_vaddr;
	uint8_t *iv_s;
	uint8_t pack_iv = 0;
	union cpt_inst_w4 cpt_inst_w4;

	se_ctx = params->ctx_buf.vaddr;
	flags = se_ctx->zsk_flags;
	mac_len = se_ctx->mac_len;
	pdcp_alg_type = se_ctx->pdcp_alg_type;

	cpt_inst_w4.s.opcode_major = ROC_SE_MAJOR_OP_PDCP;
	cpt_inst_w4.s.opcode_minor = se_ctx->template_w4.s.opcode_minor;

	if (flags == 0x1) {
		iv_s = params->auth_iv_buf;

		/*
		 * Microcode expects offsets in bytes
		 * TODO: Rounding off
		 */
		auth_data_len = ROC_SE_AUTH_DLEN(d_lens);
		auth_offset = ROC_SE_AUTH_OFFSET(d_offs);

		if (se_ctx->pdcp_alg_type != ROC_SE_PDCP_ALG_TYPE_AES_CTR) {
			iv_len = params->auth_iv_len;

			if (iv_len == 25) {
				iv_len -= 2;
				pack_iv = 1;
			}

			auth_offset = auth_offset / 8;

			/* consider iv len */
			auth_offset += iv_len;

			inputlen =
				auth_offset + (RTE_ALIGN(auth_data_len, 8) / 8);
		} else {
			iv_len = 16;

			/* consider iv len */
			auth_offset += iv_len;

			inputlen = auth_offset + auth_data_len;

			/* length should be in bits */
			auth_data_len *= 8;
		}

		outputlen = mac_len;

		offset_ctrl = rte_cpu_to_be_64((uint64_t)auth_offset);

		encr_data_len = 0;
		encr_offset = 0;
	} else {
		iv_s = params->iv_buf;
		iv_len = params->cipher_iv_len;

		if (iv_len == 25) {
			iv_len -= 2;
			pack_iv = 1;
		}

		/*
		 * Microcode expects offsets in bytes
		 * TODO: Rounding off
		 */
		encr_data_len = ROC_SE_ENCR_DLEN(d_lens);

		encr_offset = ROC_SE_ENCR_OFFSET(d_offs);
		encr_offset = encr_offset / 8;
		/* consider iv len */
		encr_offset += iv_len;

		inputlen = encr_offset + (RTE_ALIGN(encr_data_len, 8) / 8);
		outputlen = inputlen;

		/* iv offset is 0 */
		offset_ctrl = rte_cpu_to_be_64((uint64_t)encr_offset << 16);

		auth_data_len = 0;
		auth_offset = 0;
	}

	if (unlikely((encr_offset >> 16) || (auth_offset >> 8))) {
		plt_dp_err("Offset not supported");
		plt_dp_err("enc_offset: %d", encr_offset);
		plt_dp_err("auth_offset: %d", auth_offset);
		return -1;
	}

	/*
	 * GP op header, lengths are expected in bits.
	 */
	cpt_inst_w4.s.param1 = encr_data_len;
	cpt_inst_w4.s.param2 = auth_data_len;

	/*
	 * On cn9k and cn10k the IV and offset control word cannot be part of
	 * the instruction and must be carried in the data buffer, so use
	 * direct mode only when the buffer has enough headroom for them.
	 */
	if (likely((req_flags & ROC_SE_SINGLE_BUF_INPLACE) &&
		   (req_flags & ROC_SE_SINGLE_BUF_HEADROOM))) {
		void *dm_vaddr = params->bufs[0].vaddr;

		/* Use Direct mode */

		offset_vaddr = (uint64_t *)((uint8_t *)dm_vaddr -
					    ROC_SE_OFF_CTRL_LEN - iv_len);

		/* DPTR */
		inst->dptr = (uint64_t)offset_vaddr;
		/* RPTR should just exclude offset control word */
		inst->rptr = (uint64_t)dm_vaddr - iv_len;

		cpt_inst_w4.s.dlen = inputlen + ROC_SE_OFF_CTRL_LEN;

		uint8_t *iv_d = ((uint8_t *)offset_vaddr + ROC_SE_OFF_CTRL_LEN);
		pdcp_iv_copy(iv_d, iv_s, pdcp_alg_type, pack_iv);

		*offset_vaddr = offset_ctrl;
	} else {
		void *m_vaddr = params->meta_buf.vaddr;
		uint32_t i, g_size_bytes, s_size_bytes;
		struct roc_se_sglist_comp *gather_comp;
		struct roc_se_sglist_comp *scatter_comp;
		uint8_t *in_buffer;
		uint8_t *iv_d;

		/* save space for iv */
		offset_vaddr = m_vaddr;

		m_vaddr = (uint8_t *)m_vaddr + ROC_SE_OFF_CTRL_LEN +
			  RTE_ALIGN_CEIL(iv_len, 8);

		cpt_inst_w4.s.opcode_major |= (uint64_t)ROC_SE_DMA_MODE;

		/* DPTR has SG list */
		in_buffer = m_vaddr;

		((uint16_t *)in_buffer)[0] = 0;
		((uint16_t *)in_buffer)[1] = 0;

		/* TODO Add error check if space will be sufficient */
		gather_comp =
			(struct roc_se_sglist_comp *)((uint8_t *)m_vaddr + 8);

		/*
		 * Input Gather List
		 */
		i = 0;

		/* Offset control word followed by iv */
		i = fill_sg_comp(gather_comp, i, (uint64_t)offset_vaddr,
				 ROC_SE_OFF_CTRL_LEN + iv_len);

		/* iv offset is 0 */
		*offset_vaddr = offset_ctrl;

		iv_d = ((uint8_t *)offset_vaddr + ROC_SE_OFF_CTRL_LEN);
		pdcp_iv_copy(iv_d, iv_s, pdcp_alg_type, pack_iv);

		/* input data */
		size = inputlen - iv_len;
		if (size) {
			i = fill_sg_comp_from_iov(gather_comp, i,
						  params->src_iov, 0, &size,
						  NULL, 0);
			if (unlikely(size)) {
				plt_dp_err("Insufficient buffer space,"
					   " size %d needed",
					   size);
				return -1;
			}
		}
		((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
		g_size_bytes =
			((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);

		/*
		 * Output Scatter List
		 */

		i = 0;
		scatter_comp =
			(struct roc_se_sglist_comp *)((uint8_t *)gather_comp +
						      g_size_bytes);

		if (flags == 0x1) {
			/* IV in SLIST only for EEA3 & UEA2 */
			iv_len = 0;
		}

		if (iv_len) {
			i = fill_sg_comp(scatter_comp, i,
					 (uint64_t)offset_vaddr +
						 ROC_SE_OFF_CTRL_LEN,
					 iv_len);
		}

		/* Add output data */
		if (req_flags & ROC_SE_VALID_MAC_BUF) {
			size = outputlen - iv_len - mac_len;
			if (size) {
				i = fill_sg_comp_from_iov(scatter_comp, i,
							  params->dst_iov, 0,
							  &size, NULL, 0);
				if (unlikely(size)) {
					plt_dp_err("Insufficient buffer space,"
						   " size %d needed",
						   size);
					return -1;
				}
			}
			/* mac data */
			if (mac_len) {
				i = fill_sg_comp_from_buf(scatter_comp, i,
							  &params->mac_buf);
			}
		} else {
			/* Output including mac */
			size = outputlen - iv_len;
			if (size) {
				i = fill_sg_comp_from_iov(scatter_comp, i,
							  params->dst_iov, 0,
							  &size, NULL, 0);
				if (unlikely(size)) {
					plt_dp_err("Insufficient buffer space,"
						   " size %d needed",
						   size);
					return -1;
				}
			}
		}
		((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
		s_size_bytes =
			((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);

		size = g_size_bytes + s_size_bytes + ROC_SE_SG_LIST_HDR_SIZE;

		/* This is DPTR len in case of SG mode */
		cpt_inst_w4.s.dlen = size;

		inst->dptr = (uint64_t)in_buffer;
	}

	inst->w4.u64 = cpt_inst_w4.u64;

	return 0;
}

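/*
 * Illustrative sketch (not part of the driver): for PDCP ops the offsets in
 * d_offs arrive in bits while the microcode wants bytes, so the code above
 * divides by 8 and then adds the IV length that precedes the data; the data
 * lengths placed in param1/param2 stay in bits. Hypothetical guard macro.
 */
#ifdef CNXK_SE_PDCP_OFFSET_DEMO
static inline uint32_t
cnxk_se_pdcp_offset_demo(uint64_t d_offs, uint32_t iv_len)
{
	uint32_t encr_offset = ROC_SE_ENCR_OFFSET(d_offs);

	return (encr_offset / 8) + iv_len; /* byte offset seen by microcode */
}
#endif
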
1269 static __rte_always_inline int
1270 cpt_kasumi_enc_prep(uint32_t req_flags, uint64_t d_offs, uint64_t d_lens,
1271 struct roc_se_fc_params *params, struct cpt_inst_s *inst)
1273 void *m_vaddr = params->meta_buf.vaddr;
1275 int32_t inputlen = 0, outputlen = 0;
1276 struct roc_se_ctx *se_ctx;
1277 uint32_t mac_len = 0;
1279 uint32_t encr_offset, auth_offset;
1280 uint32_t encr_data_len, auth_data_len;
1282 uint8_t *iv_s, *iv_d, iv_len = 8;
1284 uint64_t *offset_vaddr;
1285 union cpt_inst_w4 cpt_inst_w4;
1287 uint32_t g_size_bytes, s_size_bytes;
1288 struct roc_se_sglist_comp *gather_comp;
1289 struct roc_se_sglist_comp *scatter_comp;
1291 encr_offset = ROC_SE_ENCR_OFFSET(d_offs) / 8;
1292 auth_offset = ROC_SE_AUTH_OFFSET(d_offs) / 8;
1293 encr_data_len = ROC_SE_ENCR_DLEN(d_lens);
1294 auth_data_len = ROC_SE_AUTH_DLEN(d_lens);
1296 se_ctx = params->ctx_buf.vaddr;
1297 flags = se_ctx->zsk_flags;
1298 mac_len = se_ctx->mac_len;
1301 iv_s = params->iv_buf;
1303 iv_s = params->auth_iv_buf;
1305 dir = iv_s[8] & 0x1;
1307 cpt_inst_w4.s.opcode_major = ROC_SE_MAJOR_OP_KASUMI | ROC_SE_DMA_MODE;
1309 /* indicates ECB/CBC, direction, ctx from cptr, iv from dptr */
1310 cpt_inst_w4.s.opcode_minor = ((1 << 6) | (se_ctx->k_ecb << 5) |
1311 (dir << 4) | (0 << 3) | (flags & 0x7));
1314 * GP op header, lengths are expected in bits.
1316 cpt_inst_w4.s.param1 = encr_data_len;
1317 cpt_inst_w4.s.param2 = auth_data_len;
1319 /* consider iv len */
1321 encr_offset += iv_len;
1322 auth_offset += iv_len;
1325 /* save space for offset ctrl and iv */
1326 offset_vaddr = m_vaddr;
1328 m_vaddr = (uint8_t *)m_vaddr + ROC_SE_OFF_CTRL_LEN + iv_len;
1330 /* DPTR has SG list */
1331 in_buffer = m_vaddr;
1333 ((uint16_t *)in_buffer)[0] = 0;
1334 ((uint16_t *)in_buffer)[1] = 0;
1336 /* TODO Add error check if space will be sufficient */
1337 gather_comp = (struct roc_se_sglist_comp *)((uint8_t *)m_vaddr + 8);
1344 /* Offset control word followed by iv */
1347 inputlen = encr_offset + (RTE_ALIGN(encr_data_len, 8) / 8);
1348 outputlen = inputlen;
1349 /* iv offset is 0 */
1350 *offset_vaddr = rte_cpu_to_be_64((uint64_t)encr_offset << 16);
1351 if (unlikely((encr_offset >> 16))) {
1352 plt_dp_err("Offset not supported");
1353 plt_dp_err("enc_offset: %d", encr_offset);
1357 inputlen = auth_offset + (RTE_ALIGN(auth_data_len, 8) / 8);
1358 outputlen = mac_len;
1359 /* iv offset is 0 */
1360 *offset_vaddr = rte_cpu_to_be_64((uint64_t)auth_offset);
1361 if (unlikely((auth_offset >> 8))) {
1362 plt_dp_err("Offset not supported");
1363 plt_dp_err("auth_offset: %d", auth_offset);
1368 i = fill_sg_comp(gather_comp, i, (uint64_t)offset_vaddr,
1369 ROC_SE_OFF_CTRL_LEN + iv_len);
1372 iv_d = (uint8_t *)offset_vaddr + ROC_SE_OFF_CTRL_LEN;
1373 memcpy(iv_d, iv_s, iv_len);
1376 size = inputlen - iv_len;
1378 i = fill_sg_comp_from_iov(gather_comp, i, params->src_iov, 0,
1381 if (unlikely(size)) {
1382 plt_dp_err("Insufficient buffer space,"
1388 ((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
1389 g_size_bytes = ((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);
1392 * Output Scatter List
1396 scatter_comp = (struct roc_se_sglist_comp *)((uint8_t *)gather_comp +
1400 /* IV in SLIST only for F8 */
1406 i = fill_sg_comp(scatter_comp, i,
1407 (uint64_t)offset_vaddr + ROC_SE_OFF_CTRL_LEN,
1411 /* Add output data */
1412 if (req_flags & ROC_SE_VALID_MAC_BUF) {
1413 size = outputlen - iv_len - mac_len;
1415 i = fill_sg_comp_from_iov(scatter_comp, i,
1416 params->dst_iov, 0, &size,
1419 if (unlikely(size)) {
1420 plt_dp_err("Insufficient buffer space,"
1429 i = fill_sg_comp_from_buf(scatter_comp, i,
1433 /* Output including mac */
1434 size = outputlen - iv_len;
1436 i = fill_sg_comp_from_iov(scatter_comp, i,
1437 params->dst_iov, 0, &size,
1440 if (unlikely(size)) {
1441 plt_dp_err("Insufficient buffer space,"
1448 ((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
1449 s_size_bytes = ((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);
1451 size = g_size_bytes + s_size_bytes + ROC_SE_SG_LIST_HDR_SIZE;
1453 /* This is DPTR len in case of SG mode */
1454 cpt_inst_w4.s.dlen = size;
1456 inst->dptr = (uint64_t)in_buffer;
1457 inst->w4.u64 = cpt_inst_w4.u64;
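/*
 * Illustrative sketch (not part of the driver): the KASUMI opcode_minor bit
 * fields assembled above, as suggested by the in-code comment. Bit 6 selects
 * IV from DPTR, bit 5 carries k_ecb (ECB vs F8 mode), bit 4 the direction
 * taken from the IV, bit 3 is zero for ctx from CPTR, and bits 2..0 carry
 * the zsk_flags. Hypothetical guard macro.
 */
#ifdef CNXK_SE_KASUMI_MINOR_DEMO
static inline uint16_t
cnxk_se_kasumi_minor_demo(uint8_t k_ecb, uint8_t dir, int flags)
{
	return (uint16_t)((1 << 6) | (k_ecb << 5) | (dir << 4) | (0 << 3) |
			  (flags & 0x7));
}
#endif
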
static __rte_always_inline int
cpt_kasumi_dec_prep(uint64_t d_offs, uint64_t d_lens,
		    struct roc_se_fc_params *params, struct cpt_inst_s *inst)
{
	void *m_vaddr = params->meta_buf.vaddr;
	uint32_t size;
	int32_t inputlen = 0, outputlen;
	struct roc_se_ctx *se_ctx;
	uint8_t i = 0, iv_len = 8;
	uint32_t encr_offset;
	uint32_t encr_data_len;
	int flags;
	uint8_t dir = 0;
	uint64_t *offset_vaddr;
	union cpt_inst_w4 cpt_inst_w4;
	uint8_t *in_buffer;
	uint32_t g_size_bytes, s_size_bytes;
	struct roc_se_sglist_comp *gather_comp;
	struct roc_se_sglist_comp *scatter_comp;

	encr_offset = ROC_SE_ENCR_OFFSET(d_offs) / 8;
	encr_data_len = ROC_SE_ENCR_DLEN(d_lens);

	se_ctx = params->ctx_buf.vaddr;
	flags = se_ctx->zsk_flags;

	cpt_inst_w4.u64 = 0;
	cpt_inst_w4.s.opcode_major = ROC_SE_MAJOR_OP_KASUMI | ROC_SE_DMA_MODE;

	/* indicates ECB/CBC, direction, ctx from cptr, iv from dptr */
	cpt_inst_w4.s.opcode_minor = ((1 << 6) | (se_ctx->k_ecb << 5) |
				      (dir << 4) | (0 << 3) | (flags & 0x7));

	/*
	 * GP op header, lengths are expected in bits.
	 */
	cpt_inst_w4.s.param1 = encr_data_len;

	/* consider iv len */
	encr_offset += iv_len;

	inputlen = encr_offset + (RTE_ALIGN(encr_data_len, 8) / 8);
	outputlen = inputlen;

	/* save space for offset ctrl & iv */
	offset_vaddr = m_vaddr;

	m_vaddr = (uint8_t *)m_vaddr + ROC_SE_OFF_CTRL_LEN + iv_len;

	/* DPTR has SG list */
	in_buffer = m_vaddr;

	((uint16_t *)in_buffer)[0] = 0;
	((uint16_t *)in_buffer)[1] = 0;

	/* TODO Add error check if space will be sufficient */
	gather_comp = (struct roc_se_sglist_comp *)((uint8_t *)m_vaddr + 8);

	/*
	 * Input Gather List
	 */
	i = 0;

	/* Offset control word followed by iv */
	*offset_vaddr = rte_cpu_to_be_64((uint64_t)encr_offset << 16);
	if (unlikely((encr_offset >> 16))) {
		plt_dp_err("Offset not supported");
		plt_dp_err("enc_offset: %d", encr_offset);
		return -1;
	}

	i = fill_sg_comp(gather_comp, i, (uint64_t)offset_vaddr,
			 ROC_SE_OFF_CTRL_LEN + iv_len);

	/* IV */
	memcpy((uint8_t *)offset_vaddr + ROC_SE_OFF_CTRL_LEN, params->iv_buf,
	       iv_len);

	/* Add input data */
	size = inputlen - iv_len;
	if (size) {
		i = fill_sg_comp_from_iov(gather_comp, i, params->src_iov, 0,
					  &size, NULL, 0);
		if (unlikely(size)) {
			plt_dp_err("Insufficient buffer space,"
				   " size %d needed",
				   size);
			return -1;
		}
	}
	((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
	g_size_bytes = ((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);

	/*
	 * Output Scatter List
	 */

	i = 0;
	scatter_comp = (struct roc_se_sglist_comp *)((uint8_t *)gather_comp +
						     g_size_bytes);

	/* IV */
	i = fill_sg_comp(scatter_comp, i,
			 (uint64_t)offset_vaddr + ROC_SE_OFF_CTRL_LEN, iv_len);

	/* Add output data */
	size = outputlen - iv_len;
	if (size) {
		i = fill_sg_comp_from_iov(scatter_comp, i, params->dst_iov, 0,
					  &size, NULL, 0);
		if (unlikely(size)) {
			plt_dp_err("Insufficient buffer space,"
				   " size %d needed",
				   size);
			return -1;
		}
	}
	((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
	s_size_bytes = ((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);

	size = g_size_bytes + s_size_bytes + ROC_SE_SG_LIST_HDR_SIZE;

	/* This is DPTR len in case of SG mode */
	cpt_inst_w4.s.dlen = size;

	inst->dptr = (uint64_t)in_buffer;
	inst->w4.u64 = cpt_inst_w4.u64;

	return 0;
}

static __rte_always_inline int
cpt_fc_dec_hmac_prep(uint32_t flags, uint64_t d_offs, uint64_t d_lens,
		     struct roc_se_fc_params *fc_params,
		     struct cpt_inst_s *inst)
{
	struct roc_se_ctx *ctx = fc_params->ctx_buf.vaddr;
	uint8_t fc_type;
	int ret = -1;

	fc_type = ctx->fc_type;

	if (likely(fc_type == ROC_SE_FC_GEN)) {
		ret = cpt_dec_hmac_prep(flags, d_offs, d_lens, fc_params, inst);
	} else if (fc_type == ROC_SE_PDCP) {
		ret = cpt_pdcp_alg_prep(flags, d_offs, d_lens, fc_params, inst);
	} else if (fc_type == ROC_SE_KASUMI) {
		ret = cpt_kasumi_dec_prep(d_offs, d_lens, fc_params, inst);
	}

	/*
	 * For the AUTH_ONLY case the microcode only supports digest
	 * generation; verification has to be done in software, by memcmp(),
	 * on the generated digest.
	 */

	return ret;
}

static __rte_always_inline int
cpt_fc_enc_hmac_prep(uint32_t flags, uint64_t d_offs, uint64_t d_lens,
		     struct roc_se_fc_params *fc_params,
		     struct cpt_inst_s *inst)
{
	struct roc_se_ctx *ctx = fc_params->ctx_buf.vaddr;
	uint8_t fc_type;
	int ret = -1;

	fc_type = ctx->fc_type;

	if (likely(fc_type == ROC_SE_FC_GEN)) {
		ret = cpt_enc_hmac_prep(flags, d_offs, d_lens, fc_params, inst);
	} else if (fc_type == ROC_SE_PDCP) {
		ret = cpt_pdcp_alg_prep(flags, d_offs, d_lens, fc_params, inst);
	} else if (fc_type == ROC_SE_KASUMI) {
		ret = cpt_kasumi_enc_prep(flags, d_offs, d_lens, fc_params,
					  inst);
	} else if (fc_type == ROC_SE_HASH_HMAC) {
		ret = cpt_digest_gen_prep(flags, d_lens, fc_params, inst);
	}

	return ret;
}

1646 static __rte_always_inline int
1647 fill_sess_aead(struct rte_crypto_sym_xform *xform, struct cnxk_se_sess *sess)
1649 struct rte_crypto_aead_xform *aead_form;
1650 roc_se_cipher_type enc_type = 0; /* NULL Cipher type */
1651 roc_se_auth_type auth_type = 0; /* NULL Auth type */
1652 uint32_t cipher_key_len = 0;
1653 uint8_t aes_gcm = 0;
1654 aead_form = &xform->aead;
1656 if (aead_form->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) {
1657 sess->cpt_op |= ROC_SE_OP_CIPHER_ENCRYPT;
1658 sess->cpt_op |= ROC_SE_OP_AUTH_GENERATE;
1659 } else if (aead_form->op == RTE_CRYPTO_AEAD_OP_DECRYPT) {
1660 sess->cpt_op |= ROC_SE_OP_CIPHER_DECRYPT;
1661 sess->cpt_op |= ROC_SE_OP_AUTH_VERIFY;
1663 plt_dp_err("Unknown aead operation\n");
1666 switch (aead_form->algo) {
1667 case RTE_CRYPTO_AEAD_AES_GCM:
1668 enc_type = ROC_SE_AES_GCM;
1669 cipher_key_len = 16;
1672 case RTE_CRYPTO_AEAD_AES_CCM:
1673 plt_dp_err("Crypto: Unsupported cipher algo %u",
1676 case RTE_CRYPTO_AEAD_CHACHA20_POLY1305:
1677 enc_type = ROC_SE_CHACHA20;
1678 auth_type = ROC_SE_POLY1305;
1679 cipher_key_len = 32;
1680 sess->chacha_poly = 1;
1683 plt_dp_err("Crypto: Undefined cipher algo %u specified",
1687 if (aead_form->key.length < cipher_key_len) {
1688 plt_dp_err("Invalid cipher params keylen %u",
1689 aead_form->key.length);
1693 sess->aes_gcm = aes_gcm;
1694 sess->mac_len = aead_form->digest_length;
1695 sess->iv_offset = aead_form->iv.offset;
1696 sess->iv_length = aead_form->iv.length;
1697 sess->aad_length = aead_form->aad_length;
1699 if (unlikely(roc_se_ciph_key_set(&sess->roc_se_ctx, enc_type,
1700 aead_form->key.data,
1701 aead_form->key.length, NULL)))
1704 if (unlikely(roc_se_auth_key_set(&sess->roc_se_ctx, auth_type, NULL, 0,
1705 aead_form->digest_length)))
static __rte_always_inline int
fill_sess_cipher(struct rte_crypto_sym_xform *xform, struct cnxk_se_sess *sess)
{
	struct rte_crypto_cipher_xform *c_form;
	roc_se_cipher_type enc_type = 0; /* NULL Cipher type */
	uint32_t cipher_key_len = 0;
	uint8_t zsk_flag = 0, aes_ctr = 0, is_null = 0;

	c_form = &xform->cipher;

	if (c_form->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
		sess->cpt_op |= ROC_SE_OP_CIPHER_ENCRYPT;
	else if (c_form->op == RTE_CRYPTO_CIPHER_OP_DECRYPT) {
		sess->cpt_op |= ROC_SE_OP_CIPHER_DECRYPT;
		if (xform->next != NULL &&
		    xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
			/* Perform decryption followed by auth verify */
			sess->roc_se_ctx.template_w4.s.opcode_minor =
				ROC_SE_FC_MINOR_OP_HMAC_FIRST;
		}
	} else {
		plt_dp_err("Unknown cipher operation");
		return -1;
	}

	switch (c_form->algo) {
	case RTE_CRYPTO_CIPHER_AES_CBC:
		enc_type = ROC_SE_AES_CBC;
		cipher_key_len = 16;
		break;
	case RTE_CRYPTO_CIPHER_3DES_CBC:
		enc_type = ROC_SE_DES3_CBC;
		cipher_key_len = 24;
		break;
	case RTE_CRYPTO_CIPHER_DES_CBC:
		/* DES is implemented using 3DES in hardware */
		enc_type = ROC_SE_DES3_CBC;
		cipher_key_len = 8;
		break;
	case RTE_CRYPTO_CIPHER_AES_CTR:
		enc_type = ROC_SE_AES_CTR;
		cipher_key_len = 16;
		aes_ctr = 1;
		break;
	case RTE_CRYPTO_CIPHER_NULL:
		enc_type = 0;
		is_null = 1;
		break;
	case RTE_CRYPTO_CIPHER_KASUMI_F8:
		enc_type = ROC_SE_KASUMI_F8_ECB;
		cipher_key_len = 16;
		zsk_flag = ROC_SE_K_F8;
		break;
	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
		enc_type = ROC_SE_SNOW3G_UEA2;
		cipher_key_len = 16;
		zsk_flag = ROC_SE_ZS_EA;
		break;
	case RTE_CRYPTO_CIPHER_ZUC_EEA3:
		enc_type = ROC_SE_ZUC_EEA3;
		cipher_key_len = c_form->key.length;
		zsk_flag = ROC_SE_ZS_EA;
		break;
	case RTE_CRYPTO_CIPHER_AES_XTS:
		enc_type = ROC_SE_AES_XTS;
		cipher_key_len = 16;
		break;
	case RTE_CRYPTO_CIPHER_3DES_ECB:
		enc_type = ROC_SE_DES3_ECB;
		cipher_key_len = 24;
		break;
	case RTE_CRYPTO_CIPHER_AES_ECB:
		enc_type = ROC_SE_AES_ECB;
		cipher_key_len = 16;
		break;
	case RTE_CRYPTO_CIPHER_3DES_CTR:
	case RTE_CRYPTO_CIPHER_AES_F8:
	case RTE_CRYPTO_CIPHER_ARC4:
		plt_dp_err("Crypto: Unsupported cipher algo %u", c_form->algo);
		return -1;
	default:
		plt_dp_err("Crypto: Undefined cipher algo %u specified",
			   c_form->algo);
		return -1;
	}

	if (c_form->key.length < cipher_key_len) {
		plt_dp_err("Invalid cipher params keylen %u",
			   c_form->key.length);
		return -1;
	}

	sess->zsk_flag = zsk_flag;
	sess->aes_gcm = 0;
	sess->aes_ctr = aes_ctr;
	sess->iv_offset = c_form->iv.offset;
	sess->iv_length = c_form->iv.length;
	sess->is_null = is_null;

	if (unlikely(roc_se_ciph_key_set(&sess->roc_se_ctx, enc_type,
					 c_form->key.data, c_form->key.length,
					 NULL)))
		return -1;

	if ((enc_type >= ROC_SE_ZUC_EEA3) && (enc_type <= ROC_SE_AES_CTR_EEA2))
		roc_se_ctx_swap(&sess->roc_se_ctx);

	return 0;
}

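/*
 * Illustrative sketch (not part of the driver): a minimal cipher-only xform
 * as a caller might populate it before session setup reaches
 * fill_sess_cipher(). The key pointer and the IV offset are placeholders.
 * Hypothetical guard macro.
 */
#ifdef CNXK_SE_XFORM_DEMO
static inline int
cnxk_se_cipher_xform_demo(struct cnxk_se_sess *sess, const uint8_t *key)
{
	struct rte_crypto_sym_xform xform = {0};

	xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
	xform.cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
	xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
	xform.cipher.key.data = key;
	xform.cipher.key.length = 16;
	xform.cipher.iv.offset = sizeof(struct rte_crypto_op); /* placeholder */
	xform.cipher.iv.length = 16;

	return fill_sess_cipher(&xform, sess);
}
#endif
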
static __rte_always_inline int
fill_sess_auth(struct rte_crypto_sym_xform *xform, struct cnxk_se_sess *sess)
{
	struct rte_crypto_auth_xform *a_form;
	roc_se_auth_type auth_type = 0; /* NULL Auth type */
	uint8_t zsk_flag = 0, aes_gcm = 0, is_null = 0;

	if (xform->auth.algo == RTE_CRYPTO_AUTH_AES_GMAC)
		return fill_sess_gmac(xform, sess);

	if (xform->next != NULL &&
	    xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
	    xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
		/* Perform auth followed by encryption */
		sess->roc_se_ctx.template_w4.s.opcode_minor =
			ROC_SE_FC_MINOR_OP_HMAC_FIRST;
	}

	a_form = &xform->auth;

	if (a_form->op == RTE_CRYPTO_AUTH_OP_VERIFY)
		sess->cpt_op |= ROC_SE_OP_AUTH_VERIFY;
	else if (a_form->op == RTE_CRYPTO_AUTH_OP_GENERATE)
		sess->cpt_op |= ROC_SE_OP_AUTH_GENERATE;
	else {
		plt_dp_err("Unknown auth operation");
		return -1;
	}

	switch (a_form->algo) {
	case RTE_CRYPTO_AUTH_SHA1_HMAC:
	case RTE_CRYPTO_AUTH_SHA1:
		auth_type = ROC_SE_SHA1_TYPE;
		break;
	case RTE_CRYPTO_AUTH_SHA256_HMAC:
	case RTE_CRYPTO_AUTH_SHA256:
		auth_type = ROC_SE_SHA2_SHA256;
		break;
	case RTE_CRYPTO_AUTH_SHA512_HMAC:
	case RTE_CRYPTO_AUTH_SHA512:
		auth_type = ROC_SE_SHA2_SHA512;
		break;
	case RTE_CRYPTO_AUTH_AES_GMAC:
		auth_type = ROC_SE_GMAC_TYPE;
		aes_gcm = 1;
		break;
	case RTE_CRYPTO_AUTH_SHA224_HMAC:
	case RTE_CRYPTO_AUTH_SHA224:
		auth_type = ROC_SE_SHA2_SHA224;
		break;
	case RTE_CRYPTO_AUTH_SHA384_HMAC:
	case RTE_CRYPTO_AUTH_SHA384:
		auth_type = ROC_SE_SHA2_SHA384;
		break;
	case RTE_CRYPTO_AUTH_MD5_HMAC:
	case RTE_CRYPTO_AUTH_MD5:
		auth_type = ROC_SE_MD5_TYPE;
		break;
	case RTE_CRYPTO_AUTH_KASUMI_F9:
		auth_type = ROC_SE_KASUMI_F9_ECB;
		/*
		 * Indicate that direction needs to be taken out
		 * from the end of the src data
		 */
		zsk_flag = ROC_SE_K_F9;
		break;
	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
		auth_type = ROC_SE_SNOW3G_UIA2;
		zsk_flag = ROC_SE_ZS_IA;
		break;
	case RTE_CRYPTO_AUTH_ZUC_EIA3:
		auth_type = ROC_SE_ZUC_EIA3;
		zsk_flag = ROC_SE_ZS_IA;
		break;
	case RTE_CRYPTO_AUTH_NULL:
		auth_type = 0;
		is_null = 1;
		break;
	case RTE_CRYPTO_AUTH_AES_CMAC:
		auth_type = ROC_SE_AES_CMAC_EIA2;
		zsk_flag = ROC_SE_ZS_IA;
		break;
	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
	case RTE_CRYPTO_AUTH_AES_CBC_MAC:
		plt_dp_err("Crypto: Unsupported hash algo %u", a_form->algo);
		return -1;
	default:
		plt_dp_err("Crypto: Undefined Hash algo %u specified",
			   a_form->algo);
		return -1;
	}

	sess->zsk_flag = zsk_flag;
	sess->aes_gcm = aes_gcm;
	sess->mac_len = a_form->digest_length;
	sess->is_null = is_null;

	sess->auth_iv_offset = a_form->iv.offset;
	sess->auth_iv_length = a_form->iv.length;

	if (unlikely(roc_se_auth_key_set(&sess->roc_se_ctx, auth_type,
					 a_form->key.data, a_form->key.length,
					 a_form->digest_length)))
		return -1;

	if ((auth_type >= ROC_SE_ZUC_EIA3) &&
	    (auth_type <= ROC_SE_AES_CMAC_EIA2))
		roc_se_ctx_swap(&sess->roc_se_ctx);

	return 0;
}

static __rte_always_inline int
fill_sess_gmac(struct rte_crypto_sym_xform *xform, struct cnxk_se_sess *sess)
{
	struct rte_crypto_auth_xform *a_form;
	roc_se_cipher_type enc_type = 0; /* NULL Cipher type */
	roc_se_auth_type auth_type = 0;	 /* NULL Auth type */

	a_form = &xform->auth;

	if (a_form->op == RTE_CRYPTO_AUTH_OP_GENERATE)
		sess->cpt_op |= ROC_SE_OP_ENCODE;
	else if (a_form->op == RTE_CRYPTO_AUTH_OP_VERIFY)
		sess->cpt_op |= ROC_SE_OP_DECODE;
	else {
		plt_dp_err("Unknown auth operation");
		return -1;
	}

	switch (a_form->algo) {
	case RTE_CRYPTO_AUTH_AES_GMAC:
		enc_type = ROC_SE_AES_GCM;
		auth_type = ROC_SE_GMAC_TYPE;
		break;
	default:
		plt_dp_err("Crypto: Undefined cipher algo %u specified",
			   a_form->algo);
		return -1;
	}

	sess->zsk_flag = 0;
	sess->aes_gcm = 0;
	sess->is_gmac = 1;
	sess->iv_offset = a_form->iv.offset;
	sess->iv_length = a_form->iv.length;
	sess->mac_len = a_form->digest_length;

	if (unlikely(roc_se_ciph_key_set(&sess->roc_se_ctx, enc_type,
					 a_form->key.data, a_form->key.length,
					 NULL)))
		return -1;

	if (unlikely(roc_se_auth_key_set(&sess->roc_se_ctx, auth_type, NULL, 0,
					 a_form->digest_length)))
		return -1;

	return 0;
}

static __rte_always_inline void *
alloc_op_meta(struct roc_se_buf_ptr *buf, int32_t len,
	      struct rte_mempool *cpt_meta_pool,
	      struct cpt_inflight_req *infl_req)
{
	uint8_t *mdata;

	if (unlikely(rte_mempool_get(cpt_meta_pool, (void **)&mdata) < 0))
		return NULL;

	buf->vaddr = mdata;
	buf->size = len;

	infl_req->mdata = mdata;
	infl_req->op_flags |= CPT_OP_FLAGS_METABUF;

	return mdata;
}

static __rte_always_inline uint32_t
prepare_iov_from_pkt(struct rte_mbuf *pkt, struct roc_se_iov_ptr *iovec,
		     uint32_t start_offset)
{
	uint16_t index = 0;
	void *seg_data = NULL;
	int32_t seg_size = 0;

	if (!pkt) {
		iovec->buf_cnt = 0;
		return 0;
	}

	if (!start_offset) {
		seg_data = rte_pktmbuf_mtod(pkt, void *);
		seg_size = pkt->data_len;
	} else {
		while (start_offset >= pkt->data_len) {
			start_offset -= pkt->data_len;
			pkt = pkt->next;
		}

		seg_data = rte_pktmbuf_mtod_offset(pkt, void *, start_offset);
		seg_size = pkt->data_len - start_offset;
		if (!seg_size)
			return 1;
	}

	/* first seg */
	iovec->bufs[index].vaddr = seg_data;
	iovec->bufs[index].size = seg_size;
	index++;
	pkt = pkt->next;

	while (unlikely(pkt != NULL)) {
		seg_data = rte_pktmbuf_mtod(pkt, void *);
		seg_size = pkt->data_len;
		if (!seg_size)
			break;

		iovec->bufs[index].vaddr = seg_data;
		iovec->bufs[index].size = seg_size;

		index++;

		pkt = pkt->next;
	}

	iovec->buf_cnt = index;
	return 0;
}

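/*
 * Illustrative sketch (not part of the driver): how the callers below use
 * prepare_iov_from_pkt(), backing the roc_se_iov_ptr with a stack buffer
 * sized by SRC_IOV_SIZE so that up to ROC_SE_MAX_SG_CNT mbuf segments fit.
 * Hypothetical guard macro.
 */
#ifdef CNXK_SE_IOV_DEMO
static inline int
cnxk_se_iov_demo(struct rte_mbuf *m_src)
{
	char src[SRC_IOV_SIZE];
	struct roc_se_iov_ptr *iov = (struct roc_se_iov_ptr *)src;

	if (prepare_iov_from_pkt(m_src, iov, 0))
		return -1;
	return iov->buf_cnt; /* number of segments recorded */
}
#endif
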
static __rte_always_inline void
prepare_iov_from_pkt_inplace(struct rte_mbuf *pkt,
			     struct roc_se_fc_params *param, uint32_t *flags)
{
	uint16_t index = 0;
	void *seg_data = NULL;
	uint32_t seg_size = 0;
	struct roc_se_iov_ptr *iovec;

	seg_data = rte_pktmbuf_mtod(pkt, void *);
	seg_size = pkt->data_len;

	/* first seg */
	if (likely(!pkt->next)) {
		uint32_t headroom;

		*flags |= ROC_SE_SINGLE_BUF_INPLACE;
		headroom = rte_pktmbuf_headroom(pkt);
		if (likely(headroom >= 24))
			*flags |= ROC_SE_SINGLE_BUF_HEADROOM;

		param->bufs[0].vaddr = seg_data;
		param->bufs[0].size = seg_size;
		return;
	}

	iovec = param->src_iov;
	iovec->bufs[index].vaddr = seg_data;
	iovec->bufs[index].size = seg_size;
	index++;
	pkt = pkt->next;

	while (unlikely(pkt != NULL)) {
		seg_data = rte_pktmbuf_mtod(pkt, void *);
		seg_size = pkt->data_len;

		if (!seg_size)
			break;

		iovec->bufs[index].vaddr = seg_data;
		iovec->bufs[index].size = seg_size;

		index++;

		pkt = pkt->next;
	}

	iovec->buf_cnt = index;
}

static __rte_always_inline int
fill_fc_params(struct rte_crypto_op *cop, struct cnxk_se_sess *sess,
	       struct cpt_qp_meta_info *m_info,
	       struct cpt_inflight_req *infl_req, struct cpt_inst_s *inst)
{
	struct roc_se_ctx *ctx = &sess->roc_se_ctx;
	uint8_t op_minor = ctx->template_w4.s.opcode_minor;
	struct rte_crypto_sym_op *sym_op = cop->sym;
	void *mdata = NULL;
	uint32_t mc_hash_off;
	uint32_t flags = 0;
	uint64_t d_offs, d_lens;
	struct rte_mbuf *m_src, *m_dst;
	uint8_t cpt_op = sess->cpt_op;
#ifdef CPT_ALWAYS_USE_SG_MODE
	uint8_t inplace = 0;
#else
	uint8_t inplace = 1;
#endif
	struct roc_se_fc_params fc_params;
	char src[SRC_IOV_SIZE];
	char dst[SRC_IOV_SIZE];
	uint32_t iv_buf[4];
	int ret;

	fc_params.cipher_iv_len = sess->iv_length;
	fc_params.auth_iv_len = sess->auth_iv_length;

	if (likely(sess->iv_length)) {
		flags |= ROC_SE_VALID_IV_BUF;
		fc_params.iv_buf = rte_crypto_op_ctod_offset(cop, uint8_t *,
							     sess->iv_offset);
		if (sess->aes_ctr && unlikely(sess->iv_length != 16)) {
			memcpy((uint8_t *)iv_buf,
			       rte_crypto_op_ctod_offset(cop, uint8_t *,
							 sess->iv_offset),
			       12);
			iv_buf[3] = rte_cpu_to_be_32(0x1);
			fc_params.iv_buf = iv_buf;
		}
	}

	if (sess->zsk_flag) {
		fc_params.auth_iv_buf = rte_crypto_op_ctod_offset(
			cop, uint8_t *, sess->auth_iv_offset);
		if (sess->zsk_flag != ROC_SE_ZS_EA)
			inplace = 0;
	}
	m_src = sym_op->m_src;
	m_dst = sym_op->m_dst;

	if (sess->aes_gcm || sess->chacha_poly) {
		uint8_t *salt;
		uint8_t *aad_data;
		uint16_t aad_len;

		d_offs = sym_op->aead.data.offset;
		d_lens = sym_op->aead.data.length;
		mc_hash_off =
			sym_op->aead.data.offset + sym_op->aead.data.length;

		aad_data = sym_op->aead.aad.data;
		aad_len = sess->aad_length;
		if (likely((aad_data + aad_len) ==
			   rte_pktmbuf_mtod_offset(m_src, uint8_t *,
						   sym_op->aead.data.offset))) {
			d_offs = (d_offs - aad_len) | (d_offs << 16);
			d_lens = (d_lens + aad_len) | (d_lens << 32);
		} else {
			fc_params.aad_buf.vaddr = sym_op->aead.aad.data;
			fc_params.aad_buf.size = aad_len;
			flags |= ROC_SE_VALID_AAD_BUF;
			inplace = 0;
			d_offs = d_offs << 16;
			d_lens = d_lens << 32;
		}

		salt = fc_params.iv_buf;
		if (unlikely(*(uint32_t *)salt != sess->salt)) {
			cpt_fc_salt_update(&sess->roc_se_ctx, salt);
			sess->salt = *(uint32_t *)salt;
		}
		fc_params.iv_buf = salt + 4;
		if (likely(sess->mac_len)) {
			struct rte_mbuf *m =
				(cpt_op & ROC_SE_OP_ENCODE) ? m_dst : m_src;

			if (!m)
				m = m_src;

			/* hmac immediately following data is best case */
			if (unlikely(rte_pktmbuf_mtod(m, uint8_t *) +
					     mc_hash_off !=
				     (uint8_t *)sym_op->aead.digest.data)) {
				flags |= ROC_SE_VALID_MAC_BUF;
				fc_params.mac_buf.size = sess->mac_len;
				fc_params.mac_buf.vaddr =
					sym_op->aead.digest.data;
				inplace = 0;
			}
		}
	} else {
		d_offs = sym_op->cipher.data.offset;
		d_lens = sym_op->cipher.data.length;
		mc_hash_off =
			sym_op->cipher.data.offset + sym_op->cipher.data.length;
		d_offs = (d_offs << 16) | sym_op->auth.data.offset;
		d_lens = (d_lens << 32) | sym_op->auth.data.length;

		if (mc_hash_off <
		    (sym_op->auth.data.offset + sym_op->auth.data.length)) {
			mc_hash_off = (sym_op->auth.data.offset +
				       sym_op->auth.data.length);
		}
		/* for gmac, salt should be updated like in gcm */
		if (unlikely(sess->is_gmac)) {
			uint8_t *salt;

			salt = fc_params.iv_buf;
			if (unlikely(*(uint32_t *)salt != sess->salt)) {
				cpt_fc_salt_update(&sess->roc_se_ctx, salt);
				sess->salt = *(uint32_t *)salt;
			}
			fc_params.iv_buf = salt + 4;
		}
		if (likely(sess->mac_len)) {
			struct rte_mbuf *m;

			m = (cpt_op & ROC_SE_OP_ENCODE) ? m_dst : m_src;
			if (!m)
				m = m_src;

			/* hmac immediately following data is best case */
			if (!(op_minor & ROC_SE_FC_MINOR_OP_HMAC_FIRST) &&
			    (unlikely(rte_pktmbuf_mtod(m, uint8_t *) +
					      mc_hash_off !=
				      (uint8_t *)sym_op->auth.digest.data))) {
				flags |= ROC_SE_VALID_MAC_BUF;
				fc_params.mac_buf.size = sess->mac_len;
				fc_params.mac_buf.vaddr =
					sym_op->auth.digest.data;
				inplace = 0;
			}
		}
	}
	fc_params.ctx_buf.vaddr = &sess->roc_se_ctx;

	if (!(op_minor & ROC_SE_FC_MINOR_OP_HMAC_FIRST) &&
	    unlikely(sess->is_null || sess->cpt_op == ROC_SE_OP_DECODE))
		inplace = 0;

	if (likely(!m_dst && inplace)) {
		/* Case of single buffer without AAD buf or
		 * separate mac buf in place and
		 * not air crypto
		 */
		fc_params.dst_iov = fc_params.src_iov = (void *)src;

		prepare_iov_from_pkt_inplace(m_src, &fc_params, &flags);

	} else {
		/* Out of place processing */
		fc_params.src_iov = (void *)src;
		fc_params.dst_iov = (void *)dst;

		/* Store SG I/O in the api for reuse */
		if (prepare_iov_from_pkt(m_src, fc_params.src_iov, 0)) {
			plt_dp_err("Prepare src iov failed");
			ret = -EINVAL;
			goto err_exit;
		}

		if (unlikely(m_dst != NULL)) {
			uint32_t pkt_len;

			/* Try to make room as much as src has */
			pkt_len = rte_pktmbuf_pkt_len(m_dst);

			if (unlikely(pkt_len < rte_pktmbuf_pkt_len(m_src))) {
				pkt_len = rte_pktmbuf_pkt_len(m_src) - pkt_len;
				if (!rte_pktmbuf_append(m_dst, pkt_len)) {
					plt_dp_err("Not enough space in "
						   "m_dst %p, need %u"
						   " more",
						   m_dst, pkt_len);
					ret = -EINVAL;
					goto err_exit;
				}
			}

			if (prepare_iov_from_pkt(m_dst, fc_params.dst_iov,
						 0)) {
				plt_dp_err("Prepare dst iov failed for "
					   "m_dst %p",
					   m_dst);
				ret = -EINVAL;
				goto err_exit;
			}
		} else {
			fc_params.dst_iov = (void *)src;
		}
	}

	if (unlikely(!((flags & ROC_SE_SINGLE_BUF_INPLACE) &&
		       (flags & ROC_SE_SINGLE_BUF_HEADROOM) &&
		       ((ctx->fc_type == ROC_SE_FC_GEN) ||
			(ctx->fc_type == ROC_SE_PDCP))))) {
		mdata = alloc_op_meta(&fc_params.meta_buf, m_info->mlen,
				      m_info->pool, infl_req);
		if (mdata == NULL) {
			plt_dp_err("Error allocating meta buffer for request");
			return -ENOMEM;
		}
	}

	/* Finally prepare the instruction */
	if (cpt_op & ROC_SE_OP_ENCODE)
		ret = cpt_fc_enc_hmac_prep(flags, d_offs, d_lens, &fc_params,
					   inst);
	else
		ret = cpt_fc_dec_hmac_prep(flags, d_offs, d_lens, &fc_params,
					   inst);

	if (unlikely(ret)) {
		plt_dp_err("Preparing request failed due to bad input arg");
		goto free_mdata_and_exit;
	}

	return 0;

free_mdata_and_exit:
	if (infl_req->op_flags & CPT_OP_FLAGS_METABUF)
		rte_mempool_put(m_info->pool, infl_req->mdata);
err_exit:
	return ret;
}

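/*
 * Illustrative sketch (not part of the driver): the packing of the cipher
 * and auth ranges into d_offs/d_lens used by fill_fc_params() above. The
 * cipher offset/length occupy the upper halves and the auth offset/length
 * the lower halves, matching the ROC_SE_ENCR_OFFSET()/ROC_SE_AUTH_OFFSET()
 * style accessors. Hypothetical guard macro.
 */
#ifdef CNXK_SE_DLEN_PACK_DEMO
static inline void
cnxk_se_dlen_pack_demo(struct rte_crypto_sym_op *sym_op, uint64_t *d_offs,
		       uint64_t *d_lens)
{
	*d_offs = ((uint64_t)sym_op->cipher.data.offset << 16) |
		  sym_op->auth.data.offset;
	*d_lens = ((uint64_t)sym_op->cipher.data.length << 32) |
		  sym_op->auth.data.length;
}
#endif
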
static __rte_always_inline void
compl_auth_verify(struct rte_crypto_op *op, uint8_t *gen_mac, uint64_t mac_len)
{
	uint8_t *mac;
	struct rte_crypto_sym_op *sym_op = op->sym;

	if (sym_op->auth.digest.data)
		mac = sym_op->auth.digest.data;
	else
		mac = rte_pktmbuf_mtod_offset(sym_op->m_src, uint8_t *,
					      sym_op->auth.data.length +
						      sym_op->auth.data.offset);
	if (!mac) {
		op->status = RTE_CRYPTO_OP_STATUS_ERROR;
		return;
	}

	if (memcmp(mac, gen_mac, mac_len))
		op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
	else
		op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
}

static __rte_always_inline void
find_kasumif9_direction_and_length(uint8_t *src, uint32_t counter_num_bytes,
				   uint32_t *addr_length_in_bits,
				   uint8_t *addr_direction)
{
	uint8_t found = 0;
	int8_t last_byte;
	uint32_t pos;

	while (!found && counter_num_bytes > 0) {
		counter_num_bytes--;
		if (src[counter_num_bytes] == 0x00)
			continue;
		pos = rte_bsf32(src[counter_num_bytes]);
		if (pos == 7) {
			if (likely(counter_num_bytes > 0)) {
				last_byte = src[counter_num_bytes - 1];
				*addr_direction = last_byte & 0x1;
				*addr_length_in_bits =
					counter_num_bytes * 8 - 1;
			}
		} else {
			last_byte = src[counter_num_bytes];
			*addr_direction = (last_byte >> (pos + 1)) & 0x1;
			*addr_length_in_bits =
				counter_num_bytes * 8 + (8 - (pos + 2));
		}
		found = 1;
	}
}

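/*
 * Illustrative sketch (not part of the driver): KASUMI F9 input ends with a
 * direction bit followed by a '1' stop bit and zero padding, so the scan
 * above walks backwards to the last non-zero byte, locates the stop bit and
 * recovers both the direction and the true length in bits. Hypothetical
 * guard macro.
 */
#ifdef CNXK_SE_KASUMI_F9_DEMO
static inline void
cnxk_se_kasumi_f9_demo(void)
{
	/* 2 data bytes, then direction=1 and the stop bit in byte 2 MSBs */
	uint8_t src[3] = {0xAA, 0x55, 0xC0}; /* 0xC0 == 0b11000000 */
	uint32_t length_in_bits = 0;
	uint8_t direction = 0;

	find_kasumif9_direction_and_length(src, 3, &length_in_bits,
					   &direction);
	/* direction == 1, length_in_bits == 16 */
}
#endif
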
/*
 * This handles all auth-only cases except AES_GMAC.
 */
static __rte_always_inline int
fill_digest_params(struct rte_crypto_op *cop, struct cnxk_se_sess *sess,
		   struct cpt_qp_meta_info *m_info,
		   struct cpt_inflight_req *infl_req, struct cpt_inst_s *inst)
{
	uint32_t space = 0;
	struct rte_crypto_sym_op *sym_op = cop->sym;
	void *mdata;
	uint32_t auth_range_off;
	uint32_t flags = 0;
	uint64_t d_offs = 0, d_lens;
	struct rte_mbuf *m_src, *m_dst;
	uint16_t auth_op = sess->cpt_op & ROC_SE_OP_AUTH_MASK;
	uint16_t mac_len = sess->mac_len;
	struct roc_se_fc_params params;
	char src[SRC_IOV_SIZE];
	uint8_t iv_buf[16];
	int ret;

	memset(&params, 0, sizeof(struct roc_se_fc_params));

	m_src = sym_op->m_src;

	mdata = alloc_op_meta(&params.meta_buf, m_info->mlen, m_info->pool,
			      infl_req);
	if (mdata == NULL) {
		ret = -ENOMEM;
		goto err_exit;
	}

	auth_range_off = sym_op->auth.data.offset;

	flags = ROC_SE_VALID_MAC_BUF;
	params.src_iov = (void *)src;
	if (unlikely(sess->zsk_flag)) {
		/*
		 * Since for Zuc, Kasumi, Snow3g offsets are in bits
		 * we will send pass through even for auth only case,
		 * let MC take care
		 */
		d_offs = auth_range_off;
		auth_range_off = 0;
		params.auth_iv_len = sess->auth_iv_length;
		params.auth_iv_buf = rte_crypto_op_ctod_offset(
			cop, uint8_t *, sess->auth_iv_offset);
		if (sess->zsk_flag == ROC_SE_K_F9) {
			uint32_t length_in_bits, num_bytes;
			uint8_t *src, direction = 0;

			memcpy(iv_buf,
			       rte_pktmbuf_mtod(cop->sym->m_src, uint8_t *), 8);
			/*
			 * This is kasumi f9, take direction from
			 * the end of the src data
			 */
			length_in_bits = cop->sym->auth.data.length;
			num_bytes = (length_in_bits >> 3);
			src = rte_pktmbuf_mtod(cop->sym->m_src, uint8_t *);
			find_kasumif9_direction_and_length(
				src, num_bytes, &length_in_bits, &direction);
			length_in_bits -= 64;
			cop->sym->auth.data.offset += 64;
			d_offs = cop->sym->auth.data.offset;
			auth_range_off = d_offs / 8;
			cop->sym->auth.data.length = length_in_bits;

			/* Store it at end of auth iv */
			iv_buf[8] = direction;
			params.auth_iv_buf = iv_buf;
		}
	}

	d_lens = sym_op->auth.data.length;

	params.ctx_buf.vaddr = &sess->roc_se_ctx;

	if (auth_op == ROC_SE_OP_AUTH_GENERATE) {
		if (sym_op->auth.digest.data) {
			/*
			 * Digest to be generated
			 * in separate buffer
			 */
			params.mac_buf.size = sess->mac_len;
			params.mac_buf.vaddr = sym_op->auth.digest.data;
		} else {
			uint32_t off = sym_op->auth.data.offset +
				       sym_op->auth.data.length;
			int32_t dlen, space;

			m_dst = sym_op->m_dst ? sym_op->m_dst : sym_op->m_src;
			dlen = rte_pktmbuf_pkt_len(m_dst);

			space = off + mac_len - dlen;
			if (space > 0)
				if (!rte_pktmbuf_append(m_dst, space)) {
					plt_dp_err("Failed to extend "
						   "mbuf by %uB", space);
					ret = -EINVAL;
					goto free_mdata_and_exit;
				}

			params.mac_buf.vaddr =
				rte_pktmbuf_mtod_offset(m_dst, void *, off);
			params.mac_buf.size = mac_len;
		}
	} else {
		uint64_t *op = mdata;

		/* Need space for storing generated mac */
		space += 2 * sizeof(uint64_t);

		params.mac_buf.vaddr = (uint8_t *)mdata + space;
		params.mac_buf.size = mac_len;
		space += RTE_ALIGN_CEIL(mac_len, 8);
		op[0] = (uintptr_t)params.mac_buf.vaddr;
		op[1] = mac_len;
		infl_req->op_flags |= CPT_OP_FLAGS_AUTH_VERIFY;
	}

	params.meta_buf.vaddr = (uint8_t *)mdata + space;
	params.meta_buf.size -= space;

	/* Out of place processing */
	params.src_iov = (void *)src;

	/* Store SG I/O in the api for reuse */
	if (prepare_iov_from_pkt(m_src, params.src_iov, auth_range_off)) {
		plt_dp_err("Prepare src iov failed");
		ret = -EINVAL;
		goto free_mdata_and_exit;
	}

	ret = cpt_fc_enc_hmac_prep(flags, d_offs, d_lens, &params, inst);
	if (ret)
		goto free_mdata_and_exit;

	return 0;

free_mdata_and_exit:
	if (infl_req->op_flags & CPT_OP_FLAGS_METABUF)
		rte_mempool_put(m_info->pool, infl_req->mdata);
err_exit:
	return ret;
}

#endif /*_CNXK_SE_H_ */