1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(C) 2021 Marvell.
9 #include "cnxk_cryptodev.h"
10 #include "cnxk_cryptodev_ops.h"
12 #define SRC_IOV_SIZE \
13 (sizeof(struct roc_se_iov_ptr) + \
14 (sizeof(struct roc_se_buf_ptr) * ROC_SE_MAX_SG_CNT))
15 #define DST_IOV_SIZE \
16 (sizeof(struct roc_se_iov_ptr) + \
17 (sizeof(struct roc_se_buf_ptr) * ROC_SE_MAX_SG_CNT))
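/*
 * Note: SRC_IOV_SIZE/DST_IOV_SIZE size the on-stack scratch areas (e.g.
 * "char src[SRC_IOV_SIZE]" in fill_fc_params() below) that are cast to
 * struct roc_se_iov_ptr: one iov header followed by up to
 * ROC_SE_MAX_SG_CNT buffer pointers.
 */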
21 uint16_t zsk_flag : 4;
24 uint16_t chacha_poly : 1;
31 uint8_t auth_iv_length;
33 uint16_t auth_iv_offset;
36 struct roc_se_ctx roc_se_ctx;
37 } __rte_cache_aligned;
39 static __rte_always_inline int
40 cpt_mac_len_verify(struct rte_crypto_auth_xform *auth)
42 uint16_t mac_len = auth->digest_length;
46 case RTE_CRYPTO_AUTH_MD5:
47 case RTE_CRYPTO_AUTH_MD5_HMAC:
48 ret = (mac_len == 16) ? 0 : -1;
50 case RTE_CRYPTO_AUTH_SHA1:
51 case RTE_CRYPTO_AUTH_SHA1_HMAC:
52 ret = (mac_len == 20) ? 0 : -1;
54 case RTE_CRYPTO_AUTH_SHA224:
55 case RTE_CRYPTO_AUTH_SHA224_HMAC:
56 ret = (mac_len == 28) ? 0 : -1;
58 case RTE_CRYPTO_AUTH_SHA256:
59 case RTE_CRYPTO_AUTH_SHA256_HMAC:
60 ret = (mac_len == 32) ? 0 : -1;
62 case RTE_CRYPTO_AUTH_SHA384:
63 case RTE_CRYPTO_AUTH_SHA384_HMAC:
64 ret = (mac_len == 48) ? 0 : -1;
66 case RTE_CRYPTO_AUTH_SHA512:
67 case RTE_CRYPTO_AUTH_SHA512_HMAC:
68 ret = (mac_len == 64) ? 0 : -1;
70 case RTE_CRYPTO_AUTH_NULL:
80 static __rte_always_inline void
81 cpt_fc_salt_update(struct roc_se_ctx *se_ctx, uint8_t *salt)
83 struct roc_se_context *fctx = &se_ctx->se_ctx.fctx;
84 memcpy(fctx->enc.encr_iv, salt, 4);
87 static __rte_always_inline uint32_t
88 fill_sg_comp(struct roc_se_sglist_comp *list, uint32_t i, phys_addr_t dma_addr,
91 struct roc_se_sglist_comp *to = &list[i >> 2];
93 to->u.s.len[i % 4] = rte_cpu_to_be_16(size);
94 to->ptr[i % 4] = rte_cpu_to_be_64(dma_addr);
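/*
 * Illustration of the packing used above: each struct roc_se_sglist_comp
 * holds four big-endian (len, ptr) pairs, so entry 'i' lands in component
 * i >> 2, slot i % 4 (e.g. i = 5 goes to the second component, slot 1).
 */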
99 static __rte_always_inline uint32_t
100 fill_sg_comp_from_buf(struct roc_se_sglist_comp *list, uint32_t i,
101 struct roc_se_buf_ptr *from)
103 struct roc_se_sglist_comp *to = &list[i >> 2];
105 to->u.s.len[i % 4] = rte_cpu_to_be_16(from->size);
106 to->ptr[i % 4] = rte_cpu_to_be_64((uint64_t)from->vaddr);
111 static __rte_always_inline uint32_t
112 fill_sg_comp_from_buf_min(struct roc_se_sglist_comp *list, uint32_t i,
113 struct roc_se_buf_ptr *from, uint32_t *psize)
115 struct roc_se_sglist_comp *to = &list[i >> 2];
116 uint32_t size = *psize;
119 e_len = (size > from->size) ? from->size : size;
120 to->u.s.len[i % 4] = rte_cpu_to_be_16(e_len);
121 to->ptr[i % 4] = rte_cpu_to_be_64((uint64_t)from->vaddr);
128 * This fills the SG I/O list expected by the microcode
129 * from the IOV provided by the user.
131 static __rte_always_inline uint32_t
132 fill_sg_comp_from_iov(struct roc_se_sglist_comp *list, uint32_t i,
133 struct roc_se_iov_ptr *from, uint32_t from_offset,
134 uint32_t *psize, struct roc_se_buf_ptr *extra_buf,
135 uint32_t extra_offset)
138 uint32_t extra_len = extra_buf ? extra_buf->size : 0;
139 uint32_t size = *psize;
140 struct roc_se_buf_ptr *bufs;
143 for (j = 0; (j < from->buf_cnt) && size; j++) {
146 struct roc_se_sglist_comp *to = &list[i >> 2];
148 if (unlikely(from_offset)) {
149 if (from_offset >= bufs[j].size) {
150 from_offset -= bufs[j].size;
153 e_vaddr = (uint64_t)bufs[j].vaddr + from_offset;
154 e_len = (size > (bufs[j].size - from_offset)) ?
155 (bufs[j].size - from_offset) :
159 e_vaddr = (uint64_t)bufs[j].vaddr;
160 e_len = (size > bufs[j].size) ? bufs[j].size : size;
163 to->u.s.len[i % 4] = rte_cpu_to_be_16(e_len);
164 to->ptr[i % 4] = rte_cpu_to_be_64(e_vaddr);
166 if (extra_len && (e_len >= extra_offset)) {
167 /* Break the data at given offset */
168 uint32_t next_len = e_len - extra_offset;
169 uint64_t next_vaddr = e_vaddr + extra_offset;
174 e_len = extra_offset;
176 to->u.s.len[i % 4] = rte_cpu_to_be_16(e_len);
179 extra_len = RTE_MIN(extra_len, size);
180 /* Insert extra data ptr */
185 rte_cpu_to_be_16(extra_len);
186 to->ptr[i % 4] = rte_cpu_to_be_64(
187 (uint64_t)extra_buf->vaddr);
191 next_len = RTE_MIN(next_len, size);
192 /* insert the rest of the data */
196 to->u.s.len[i % 4] = rte_cpu_to_be_16(next_len);
197 to->ptr[i % 4] = rte_cpu_to_be_64(next_vaddr);
206 extra_offset -= size;
214 static __rte_always_inline int
215 cpt_digest_gen_prep(uint32_t flags, uint64_t d_lens,
216 struct roc_se_fc_params *params, struct cpt_inst_s *inst)
218 void *m_vaddr = params->meta_buf.vaddr;
220 uint16_t data_len, mac_len, key_len;
221 roc_se_auth_type hash_type;
222 struct roc_se_ctx *ctx;
223 struct roc_se_sglist_comp *gather_comp;
224 struct roc_se_sglist_comp *scatter_comp;
226 uint32_t g_size_bytes, s_size_bytes;
227 union cpt_inst_w4 cpt_inst_w4;
229 ctx = params->ctx_buf.vaddr;
231 hash_type = ctx->hash_type;
232 mac_len = ctx->mac_len;
233 key_len = ctx->auth_key_len;
234 data_len = ROC_SE_AUTH_DLEN(d_lens);
237 cpt_inst_w4.s.opcode_minor = 0;
238 cpt_inst_w4.s.param2 = ((uint16_t)hash_type << 8);
240 cpt_inst_w4.s.opcode_major =
241 ROC_SE_MAJOR_OP_HMAC | ROC_SE_DMA_MODE;
242 cpt_inst_w4.s.param1 = key_len;
243 cpt_inst_w4.s.dlen = data_len + RTE_ALIGN_CEIL(key_len, 8);
245 cpt_inst_w4.s.opcode_major =
246 ROC_SE_MAJOR_OP_HASH | ROC_SE_DMA_MODE;
247 cpt_inst_w4.s.param1 = 0;
248 cpt_inst_w4.s.dlen = data_len;
251 /* Only a NULL-auth (no cipher) session enters this branch */
252 if (unlikely(!hash_type && !ctx->enc_cipher)) {
253 cpt_inst_w4.s.opcode_major = ROC_SE_MAJOR_OP_MISC;
254 /* Minor op is passthrough */
255 cpt_inst_w4.s.opcode_minor = 0x03;
256 /* Send out completion code only */
257 cpt_inst_w4.s.param2 = 0x1;
260 /* DPTR has SG list */
263 ((uint16_t *)in_buffer)[0] = 0;
264 ((uint16_t *)in_buffer)[1] = 0;
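/*
 * The first 8 bytes of in_buffer form the SG list header: 16-bit words
 * [0] and [1] are reserved (zeroed above), while words [2] and [3] are
 * filled below with the gather and scatter component counts in
 * big-endian form.
 */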
266 /* TODO Add error check if space will be sufficient */
267 gather_comp = (struct roc_se_sglist_comp *)((uint8_t *)m_vaddr + 8);
276 uint64_t k_vaddr = (uint64_t)ctx->auth_key;
278 i = fill_sg_comp(gather_comp, i, k_vaddr,
279 RTE_ALIGN_CEIL(key_len, 8));
285 i = fill_sg_comp_from_iov(gather_comp, i, params->src_iov, 0,
287 if (unlikely(size)) {
288 plt_dp_err("Insufficient dst IOV size, short by %dB",
294 * A zero-length data gather pointer still needs to
295 * be supported in the hash & HMAC case.
299 ((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
300 g_size_bytes = ((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);
307 scatter_comp = (struct roc_se_sglist_comp *)((uint8_t *)gather_comp +
310 if (flags & ROC_SE_VALID_MAC_BUF) {
311 if (unlikely(params->mac_buf.size < mac_len)) {
312 plt_dp_err("Insufficient MAC size");
317 i = fill_sg_comp_from_buf_min(scatter_comp, i, &params->mac_buf,
321 i = fill_sg_comp_from_iov(scatter_comp, i, params->src_iov,
322 data_len, &size, NULL, 0);
323 if (unlikely(size)) {
324 plt_dp_err("Insufficient dst IOV size, short by %dB",
330 ((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
331 s_size_bytes = ((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);
333 size = g_size_bytes + s_size_bytes + ROC_SE_SG_LIST_HDR_SIZE;
335 /* This is DPTR len in case of SG mode */
336 cpt_inst_w4.s.dlen = size;
338 inst->dptr = (uint64_t)in_buffer;
339 inst->w4.u64 = cpt_inst_w4.u64;
344 static __rte_always_inline int
345 cpt_enc_hmac_prep(uint32_t flags, uint64_t d_offs, uint64_t d_lens,
346 struct roc_se_fc_params *fc_params, struct cpt_inst_s *inst)
348 uint32_t iv_offset = 0;
349 int32_t inputlen, outputlen, enc_dlen, auth_dlen;
350 struct roc_se_ctx *se_ctx;
351 uint32_t cipher_type, hash_type;
352 uint32_t mac_len, size;
354 struct roc_se_buf_ptr *aad_buf = NULL;
355 uint32_t encr_offset, auth_offset;
356 uint32_t encr_data_len, auth_data_len, aad_len = 0;
357 uint32_t passthrough_len = 0;
358 union cpt_inst_w4 cpt_inst_w4;
362 encr_offset = ROC_SE_ENCR_OFFSET(d_offs);
363 auth_offset = ROC_SE_AUTH_OFFSET(d_offs);
364 encr_data_len = ROC_SE_ENCR_DLEN(d_lens);
365 auth_data_len = ROC_SE_AUTH_DLEN(d_lens);
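/*
 * As packed by fill_fc_params() below: d_offs carries the cipher
 * (encr) offset in bits [31:16] and the auth offset in bits [15:0];
 * d_lens carries the cipher length in its upper 32 bits and the auth
 * length in its lower 32 bits. The ROC_SE_* accessors above unpack
 * those fields.
 */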
366 if (unlikely(flags & ROC_SE_VALID_AAD_BUF)) {
367 /* We don't support both AAD and auth data separately */
370 aad_len = fc_params->aad_buf.size;
371 aad_buf = &fc_params->aad_buf;
373 se_ctx = fc_params->ctx_buf.vaddr;
374 cipher_type = se_ctx->enc_cipher;
375 hash_type = se_ctx->hash_type;
376 mac_len = se_ctx->mac_len;
377 op_minor = se_ctx->template_w4.s.opcode_minor;
379 if (unlikely(!(flags & ROC_SE_VALID_IV_BUF))) {
381 iv_offset = ROC_SE_ENCR_IV_OFFSET(d_offs);
384 if (unlikely(flags & ROC_SE_VALID_AAD_BUF)) {
386 * When AAD is given, the data before encr_offset is passed through.
387 * Since AAD is provided as a separate pointer rather than as an
388 * offset, this is a special case: the input has to be split into
389 * passthrough + encr_data, with the AAD inserted in between.
391 if (hash_type != ROC_SE_GMAC_TYPE) {
392 passthrough_len = encr_offset;
393 auth_offset = passthrough_len + iv_len;
394 encr_offset = passthrough_len + aad_len + iv_len;
395 auth_data_len = aad_len + encr_data_len;
397 passthrough_len = 16 + aad_len;
398 auth_offset = passthrough_len + iv_len;
399 auth_data_len = aad_len;
402 encr_offset += iv_len;
403 auth_offset += iv_len;
407 cpt_inst_w4.s.opcode_major = ROC_SE_MAJOR_OP_FC;
408 cpt_inst_w4.s.opcode_minor = ROC_SE_FC_MINOR_OP_ENCRYPT;
409 cpt_inst_w4.s.opcode_minor |= (uint64_t)op_minor;
411 if (hash_type == ROC_SE_GMAC_TYPE) {
416 auth_dlen = auth_offset + auth_data_len;
417 enc_dlen = encr_data_len + encr_offset;
418 if (unlikely(encr_data_len & 0xf)) {
419 if ((cipher_type == ROC_SE_DES3_CBC) ||
420 (cipher_type == ROC_SE_DES3_ECB))
422 RTE_ALIGN_CEIL(encr_data_len, 8) + encr_offset;
423 else if (likely((cipher_type == ROC_SE_AES_CBC) ||
424 (cipher_type == ROC_SE_AES_ECB)))
426 RTE_ALIGN_CEIL(encr_data_len, 8) + encr_offset;
429 if (unlikely(auth_dlen > enc_dlen)) {
430 inputlen = auth_dlen;
431 outputlen = auth_dlen + mac_len;
434 outputlen = enc_dlen + mac_len;
437 if (op_minor & ROC_SE_FC_MINOR_OP_HMAC_FIRST)
438 outputlen = enc_dlen;
441 cpt_inst_w4.s.param1 = encr_data_len;
442 cpt_inst_w4.s.param2 = auth_data_len;
445 * On cn9k and cn10k the IV and offset control word are not part of
446 * the instruction and must instead be placed in the data buffer, so
447 * we check for sufficient headroom and only then use the direct
448 * mode processing.
450 if (likely((flags & ROC_SE_SINGLE_BUF_INPLACE) &&
451 (flags & ROC_SE_SINGLE_BUF_HEADROOM))) {
452 void *dm_vaddr = fc_params->bufs[0].vaddr;
454 /* Use Direct mode */
457 (uint8_t *)dm_vaddr - ROC_SE_OFF_CTRL_LEN - iv_len;
460 inst->dptr = (uint64_t)offset_vaddr;
462 /* RPTR should just exclude offset control word */
463 inst->rptr = (uint64_t)dm_vaddr - iv_len;
465 cpt_inst_w4.s.dlen = inputlen + ROC_SE_OFF_CTRL_LEN;
467 if (likely(iv_len)) {
468 uint64_t *dest = (uint64_t *)((uint8_t *)offset_vaddr +
469 ROC_SE_OFF_CTRL_LEN);
470 uint64_t *src = fc_params->iv_buf;
476 void *m_vaddr = fc_params->meta_buf.vaddr;
477 uint32_t i, g_size_bytes, s_size_bytes;
478 struct roc_se_sglist_comp *gather_comp;
479 struct roc_se_sglist_comp *scatter_comp;
482 /* This falls under strict SG mode */
483 offset_vaddr = m_vaddr;
484 size = ROC_SE_OFF_CTRL_LEN + iv_len;
486 m_vaddr = (uint8_t *)m_vaddr + size;
488 cpt_inst_w4.s.opcode_major |= (uint64_t)ROC_SE_DMA_MODE;
490 if (likely(iv_len)) {
491 uint64_t *dest = (uint64_t *)((uint8_t *)offset_vaddr +
492 ROC_SE_OFF_CTRL_LEN);
493 uint64_t *src = fc_params->iv_buf;
498 /* DPTR has SG list */
501 ((uint16_t *)in_buffer)[0] = 0;
502 ((uint16_t *)in_buffer)[1] = 0;
504 /* TODO Add error check if space will be sufficient */
506 (struct roc_se_sglist_comp *)((uint8_t *)m_vaddr + 8);
514 /* Offset control word that includes iv */
515 i = fill_sg_comp(gather_comp, i, (uint64_t)offset_vaddr,
516 ROC_SE_OFF_CTRL_LEN + iv_len);
519 size = inputlen - iv_len;
521 uint32_t aad_offset = aad_len ? passthrough_len : 0;
523 if (unlikely(flags & ROC_SE_SINGLE_BUF_INPLACE)) {
524 i = fill_sg_comp_from_buf_min(
525 gather_comp, i, fc_params->bufs, &size);
527 i = fill_sg_comp_from_iov(
528 gather_comp, i, fc_params->src_iov, 0,
529 &size, aad_buf, aad_offset);
532 if (unlikely(size)) {
533 plt_dp_err("Insufficient buffer space,"
539 ((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
541 ((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);
544 * Output Scatter list
548 (struct roc_se_sglist_comp *)((uint8_t *)gather_comp +
552 if (likely(iv_len)) {
553 i = fill_sg_comp(scatter_comp, i,
554 (uint64_t)offset_vaddr +
559 /* output data or output data + digest */
560 if (unlikely(flags & ROC_SE_VALID_MAC_BUF)) {
561 size = outputlen - iv_len - mac_len;
563 uint32_t aad_offset =
564 aad_len ? passthrough_len : 0;
567 ROC_SE_SINGLE_BUF_INPLACE)) {
568 i = fill_sg_comp_from_buf_min(
570 fc_params->bufs, &size);
572 i = fill_sg_comp_from_iov(
574 fc_params->dst_iov, 0, &size,
575 aad_buf, aad_offset);
577 if (unlikely(size)) {
578 plt_dp_err("Insufficient buffer"
579 " space, size %d needed",
586 i = fill_sg_comp_from_buf(scatter_comp, i,
587 &fc_params->mac_buf);
590 /* Output including mac */
591 size = outputlen - iv_len;
593 uint32_t aad_offset =
594 aad_len ? passthrough_len : 0;
597 ROC_SE_SINGLE_BUF_INPLACE)) {
598 i = fill_sg_comp_from_buf_min(
600 fc_params->bufs, &size);
602 i = fill_sg_comp_from_iov(
604 fc_params->dst_iov, 0, &size,
605 aad_buf, aad_offset);
607 if (unlikely(size)) {
608 plt_dp_err("Insufficient buffer"
609 " space, size %d needed",
615 ((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
617 ((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);
619 size = g_size_bytes + s_size_bytes + ROC_SE_SG_LIST_HDR_SIZE;
621 /* This is DPTR len in case of SG mode */
622 cpt_inst_w4.s.dlen = size;
624 inst->dptr = (uint64_t)in_buffer;
627 if (unlikely((encr_offset >> 16) || (iv_offset >> 8) ||
628 (auth_offset >> 8))) {
629 plt_dp_err("Offset not supported");
630 plt_dp_err("enc_offset: %d", encr_offset);
631 plt_dp_err("iv_offset : %d", iv_offset);
632 plt_dp_err("auth_offset: %d", auth_offset);
636 *(uint64_t *)offset_vaddr = rte_cpu_to_be_64(
637 ((uint64_t)encr_offset << 16) | ((uint64_t)iv_offset << 8) |
638 ((uint64_t)auth_offset));
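/*
 * Offset control word layout (as composed above, stored big-endian):
 * bits [31:16] hold encr_offset, bits [15:8] iv_offset and bits [7:0]
 * auth_offset; the range checks above ensure each field fits.
 */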
640 inst->w4.u64 = cpt_inst_w4.u64;
644 static __rte_always_inline int
645 cpt_dec_hmac_prep(uint32_t flags, uint64_t d_offs, uint64_t d_lens,
646 struct roc_se_fc_params *fc_params, struct cpt_inst_s *inst)
648 uint32_t iv_offset = 0, size;
649 int32_t inputlen, outputlen, enc_dlen, auth_dlen;
650 struct roc_se_ctx *se_ctx;
651 int32_t hash_type, mac_len;
653 struct roc_se_buf_ptr *aad_buf = NULL;
654 uint32_t encr_offset, auth_offset;
655 uint32_t encr_data_len, auth_data_len, aad_len = 0;
656 uint32_t passthrough_len = 0;
657 union cpt_inst_w4 cpt_inst_w4;
661 encr_offset = ROC_SE_ENCR_OFFSET(d_offs);
662 auth_offset = ROC_SE_AUTH_OFFSET(d_offs);
663 encr_data_len = ROC_SE_ENCR_DLEN(d_lens);
664 auth_data_len = ROC_SE_AUTH_DLEN(d_lens);
666 if (unlikely(flags & ROC_SE_VALID_AAD_BUF)) {
667 /* We don't support both AAD and auth data separately */
670 aad_len = fc_params->aad_buf.size;
671 aad_buf = &fc_params->aad_buf;
674 se_ctx = fc_params->ctx_buf.vaddr;
675 hash_type = se_ctx->hash_type;
676 mac_len = se_ctx->mac_len;
677 op_minor = se_ctx->template_w4.s.opcode_minor;
679 if (unlikely(!(flags & ROC_SE_VALID_IV_BUF))) {
681 iv_offset = ROC_SE_ENCR_IV_OFFSET(d_offs);
684 if (unlikely(flags & ROC_SE_VALID_AAD_BUF)) {
686 * When AAD is given, the data before encr_offset is passed through.
687 * Since AAD is provided as a separate pointer rather than as an
688 * offset, this is a special case: the input has to be split into
689 * passthrough + encr_data, with the AAD inserted in between.
691 if (hash_type != ROC_SE_GMAC_TYPE) {
692 passthrough_len = encr_offset;
693 auth_offset = passthrough_len + iv_len;
694 encr_offset = passthrough_len + aad_len + iv_len;
695 auth_data_len = aad_len + encr_data_len;
697 passthrough_len = 16 + aad_len;
698 auth_offset = passthrough_len + iv_len;
699 auth_data_len = aad_len;
702 encr_offset += iv_len;
703 auth_offset += iv_len;
707 cpt_inst_w4.s.opcode_major = ROC_SE_MAJOR_OP_FC;
708 cpt_inst_w4.s.opcode_minor = ROC_SE_FC_MINOR_OP_DECRYPT;
709 cpt_inst_w4.s.opcode_minor |= (uint64_t)op_minor;
711 if (hash_type == ROC_SE_GMAC_TYPE) {
716 enc_dlen = encr_offset + encr_data_len;
717 auth_dlen = auth_offset + auth_data_len;
719 if (auth_dlen > enc_dlen) {
720 inputlen = auth_dlen + mac_len;
721 outputlen = auth_dlen;
723 inputlen = enc_dlen + mac_len;
724 outputlen = enc_dlen;
727 if (op_minor & ROC_SE_FC_MINOR_OP_HMAC_FIRST)
728 outputlen = inputlen = enc_dlen;
730 cpt_inst_w4.s.param1 = encr_data_len;
731 cpt_inst_w4.s.param2 = auth_data_len;
734 * On cn9k and cn10k the IV and offset control word are not part of
735 * the instruction and must instead be placed in the data buffer, so
736 * we check for sufficient headroom and only then use the direct
737 * mode processing.
739 if (likely((flags & ROC_SE_SINGLE_BUF_INPLACE) &&
740 (flags & ROC_SE_SINGLE_BUF_HEADROOM))) {
741 void *dm_vaddr = fc_params->bufs[0].vaddr;
743 /* Use Direct mode */
746 (uint8_t *)dm_vaddr - ROC_SE_OFF_CTRL_LEN - iv_len;
747 inst->dptr = (uint64_t)offset_vaddr;
749 /* RPTR should just exclude offset control word */
750 inst->rptr = (uint64_t)dm_vaddr - iv_len;
752 cpt_inst_w4.s.dlen = inputlen + ROC_SE_OFF_CTRL_LEN;
754 if (likely(iv_len)) {
755 uint64_t *dest = (uint64_t *)((uint8_t *)offset_vaddr +
756 ROC_SE_OFF_CTRL_LEN);
757 uint64_t *src = fc_params->iv_buf;
763 void *m_vaddr = fc_params->meta_buf.vaddr;
764 uint32_t g_size_bytes, s_size_bytes;
765 struct roc_se_sglist_comp *gather_comp;
766 struct roc_se_sglist_comp *scatter_comp;
770 /* This falls under strict SG mode */
771 offset_vaddr = m_vaddr;
772 size = ROC_SE_OFF_CTRL_LEN + iv_len;
774 m_vaddr = (uint8_t *)m_vaddr + size;
776 cpt_inst_w4.s.opcode_major |= (uint64_t)ROC_SE_DMA_MODE;
778 if (likely(iv_len)) {
779 uint64_t *dest = (uint64_t *)((uint8_t *)offset_vaddr +
780 ROC_SE_OFF_CTRL_LEN);
781 uint64_t *src = fc_params->iv_buf;
786 /* DPTR has SG list */
789 ((uint16_t *)in_buffer)[0] = 0;
790 ((uint16_t *)in_buffer)[1] = 0;
792 /* TODO Add error check if space will be sufficient */
794 (struct roc_se_sglist_comp *)((uint8_t *)m_vaddr + 8);
801 /* Offset control word that includes iv */
802 i = fill_sg_comp(gather_comp, i, (uint64_t)offset_vaddr,
803 ROC_SE_OFF_CTRL_LEN + iv_len);
806 if (flags & ROC_SE_VALID_MAC_BUF) {
807 size = inputlen - iv_len - mac_len;
809 /* input data only */
811 ROC_SE_SINGLE_BUF_INPLACE)) {
812 i = fill_sg_comp_from_buf_min(
813 gather_comp, i, fc_params->bufs,
816 uint32_t aad_offset =
817 aad_len ? passthrough_len : 0;
819 i = fill_sg_comp_from_iov(
821 fc_params->src_iov, 0, &size,
822 aad_buf, aad_offset);
824 if (unlikely(size)) {
825 plt_dp_err("Insufficient buffer"
826 " space, size %d needed",
834 i = fill_sg_comp_from_buf(gather_comp, i,
835 &fc_params->mac_buf);
838 /* input data + mac */
839 size = inputlen - iv_len;
842 ROC_SE_SINGLE_BUF_INPLACE)) {
843 i = fill_sg_comp_from_buf_min(
844 gather_comp, i, fc_params->bufs,
847 uint32_t aad_offset =
848 aad_len ? passthrough_len : 0;
850 if (unlikely(!fc_params->src_iov)) {
851 plt_dp_err("Bad input args");
855 i = fill_sg_comp_from_iov(
857 fc_params->src_iov, 0, &size,
858 aad_buf, aad_offset);
861 if (unlikely(size)) {
862 plt_dp_err("Insufficient buffer"
863 " space, size %d needed",
869 ((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
871 ((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);
874 * Output Scatter List
879 (struct roc_se_sglist_comp *)((uint8_t *)gather_comp +
884 i = fill_sg_comp(scatter_comp, i,
885 (uint64_t)offset_vaddr +
890 /* Add output data */
891 size = outputlen - iv_len;
893 if (unlikely(flags & ROC_SE_SINGLE_BUF_INPLACE)) {
894 /* handle single buffer here */
895 i = fill_sg_comp_from_buf_min(scatter_comp, i,
899 uint32_t aad_offset =
900 aad_len ? passthrough_len : 0;
902 if (unlikely(!fc_params->dst_iov)) {
903 plt_dp_err("Bad input args");
907 i = fill_sg_comp_from_iov(
908 scatter_comp, i, fc_params->dst_iov, 0,
909 &size, aad_buf, aad_offset);
912 if (unlikely(size)) {
913 plt_dp_err("Insufficient buffer space,"
920 ((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
922 ((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);
924 size = g_size_bytes + s_size_bytes + ROC_SE_SG_LIST_HDR_SIZE;
926 /* This is DPTR len in case of SG mode */
927 cpt_inst_w4.s.dlen = size;
929 inst->dptr = (uint64_t)in_buffer;
932 if (unlikely((encr_offset >> 16) || (iv_offset >> 8) ||
933 (auth_offset >> 8))) {
934 plt_dp_err("Offset not supported");
935 plt_dp_err("enc_offset: %d", encr_offset);
936 plt_dp_err("iv_offset : %d", iv_offset);
937 plt_dp_err("auth_offset: %d", auth_offset);
941 *(uint64_t *)offset_vaddr = rte_cpu_to_be_64(
942 ((uint64_t)encr_offset << 16) | ((uint64_t)iv_offset << 8) |
943 ((uint64_t)auth_offset));
945 inst->w4.u64 = cpt_inst_w4.u64;
949 static __rte_always_inline int
950 cpt_zuc_snow3g_enc_prep(uint32_t req_flags, uint64_t d_offs, uint64_t d_lens,
951 struct roc_se_fc_params *params,
952 struct cpt_inst_s *inst)
955 int32_t inputlen, outputlen;
956 struct roc_se_ctx *se_ctx;
957 uint32_t mac_len = 0;
958 uint8_t pdcp_alg_type, j;
959 uint32_t encr_offset = 0, auth_offset = 0;
960 uint32_t encr_data_len = 0, auth_data_len = 0;
961 int flags, iv_len = 16;
962 uint64_t offset_ctrl;
963 uint64_t *offset_vaddr;
964 uint32_t *iv_s, iv[4];
965 union cpt_inst_w4 cpt_inst_w4;
967 se_ctx = params->ctx_buf.vaddr;
968 flags = se_ctx->zsk_flags;
969 mac_len = se_ctx->mac_len;
970 pdcp_alg_type = se_ctx->pdcp_alg_type;
972 cpt_inst_w4.s.opcode_major = ROC_SE_MAJOR_OP_ZUC_SNOW3G;
974 /* indicates CPTR ctx, operation type, KEY & IV mode from DPTR */
976 cpt_inst_w4.s.opcode_minor = ((1 << 7) | (pdcp_alg_type << 5) |
977 (0 << 4) | (0 << 3) | (flags & 0x7));
981 * Microcode expects offsets in bytes
984 auth_data_len = ROC_SE_AUTH_DLEN(d_lens);
987 auth_offset = ROC_SE_AUTH_OFFSET(d_offs);
988 auth_offset = auth_offset / 8;
990 /* consider iv len */
991 auth_offset += iv_len;
993 inputlen = auth_offset + (RTE_ALIGN(auth_data_len, 8) / 8);
996 offset_ctrl = rte_cpu_to_be_64((uint64_t)auth_offset);
1001 * Microcode expects offsets in bytes
1002 * TODO: Rounding off
1004 encr_data_len = ROC_SE_ENCR_DLEN(d_lens);
1006 encr_offset = ROC_SE_ENCR_OFFSET(d_offs);
1007 encr_offset = encr_offset / 8;
1008 /* consider iv len */
1009 encr_offset += iv_len;
1011 inputlen = encr_offset + (RTE_ALIGN(encr_data_len, 8) / 8);
1012 outputlen = inputlen;
1014 /* iv offset is 0 */
1015 offset_ctrl = rte_cpu_to_be_64((uint64_t)encr_offset << 16);
1018 if (unlikely((encr_offset >> 16) || (auth_offset >> 8))) {
1019 plt_dp_err("Offset not supported");
1020 plt_dp_err("enc_offset: %d", encr_offset);
1021 plt_dp_err("auth_offset: %d", auth_offset);
1026 iv_s = (flags == 0x1) ? params->auth_iv_buf : params->iv_buf;
1028 if (pdcp_alg_type == ROC_SE_PDCP_ALG_TYPE_SNOW3G) {
1030 * The application provides the IV as IV3 IV2 IV1 IV0 in big-endian
1031 * form; the microcode needs it as IV0 IV1 IV2 IV3.
1034 for (j = 0; j < 4; j++)
1035 iv[j] = iv_s[3 - j];
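/*
 * e.g. an IV supplied as words {IV3, IV2, IV1, IV0} comes out of
 * this loop as {IV0, IV1, IV2, IV3}, the order the microcode
 * expects in front of the data.
 */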
1037 /* ZUC doesn't need a swap */
1038 for (j = 0; j < 4; j++)
1043 * GP op header, lengths are expected in bits.
1045 cpt_inst_w4.s.param1 = encr_data_len;
1046 cpt_inst_w4.s.param2 = auth_data_len;
1049 * On cn9k and cn10k the IV and offset control word are not part of
1050 * the instruction and must instead be placed in the data buffer, so
1051 * we check for sufficient headroom and only then use the direct
1052 * mode processing.
1054 if (likely((req_flags & ROC_SE_SINGLE_BUF_INPLACE) &&
1055 (req_flags & ROC_SE_SINGLE_BUF_HEADROOM))) {
1056 void *dm_vaddr = params->bufs[0].vaddr;
1058 /* Use Direct mode */
1060 offset_vaddr = (uint64_t *)((uint8_t *)dm_vaddr -
1061 ROC_SE_OFF_CTRL_LEN - iv_len);
1064 inst->dptr = (uint64_t)offset_vaddr;
1065 /* RPTR should just exclude offset control word */
1066 inst->rptr = (uint64_t)dm_vaddr - iv_len;
1068 cpt_inst_w4.s.dlen = inputlen + ROC_SE_OFF_CTRL_LEN;
1070 if (likely(iv_len)) {
1071 uint32_t *iv_d = (uint32_t *)((uint8_t *)offset_vaddr +
1072 ROC_SE_OFF_CTRL_LEN);
1073 memcpy(iv_d, iv, 16);
1076 *offset_vaddr = offset_ctrl;
1078 void *m_vaddr = params->meta_buf.vaddr;
1079 uint32_t i, g_size_bytes, s_size_bytes;
1080 struct roc_se_sglist_comp *gather_comp;
1081 struct roc_se_sglist_comp *scatter_comp;
1085 /* save space for iv */
1086 offset_vaddr = m_vaddr;
1088 m_vaddr = (uint8_t *)m_vaddr + ROC_SE_OFF_CTRL_LEN + iv_len;
1090 cpt_inst_w4.s.opcode_major |= (uint64_t)ROC_SE_DMA_MODE;
1092 /* DPTR has SG list */
1093 in_buffer = m_vaddr;
1095 ((uint16_t *)in_buffer)[0] = 0;
1096 ((uint16_t *)in_buffer)[1] = 0;
1098 /* TODO Add error check if space will be sufficient */
1100 (struct roc_se_sglist_comp *)((uint8_t *)m_vaddr + 8);
1107 /* Offset control word followed by iv */
1109 i = fill_sg_comp(gather_comp, i, (uint64_t)offset_vaddr,
1110 ROC_SE_OFF_CTRL_LEN + iv_len);
1112 /* iv offset is 0 */
1113 *offset_vaddr = offset_ctrl;
1115 iv_d = (uint32_t *)((uint8_t *)offset_vaddr +
1116 ROC_SE_OFF_CTRL_LEN);
1117 memcpy(iv_d, iv, 16);
1120 size = inputlen - iv_len;
1122 i = fill_sg_comp_from_iov(gather_comp, i,
1123 params->src_iov, 0, &size,
1125 if (unlikely(size)) {
1126 plt_dp_err("Insufficient buffer space,"
1132 ((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
1134 ((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);
1137 * Output Scatter List
1142 (struct roc_se_sglist_comp *)((uint8_t *)gather_comp +
1146 /* IV in SLIST only for EEA3 & UEA2 */
1151 i = fill_sg_comp(scatter_comp, i,
1152 (uint64_t)offset_vaddr +
1153 ROC_SE_OFF_CTRL_LEN,
1157 /* Add output data */
1158 if (req_flags & ROC_SE_VALID_MAC_BUF) {
1159 size = outputlen - iv_len - mac_len;
1161 i = fill_sg_comp_from_iov(scatter_comp, i,
1165 if (unlikely(size)) {
1166 plt_dp_err("Insufficient buffer space,"
1175 i = fill_sg_comp_from_buf(scatter_comp, i,
1179 /* Output including mac */
1180 size = outputlen - iv_len;
1182 i = fill_sg_comp_from_iov(scatter_comp, i,
1186 if (unlikely(size)) {
1187 plt_dp_err("Insufficient buffer space,"
1194 ((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
1196 ((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);
1198 size = g_size_bytes + s_size_bytes + ROC_SE_SG_LIST_HDR_SIZE;
1200 /* This is DPTR len in case of SG mode */
1201 cpt_inst_w4.s.dlen = size;
1203 inst->dptr = (uint64_t)in_buffer;
1206 inst->w4.u64 = cpt_inst_w4.u64;
1211 static __rte_always_inline int
1212 cpt_zuc_snow3g_dec_prep(uint32_t req_flags, uint64_t d_offs, uint64_t d_lens,
1213 struct roc_se_fc_params *params,
1214 struct cpt_inst_s *inst)
1217 int32_t inputlen = 0, outputlen;
1218 struct roc_se_ctx *se_ctx;
1219 uint8_t pdcp_alg_type, iv_len = 16;
1220 uint32_t encr_offset;
1221 uint32_t encr_data_len;
1223 uint64_t *offset_vaddr;
1224 uint32_t *iv_s, iv[4], j;
1225 union cpt_inst_w4 cpt_inst_w4;
1228 * Microcode expects offsets in bytes
1229 * TODO: Rounding off
1231 encr_offset = ROC_SE_ENCR_OFFSET(d_offs) / 8;
1232 encr_data_len = ROC_SE_ENCR_DLEN(d_lens);
1234 se_ctx = params->ctx_buf.vaddr;
1235 flags = se_ctx->zsk_flags;
1236 pdcp_alg_type = se_ctx->pdcp_alg_type;
1238 cpt_inst_w4.u64 = 0;
1239 cpt_inst_w4.s.opcode_major = ROC_SE_MAJOR_OP_ZUC_SNOW3G;
1241 /* indicates CPTR ctx, operation type, KEY & IV mode from DPTR */
1243 cpt_inst_w4.s.opcode_minor = ((1 << 7) | (pdcp_alg_type << 5) |
1244 (0 << 4) | (0 << 3) | (flags & 0x7));
1246 /* consider iv len */
1247 encr_offset += iv_len;
1249 inputlen = encr_offset + (RTE_ALIGN(encr_data_len, 8) / 8);
1250 outputlen = inputlen;
1253 iv_s = params->iv_buf;
1254 if (pdcp_alg_type == ROC_SE_PDCP_ALG_TYPE_SNOW3G) {
1256 * The application provides the IV as IV3 IV2 IV1 IV0 in big-endian
1257 * form; the microcode needs it as IV0 IV1 IV2 IV3.
1260 for (j = 0; j < 4; j++)
1261 iv[j] = iv_s[3 - j];
1263 /* ZUC doesn't need a swap */
1264 for (j = 0; j < 4; j++)
1269 * GP op header, lengths are expected in bits.
1271 cpt_inst_w4.s.param1 = encr_data_len;
1274 * On cn9k and cn10k the IV and offset control word are not part of
1275 * the instruction and must instead be placed in the data buffer, so
1276 * we check for sufficient headroom and only then use the direct
1277 * mode processing.
1279 if (likely((req_flags & ROC_SE_SINGLE_BUF_INPLACE) &&
1280 (req_flags & ROC_SE_SINGLE_BUF_HEADROOM))) {
1281 void *dm_vaddr = params->bufs[0].vaddr;
1283 /* Use Direct mode */
1285 offset_vaddr = (uint64_t *)((uint8_t *)dm_vaddr -
1286 ROC_SE_OFF_CTRL_LEN - iv_len);
1289 inst->dptr = (uint64_t)offset_vaddr;
1291 /* RPTR should just exclude offset control word */
1292 inst->rptr = (uint64_t)dm_vaddr - iv_len;
1294 cpt_inst_w4.s.dlen = inputlen + ROC_SE_OFF_CTRL_LEN;
1296 if (likely(iv_len)) {
1297 uint32_t *iv_d = (uint32_t *)((uint8_t *)offset_vaddr +
1298 ROC_SE_OFF_CTRL_LEN);
1299 memcpy(iv_d, iv, 16);
1302 /* iv offset is 0 */
1303 *offset_vaddr = rte_cpu_to_be_64((uint64_t)encr_offset << 16);
1305 void *m_vaddr = params->meta_buf.vaddr;
1306 uint32_t i, g_size_bytes, s_size_bytes;
1307 struct roc_se_sglist_comp *gather_comp;
1308 struct roc_se_sglist_comp *scatter_comp;
1312 /* save space for offset and iv... */
1313 offset_vaddr = m_vaddr;
1315 m_vaddr = (uint8_t *)m_vaddr + ROC_SE_OFF_CTRL_LEN + iv_len;
1317 cpt_inst_w4.s.opcode_major |= (uint64_t)ROC_SE_DMA_MODE;
1319 /* DPTR has SG list */
1320 in_buffer = m_vaddr;
1322 ((uint16_t *)in_buffer)[0] = 0;
1323 ((uint16_t *)in_buffer)[1] = 0;
1325 /* TODO Add error check if space will be sufficient */
1327 (struct roc_se_sglist_comp *)((uint8_t *)m_vaddr + 8);
1334 /* Offset control word */
1336 /* iv offset is 0 */
1337 *offset_vaddr = rte_cpu_to_be_64((uint64_t)encr_offset << 16);
1339 i = fill_sg_comp(gather_comp, i, (uint64_t)offset_vaddr,
1340 ROC_SE_OFF_CTRL_LEN + iv_len);
1342 iv_d = (uint32_t *)((uint8_t *)offset_vaddr +
1343 ROC_SE_OFF_CTRL_LEN);
1344 memcpy(iv_d, iv, 16);
1346 /* Add input data */
1347 size = inputlen - iv_len;
1349 i = fill_sg_comp_from_iov(gather_comp, i,
1350 params->src_iov, 0, &size,
1352 if (unlikely(size)) {
1353 plt_dp_err("Insufficient buffer space,"
1359 ((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
1361 ((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);
1364 * Output Scatter List
1369 (struct roc_se_sglist_comp *)((uint8_t *)gather_comp +
1373 i = fill_sg_comp(scatter_comp, i,
1374 (uint64_t)offset_vaddr + ROC_SE_OFF_CTRL_LEN,
1377 /* Add output data */
1378 size = outputlen - iv_len;
1380 i = fill_sg_comp_from_iov(scatter_comp, i,
1381 params->dst_iov, 0, &size,
1384 if (unlikely(size)) {
1385 plt_dp_err("Insufficient buffer space,"
1391 ((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
1393 ((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);
1395 size = g_size_bytes + s_size_bytes + ROC_SE_SG_LIST_HDR_SIZE;
1397 /* This is DPTR len in case of SG mode */
1398 cpt_inst_w4.s.dlen = size;
1400 inst->dptr = (uint64_t)in_buffer;
1403 if (unlikely((encr_offset >> 16))) {
1404 plt_dp_err("Offset not supported");
1405 plt_dp_err("enc_offset: %d", encr_offset);
1409 inst->w4.u64 = cpt_inst_w4.u64;
1414 static __rte_always_inline int
1415 cpt_kasumi_enc_prep(uint32_t req_flags, uint64_t d_offs, uint64_t d_lens,
1416 struct roc_se_fc_params *params, struct cpt_inst_s *inst)
1418 void *m_vaddr = params->meta_buf.vaddr;
1420 int32_t inputlen = 0, outputlen = 0;
1421 struct roc_se_ctx *se_ctx;
1422 uint32_t mac_len = 0;
1424 uint32_t encr_offset, auth_offset;
1425 uint32_t encr_data_len, auth_data_len;
1427 uint8_t *iv_s, *iv_d, iv_len = 8;
1429 uint64_t *offset_vaddr;
1430 union cpt_inst_w4 cpt_inst_w4;
1432 uint32_t g_size_bytes, s_size_bytes;
1433 struct roc_se_sglist_comp *gather_comp;
1434 struct roc_se_sglist_comp *scatter_comp;
1436 encr_offset = ROC_SE_ENCR_OFFSET(d_offs) / 8;
1437 auth_offset = ROC_SE_AUTH_OFFSET(d_offs) / 8;
1438 encr_data_len = ROC_SE_ENCR_DLEN(d_lens);
1439 auth_data_len = ROC_SE_AUTH_DLEN(d_lens);
1441 se_ctx = params->ctx_buf.vaddr;
1442 flags = se_ctx->zsk_flags;
1443 mac_len = se_ctx->mac_len;
1446 iv_s = params->iv_buf;
1448 iv_s = params->auth_iv_buf;
1450 dir = iv_s[8] & 0x1;
1452 cpt_inst_w4.s.opcode_major = ROC_SE_MAJOR_OP_KASUMI | ROC_SE_DMA_MODE;
1454 /* indicates ECB/CBC, direction, ctx from cptr, iv from dptr */
1455 cpt_inst_w4.s.opcode_minor = ((1 << 6) | (se_ctx->k_ecb << 5) |
1456 (dir << 4) | (0 << 3) | (flags & 0x7));
1459 * GP op header, lengths are expected in bits.
1461 cpt_inst_w4.s.param1 = encr_data_len;
1462 cpt_inst_w4.s.param2 = auth_data_len;
1464 /* consider iv len */
1466 encr_offset += iv_len;
1467 auth_offset += iv_len;
1470 /* save space for offset ctrl and iv */
1471 offset_vaddr = m_vaddr;
1473 m_vaddr = (uint8_t *)m_vaddr + ROC_SE_OFF_CTRL_LEN + iv_len;
1475 /* DPTR has SG list */
1476 in_buffer = m_vaddr;
1478 ((uint16_t *)in_buffer)[0] = 0;
1479 ((uint16_t *)in_buffer)[1] = 0;
1481 /* TODO Add error check if space will be sufficient */
1482 gather_comp = (struct roc_se_sglist_comp *)((uint8_t *)m_vaddr + 8);
1489 /* Offset control word followed by iv */
1492 inputlen = encr_offset + (RTE_ALIGN(encr_data_len, 8) / 8);
1493 outputlen = inputlen;
1494 /* iv offset is 0 */
1495 *offset_vaddr = rte_cpu_to_be_64((uint64_t)encr_offset << 16);
1496 if (unlikely((encr_offset >> 16))) {
1497 plt_dp_err("Offset not supported");
1498 plt_dp_err("enc_offset: %d", encr_offset);
1502 inputlen = auth_offset + (RTE_ALIGN(auth_data_len, 8) / 8);
1503 outputlen = mac_len;
1504 /* iv offset is 0 */
1505 *offset_vaddr = rte_cpu_to_be_64((uint64_t)auth_offset);
1506 if (unlikely((auth_offset >> 8))) {
1507 plt_dp_err("Offset not supported");
1508 plt_dp_err("auth_offset: %d", auth_offset);
1513 i = fill_sg_comp(gather_comp, i, (uint64_t)offset_vaddr,
1514 ROC_SE_OFF_CTRL_LEN + iv_len);
1517 iv_d = (uint8_t *)offset_vaddr + ROC_SE_OFF_CTRL_LEN;
1518 memcpy(iv_d, iv_s, iv_len);
1521 size = inputlen - iv_len;
1523 i = fill_sg_comp_from_iov(gather_comp, i, params->src_iov, 0,
1526 if (unlikely(size)) {
1527 plt_dp_err("Insufficient buffer space,"
1533 ((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
1534 g_size_bytes = ((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);
1537 * Output Scatter List
1541 scatter_comp = (struct roc_se_sglist_comp *)((uint8_t *)gather_comp +
1545 /* IV in SLIST only for F8 */
1551 i = fill_sg_comp(scatter_comp, i,
1552 (uint64_t)offset_vaddr + ROC_SE_OFF_CTRL_LEN,
1556 /* Add output data */
1557 if (req_flags & ROC_SE_VALID_MAC_BUF) {
1558 size = outputlen - iv_len - mac_len;
1560 i = fill_sg_comp_from_iov(scatter_comp, i,
1561 params->dst_iov, 0, &size,
1564 if (unlikely(size)) {
1565 plt_dp_err("Insufficient buffer space,"
1574 i = fill_sg_comp_from_buf(scatter_comp, i,
1578 /* Output including mac */
1579 size = outputlen - iv_len;
1581 i = fill_sg_comp_from_iov(scatter_comp, i,
1582 params->dst_iov, 0, &size,
1585 if (unlikely(size)) {
1586 plt_dp_err("Insufficient buffer space,"
1593 ((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
1594 s_size_bytes = ((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);
1596 size = g_size_bytes + s_size_bytes + ROC_SE_SG_LIST_HDR_SIZE;
1598 /* This is DPTR len in case of SG mode */
1599 cpt_inst_w4.s.dlen = size;
1601 inst->dptr = (uint64_t)in_buffer;
1602 inst->w4.u64 = cpt_inst_w4.u64;
1607 static __rte_always_inline int
1608 cpt_kasumi_dec_prep(uint64_t d_offs, uint64_t d_lens,
1609 struct roc_se_fc_params *params, struct cpt_inst_s *inst)
1611 void *m_vaddr = params->meta_buf.vaddr;
1613 int32_t inputlen = 0, outputlen;
1614 struct roc_se_ctx *se_ctx;
1615 uint8_t i = 0, iv_len = 8;
1616 uint32_t encr_offset;
1617 uint32_t encr_data_len;
1620 uint64_t *offset_vaddr;
1621 union cpt_inst_w4 cpt_inst_w4;
1623 uint32_t g_size_bytes, s_size_bytes;
1624 struct roc_se_sglist_comp *gather_comp;
1625 struct roc_se_sglist_comp *scatter_comp;
1627 encr_offset = ROC_SE_ENCR_OFFSET(d_offs) / 8;
1628 encr_data_len = ROC_SE_ENCR_DLEN(d_lens);
1630 se_ctx = params->ctx_buf.vaddr;
1631 flags = se_ctx->zsk_flags;
1633 cpt_inst_w4.u64 = 0;
1634 cpt_inst_w4.s.opcode_major = ROC_SE_MAJOR_OP_KASUMI | ROC_SE_DMA_MODE;
1636 /* indicates ECB/CBC, direction, ctx from cptr, iv from dptr */
1637 cpt_inst_w4.s.opcode_minor = ((1 << 6) | (se_ctx->k_ecb << 5) |
1638 (dir << 4) | (0 << 3) | (flags & 0x7));
1641 * GP op header, lengths are expected in bits.
1643 cpt_inst_w4.s.param1 = encr_data_len;
1645 /* consider iv len */
1646 encr_offset += iv_len;
1648 inputlen = iv_len + (RTE_ALIGN(encr_data_len, 8) / 8);
1649 outputlen = inputlen;
1651 /* save space for offset ctrl & iv */
1652 offset_vaddr = m_vaddr;
1654 m_vaddr = (uint8_t *)m_vaddr + ROC_SE_OFF_CTRL_LEN + iv_len;
1656 /* DPTR has SG list */
1657 in_buffer = m_vaddr;
1659 ((uint16_t *)in_buffer)[0] = 0;
1660 ((uint16_t *)in_buffer)[1] = 0;
1662 /* TODO Add error check if space will be sufficient */
1663 gather_comp = (struct roc_se_sglist_comp *)((uint8_t *)m_vaddr + 8);
1670 /* Offset control word followed by iv */
1671 *offset_vaddr = rte_cpu_to_be_64((uint64_t)encr_offset << 16);
1672 if (unlikely((encr_offset >> 16))) {
1673 plt_dp_err("Offset not supported");
1674 plt_dp_err("enc_offset: %d", encr_offset);
1678 i = fill_sg_comp(gather_comp, i, (uint64_t)offset_vaddr,
1679 ROC_SE_OFF_CTRL_LEN + iv_len);
1682 memcpy((uint8_t *)offset_vaddr + ROC_SE_OFF_CTRL_LEN, params->iv_buf,
1685 /* Add input data */
1686 size = inputlen - iv_len;
1688 i = fill_sg_comp_from_iov(gather_comp, i, params->src_iov, 0,
1690 if (unlikely(size)) {
1691 plt_dp_err("Insufficient buffer space,"
1697 ((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
1698 g_size_bytes = ((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);
1701 * Output Scatter List
1705 scatter_comp = (struct roc_se_sglist_comp *)((uint8_t *)gather_comp +
1709 i = fill_sg_comp(scatter_comp, i,
1710 (uint64_t)offset_vaddr + ROC_SE_OFF_CTRL_LEN, iv_len);
1712 /* Add output data */
1713 size = outputlen - iv_len;
1715 i = fill_sg_comp_from_iov(scatter_comp, i, params->dst_iov, 0,
1717 if (unlikely(size)) {
1718 plt_dp_err("Insufficient buffer space,"
1724 ((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
1725 s_size_bytes = ((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);
1727 size = g_size_bytes + s_size_bytes + ROC_SE_SG_LIST_HDR_SIZE;
1729 /* This is DPTR len in case of SG mode */
1730 cpt_inst_w4.s.dlen = size;
1732 inst->dptr = (uint64_t)in_buffer;
1733 inst->w4.u64 = cpt_inst_w4.u64;
1738 static __rte_always_inline int
1739 cpt_fc_dec_hmac_prep(uint32_t flags, uint64_t d_offs, uint64_t d_lens,
1740 struct roc_se_fc_params *fc_params,
1741 struct cpt_inst_s *inst)
1743 struct roc_se_ctx *ctx = fc_params->ctx_buf.vaddr;
1747 fc_type = ctx->fc_type;
1749 if (likely(fc_type == ROC_SE_FC_GEN)) {
1750 ret = cpt_dec_hmac_prep(flags, d_offs, d_lens, fc_params, inst);
1751 } else if (fc_type == ROC_SE_PDCP) {
1752 ret = cpt_zuc_snow3g_dec_prep(flags, d_offs, d_lens, fc_params,
1754 } else if (fc_type == ROC_SE_KASUMI) {
1755 ret = cpt_kasumi_dec_prep(d_offs, d_lens, fc_params, inst);
1759 * For the AUTH_ONLY case, the microcode only supports digest
1760 * generation; verification has to be done in software with
1761 * memcmp().
1767 static __rte_always_inline int
1768 cpt_fc_enc_hmac_prep(uint32_t flags, uint64_t d_offs, uint64_t d_lens,
1769 struct roc_se_fc_params *fc_params,
1770 struct cpt_inst_s *inst)
1772 struct roc_se_ctx *ctx = fc_params->ctx_buf.vaddr;
1776 fc_type = ctx->fc_type;
1778 if (likely(fc_type == ROC_SE_FC_GEN)) {
1779 ret = cpt_enc_hmac_prep(flags, d_offs, d_lens, fc_params, inst);
1780 } else if (fc_type == ROC_SE_PDCP) {
1781 ret = cpt_zuc_snow3g_enc_prep(flags, d_offs, d_lens, fc_params,
1783 } else if (fc_type == ROC_SE_KASUMI) {
1784 ret = cpt_kasumi_enc_prep(flags, d_offs, d_lens, fc_params,
1786 } else if (fc_type == ROC_SE_HASH_HMAC) {
1787 ret = cpt_digest_gen_prep(flags, d_lens, fc_params, inst);
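/*
 * Rough sketch of the datapath (as used further below): the
 * fill_*_params() helpers build a struct roc_se_fc_params from the
 * crypto op, then the two dispatchers above select the prep routine
 * from ctx->fc_type and populate cpt_inst_s (w4, dptr and, in direct
 * mode, rptr) for submission to CPT.
 */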
1793 static __rte_always_inline int
1794 fill_sess_aead(struct rte_crypto_sym_xform *xform, struct cnxk_se_sess *sess)
1796 struct rte_crypto_aead_xform *aead_form;
1797 roc_se_cipher_type enc_type = 0; /* NULL Cipher type */
1798 roc_se_auth_type auth_type = 0; /* NULL Auth type */
1799 uint32_t cipher_key_len = 0;
1800 uint8_t aes_gcm = 0;
1801 aead_form = &xform->aead;
1803 if (aead_form->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) {
1804 sess->cpt_op |= ROC_SE_OP_CIPHER_ENCRYPT;
1805 sess->cpt_op |= ROC_SE_OP_AUTH_GENERATE;
1806 } else if (aead_form->op == RTE_CRYPTO_AEAD_OP_DECRYPT) {
1807 sess->cpt_op |= ROC_SE_OP_CIPHER_DECRYPT;
1808 sess->cpt_op |= ROC_SE_OP_AUTH_VERIFY;
1810 plt_dp_err("Unknown aead operation\n");
1813 switch (aead_form->algo) {
1814 case RTE_CRYPTO_AEAD_AES_GCM:
1815 enc_type = ROC_SE_AES_GCM;
1816 cipher_key_len = 16;
1819 case RTE_CRYPTO_AEAD_AES_CCM:
1820 plt_dp_err("Crypto: Unsupported cipher algo %u",
1823 case RTE_CRYPTO_AEAD_CHACHA20_POLY1305:
1824 enc_type = ROC_SE_CHACHA20;
1825 auth_type = ROC_SE_POLY1305;
1826 cipher_key_len = 32;
1827 sess->chacha_poly = 1;
1830 plt_dp_err("Crypto: Undefined cipher algo %u specified",
1834 if (aead_form->key.length < cipher_key_len) {
1835 plt_dp_err("Invalid cipher params keylen %u",
1836 aead_form->key.length);
1840 sess->aes_gcm = aes_gcm;
1841 sess->mac_len = aead_form->digest_length;
1842 sess->iv_offset = aead_form->iv.offset;
1843 sess->iv_length = aead_form->iv.length;
1844 sess->aad_length = aead_form->aad_length;
1846 if (unlikely(roc_se_ciph_key_set(&sess->roc_se_ctx, enc_type,
1847 aead_form->key.data,
1848 aead_form->key.length, NULL)))
1851 if (unlikely(roc_se_auth_key_set(&sess->roc_se_ctx, auth_type, NULL, 0,
1852 aead_form->digest_length)))
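/*
 * Illustrative example (values are hypothetical): an AES-GCM AEAD xform
 * with a 16-byte key, 12-byte IV and 16-byte digest takes the
 * RTE_CRYPTO_AEAD_AES_GCM branch above; the session records the digest,
 * IV and AAD lengths and the key is programmed via roc_se_ciph_key_set().
 */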
1858 static __rte_always_inline int
1859 fill_sess_cipher(struct rte_crypto_sym_xform *xform, struct cnxk_se_sess *sess)
1861 struct rte_crypto_cipher_xform *c_form;
1862 roc_se_cipher_type enc_type = 0; /* NULL Cipher type */
1863 uint32_t cipher_key_len = 0;
1864 uint8_t zsk_flag = 0, aes_ctr = 0, is_null = 0;
1866 c_form = &xform->cipher;
1868 if (c_form->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
1869 sess->cpt_op |= ROC_SE_OP_CIPHER_ENCRYPT;
1870 else if (c_form->op == RTE_CRYPTO_CIPHER_OP_DECRYPT) {
1871 sess->cpt_op |= ROC_SE_OP_CIPHER_DECRYPT;
1872 if (xform->next != NULL &&
1873 xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
1874 /* Perform decryption followed by auth verify */
1875 sess->roc_se_ctx.template_w4.s.opcode_minor =
1876 ROC_SE_FC_MINOR_OP_HMAC_FIRST;
1879 plt_dp_err("Unknown cipher operation\n");
1883 switch (c_form->algo) {
1884 case RTE_CRYPTO_CIPHER_AES_CBC:
1885 enc_type = ROC_SE_AES_CBC;
1886 cipher_key_len = 16;
1888 case RTE_CRYPTO_CIPHER_3DES_CBC:
1889 enc_type = ROC_SE_DES3_CBC;
1890 cipher_key_len = 24;
1892 case RTE_CRYPTO_CIPHER_DES_CBC:
1893 /* DES is implemented using 3DES in hardware */
1894 enc_type = ROC_SE_DES3_CBC;
1897 case RTE_CRYPTO_CIPHER_AES_CTR:
1898 enc_type = ROC_SE_AES_CTR;
1899 cipher_key_len = 16;
1902 case RTE_CRYPTO_CIPHER_NULL:
1906 case RTE_CRYPTO_CIPHER_KASUMI_F8:
1907 enc_type = ROC_SE_KASUMI_F8_ECB;
1908 cipher_key_len = 16;
1909 zsk_flag = ROC_SE_K_F8;
1911 case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
1912 enc_type = ROC_SE_SNOW3G_UEA2;
1913 cipher_key_len = 16;
1914 zsk_flag = ROC_SE_ZS_EA;
1916 case RTE_CRYPTO_CIPHER_ZUC_EEA3:
1917 enc_type = ROC_SE_ZUC_EEA3;
1918 cipher_key_len = 16;
1919 zsk_flag = ROC_SE_ZS_EA;
1921 case RTE_CRYPTO_CIPHER_AES_XTS:
1922 enc_type = ROC_SE_AES_XTS;
1923 cipher_key_len = 16;
1925 case RTE_CRYPTO_CIPHER_3DES_ECB:
1926 enc_type = ROC_SE_DES3_ECB;
1927 cipher_key_len = 24;
1929 case RTE_CRYPTO_CIPHER_AES_ECB:
1930 enc_type = ROC_SE_AES_ECB;
1931 cipher_key_len = 16;
1933 case RTE_CRYPTO_CIPHER_3DES_CTR:
1934 case RTE_CRYPTO_CIPHER_AES_F8:
1935 case RTE_CRYPTO_CIPHER_ARC4:
1936 plt_dp_err("Crypto: Unsupported cipher algo %u", c_form->algo);
1939 plt_dp_err("Crypto: Undefined cipher algo %u specified",
1944 if (c_form->key.length < cipher_key_len) {
1945 plt_dp_err("Invalid cipher params keylen %u",
1946 c_form->key.length);
1950 sess->zsk_flag = zsk_flag;
1952 sess->aes_ctr = aes_ctr;
1953 sess->iv_offset = c_form->iv.offset;
1954 sess->iv_length = c_form->iv.length;
1955 sess->is_null = is_null;
1957 if (unlikely(roc_se_ciph_key_set(&sess->roc_se_ctx, enc_type,
1958 c_form->key.data, c_form->key.length,
1965 static __rte_always_inline int
1966 fill_sess_auth(struct rte_crypto_sym_xform *xform, struct cnxk_se_sess *sess)
1968 struct rte_crypto_auth_xform *a_form;
1969 roc_se_auth_type auth_type = 0; /* NULL Auth type */
1970 uint8_t zsk_flag = 0, aes_gcm = 0, is_null = 0;
1972 if (xform->next != NULL &&
1973 xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
1974 xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
1975 /* Perform auth followed by encryption */
1976 sess->roc_se_ctx.template_w4.s.opcode_minor =
1977 ROC_SE_FC_MINOR_OP_HMAC_FIRST;
1980 a_form = &xform->auth;
1982 if (a_form->op == RTE_CRYPTO_AUTH_OP_VERIFY)
1983 sess->cpt_op |= ROC_SE_OP_AUTH_VERIFY;
1984 else if (a_form->op == RTE_CRYPTO_AUTH_OP_GENERATE)
1985 sess->cpt_op |= ROC_SE_OP_AUTH_GENERATE;
1987 plt_dp_err("Unknown auth operation");
1991 switch (a_form->algo) {
1992 case RTE_CRYPTO_AUTH_SHA1_HMAC:
1994 case RTE_CRYPTO_AUTH_SHA1:
1995 auth_type = ROC_SE_SHA1_TYPE;
1997 case RTE_CRYPTO_AUTH_SHA256_HMAC:
1998 case RTE_CRYPTO_AUTH_SHA256:
1999 auth_type = ROC_SE_SHA2_SHA256;
2001 case RTE_CRYPTO_AUTH_SHA512_HMAC:
2002 case RTE_CRYPTO_AUTH_SHA512:
2003 auth_type = ROC_SE_SHA2_SHA512;
2005 case RTE_CRYPTO_AUTH_AES_GMAC:
2006 auth_type = ROC_SE_GMAC_TYPE;
2009 case RTE_CRYPTO_AUTH_SHA224_HMAC:
2010 case RTE_CRYPTO_AUTH_SHA224:
2011 auth_type = ROC_SE_SHA2_SHA224;
2013 case RTE_CRYPTO_AUTH_SHA384_HMAC:
2014 case RTE_CRYPTO_AUTH_SHA384:
2015 auth_type = ROC_SE_SHA2_SHA384;
2017 case RTE_CRYPTO_AUTH_MD5_HMAC:
2018 case RTE_CRYPTO_AUTH_MD5:
2019 auth_type = ROC_SE_MD5_TYPE;
2021 case RTE_CRYPTO_AUTH_KASUMI_F9:
2022 auth_type = ROC_SE_KASUMI_F9_ECB;
2024 * Indicate that direction needs to be taken out
2027 zsk_flag = ROC_SE_K_F9;
2029 case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
2030 auth_type = ROC_SE_SNOW3G_UIA2;
2031 zsk_flag = ROC_SE_ZS_IA;
2033 case RTE_CRYPTO_AUTH_ZUC_EIA3:
2034 auth_type = ROC_SE_ZUC_EIA3;
2035 zsk_flag = ROC_SE_ZS_IA;
2037 case RTE_CRYPTO_AUTH_NULL:
2041 case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
2042 case RTE_CRYPTO_AUTH_AES_CMAC:
2043 case RTE_CRYPTO_AUTH_AES_CBC_MAC:
2044 plt_dp_err("Crypto: Unsupported hash algo %u", a_form->algo);
2047 plt_dp_err("Crypto: Undefined Hash algo %u specified",
2052 sess->zsk_flag = zsk_flag;
2053 sess->aes_gcm = aes_gcm;
2054 sess->mac_len = a_form->digest_length;
2055 sess->is_null = is_null;
2057 sess->auth_iv_offset = a_form->iv.offset;
2058 sess->auth_iv_length = a_form->iv.length;
2060 if (unlikely(roc_se_auth_key_set(&sess->roc_se_ctx, auth_type,
2061 a_form->key.data, a_form->key.length,
2062 a_form->digest_length)))
2068 static __rte_always_inline int
2069 fill_sess_gmac(struct rte_crypto_sym_xform *xform, struct cnxk_se_sess *sess)
2071 struct rte_crypto_auth_xform *a_form;
2072 roc_se_cipher_type enc_type = 0; /* NULL Cipher type */
2073 roc_se_auth_type auth_type = 0; /* NULL Auth type */
2075 a_form = &xform->auth;
2077 if (a_form->op == RTE_CRYPTO_AUTH_OP_GENERATE)
2078 sess->cpt_op |= ROC_SE_OP_ENCODE;
2079 else if (a_form->op == RTE_CRYPTO_AUTH_OP_VERIFY)
2080 sess->cpt_op |= ROC_SE_OP_DECODE;
2082 plt_dp_err("Unknown auth operation");
2086 switch (a_form->algo) {
2087 case RTE_CRYPTO_AUTH_AES_GMAC:
2088 enc_type = ROC_SE_AES_GCM;
2089 auth_type = ROC_SE_GMAC_TYPE;
2092 plt_dp_err("Crypto: Undefined cipher algo %u specified",
2100 sess->iv_offset = a_form->iv.offset;
2101 sess->iv_length = a_form->iv.length;
2102 sess->mac_len = a_form->digest_length;
2104 if (unlikely(roc_se_ciph_key_set(&sess->roc_se_ctx, enc_type,
2105 a_form->key.data, a_form->key.length,
2109 if (unlikely(roc_se_auth_key_set(&sess->roc_se_ctx, auth_type, NULL, 0,
2110 a_form->digest_length)))
2116 static __rte_always_inline void *
2117 alloc_op_meta(struct roc_se_buf_ptr *buf, int32_t len,
2118 struct rte_mempool *cpt_meta_pool,
2119 struct cpt_inflight_req *infl_req)
2123 if (unlikely(rte_mempool_get(cpt_meta_pool, (void **)&mdata) < 0))
2129 infl_req->mdata = mdata;
2130 infl_req->op_flags |= CPT_OP_FLAGS_METABUF;
2135 static __rte_always_inline uint32_t
2136 prepare_iov_from_pkt(struct rte_mbuf *pkt, struct roc_se_iov_ptr *iovec,
2137 uint32_t start_offset)
2140 void *seg_data = NULL;
2141 int32_t seg_size = 0;
2148 if (!start_offset) {
2149 seg_data = rte_pktmbuf_mtod(pkt, void *);
2150 seg_size = pkt->data_len;
2152 while (start_offset >= pkt->data_len) {
2153 start_offset -= pkt->data_len;
2157 seg_data = rte_pktmbuf_mtod_offset(pkt, void *, start_offset);
2158 seg_size = pkt->data_len - start_offset;
2164 iovec->bufs[index].vaddr = seg_data;
2165 iovec->bufs[index].size = seg_size;
2169 while (unlikely(pkt != NULL)) {
2170 seg_data = rte_pktmbuf_mtod(pkt, void *);
2171 seg_size = pkt->data_len;
2175 iovec->bufs[index].vaddr = seg_data;
2176 iovec->bufs[index].size = seg_size;
2183 iovec->buf_cnt = index;
2187 static __rte_always_inline uint32_t
2188 prepare_iov_from_pkt_inplace(struct rte_mbuf *pkt,
2189 struct roc_se_fc_params *param, uint32_t *flags)
2192 void *seg_data = NULL;
2193 uint32_t seg_size = 0;
2194 struct roc_se_iov_ptr *iovec;
2196 seg_data = rte_pktmbuf_mtod(pkt, void *);
2197 seg_size = pkt->data_len;
2200 if (likely(!pkt->next)) {
2203 *flags |= ROC_SE_SINGLE_BUF_INPLACE;
2204 headroom = rte_pktmbuf_headroom(pkt);
2205 if (likely(headroom >= 24))
2206 *flags |= ROC_SE_SINGLE_BUF_HEADROOM;
2208 param->bufs[0].vaddr = seg_data;
2209 param->bufs[0].size = seg_size;
2212 iovec = param->src_iov;
2213 iovec->bufs[index].vaddr = seg_data;
2214 iovec->bufs[index].size = seg_size;
2218 while (unlikely(pkt != NULL)) {
2219 seg_data = rte_pktmbuf_mtod(pkt, void *);
2220 seg_size = pkt->data_len;
2225 iovec->bufs[index].vaddr = seg_data;
2226 iovec->bufs[index].size = seg_size;
2233 iovec->buf_cnt = index;
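/*
 * Sketch of the two paths above: a single-segment mbuf with enough
 * headroom is flagged ROC_SE_SINGLE_BUF_INPLACE (and _HEADROOM) and
 * described by bufs[0] alone, while chained mbufs fall back to one
 * src_iov entry per segment for SG mode.
 */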
2237 static __rte_always_inline int
2238 fill_fc_params(struct rte_crypto_op *cop, struct cnxk_se_sess *sess,
2239 struct cpt_qp_meta_info *m_info,
2240 struct cpt_inflight_req *infl_req, struct cpt_inst_s *inst)
2242 struct roc_se_ctx *ctx = &sess->roc_se_ctx;
2243 uint8_t op_minor = ctx->template_w4.s.opcode_minor;
2244 struct rte_crypto_sym_op *sym_op = cop->sym;
2246 uint32_t mc_hash_off;
2248 uint64_t d_offs, d_lens;
2249 struct rte_mbuf *m_src, *m_dst;
2250 uint8_t cpt_op = sess->cpt_op;
2251 #ifdef CPT_ALWAYS_USE_SG_MODE
2252 uint8_t inplace = 0;
2254 uint8_t inplace = 1;
2256 struct roc_se_fc_params fc_params;
2257 char src[SRC_IOV_SIZE];
2258 char dst[SRC_IOV_SIZE];
2262 if (likely(sess->iv_length)) {
2263 flags |= ROC_SE_VALID_IV_BUF;
2264 fc_params.iv_buf = rte_crypto_op_ctod_offset(cop, uint8_t *,
2266 if (sess->aes_ctr && unlikely(sess->iv_length != 16)) {
2267 memcpy((uint8_t *)iv_buf,
2268 rte_crypto_op_ctod_offset(cop, uint8_t *,
2271 iv_buf[3] = rte_cpu_to_be_32(0x1);
2272 fc_params.iv_buf = iv_buf;
2276 if (sess->zsk_flag) {
2277 fc_params.auth_iv_buf = rte_crypto_op_ctod_offset(
2278 cop, uint8_t *, sess->auth_iv_offset);
2279 if (sess->zsk_flag != ROC_SE_ZS_EA)
2282 m_src = sym_op->m_src;
2283 m_dst = sym_op->m_dst;
2285 if (sess->aes_gcm || sess->chacha_poly) {
2290 d_offs = sym_op->aead.data.offset;
2291 d_lens = sym_op->aead.data.length;
2293 sym_op->aead.data.offset + sym_op->aead.data.length;
2295 aad_data = sym_op->aead.aad.data;
2296 aad_len = sess->aad_length;
2297 if (likely((aad_data + aad_len) ==
2298 rte_pktmbuf_mtod_offset(m_src, uint8_t *,
2299 sym_op->aead.data.offset))) {
2300 d_offs = (d_offs - aad_len) | (d_offs << 16);
2301 d_lens = (d_lens + aad_len) | (d_lens << 32);
2303 fc_params.aad_buf.vaddr = sym_op->aead.aad.data;
2304 fc_params.aad_buf.size = aad_len;
2305 flags |= ROC_SE_VALID_AAD_BUF;
2307 d_offs = d_offs << 16;
2308 d_lens = d_lens << 32;
2311 salt = fc_params.iv_buf;
2312 if (unlikely(*(uint32_t *)salt != sess->salt)) {
2313 cpt_fc_salt_update(&sess->roc_se_ctx, salt);
2314 sess->salt = *(uint32_t *)salt;
2316 fc_params.iv_buf = salt + 4;
2317 if (likely(sess->mac_len)) {
2318 struct rte_mbuf *m =
2319 (cpt_op & ROC_SE_OP_ENCODE) ? m_dst : m_src;
2324 /* hmac immediately following data is best case */
2325 if (unlikely(rte_pktmbuf_mtod(m, uint8_t *) +
2327 (uint8_t *)sym_op->aead.digest.data)) {
2328 flags |= ROC_SE_VALID_MAC_BUF;
2329 fc_params.mac_buf.size = sess->mac_len;
2330 fc_params.mac_buf.vaddr =
2331 sym_op->aead.digest.data;
2336 d_offs = sym_op->cipher.data.offset;
2337 d_lens = sym_op->cipher.data.length;
2339 sym_op->cipher.data.offset + sym_op->cipher.data.length;
2340 d_offs = (d_offs << 16) | sym_op->auth.data.offset;
2341 d_lens = (d_lens << 32) | sym_op->auth.data.length;
2344 (sym_op->auth.data.offset + sym_op->auth.data.length)) {
2345 mc_hash_off = (sym_op->auth.data.offset +
2346 sym_op->auth.data.length);
2348 /* for gmac, salt should be updated like in gcm */
2349 if (unlikely(sess->is_gmac)) {
2351 salt = fc_params.iv_buf;
2352 if (unlikely(*(uint32_t *)salt != sess->salt)) {
2353 cpt_fc_salt_update(&sess->roc_se_ctx, salt);
2354 sess->salt = *(uint32_t *)salt;
2356 fc_params.iv_buf = salt + 4;
2358 if (likely(sess->mac_len)) {
2361 m = (cpt_op & ROC_SE_OP_ENCODE) ? m_dst : m_src;
2365 /* hmac immediately following data is best case */
2366 if (!(op_minor & ROC_SE_FC_MINOR_OP_HMAC_FIRST) &&
2367 (unlikely(rte_pktmbuf_mtod(m, uint8_t *) +
2369 (uint8_t *)sym_op->auth.digest.data))) {
2370 flags |= ROC_SE_VALID_MAC_BUF;
2371 fc_params.mac_buf.size = sess->mac_len;
2372 fc_params.mac_buf.vaddr =
2373 sym_op->auth.digest.data;
2378 fc_params.ctx_buf.vaddr = &sess->roc_se_ctx;
2380 if (!(op_minor & ROC_SE_FC_MINOR_OP_HMAC_FIRST) &&
2381 unlikely(sess->is_null || sess->cpt_op == ROC_SE_OP_DECODE))
2384 if (likely(!m_dst && inplace)) {
2385 /* Case of single buffer without AAD buf or
2386 * separate mac buf in place and
2389 fc_params.dst_iov = fc_params.src_iov = (void *)src;
2391 if (unlikely(prepare_iov_from_pkt_inplace(m_src, &fc_params,
2393 plt_dp_err("Prepare inplace src iov failed");
2399 /* Out of place processing */
2400 fc_params.src_iov = (void *)src;
2401 fc_params.dst_iov = (void *)dst;
2403 /* Store SG I/O in the API for reuse */
2404 if (prepare_iov_from_pkt(m_src, fc_params.src_iov, 0)) {
2405 plt_dp_err("Prepare src iov failed");
2410 if (unlikely(m_dst != NULL)) {
2413 /* Try to make as much room as the source has */
2414 pkt_len = rte_pktmbuf_pkt_len(m_dst);
2416 if (unlikely(pkt_len < rte_pktmbuf_pkt_len(m_src))) {
2417 pkt_len = rte_pktmbuf_pkt_len(m_src) - pkt_len;
2418 if (!rte_pktmbuf_append(m_dst, pkt_len)) {
2419 plt_dp_err("Not enough space in "
2428 if (prepare_iov_from_pkt(m_dst, fc_params.dst_iov, 0)) {
2429 plt_dp_err("Prepare dst iov failed for "
2436 fc_params.dst_iov = (void *)src;
2440 if (unlikely(!((flags & ROC_SE_SINGLE_BUF_INPLACE) &&
2441 (flags & ROC_SE_SINGLE_BUF_HEADROOM) &&
2442 ((ctx->fc_type == ROC_SE_FC_GEN) ||
2443 (ctx->fc_type == ROC_SE_PDCP))))) {
2444 mdata = alloc_op_meta(&fc_params.meta_buf, m_info->mlen,
2445 m_info->pool, infl_req);
2446 if (mdata == NULL) {
2447 plt_dp_err("Error allocating meta buffer for request");
2452 /* Finally prepare the instruction */
2453 if (cpt_op & ROC_SE_OP_ENCODE)
2454 ret = cpt_fc_enc_hmac_prep(flags, d_offs, d_lens, &fc_params,
2457 ret = cpt_fc_dec_hmac_prep(flags, d_offs, d_lens, &fc_params,
2460 if (unlikely(ret)) {
2461 plt_dp_err("Preparing request failed due to bad input arg");
2462 goto free_mdata_and_exit;
2467 free_mdata_and_exit:
2468 if (infl_req->op_flags & CPT_OP_FLAGS_METABUF)
2469 rte_mempool_put(m_info->pool, infl_req->mdata);
2474 static __rte_always_inline void
2475 compl_auth_verify(struct rte_crypto_op *op, uint8_t *gen_mac, uint64_t mac_len)
2478 struct rte_crypto_sym_op *sym_op = op->sym;
2480 if (sym_op->auth.digest.data)
2481 mac = sym_op->auth.digest.data;
2483 mac = rte_pktmbuf_mtod_offset(sym_op->m_src, uint8_t *,
2484 sym_op->auth.data.length +
2485 sym_op->auth.data.offset);
2487 op->status = RTE_CRYPTO_OP_STATUS_ERROR;
2491 if (memcmp(mac, gen_mac, mac_len))
2492 op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
2494 op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
2497 static __rte_always_inline void
2498 find_kasumif9_direction_and_length(uint8_t *src, uint32_t counter_num_bytes,
2499 uint32_t *addr_length_in_bits,
2500 uint8_t *addr_direction)
2505 while (!found && counter_num_bytes > 0) {
2506 counter_num_bytes--;
2507 if (src[counter_num_bytes] == 0x00)
2509 pos = rte_bsf32(src[counter_num_bytes]);
2511 if (likely(counter_num_bytes > 0)) {
2512 last_byte = src[counter_num_bytes - 1];
2513 *addr_direction = last_byte & 0x1;
2514 *addr_length_in_bits =
2515 counter_num_bytes * 8 - 1;
2518 last_byte = src[counter_num_bytes];
2519 *addr_direction = (last_byte >> (pos + 1)) & 0x1;
2520 *addr_length_in_bits =
2521 counter_num_bytes * 8 + (8 - (pos + 2));
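/*
 * Worked example (illustrative): the KASUMI f9 input ends with the
 * direction bit, a single '1' terminator and zero padding. If the last
 * non-zero byte is 0x60 (0110 0000b) at byte index k, the terminator is
 * bit 5, the direction bit is (0x60 >> 6) & 1 = 1, and the message
 * length is k * 8 + 1 bits.
 */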
2528 * This handles all auth-only cases except AES_GMAC.
2530 static __rte_always_inline int
2531 fill_digest_params(struct rte_crypto_op *cop, struct cnxk_se_sess *sess,
2532 struct cpt_qp_meta_info *m_info,
2533 struct cpt_inflight_req *infl_req, struct cpt_inst_s *inst)
2536 struct rte_crypto_sym_op *sym_op = cop->sym;
2538 uint32_t auth_range_off;
2540 uint64_t d_offs = 0, d_lens;
2541 struct rte_mbuf *m_src, *m_dst;
2542 uint16_t auth_op = sess->cpt_op & ROC_SE_OP_AUTH_MASK;
2543 uint16_t mac_len = sess->mac_len;
2544 struct roc_se_fc_params params;
2545 char src[SRC_IOV_SIZE];
2549 memset(&params, 0, sizeof(struct roc_se_fc_params));
2551 m_src = sym_op->m_src;
2553 mdata = alloc_op_meta(&params.meta_buf, m_info->mlen, m_info->pool,
2555 if (mdata == NULL) {
2560 auth_range_off = sym_op->auth.data.offset;
2562 flags = ROC_SE_VALID_MAC_BUF;
2563 params.src_iov = (void *)src;
2564 if (unlikely(sess->zsk_flag)) {
2566 * Since for Zuc, Kasumi, Snow3g offsets are in bits
2567 * we will send pass through even for auth only case,
2570 d_offs = auth_range_off;
2572 params.auth_iv_buf = rte_crypto_op_ctod_offset(
2573 cop, uint8_t *, sess->auth_iv_offset);
2574 if (sess->zsk_flag == ROC_SE_K_F9) {
2575 uint32_t length_in_bits, num_bytes;
2576 uint8_t *src, direction = 0;
2579 rte_pktmbuf_mtod(cop->sym->m_src, uint8_t *), 8);
2581 * This is kasumi f9, take direction from
2584 length_in_bits = cop->sym->auth.data.length;
2585 num_bytes = (length_in_bits >> 3);
2586 src = rte_pktmbuf_mtod(cop->sym->m_src, uint8_t *);
2587 find_kasumif9_direction_and_length(
2588 src, num_bytes, &length_in_bits, &direction);
2589 length_in_bits -= 64;
2590 cop->sym->auth.data.offset += 64;
2591 d_offs = cop->sym->auth.data.offset;
2592 auth_range_off = d_offs / 8;
2593 cop->sym->auth.data.length = length_in_bits;
2595 /* Store it at end of auth iv */
2596 iv_buf[8] = direction;
2597 params.auth_iv_buf = iv_buf;
2601 d_lens = sym_op->auth.data.length;
2603 params.ctx_buf.vaddr = &sess->roc_se_ctx;
2605 if (auth_op == ROC_SE_OP_AUTH_GENERATE) {
2606 if (sym_op->auth.digest.data) {
2608 * Digest to be generated
2609 * in separate buffer
2611 params.mac_buf.size = sess->mac_len;
2612 params.mac_buf.vaddr = sym_op->auth.digest.data;
2614 uint32_t off = sym_op->auth.data.offset +
2615 sym_op->auth.data.length;
2616 int32_t dlen, space;
2618 m_dst = sym_op->m_dst ? sym_op->m_dst : sym_op->m_src;
2619 dlen = rte_pktmbuf_pkt_len(m_dst);
2621 space = off + mac_len - dlen;
2623 if (!rte_pktmbuf_append(m_dst, space)) {
2624 plt_dp_err("Failed to extend "
2628 goto free_mdata_and_exit;
2631 params.mac_buf.vaddr =
2632 rte_pktmbuf_mtod_offset(m_dst, void *, off);
2633 params.mac_buf.size = mac_len;
2636 uint64_t *op = mdata;
2638 /* Need space for storing generated mac */
2639 space += 2 * sizeof(uint64_t);
2641 params.mac_buf.vaddr = (uint8_t *)mdata + space;
2642 params.mac_buf.size = mac_len;
2643 space += RTE_ALIGN_CEIL(mac_len, 8);
2644 op[0] = (uintptr_t)params.mac_buf.vaddr;
2646 infl_req->op_flags |= CPT_OP_FLAGS_AUTH_VERIFY;
2649 params.meta_buf.vaddr = (uint8_t *)mdata + space;
2650 params.meta_buf.size -= space;
2652 /* Out of place processing */
2653 params.src_iov = (void *)src;
2655 /* Store SG I/O in the API for reuse */
2656 if (prepare_iov_from_pkt(m_src, params.src_iov, auth_range_off)) {
2657 plt_dp_err("Prepare src iov failed");
2659 goto free_mdata_and_exit;
2662 ret = cpt_fc_enc_hmac_prep(flags, d_offs, d_lens, ¶ms, inst);
2664 goto free_mdata_and_exit;
2668 free_mdata_and_exit:
2669 if (infl_req->op_flags & CPT_OP_FLAGS_METABUF)
2670 rte_mempool_put(m_info->pool, infl_req->mdata);
2674 #endif /*_CNXK_SE_H_ */