/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */

#ifndef _CNXK_SE_H_
#define _CNXK_SE_H_
#include "cnxk_cryptodev.h"
#include "cnxk_cryptodev_ops.h"
#define SRC_IOV_SIZE \
        (sizeof(struct roc_se_iov_ptr) + \
         (sizeof(struct roc_se_buf_ptr) * ROC_SE_MAX_SG_CNT))
#define DST_IOV_SIZE \
        (sizeof(struct roc_se_iov_ptr) + \
         (sizeof(struct roc_se_buf_ptr) * ROC_SE_MAX_SG_CNT))
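/*
 * Sizing note (inferred from the macros above): each IOV buffer is one
 * struct roc_se_iov_ptr header followed by up to ROC_SE_MAX_SG_CNT
 * struct roc_se_buf_ptr entries, so a char array of SRC_IOV_SIZE or
 * DST_IOV_SIZE bytes on the stack can back a complete SG list.
 */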
struct cnxk_se_sess {
        uint16_t zsk_flag : 4;
        uint16_t chacha_poly : 1;
        uint8_t auth_iv_length;
        uint16_t auth_iv_offset;
        struct roc_se_ctx roc_se_ctx;
} __rte_cache_aligned;
static __rte_always_inline int
fill_sess_gmac(struct rte_crypto_sym_xform *xform, struct cnxk_se_sess *sess);
static __rte_always_inline void
cpt_pack_iv(uint8_t *iv_src, uint8_t *iv_dst)
{
        iv_dst[16] = iv_src[16];
        /* Pack the last 8 bytes of the IV into 6 bytes,
         * discarding the 2 MSBs of each byte.
         */
        iv_dst[17] = (((iv_src[17] & 0x3f) << 2) | ((iv_src[18] >> 4) & 0x3));
        iv_dst[18] = (((iv_src[18] & 0xf) << 4) | ((iv_src[19] >> 2) & 0xf));
        iv_dst[19] = (((iv_src[19] & 0x3) << 6) | (iv_src[20] & 0x3f));

        iv_dst[20] = (((iv_src[21] & 0x3f) << 2) | ((iv_src[22] >> 4) & 0x3));
        iv_dst[21] = (((iv_src[22] & 0xf) << 4) | ((iv_src[23] >> 2) & 0xf));
        iv_dst[22] = (((iv_src[23] & 0x3) << 6) | (iv_src[24] & 0x3f));
}
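/*
 * Worked example (illustrative values): with iv_src[17] = 0x3f and
 * iv_src[18] = 0xf0, iv_dst[17] = ((0x3f & 0x3f) << 2) |
 * ((0xf0 >> 4) & 0x3) = 0xfc | 0x3 = 0xff, i.e. the 6 LSBs of
 * consecutive source bytes are concatenated into the destination.
 */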
static __rte_always_inline void
pdcp_iv_copy(uint8_t *iv_d, uint8_t *iv_s, const uint8_t pdcp_alg_type,
             uint8_t pack_iv)
{
        uint32_t *iv_s_temp, iv_temp[4];
        int j;

        if (pdcp_alg_type == ROC_SE_PDCP_ALG_TYPE_SNOW3G) {
                /*
                 * DPDK provides the IV as big-endian IV3 IV2 IV1 IV0;
                 * the microcode needs it as IV0 IV1 IV2 IV3.
                 */
                iv_s_temp = (uint32_t *)iv_s;

                for (j = 0; j < 4; j++)
                        iv_temp[j] = iv_s_temp[3 - j];
                memcpy(iv_d, iv_temp, 16);
        } else {
                /* ZUC doesn't need a swap */
                memcpy(iv_d, iv_s, 16);
                if (pack_iv)
                        cpt_pack_iv(iv_s, iv_d);
        }
}
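/*
 * Illustrative trace of the SNOW3G swap above: a source IV seen as the
 * 32-bit words { IV3, IV2, IV1, IV0 } is copied with iv_temp[j] =
 * iv_s_temp[3 - j], so the microcode receives { IV0, IV1, IV2, IV3 }.
 */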
static __rte_always_inline int
cpt_mac_len_verify(struct rte_crypto_auth_xform *auth)
{
        uint16_t mac_len = auth->digest_length;
        int ret;

        switch (auth->algo) {
        case RTE_CRYPTO_AUTH_MD5:
        case RTE_CRYPTO_AUTH_MD5_HMAC:
                ret = (mac_len == 16) ? 0 : -1;
                break;
        case RTE_CRYPTO_AUTH_SHA1:
        case RTE_CRYPTO_AUTH_SHA1_HMAC:
                ret = (mac_len == 20) ? 0 : -1;
                break;
        case RTE_CRYPTO_AUTH_SHA224:
        case RTE_CRYPTO_AUTH_SHA224_HMAC:
                ret = (mac_len == 28) ? 0 : -1;
                break;
        case RTE_CRYPTO_AUTH_SHA256:
        case RTE_CRYPTO_AUTH_SHA256_HMAC:
                ret = (mac_len == 32) ? 0 : -1;
                break;
        case RTE_CRYPTO_AUTH_SHA384:
        case RTE_CRYPTO_AUTH_SHA384_HMAC:
                ret = (mac_len == 48) ? 0 : -1;
                break;
        case RTE_CRYPTO_AUTH_SHA512:
        case RTE_CRYPTO_AUTH_SHA512_HMAC:
                ret = (mac_len == 64) ? 0 : -1;
                break;
        case RTE_CRYPTO_AUTH_NULL:
                ret = 0;
                break;
        default:
                ret = -1;
        }

        return ret;
}
static __rte_always_inline void
cpt_fc_salt_update(struct roc_se_ctx *se_ctx, uint8_t *salt)
{
        struct roc_se_context *fctx = &se_ctx->se_ctx.fctx;

        memcpy(fctx->enc.encr_iv, salt, 4);
}
static __rte_always_inline uint32_t
fill_sg_comp(struct roc_se_sglist_comp *list, uint32_t i, phys_addr_t dma_addr,
             uint32_t size)
{
        struct roc_se_sglist_comp *to = &list[i >> 2];

        to->u.s.len[i % 4] = rte_cpu_to_be_16(size);
        to->ptr[i % 4] = rte_cpu_to_be_64(dma_addr);
        i++;
        return i;
}
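/*
 * Layout note (inferred from the accessors above): one
 * roc_se_sglist_comp packs four SG entries -- four big-endian 16-bit
 * lengths followed by four big-endian 64-bit pointers -- hence entry i
 * lands in component i >> 2, slot i % 4.
 */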
static __rte_always_inline uint32_t
fill_sg_comp_from_buf(struct roc_se_sglist_comp *list, uint32_t i,
                      struct roc_se_buf_ptr *from)
{
        struct roc_se_sglist_comp *to = &list[i >> 2];

        to->u.s.len[i % 4] = rte_cpu_to_be_16(from->size);
        to->ptr[i % 4] = rte_cpu_to_be_64((uint64_t)from->vaddr);
        i++;
        return i;
}
static __rte_always_inline uint32_t
fill_sg_comp_from_buf_min(struct roc_se_sglist_comp *list, uint32_t i,
                          struct roc_se_buf_ptr *from, uint32_t *psize)
{
        struct roc_se_sglist_comp *to = &list[i >> 2];
        uint32_t size = *psize;
        uint32_t e_len;

        e_len = (size > from->size) ? from->size : size;
        to->u.s.len[i % 4] = rte_cpu_to_be_16(e_len);
        to->ptr[i % 4] = rte_cpu_to_be_64((uint64_t)from->vaddr);
        *psize -= e_len;
        i++;
        return i;
}
/*
 * This fills the SG I/O list expected by the microcode
 * from the IOV given by the user.
 */
static __rte_always_inline uint32_t
fill_sg_comp_from_iov(struct roc_se_sglist_comp *list, uint32_t i,
                      struct roc_se_iov_ptr *from, uint32_t from_offset,
                      uint32_t *psize, struct roc_se_buf_ptr *extra_buf,
                      uint32_t extra_offset)
{
        int32_t j;
        uint32_t extra_len = extra_buf ? extra_buf->size : 0;
        uint32_t size = *psize;
        struct roc_se_buf_ptr *bufs;

        bufs = from->bufs;
        for (j = 0; (j < from->buf_cnt) && size; j++) {
                uint64_t e_vaddr;
                uint32_t e_len;
                struct roc_se_sglist_comp *to = &list[i >> 2];

                if (unlikely(from_offset)) {
                        if (from_offset >= bufs[j].size) {
                                from_offset -= bufs[j].size;
                                continue;
                        }
                        e_vaddr = (uint64_t)bufs[j].vaddr + from_offset;
                        e_len = (size > (bufs[j].size - from_offset)) ?
                                        (bufs[j].size - from_offset) : size;
                        from_offset = 0;
                } else {
                        e_vaddr = (uint64_t)bufs[j].vaddr;
                        e_len = (size > bufs[j].size) ? bufs[j].size : size;
                }

                to->u.s.len[i % 4] = rte_cpu_to_be_16(e_len);
                to->ptr[i % 4] = rte_cpu_to_be_64(e_vaddr);

                if (extra_len && (e_len >= extra_offset)) {
                        /* Break the data at the given offset */
                        uint32_t next_len = e_len - extra_offset;
                        uint64_t next_vaddr = e_vaddr + extra_offset;

                        if (!extra_offset) {
                                i--;
                        } else {
                                e_len = extra_offset;
                                size -= e_len;
                                to->u.s.len[i % 4] = rte_cpu_to_be_16(e_len);
                        }

                        extra_len = RTE_MIN(extra_len, size);
                        /* Insert extra data ptr */
                        if (extra_len) {
                                i++;
                                to = &list[i >> 2];
                                to->u.s.len[i % 4] =
                                        rte_cpu_to_be_16(extra_len);
                                to->ptr[i % 4] = rte_cpu_to_be_64(
                                        (uint64_t)extra_buf->vaddr);
                                size -= extra_len;
                        }

                        next_len = RTE_MIN(next_len, size);
                        /* Insert the rest of the data */
                        if (next_len) {
                                i++;
                                to = &list[i >> 2];
                                to->u.s.len[i % 4] = rte_cpu_to_be_16(next_len);
                                to->ptr[i % 4] = rte_cpu_to_be_64(next_vaddr);
                                size -= next_len;
                        }

                        extra_len = 0;
                } else {
                        size -= e_len;
                        if (extra_offset)
                                extra_offset -= size;
                        i++;
                }
        }

        *psize = size;
        return (uint32_t)i;
}
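/*
 * Usage sketch (hypothetical values): to gather 64 bytes of payload
 * with a separate AAD buffer spliced in 32 bytes into the stream:
 *
 *     uint32_t sz = 64;
 *     i = fill_sg_comp_from_iov(gather, i, src_iov, 0, &sz, aad_buf, 32);
 *
 * On return, sz holds the bytes that could not be mapped (0 on
 * success) and i has advanced past all inserted entries.
 */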
static __rte_always_inline int
cpt_digest_gen_prep(uint32_t flags, uint64_t d_lens,
                    struct roc_se_fc_params *params, struct cpt_inst_s *inst)
{
        void *m_vaddr = params->meta_buf.vaddr;
        uint32_t size, i;
        uint16_t data_len, mac_len, key_len;
        roc_se_auth_type hash_type;
        struct roc_se_ctx *ctx;
        struct roc_se_sglist_comp *gather_comp;
        struct roc_se_sglist_comp *scatter_comp;
        uint8_t *in_buffer;
        uint32_t g_size_bytes, s_size_bytes;
        union cpt_inst_w4 cpt_inst_w4;

        ctx = params->ctx_buf.vaddr;

        hash_type = ctx->hash_type;
        mac_len = ctx->mac_len;
        key_len = ctx->auth_key_len;
        data_len = ROC_SE_AUTH_DLEN(d_lens);

        cpt_inst_w4.s.opcode_minor = 0;
        cpt_inst_w4.s.param2 = ((uint16_t)hash_type << 8);
        if (ctx->hmac) {
                cpt_inst_w4.s.opcode_major =
                        ROC_SE_MAJOR_OP_HMAC | ROC_SE_DMA_MODE;
                cpt_inst_w4.s.param1 = key_len;
                cpt_inst_w4.s.dlen = data_len + RTE_ALIGN_CEIL(key_len, 8);
        } else {
                cpt_inst_w4.s.opcode_major =
                        ROC_SE_MAJOR_OP_HASH | ROC_SE_DMA_MODE;
                cpt_inst_w4.s.param1 = 0;
                cpt_inst_w4.s.dlen = data_len;
        }

        /* Only the NULL cipher + NULL auth case enters this branch */
        if (unlikely(!hash_type && !ctx->enc_cipher)) {
                cpt_inst_w4.s.opcode_major = ROC_SE_MAJOR_OP_MISC;
                /* Minor op is passthrough */
                cpt_inst_w4.s.opcode_minor = 0x03;
                /* Send out completion code only */
                cpt_inst_w4.s.param2 = 0x1;
        }

        /* DPTR has SG list */
        in_buffer = m_vaddr;

        ((uint16_t *)in_buffer)[0] = 0;
        ((uint16_t *)in_buffer)[1] = 0;

        /* TODO Add error check if space will be sufficient */
        gather_comp = (struct roc_se_sglist_comp *)((uint8_t *)m_vaddr + 8);

        /* Input gather list */
        i = 0;

        if (ctx->hmac) {
                uint64_t k_vaddr = (uint64_t)ctx->auth_key;
                /* Key */
                i = fill_sg_comp(gather_comp, i, k_vaddr,
                                 RTE_ALIGN_CEIL(key_len, 8));
        }

        /* Input data */
        size = data_len;
        if (size) {
                i = fill_sg_comp_from_iov(gather_comp, i, params->src_iov, 0,
                                          &size, NULL, 0);
                if (unlikely(size)) {
                        plt_dp_err("Insufficient src IOV size, short by %dB",
                                   size);
                        return -1;
                }
        } else {
                /*
                 * Even for zero-length data, hash & HMAC need a
                 * gather pointer.
                 */
                size = 0;
        }
        ((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
        g_size_bytes = ((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);

        /* Output Scatter list */
        i = 0;
        scatter_comp = (struct roc_se_sglist_comp *)((uint8_t *)gather_comp +
                                                     g_size_bytes);

        if (flags & ROC_SE_VALID_MAC_BUF) {
                if (unlikely(params->mac_buf.size < mac_len)) {
                        plt_dp_err("Insufficient MAC size");
                        return -1;
                }

                size = mac_len;
                i = fill_sg_comp_from_buf_min(scatter_comp, i,
                                              &params->mac_buf, &size);
        } else {
                size = mac_len;
                i = fill_sg_comp_from_iov(scatter_comp, i, params->src_iov,
                                          data_len, &size, NULL, 0);
                if (unlikely(size)) {
                        plt_dp_err("Insufficient dst IOV size, short by %dB",
                                   size);
                        return -1;
                }
        }

        ((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
        s_size_bytes = ((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);

        size = g_size_bytes + s_size_bytes + ROC_SE_SG_LIST_HDR_SIZE;

        /* This is DPTR len in case of SG mode */
        cpt_inst_w4.s.dlen = size;

        inst->dptr = (uint64_t)in_buffer;
        inst->w4.u64 = cpt_inst_w4.u64;

        return 0;
}
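/*
 * SG header layout (as built above): the first 8 bytes of the DPTR
 * buffer are four 16-bit words -- two reserved zero words, the
 * big-endian gather entry count ([2]) and the big-endian scatter entry
 * count ([3]) -- followed by the gather and then scatter components.
 */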
static __rte_always_inline int
cpt_enc_hmac_prep(uint32_t flags, uint64_t d_offs, uint64_t d_lens,
                  struct roc_se_fc_params *fc_params, struct cpt_inst_s *inst)
{
        uint32_t iv_offset = 0;
        int32_t inputlen, outputlen, enc_dlen, auth_dlen;
        struct roc_se_ctx *se_ctx;
        uint32_t cipher_type, hash_type;
        uint32_t mac_len, size;
        uint8_t iv_len = 16;
        struct roc_se_buf_ptr *aad_buf = NULL;
        uint32_t encr_offset, auth_offset;
        uint32_t encr_data_len, auth_data_len, aad_len = 0;
        uint32_t passthrough_len = 0;
        union cpt_inst_w4 cpt_inst_w4;
        void *offset_vaddr;
        uint8_t op_minor;

        encr_offset = ROC_SE_ENCR_OFFSET(d_offs);
        auth_offset = ROC_SE_AUTH_OFFSET(d_offs);
        encr_data_len = ROC_SE_ENCR_DLEN(d_lens);
        auth_data_len = ROC_SE_AUTH_DLEN(d_lens);
        if (unlikely(flags & ROC_SE_VALID_AAD_BUF)) {
                /* We don't support both AAD and auth data separately */
                auth_data_len = 0;
                auth_offset = 0;
                aad_len = fc_params->aad_buf.size;
                aad_buf = &fc_params->aad_buf;
        }
        se_ctx = fc_params->ctx_buf.vaddr;
        cipher_type = se_ctx->enc_cipher;
        hash_type = se_ctx->hash_type;
        mac_len = se_ctx->mac_len;
        op_minor = se_ctx->template_w4.s.opcode_minor;

        if (unlikely(!(flags & ROC_SE_VALID_IV_BUF))) {
                iv_len = 0;
                iv_offset = ROC_SE_ENCR_IV_OFFSET(d_offs);
        }

        if (unlikely(flags & ROC_SE_VALID_AAD_BUF)) {
                /*
                 * When AAD is given, data above encr_offset is pass through.
                 * Since AAD is given as a separate pointer and not as an
                 * offset, this is a special case: the input data is
                 * fragmented into passthrough + encr_data and the AAD is
                 * inserted in between.
                 */
                if (hash_type != ROC_SE_GMAC_TYPE) {
                        passthrough_len = encr_offset;
                        auth_offset = passthrough_len + iv_len;
                        encr_offset = passthrough_len + aad_len + iv_len;
                        auth_data_len = aad_len + encr_data_len;
                } else {
                        passthrough_len = 16 + aad_len;
                        auth_offset = passthrough_len + iv_len;
                        auth_data_len = aad_len;
                }
        } else {
                encr_offset += iv_len;
                auth_offset += iv_len;
        }

        /* Encryption */
        cpt_inst_w4.s.opcode_major = ROC_SE_MAJOR_OP_FC;
        cpt_inst_w4.s.opcode_minor = ROC_SE_FC_MINOR_OP_ENCRYPT;
        cpt_inst_w4.s.opcode_minor |= (uint64_t)op_minor;

        if (hash_type == ROC_SE_GMAC_TYPE) {
                encr_offset = 0;
                encr_data_len = 0;
        }

        auth_dlen = auth_offset + auth_data_len;
        enc_dlen = encr_data_len + encr_offset;
        if (unlikely(encr_data_len & 0xf)) {
                if ((cipher_type == ROC_SE_DES3_CBC) ||
                    (cipher_type == ROC_SE_DES3_ECB))
                        enc_dlen =
                                RTE_ALIGN_CEIL(encr_data_len, 8) + encr_offset;
                else if (likely((cipher_type == ROC_SE_AES_CBC) ||
                                (cipher_type == ROC_SE_AES_ECB)))
                        enc_dlen =
                                RTE_ALIGN_CEIL(encr_data_len, 8) + encr_offset;
        }

        if (unlikely(auth_dlen > enc_dlen)) {
                inputlen = auth_dlen;
                outputlen = auth_dlen + mac_len;
        } else {
                inputlen = enc_dlen;
                outputlen = enc_dlen + mac_len;
        }

        if (op_minor & ROC_SE_FC_MINOR_OP_HMAC_FIRST)
                outputlen = enc_dlen;

        /* GP op header */
        cpt_inst_w4.s.param1 = encr_data_len;
        cpt_inst_w4.s.param2 = auth_data_len;

        /*
         * On cn9k and cn10k the IV and offset control word cannot be
         * carried in the instruction and must be part of the data buffer,
         * so Direct mode is used only when headroom is available.
         */
        if (likely((flags & ROC_SE_SINGLE_BUF_INPLACE) &&
                   (flags & ROC_SE_SINGLE_BUF_HEADROOM))) {
                void *dm_vaddr = fc_params->bufs[0].vaddr;

                /* Use Direct mode */

                offset_vaddr =
                        (uint8_t *)dm_vaddr - ROC_SE_OFF_CTRL_LEN - iv_len;

                /* DPTR */
                inst->dptr = (uint64_t)offset_vaddr;

                /* RPTR should just exclude offset control word */
                inst->rptr = (uint64_t)dm_vaddr - iv_len;

                cpt_inst_w4.s.dlen = inputlen + ROC_SE_OFF_CTRL_LEN;
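                /*
                 * Direct mode buffer sketch: DPTR points at
                 * [ 8 B offset ctrl ][ iv_len IV ][ payload ... ] carved
                 * out of the packet headroom, while RPTR starts at the IV
                 * so the response lands back in place.
                 */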
                if (likely(iv_len)) {
                        uint64_t *dest = (uint64_t *)((uint8_t *)offset_vaddr +
                                                      ROC_SE_OFF_CTRL_LEN);
                        uint64_t *src = fc_params->iv_buf;
                        dest[0] = src[0];
                        dest[1] = src[1];
                }
        } else {
                void *m_vaddr = fc_params->meta_buf.vaddr;
                uint32_t i, g_size_bytes, s_size_bytes;
                struct roc_se_sglist_comp *gather_comp;
                struct roc_se_sglist_comp *scatter_comp;
                uint8_t *in_buffer;

                /* This falls under strict SG mode */
                offset_vaddr = m_vaddr;
                size = ROC_SE_OFF_CTRL_LEN + iv_len;

                m_vaddr = (uint8_t *)m_vaddr + size;

                cpt_inst_w4.s.opcode_major |= (uint64_t)ROC_SE_DMA_MODE;

                if (likely(iv_len)) {
                        uint64_t *dest = (uint64_t *)((uint8_t *)offset_vaddr +
                                                      ROC_SE_OFF_CTRL_LEN);
                        uint64_t *src = fc_params->iv_buf;
                        dest[0] = src[0];
                        dest[1] = src[1];
                }

                /* DPTR has SG list */
                in_buffer = m_vaddr;

                ((uint16_t *)in_buffer)[0] = 0;
                ((uint16_t *)in_buffer)[1] = 0;

                /* TODO Add error check if space will be sufficient */
                gather_comp =
                        (struct roc_se_sglist_comp *)((uint8_t *)m_vaddr + 8);

                /* Input gather list */
                i = 0;

                /* Offset control word that includes iv */
                i = fill_sg_comp(gather_comp, i, (uint64_t)offset_vaddr,
                                 ROC_SE_OFF_CTRL_LEN + iv_len);

                /* Add input data */
                size = inputlen - iv_len;
                if (size) {
                        uint32_t aad_offset = aad_len ? passthrough_len : 0;

                        if (unlikely(flags & ROC_SE_SINGLE_BUF_INPLACE)) {
                                i = fill_sg_comp_from_buf_min(
                                        gather_comp, i, fc_params->bufs, &size);
                        } else {
                                i = fill_sg_comp_from_iov(
                                        gather_comp, i, fc_params->src_iov, 0,
                                        &size, aad_buf, aad_offset);
                        }

                        if (unlikely(size)) {
                                plt_dp_err("Insufficient buffer space, size %d needed",
                                           size);
                                return -1;
                        }
                }
                ((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
                g_size_bytes =
                        ((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);

                /*
                 * Output Scatter list
                 */
                i = 0;
                scatter_comp =
                        (struct roc_se_sglist_comp *)((uint8_t *)gather_comp +
                                                      g_size_bytes);

                /* Add IV */
                if (likely(iv_len)) {
                        i = fill_sg_comp(scatter_comp, i,
                                         (uint64_t)offset_vaddr +
                                                 ROC_SE_OFF_CTRL_LEN,
                                         iv_len);
                }

                /* Output data or output data + digest */
                if (unlikely(flags & ROC_SE_VALID_MAC_BUF)) {
                        size = outputlen - iv_len - mac_len;
                        if (size) {
                                uint32_t aad_offset =
                                        aad_len ? passthrough_len : 0;

                                if (unlikely(flags &
                                             ROC_SE_SINGLE_BUF_INPLACE)) {
                                        i = fill_sg_comp_from_buf_min(
                                                scatter_comp, i,
                                                fc_params->bufs, &size);
                                } else {
                                        i = fill_sg_comp_from_iov(
                                                scatter_comp, i,
                                                fc_params->dst_iov, 0, &size,
                                                aad_buf, aad_offset);
                                }
                                if (unlikely(size)) {
                                        plt_dp_err("Insufficient buffer space, size %d needed",
                                                   size);
                                        return -1;
                                }
                        }
                        /* mac data */
                        if (mac_len) {
                                i = fill_sg_comp_from_buf(scatter_comp, i,
                                                          &fc_params->mac_buf);
                        }
                } else {
                        /* Output including mac */
                        size = outputlen - iv_len;
                        if (size) {
                                uint32_t aad_offset =
                                        aad_len ? passthrough_len : 0;

                                if (unlikely(flags &
                                             ROC_SE_SINGLE_BUF_INPLACE)) {
                                        i = fill_sg_comp_from_buf_min(
                                                scatter_comp, i,
                                                fc_params->bufs, &size);
                                } else {
                                        i = fill_sg_comp_from_iov(
                                                scatter_comp, i,
                                                fc_params->dst_iov, 0, &size,
                                                aad_buf, aad_offset);
                                }
                                if (unlikely(size)) {
                                        plt_dp_err("Insufficient buffer space, size %d needed",
                                                   size);
                                        return -1;
                                }
                        }
                }
                ((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
                s_size_bytes =
                        ((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);

                size = g_size_bytes + s_size_bytes + ROC_SE_SG_LIST_HDR_SIZE;

                /* This is DPTR len in case of SG mode */
                cpt_inst_w4.s.dlen = size;

                inst->dptr = (uint64_t)in_buffer;
        }

        if (unlikely((encr_offset >> 16) || (iv_offset >> 8) ||
                     (auth_offset >> 8))) {
                plt_dp_err("Offset not supported");
                plt_dp_err("enc_offset: %d", encr_offset);
                plt_dp_err("iv_offset : %d", iv_offset);
                plt_dp_err("auth_offset: %d", auth_offset);
                return -1;
        }

        *(uint64_t *)offset_vaddr = rte_cpu_to_be_64(
                ((uint64_t)encr_offset << 16) | ((uint64_t)iv_offset << 8) |
                ((uint64_t)auth_offset));
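        /*
         * Offset control word layout (from the expression above), stored
         * big-endian: bits [31:16] = encr_offset, bits [15:8] = iv_offset,
         * bits [7:0] = auth_offset. The >> 16 and >> 8 checks above reject
         * offsets that would overflow these fields.
         */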
        inst->w4.u64 = cpt_inst_w4.u64;

        return 0;
}
static __rte_always_inline int
cpt_dec_hmac_prep(uint32_t flags, uint64_t d_offs, uint64_t d_lens,
                  struct roc_se_fc_params *fc_params, struct cpt_inst_s *inst)
{
        uint32_t iv_offset = 0, size;
        int32_t inputlen, outputlen, enc_dlen, auth_dlen;
        struct roc_se_ctx *se_ctx;
        int32_t hash_type, mac_len;
        uint8_t iv_len = 16;
        struct roc_se_buf_ptr *aad_buf = NULL;
        uint32_t encr_offset, auth_offset;
        uint32_t encr_data_len, auth_data_len, aad_len = 0;
        uint32_t passthrough_len = 0;
        union cpt_inst_w4 cpt_inst_w4;
        void *offset_vaddr;
        uint8_t op_minor;

        encr_offset = ROC_SE_ENCR_OFFSET(d_offs);
        auth_offset = ROC_SE_AUTH_OFFSET(d_offs);
        encr_data_len = ROC_SE_ENCR_DLEN(d_lens);
        auth_data_len = ROC_SE_AUTH_DLEN(d_lens);

        if (unlikely(flags & ROC_SE_VALID_AAD_BUF)) {
                /* We don't support both AAD and auth data separately */
                auth_data_len = 0;
                auth_offset = 0;
                aad_len = fc_params->aad_buf.size;
                aad_buf = &fc_params->aad_buf;
        }

        se_ctx = fc_params->ctx_buf.vaddr;
        hash_type = se_ctx->hash_type;
        mac_len = se_ctx->mac_len;
        op_minor = se_ctx->template_w4.s.opcode_minor;

        if (unlikely(!(flags & ROC_SE_VALID_IV_BUF))) {
                iv_len = 0;
                iv_offset = ROC_SE_ENCR_IV_OFFSET(d_offs);
        }

        if (unlikely(flags & ROC_SE_VALID_AAD_BUF)) {
                /*
                 * When AAD is given, data above encr_offset is pass through.
                 * Since AAD is given as a separate pointer and not as an
                 * offset, this is a special case: the input data is
                 * fragmented into passthrough + encr_data and the AAD is
                 * inserted in between.
                 */
                if (hash_type != ROC_SE_GMAC_TYPE) {
                        passthrough_len = encr_offset;
                        auth_offset = passthrough_len + iv_len;
                        encr_offset = passthrough_len + aad_len + iv_len;
                        auth_data_len = aad_len + encr_data_len;
                } else {
                        passthrough_len = 16 + aad_len;
                        auth_offset = passthrough_len + iv_len;
                        auth_data_len = aad_len;
                }
        } else {
                encr_offset += iv_len;
                auth_offset += iv_len;
        }

        /* Decryption */
        cpt_inst_w4.s.opcode_major = ROC_SE_MAJOR_OP_FC;
        cpt_inst_w4.s.opcode_minor = ROC_SE_FC_MINOR_OP_DECRYPT;
        cpt_inst_w4.s.opcode_minor |= (uint64_t)op_minor;

        if (hash_type == ROC_SE_GMAC_TYPE) {
                encr_offset = 0;
                encr_data_len = 0;
        }

        enc_dlen = encr_offset + encr_data_len;
        auth_dlen = auth_offset + auth_data_len;

        if (auth_dlen > enc_dlen) {
                inputlen = auth_dlen + mac_len;
                outputlen = auth_dlen;
        } else {
                inputlen = enc_dlen + mac_len;
                outputlen = enc_dlen;
        }

        if (op_minor & ROC_SE_FC_MINOR_OP_HMAC_FIRST)
                outputlen = inputlen = enc_dlen;

        cpt_inst_w4.s.param1 = encr_data_len;
        cpt_inst_w4.s.param2 = auth_data_len;

        /*
         * On cn9k and cn10k the IV and offset control word cannot be
         * carried in the instruction and must be part of the data buffer,
         * so Direct mode is used only when headroom is available.
         */
        if (likely((flags & ROC_SE_SINGLE_BUF_INPLACE) &&
                   (flags & ROC_SE_SINGLE_BUF_HEADROOM))) {
                void *dm_vaddr = fc_params->bufs[0].vaddr;

                /* Use Direct mode */

                offset_vaddr =
                        (uint8_t *)dm_vaddr - ROC_SE_OFF_CTRL_LEN - iv_len;
                inst->dptr = (uint64_t)offset_vaddr;

                /* RPTR should just exclude offset control word */
                inst->rptr = (uint64_t)dm_vaddr - iv_len;

                cpt_inst_w4.s.dlen = inputlen + ROC_SE_OFF_CTRL_LEN;

                if (likely(iv_len)) {
                        uint64_t *dest = (uint64_t *)((uint8_t *)offset_vaddr +
                                                      ROC_SE_OFF_CTRL_LEN);
                        uint64_t *src = fc_params->iv_buf;
                        dest[0] = src[0];
                        dest[1] = src[1];
                }
        } else {
                void *m_vaddr = fc_params->meta_buf.vaddr;
                uint32_t g_size_bytes, s_size_bytes;
                struct roc_se_sglist_comp *gather_comp;
                struct roc_se_sglist_comp *scatter_comp;
                uint8_t *in_buffer;
                uint32_t i = 0;

                /* This falls under strict SG mode */
                offset_vaddr = m_vaddr;
                size = ROC_SE_OFF_CTRL_LEN + iv_len;

                m_vaddr = (uint8_t *)m_vaddr + size;

                cpt_inst_w4.s.opcode_major |= (uint64_t)ROC_SE_DMA_MODE;

                if (likely(iv_len)) {
                        uint64_t *dest = (uint64_t *)((uint8_t *)offset_vaddr +
                                                      ROC_SE_OFF_CTRL_LEN);
                        uint64_t *src = fc_params->iv_buf;
                        dest[0] = src[0];
                        dest[1] = src[1];
                }

                /* DPTR has SG list */
                in_buffer = m_vaddr;

                ((uint16_t *)in_buffer)[0] = 0;
                ((uint16_t *)in_buffer)[1] = 0;

                /* TODO Add error check if space will be sufficient */
                gather_comp =
                        (struct roc_se_sglist_comp *)((uint8_t *)m_vaddr + 8);

                /* Input gather list */
                i = 0;

                /* Offset control word that includes iv */
                i = fill_sg_comp(gather_comp, i, (uint64_t)offset_vaddr,
                                 ROC_SE_OFF_CTRL_LEN + iv_len);

                /* Add input data */
                if (flags & ROC_SE_VALID_MAC_BUF) {
                        size = inputlen - iv_len - mac_len;
                        if (size) {
                                /* input data only */
                                if (unlikely(flags &
                                             ROC_SE_SINGLE_BUF_INPLACE)) {
                                        i = fill_sg_comp_from_buf_min(
                                                gather_comp, i, fc_params->bufs,
                                                &size);
                                } else {
                                        uint32_t aad_offset =
                                                aad_len ? passthrough_len : 0;

                                        i = fill_sg_comp_from_iov(
                                                gather_comp, i,
                                                fc_params->src_iov, 0, &size,
                                                aad_buf, aad_offset);
                                }
                                if (unlikely(size)) {
                                        plt_dp_err("Insufficient buffer space, size %d needed",
                                                   size);
                                        return -1;
                                }
                        }

                        /* mac data */
                        if (mac_len) {
                                i = fill_sg_comp_from_buf(gather_comp, i,
                                                          &fc_params->mac_buf);
                        }
                } else {
                        /* input data + mac */
                        size = inputlen - iv_len;
                        if (size) {
                                if (unlikely(flags &
                                             ROC_SE_SINGLE_BUF_INPLACE)) {
                                        i = fill_sg_comp_from_buf_min(
                                                gather_comp, i, fc_params->bufs,
                                                &size);
                                } else {
                                        uint32_t aad_offset =
                                                aad_len ? passthrough_len : 0;

                                        if (unlikely(!fc_params->src_iov)) {
                                                plt_dp_err("Bad input args");
                                                return -1;
                                        }

                                        i = fill_sg_comp_from_iov(
                                                gather_comp, i,
                                                fc_params->src_iov, 0, &size,
                                                aad_buf, aad_offset);
                                }

                                if (unlikely(size)) {
                                        plt_dp_err("Insufficient buffer space, size %d needed",
                                                   size);
                                        return -1;
                                }
                        }
                }
                ((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
                g_size_bytes =
                        ((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);

                /*
                 * Output Scatter List
                 */
                i = 0;
                scatter_comp =
                        (struct roc_se_sglist_comp *)((uint8_t *)gather_comp +
                                                      g_size_bytes);

                /* Add iv */
                if (iv_len) {
                        i = fill_sg_comp(scatter_comp, i,
                                         (uint64_t)offset_vaddr +
                                                 ROC_SE_OFF_CTRL_LEN,
                                         iv_len);
                }

                /* Add output data */
                size = outputlen - iv_len;
                if (size) {
                        if (unlikely(flags & ROC_SE_SINGLE_BUF_INPLACE)) {
                                /* handle single buffer here */
                                i = fill_sg_comp_from_buf_min(scatter_comp, i,
                                                              fc_params->bufs,
                                                              &size);
                        } else {
                                uint32_t aad_offset =
                                        aad_len ? passthrough_len : 0;

                                if (unlikely(!fc_params->dst_iov)) {
                                        plt_dp_err("Bad input args");
                                        return -1;
                                }

                                i = fill_sg_comp_from_iov(
                                        scatter_comp, i, fc_params->dst_iov, 0,
                                        &size, aad_buf, aad_offset);
                        }

                        if (unlikely(size)) {
                                plt_dp_err("Insufficient buffer space, size %d needed",
                                           size);
                                return -1;
                        }
                }

                ((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
                s_size_bytes =
                        ((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);

                size = g_size_bytes + s_size_bytes + ROC_SE_SG_LIST_HDR_SIZE;

                /* This is DPTR len in case of SG mode */
                cpt_inst_w4.s.dlen = size;

                inst->dptr = (uint64_t)in_buffer;
        }

        if (unlikely((encr_offset >> 16) || (iv_offset >> 8) ||
                     (auth_offset >> 8))) {
                plt_dp_err("Offset not supported");
                plt_dp_err("enc_offset: %d", encr_offset);
                plt_dp_err("iv_offset : %d", iv_offset);
                plt_dp_err("auth_offset: %d", auth_offset);
                return -1;
        }

        *(uint64_t *)offset_vaddr = rte_cpu_to_be_64(
                ((uint64_t)encr_offset << 16) | ((uint64_t)iv_offset << 8) |
                ((uint64_t)auth_offset));

        inst->w4.u64 = cpt_inst_w4.u64;

        return 0;
}
static __rte_always_inline int
cpt_zuc_snow3g_prep(uint32_t req_flags, uint64_t d_offs, uint64_t d_lens,
                    struct roc_se_fc_params *params, struct cpt_inst_s *inst)
{
        uint32_t size;
        int32_t inputlen, outputlen;
        struct roc_se_ctx *se_ctx;
        uint32_t mac_len = 0;
        uint8_t pdcp_alg_type;
        uint32_t encr_offset, auth_offset;
        uint32_t encr_data_len, auth_data_len;
        int flags, iv_len;
        uint64_t offset_ctrl;
        uint64_t *offset_vaddr;
        uint8_t *iv_s;
        uint8_t pack_iv = 0;
        union cpt_inst_w4 cpt_inst_w4;

        se_ctx = params->ctx_buf.vaddr;
        flags = se_ctx->zsk_flags;
        mac_len = se_ctx->mac_len;
        pdcp_alg_type = se_ctx->pdcp_alg_type;

        cpt_inst_w4.s.opcode_major = ROC_SE_MAJOR_OP_ZUC_SNOW3G;
        cpt_inst_w4.s.opcode_minor = se_ctx->template_w4.s.opcode_minor;

        if (flags == 0x1) {
                /* EIA3 or UIA2 */
                iv_s = params->auth_iv_buf;
                iv_len = params->auth_iv_len;

                if (iv_len == 25) {
                        iv_len -= 2;
                        pack_iv = 1;
                }

                /*
                 * Microcode expects offsets in bytes
                 * TODO: Rounding off
                 */
                auth_data_len = ROC_SE_AUTH_DLEN(d_lens);

                auth_offset = ROC_SE_AUTH_OFFSET(d_offs);
                auth_offset = auth_offset / 8;

                /* consider iv len */
                auth_offset += iv_len;

                inputlen = auth_offset + (RTE_ALIGN(auth_data_len, 8) / 8);
                outputlen = mac_len;

                offset_ctrl = rte_cpu_to_be_64((uint64_t)auth_offset);

                encr_data_len = 0;
                encr_offset = 0;
        } else {
                /* EEA3 or UEA2 */
                iv_s = params->iv_buf;
                iv_len = params->cipher_iv_len;

                if (iv_len == 25) {
                        iv_len -= 2;
                        pack_iv = 1;
                }

                /*
                 * Microcode expects offsets in bytes
                 * TODO: Rounding off
                 */
                encr_data_len = ROC_SE_ENCR_DLEN(d_lens);

                encr_offset = ROC_SE_ENCR_OFFSET(d_offs);
                encr_offset = encr_offset / 8;
                /* consider iv len */
                encr_offset += iv_len;

                inputlen = encr_offset + (RTE_ALIGN(encr_data_len, 8) / 8);
                outputlen = inputlen;

                /* iv offset is 0 */
                offset_ctrl = rte_cpu_to_be_64((uint64_t)encr_offset << 16);
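                /*
                 * PDCP offset control sketch: the auth-only path above
                 * carries the byte offset in bits [7:0], while this cipher
                 * path packs it into bits [31:16]; either way the word is
                 * stored big-endian in front of the IV.
                 */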
                auth_data_len = 0;
                auth_offset = 0;
        }

        if (unlikely((encr_offset >> 16) || (auth_offset >> 8))) {
                plt_dp_err("Offset not supported");
                plt_dp_err("enc_offset: %d", encr_offset);
                plt_dp_err("auth_offset: %d", auth_offset);
                return -1;
        }

        /*
         * GP op header, lengths are expected in bits.
         */
        cpt_inst_w4.s.param1 = encr_data_len;
        cpt_inst_w4.s.param2 = auth_data_len;

        /*
         * On cn9k and cn10k the IV and offset control word cannot be
         * carried in the instruction and must be part of the data buffer,
         * so Direct mode is used only when headroom is available.
         */
        if (likely((req_flags & ROC_SE_SINGLE_BUF_INPLACE) &&
                   (req_flags & ROC_SE_SINGLE_BUF_HEADROOM))) {
                void *dm_vaddr = params->bufs[0].vaddr;

                /* Use Direct mode */

                offset_vaddr = (uint64_t *)((uint8_t *)dm_vaddr -
                                            ROC_SE_OFF_CTRL_LEN - iv_len);

                inst->dptr = (uint64_t)offset_vaddr;
                /* RPTR should just exclude offset control word */
                inst->rptr = (uint64_t)dm_vaddr - iv_len;

                cpt_inst_w4.s.dlen = inputlen + ROC_SE_OFF_CTRL_LEN;

                uint8_t *iv_d = ((uint8_t *)offset_vaddr + ROC_SE_OFF_CTRL_LEN);
                pdcp_iv_copy(iv_d, iv_s, pdcp_alg_type, pack_iv);

                *offset_vaddr = offset_ctrl;
        } else {
                void *m_vaddr = params->meta_buf.vaddr;
                uint32_t i, g_size_bytes, s_size_bytes;
                struct roc_se_sglist_comp *gather_comp;
                struct roc_se_sglist_comp *scatter_comp;
                uint8_t *in_buffer;
                uint8_t *iv_d;

                /* save space for iv */
                offset_vaddr = m_vaddr;

                m_vaddr = (uint8_t *)m_vaddr + ROC_SE_OFF_CTRL_LEN +
                          RTE_ALIGN_CEIL(iv_len, 8);

                cpt_inst_w4.s.opcode_major |= (uint64_t)ROC_SE_DMA_MODE;

                /* DPTR has SG list */
                in_buffer = m_vaddr;

                ((uint16_t *)in_buffer)[0] = 0;
                ((uint16_t *)in_buffer)[1] = 0;

                /* TODO Add error check if space will be sufficient */
                gather_comp =
                        (struct roc_se_sglist_comp *)((uint8_t *)m_vaddr + 8);

                /* Input gather list */
                i = 0;

                /* Offset control word followed by iv */
                i = fill_sg_comp(gather_comp, i, (uint64_t)offset_vaddr,
                                 ROC_SE_OFF_CTRL_LEN + iv_len);

                /* iv offset is 0 */
                *offset_vaddr = offset_ctrl;

                iv_d = ((uint8_t *)offset_vaddr + ROC_SE_OFF_CTRL_LEN);
                pdcp_iv_copy(iv_d, iv_s, pdcp_alg_type, pack_iv);

                /* input data */
                size = inputlen - iv_len;
                if (size) {
                        i = fill_sg_comp_from_iov(gather_comp, i,
                                                  params->src_iov, 0, &size,
                                                  NULL, 0);
                        if (unlikely(size)) {
                                plt_dp_err("Insufficient buffer space, size %d needed",
                                           size);
                                return -1;
                        }
                }
                ((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
                g_size_bytes =
                        ((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);

                /*
                 * Output Scatter List
                 */
                i = 0;
                scatter_comp =
                        (struct roc_se_sglist_comp *)((uint8_t *)gather_comp +
                                                      g_size_bytes);

                /* IV in SLIST only for EEA3 & UEA2 */
                iv_len = 0;
                if (!flags)
                        iv_len = params->cipher_iv_len;
                if (iv_len) {
                        i = fill_sg_comp(scatter_comp, i,
                                         (uint64_t)offset_vaddr +
                                                 ROC_SE_OFF_CTRL_LEN,
                                         iv_len);
                }

                /* Add output data */
                if (req_flags & ROC_SE_VALID_MAC_BUF) {
                        size = outputlen - iv_len - mac_len;
                        if (size) {
                                i = fill_sg_comp_from_iov(scatter_comp, i,
                                                          params->dst_iov, 0,
                                                          &size, NULL, 0);
                                if (unlikely(size)) {
                                        plt_dp_err("Insufficient buffer space, size %d needed",
                                                   size);
                                        return -1;
                                }
                        }

                        /* mac data */
                        if (mac_len) {
                                i = fill_sg_comp_from_buf(scatter_comp, i,
                                                          &params->mac_buf);
                        }
                } else {
                        /* Output including mac */
                        size = outputlen - iv_len;
                        if (size) {
                                i = fill_sg_comp_from_iov(scatter_comp, i,
                                                          params->dst_iov, 0,
                                                          &size, NULL, 0);
                                if (unlikely(size)) {
                                        plt_dp_err("Insufficient buffer space, size %d needed",
                                                   size);
                                        return -1;
                                }
                        }
                }
                ((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
                s_size_bytes =
                        ((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);

                size = g_size_bytes + s_size_bytes + ROC_SE_SG_LIST_HDR_SIZE;

                /* This is DPTR len in case of SG mode */
                cpt_inst_w4.s.dlen = size;

                inst->dptr = (uint64_t)in_buffer;
        }

        inst->w4.u64 = cpt_inst_w4.u64;

        return 0;
}
static __rte_always_inline int
cpt_kasumi_enc_prep(uint32_t req_flags, uint64_t d_offs, uint64_t d_lens,
                    struct roc_se_fc_params *params, struct cpt_inst_s *inst)
{
        void *m_vaddr = params->meta_buf.vaddr;
        uint32_t size;
        int32_t inputlen = 0, outputlen = 0;
        struct roc_se_ctx *se_ctx;
        uint32_t mac_len = 0;
        uint8_t i = 0;
        uint32_t encr_offset, auth_offset;
        uint32_t encr_data_len, auth_data_len;
        int flags;
        uint8_t *iv_s, *iv_d, iv_len = 8;
        uint8_t dir = 0;
        uint64_t *offset_vaddr;
        union cpt_inst_w4 cpt_inst_w4;
        uint8_t *in_buffer;
        uint32_t g_size_bytes, s_size_bytes;
        struct roc_se_sglist_comp *gather_comp;
        struct roc_se_sglist_comp *scatter_comp;

        encr_offset = ROC_SE_ENCR_OFFSET(d_offs) / 8;
        auth_offset = ROC_SE_AUTH_OFFSET(d_offs) / 8;
        encr_data_len = ROC_SE_ENCR_DLEN(d_lens);
        auth_data_len = ROC_SE_AUTH_DLEN(d_lens);

        se_ctx = params->ctx_buf.vaddr;
        flags = se_ctx->zsk_flags;
        mac_len = se_ctx->mac_len;

        if (flags == 0x0)
                iv_s = params->iv_buf;
        else
                iv_s = params->auth_iv_buf;

        dir = iv_s[8] & 0x1;

        cpt_inst_w4.u64 = 0;
        cpt_inst_w4.s.opcode_major = ROC_SE_MAJOR_OP_KASUMI | ROC_SE_DMA_MODE;

        /* indicates ECB/CBC, direction, ctx from cptr, iv from dptr */
        cpt_inst_w4.s.opcode_minor = ((1 << 6) | (se_ctx->k_ecb << 5) |
                                      (dir << 4) | (0 << 3) | (flags & 0x7));
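        /*
         * Minor opcode bit layout (as composed above): bit 6 = IV taken
         * from DPTR, bit 5 = ECB(1)/CBC(0) per se_ctx->k_ecb, bit 4 =
         * direction, bit 3 = 0 (context from CPTR), bits 2:0 = zsk_flags.
         */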
        /*
         * GP op header, lengths are expected in bits.
         */
        cpt_inst_w4.s.param1 = encr_data_len;
        cpt_inst_w4.s.param2 = auth_data_len;

        /* consider iv len */
        if (flags == 0x0) {
                encr_offset += iv_len;
                auth_offset += iv_len;
        }

        /* save space for offset ctrl and iv */
        offset_vaddr = m_vaddr;

        m_vaddr = (uint8_t *)m_vaddr + ROC_SE_OFF_CTRL_LEN + iv_len;

        /* DPTR has SG list */
        in_buffer = m_vaddr;

        ((uint16_t *)in_buffer)[0] = 0;
        ((uint16_t *)in_buffer)[1] = 0;

        /* TODO Add error check if space will be sufficient */
        gather_comp = (struct roc_se_sglist_comp *)((uint8_t *)m_vaddr + 8);

        /* Input gather list */
        i = 0;

        /* Offset control word followed by iv */
        if (flags == 0x0) {
                inputlen = encr_offset + (RTE_ALIGN(encr_data_len, 8) / 8);
                outputlen = inputlen;
                /* iv offset is 0 */
                *offset_vaddr = rte_cpu_to_be_64((uint64_t)encr_offset << 16);
                if (unlikely((encr_offset >> 16))) {
                        plt_dp_err("Offset not supported");
                        plt_dp_err("enc_offset: %d", encr_offset);
                        return -1;
                }
        } else {
                inputlen = auth_offset + (RTE_ALIGN(auth_data_len, 8) / 8);
                outputlen = mac_len;
                /* iv offset is 0 */
                *offset_vaddr = rte_cpu_to_be_64((uint64_t)auth_offset);
                if (unlikely((auth_offset >> 8))) {
                        plt_dp_err("Offset not supported");
                        plt_dp_err("auth_offset: %d", auth_offset);
                        return -1;
                }
        }

        i = fill_sg_comp(gather_comp, i, (uint64_t)offset_vaddr,
                         ROC_SE_OFF_CTRL_LEN + iv_len);

        /* IV */
        iv_d = (uint8_t *)offset_vaddr + ROC_SE_OFF_CTRL_LEN;
        memcpy(iv_d, iv_s, iv_len);

        /* input data */
        size = inputlen - iv_len;
        if (size) {
                i = fill_sg_comp_from_iov(gather_comp, i, params->src_iov, 0,
                                          &size, NULL, 0);

                if (unlikely(size)) {
                        plt_dp_err("Insufficient buffer space, size %d needed",
                                   size);
                        return -1;
                }
        }
        ((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
        g_size_bytes = ((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);

        /*
         * Output Scatter List
         */
        i = 0;
        scatter_comp = (struct roc_se_sglist_comp *)((uint8_t *)gather_comp +
                                                     g_size_bytes);

        if (flags == 0x1) {
                /* IV in SLIST only for F8 */
                iv_len = 0;
        }

        /* IV */
        if (iv_len) {
                i = fill_sg_comp(scatter_comp, i,
                                 (uint64_t)offset_vaddr + ROC_SE_OFF_CTRL_LEN,
                                 iv_len);
        }

        /* Add output data */
        if (req_flags & ROC_SE_VALID_MAC_BUF) {
                size = outputlen - iv_len - mac_len;
                if (size) {
                        i = fill_sg_comp_from_iov(scatter_comp, i,
                                                  params->dst_iov, 0, &size,
                                                  NULL, 0);

                        if (unlikely(size)) {
                                plt_dp_err("Insufficient buffer space, size %d needed",
                                           size);
                                return -1;
                        }
                }

                /* mac data */
                if (mac_len) {
                        i = fill_sg_comp_from_buf(scatter_comp, i,
                                                  &params->mac_buf);
                }
        } else {
                /* Output including mac */
                size = outputlen - iv_len;
                if (size) {
                        i = fill_sg_comp_from_iov(scatter_comp, i,
                                                  params->dst_iov, 0, &size,
                                                  NULL, 0);

                        if (unlikely(size)) {
                                plt_dp_err("Insufficient buffer space, size %d needed",
                                           size);
                                return -1;
                        }
                }
        }
        ((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
        s_size_bytes = ((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);

        size = g_size_bytes + s_size_bytes + ROC_SE_SG_LIST_HDR_SIZE;

        /* This is DPTR len in case of SG mode */
        cpt_inst_w4.s.dlen = size;

        inst->dptr = (uint64_t)in_buffer;
        inst->w4.u64 = cpt_inst_w4.u64;

        return 0;
}
static __rte_always_inline int
cpt_kasumi_dec_prep(uint64_t d_offs, uint64_t d_lens,
                    struct roc_se_fc_params *params, struct cpt_inst_s *inst)
{
        void *m_vaddr = params->meta_buf.vaddr;
        uint32_t size;
        int32_t inputlen = 0, outputlen;
        struct roc_se_ctx *se_ctx;
        uint8_t i = 0, iv_len = 8;
        uint32_t encr_offset;
        uint32_t encr_data_len;
        int flags;
        uint8_t dir = 0;
        uint64_t *offset_vaddr;
        union cpt_inst_w4 cpt_inst_w4;
        uint8_t *in_buffer;
        uint32_t g_size_bytes, s_size_bytes;
        struct roc_se_sglist_comp *gather_comp;
        struct roc_se_sglist_comp *scatter_comp;

        encr_offset = ROC_SE_ENCR_OFFSET(d_offs) / 8;
        encr_data_len = ROC_SE_ENCR_DLEN(d_lens);

        se_ctx = params->ctx_buf.vaddr;
        flags = se_ctx->zsk_flags;

        cpt_inst_w4.u64 = 0;
        cpt_inst_w4.s.opcode_major = ROC_SE_MAJOR_OP_KASUMI | ROC_SE_DMA_MODE;

        /* indicates ECB/CBC, direction, ctx from cptr, iv from dptr */
        cpt_inst_w4.s.opcode_minor = ((1 << 6) | (se_ctx->k_ecb << 5) |
                                      (dir << 4) | (0 << 3) | (flags & 0x7));

        /*
         * GP op header, lengths are expected in bits.
         */
        cpt_inst_w4.s.param1 = encr_data_len;

        /* consider iv len */
        encr_offset += iv_len;

        inputlen = encr_offset + (RTE_ALIGN(encr_data_len, 8) / 8);
        outputlen = inputlen;

        /* save space for offset ctrl & iv */
        offset_vaddr = m_vaddr;

        m_vaddr = (uint8_t *)m_vaddr + ROC_SE_OFF_CTRL_LEN + iv_len;

        /* DPTR has SG list */
        in_buffer = m_vaddr;

        ((uint16_t *)in_buffer)[0] = 0;
        ((uint16_t *)in_buffer)[1] = 0;

        /* TODO Add error check if space will be sufficient */
        gather_comp = (struct roc_se_sglist_comp *)((uint8_t *)m_vaddr + 8);

        /* Input gather list */
        i = 0;

        /* Offset control word followed by iv */
        *offset_vaddr = rte_cpu_to_be_64((uint64_t)encr_offset << 16);
        if (unlikely((encr_offset >> 16))) {
                plt_dp_err("Offset not supported");
                plt_dp_err("enc_offset: %d", encr_offset);
                return -1;
        }

        i = fill_sg_comp(gather_comp, i, (uint64_t)offset_vaddr,
                         ROC_SE_OFF_CTRL_LEN + iv_len);

        /* IV */
        memcpy((uint8_t *)offset_vaddr + ROC_SE_OFF_CTRL_LEN, params->iv_buf,
               iv_len);

        /* Add input data */
        size = inputlen - iv_len;
        if (size) {
                i = fill_sg_comp_from_iov(gather_comp, i, params->src_iov, 0,
                                          &size, NULL, 0);
                if (unlikely(size)) {
                        plt_dp_err("Insufficient buffer space, size %d needed",
                                   size);
                        return -1;
                }
        }
        ((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
        g_size_bytes = ((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);

        /*
         * Output Scatter List
         */
        i = 0;
        scatter_comp = (struct roc_se_sglist_comp *)((uint8_t *)gather_comp +
                                                     g_size_bytes);

        /* IV */
        i = fill_sg_comp(scatter_comp, i,
                         (uint64_t)offset_vaddr + ROC_SE_OFF_CTRL_LEN, iv_len);

        /* Add output data */
        size = outputlen - iv_len;
        if (size) {
                i = fill_sg_comp_from_iov(scatter_comp, i, params->dst_iov, 0,
                                          &size, NULL, 0);
                if (unlikely(size)) {
                        plt_dp_err("Insufficient buffer space, size %d needed",
                                   size);
                        return -1;
                }
        }
        ((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
        s_size_bytes = ((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);

        size = g_size_bytes + s_size_bytes + ROC_SE_SG_LIST_HDR_SIZE;

        /* This is DPTR len in case of SG mode */
        cpt_inst_w4.s.dlen = size;

        inst->dptr = (uint64_t)in_buffer;
        inst->w4.u64 = cpt_inst_w4.u64;

        return 0;
}
static __rte_always_inline int
cpt_fc_dec_hmac_prep(uint32_t flags, uint64_t d_offs, uint64_t d_lens,
                     struct roc_se_fc_params *fc_params,
                     struct cpt_inst_s *inst)
{
        struct roc_se_ctx *ctx = fc_params->ctx_buf.vaddr;
        uint8_t fc_type;
        int ret = -1;

        fc_type = ctx->fc_type;

        if (likely(fc_type == ROC_SE_FC_GEN)) {
                ret = cpt_dec_hmac_prep(flags, d_offs, d_lens, fc_params, inst);
        } else if (fc_type == ROC_SE_PDCP) {
                ret = cpt_zuc_snow3g_prep(flags, d_offs, d_lens, fc_params,
                                          inst);
        } else if (fc_type == ROC_SE_KASUMI) {
                ret = cpt_kasumi_dec_prep(d_offs, d_lens, fc_params, inst);
        }

        /*
         * For the AUTH_ONLY case the MC only supports digest generation;
         * verification should be done in software by memcmp().
         */

        return ret;
}
static __rte_always_inline int
cpt_fc_enc_hmac_prep(uint32_t flags, uint64_t d_offs, uint64_t d_lens,
                     struct roc_se_fc_params *fc_params,
                     struct cpt_inst_s *inst)
{
        struct roc_se_ctx *ctx = fc_params->ctx_buf.vaddr;
        uint8_t fc_type;
        int ret = -1;

        fc_type = ctx->fc_type;

        if (likely(fc_type == ROC_SE_FC_GEN)) {
                ret = cpt_enc_hmac_prep(flags, d_offs, d_lens, fc_params, inst);
        } else if (fc_type == ROC_SE_PDCP) {
                ret = cpt_zuc_snow3g_prep(flags, d_offs, d_lens, fc_params,
                                          inst);
        } else if (fc_type == ROC_SE_KASUMI) {
                ret = cpt_kasumi_enc_prep(flags, d_offs, d_lens, fc_params,
                                          inst);
        } else if (fc_type == ROC_SE_HASH_HMAC) {
                ret = cpt_digest_gen_prep(flags, d_lens, fc_params, inst);
        }

        return ret;
}
static __rte_always_inline int
fill_sess_aead(struct rte_crypto_sym_xform *xform, struct cnxk_se_sess *sess)
{
        struct rte_crypto_aead_xform *aead_form;
        roc_se_cipher_type enc_type = 0; /* NULL Cipher type */
        roc_se_auth_type auth_type = 0;  /* NULL Auth type */
        uint32_t cipher_key_len = 0;
        uint8_t aes_gcm = 0;
        aead_form = &xform->aead;

        if (aead_form->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) {
                sess->cpt_op |= ROC_SE_OP_CIPHER_ENCRYPT;
                sess->cpt_op |= ROC_SE_OP_AUTH_GENERATE;
        } else if (aead_form->op == RTE_CRYPTO_AEAD_OP_DECRYPT) {
                sess->cpt_op |= ROC_SE_OP_CIPHER_DECRYPT;
                sess->cpt_op |= ROC_SE_OP_AUTH_VERIFY;
        } else {
                plt_dp_err("Unknown aead operation");
                return -1;
        }
        switch (aead_form->algo) {
        case RTE_CRYPTO_AEAD_AES_GCM:
                enc_type = ROC_SE_AES_GCM;
                cipher_key_len = 16;
                aes_gcm = 1;
                break;
        case RTE_CRYPTO_AEAD_AES_CCM:
                plt_dp_err("Crypto: Unsupported cipher algo %u",
                           aead_form->algo);
                return -1;
        case RTE_CRYPTO_AEAD_CHACHA20_POLY1305:
                enc_type = ROC_SE_CHACHA20;
                auth_type = ROC_SE_POLY1305;
                cipher_key_len = 32;
                sess->chacha_poly = 1;
                break;
        default:
                plt_dp_err("Crypto: Undefined cipher algo %u specified",
                           aead_form->algo);
                return -1;
        }
        if (aead_form->key.length < cipher_key_len) {
                plt_dp_err("Invalid cipher params keylen %u",
                           aead_form->key.length);
                return -1;
        }

        sess->aes_gcm = aes_gcm;
        sess->mac_len = aead_form->digest_length;
        sess->iv_offset = aead_form->iv.offset;
        sess->iv_length = aead_form->iv.length;
        sess->aad_length = aead_form->aad_length;

        if (unlikely(roc_se_ciph_key_set(&sess->roc_se_ctx, enc_type,
                                         aead_form->key.data,
                                         aead_form->key.length, NULL)))
                return -1;

        if (unlikely(roc_se_auth_key_set(&sess->roc_se_ctx, auth_type, NULL, 0,
                                         aead_form->digest_length)))
                return -1;

        return 0;
}
static __rte_always_inline int
fill_sess_cipher(struct rte_crypto_sym_xform *xform, struct cnxk_se_sess *sess)
{
        struct rte_crypto_cipher_xform *c_form;
        roc_se_cipher_type enc_type = 0; /* NULL Cipher type */
        uint32_t cipher_key_len = 0;
        uint8_t zsk_flag = 0, aes_ctr = 0, is_null = 0;

        c_form = &xform->cipher;

        if (c_form->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
                sess->cpt_op |= ROC_SE_OP_CIPHER_ENCRYPT;
        else if (c_form->op == RTE_CRYPTO_CIPHER_OP_DECRYPT) {
                sess->cpt_op |= ROC_SE_OP_CIPHER_DECRYPT;
                if (xform->next != NULL &&
                    xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
                        /* Perform decryption followed by auth verify */
                        sess->roc_se_ctx.template_w4.s.opcode_minor =
                                ROC_SE_FC_MINOR_OP_HMAC_FIRST;
                }
        } else {
                plt_dp_err("Unknown cipher operation");
                return -1;
        }

        switch (c_form->algo) {
        case RTE_CRYPTO_CIPHER_AES_CBC:
                enc_type = ROC_SE_AES_CBC;
                cipher_key_len = 16;
                break;
        case RTE_CRYPTO_CIPHER_3DES_CBC:
                enc_type = ROC_SE_DES3_CBC;
                cipher_key_len = 24;
                break;
        case RTE_CRYPTO_CIPHER_DES_CBC:
                /* DES is implemented using 3DES in hardware */
                enc_type = ROC_SE_DES3_CBC;
                cipher_key_len = 8;
                break;
        case RTE_CRYPTO_CIPHER_AES_CTR:
                enc_type = ROC_SE_AES_CTR;
                cipher_key_len = 16;
                aes_ctr = 1;
                break;
        case RTE_CRYPTO_CIPHER_NULL:
                is_null = 1;
                break;
        case RTE_CRYPTO_CIPHER_KASUMI_F8:
                enc_type = ROC_SE_KASUMI_F8_ECB;
                cipher_key_len = 16;
                zsk_flag = ROC_SE_K_F8;
                break;
        case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
                enc_type = ROC_SE_SNOW3G_UEA2;
                cipher_key_len = 16;
                zsk_flag = ROC_SE_ZS_EA;
                break;
        case RTE_CRYPTO_CIPHER_ZUC_EEA3:
                enc_type = ROC_SE_ZUC_EEA3;
                cipher_key_len = c_form->key.length;
                zsk_flag = ROC_SE_ZS_EA;
                break;
        case RTE_CRYPTO_CIPHER_AES_XTS:
                enc_type = ROC_SE_AES_XTS;
                cipher_key_len = 16;
                break;
        case RTE_CRYPTO_CIPHER_3DES_ECB:
                enc_type = ROC_SE_DES3_ECB;
                cipher_key_len = 24;
                break;
        case RTE_CRYPTO_CIPHER_AES_ECB:
                enc_type = ROC_SE_AES_ECB;
                cipher_key_len = 16;
                break;
        case RTE_CRYPTO_CIPHER_3DES_CTR:
        case RTE_CRYPTO_CIPHER_AES_F8:
        case RTE_CRYPTO_CIPHER_ARC4:
                plt_dp_err("Crypto: Unsupported cipher algo %u", c_form->algo);
                return -1;
        default:
                plt_dp_err("Crypto: Undefined cipher algo %u specified",
                           c_form->algo);
                return -1;
        }

        if (c_form->key.length < cipher_key_len) {
                plt_dp_err("Invalid cipher params keylen %u",
                           c_form->key.length);
                return -1;
        }

        sess->zsk_flag = zsk_flag;
        sess->aes_gcm = 0;
        sess->aes_ctr = aes_ctr;
        sess->iv_offset = c_form->iv.offset;
        sess->iv_length = c_form->iv.length;
        sess->is_null = is_null;

        if (unlikely(roc_se_ciph_key_set(&sess->roc_se_ctx, enc_type,
                                         c_form->key.data, c_form->key.length,
                                         NULL)))
                return -1;

        if ((enc_type >= ROC_SE_ZUC_EEA3) && (enc_type <= ROC_SE_AES_CTR_EEA2))
                roc_se_ctx_swap(&sess->roc_se_ctx);

        return 0;
}
static __rte_always_inline int
fill_sess_auth(struct rte_crypto_sym_xform *xform, struct cnxk_se_sess *sess)
{
        struct rte_crypto_auth_xform *a_form;
        roc_se_auth_type auth_type = 0; /* NULL Auth type */
        uint8_t zsk_flag = 0, aes_gcm = 0, is_null = 0;

        if (xform->auth.algo == RTE_CRYPTO_AUTH_AES_GMAC)
                return fill_sess_gmac(xform, sess);

        if (xform->next != NULL &&
            xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
            xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
                /* Perform auth followed by encryption */
                sess->roc_se_ctx.template_w4.s.opcode_minor =
                        ROC_SE_FC_MINOR_OP_HMAC_FIRST;
        }

        a_form = &xform->auth;

        if (a_form->op == RTE_CRYPTO_AUTH_OP_VERIFY)
                sess->cpt_op |= ROC_SE_OP_AUTH_VERIFY;
        else if (a_form->op == RTE_CRYPTO_AUTH_OP_GENERATE)
                sess->cpt_op |= ROC_SE_OP_AUTH_GENERATE;
        else {
                plt_dp_err("Unknown auth operation");
                return -1;
        }

        switch (a_form->algo) {
        case RTE_CRYPTO_AUTH_SHA1_HMAC:
                /* Fall through */
        case RTE_CRYPTO_AUTH_SHA1:
                auth_type = ROC_SE_SHA1_TYPE;
                break;
        case RTE_CRYPTO_AUTH_SHA256_HMAC:
        case RTE_CRYPTO_AUTH_SHA256:
                auth_type = ROC_SE_SHA2_SHA256;
                break;
        case RTE_CRYPTO_AUTH_SHA512_HMAC:
        case RTE_CRYPTO_AUTH_SHA512:
                auth_type = ROC_SE_SHA2_SHA512;
                break;
        case RTE_CRYPTO_AUTH_AES_GMAC:
                auth_type = ROC_SE_GMAC_TYPE;
                aes_gcm = 1;
                break;
        case RTE_CRYPTO_AUTH_SHA224_HMAC:
        case RTE_CRYPTO_AUTH_SHA224:
                auth_type = ROC_SE_SHA2_SHA224;
                break;
        case RTE_CRYPTO_AUTH_SHA384_HMAC:
        case RTE_CRYPTO_AUTH_SHA384:
                auth_type = ROC_SE_SHA2_SHA384;
                break;
        case RTE_CRYPTO_AUTH_MD5_HMAC:
        case RTE_CRYPTO_AUTH_MD5:
                auth_type = ROC_SE_MD5_TYPE;
                break;
        case RTE_CRYPTO_AUTH_KASUMI_F9:
                auth_type = ROC_SE_KASUMI_F9_ECB;
                /*
                 * Indicate that direction needs to be taken out
                 * from the end of the source data.
                 */
                zsk_flag = ROC_SE_K_F9;
                break;
        case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
                auth_type = ROC_SE_SNOW3G_UIA2;
                zsk_flag = ROC_SE_ZS_IA;
                break;
        case RTE_CRYPTO_AUTH_ZUC_EIA3:
                auth_type = ROC_SE_ZUC_EIA3;
                zsk_flag = ROC_SE_ZS_IA;
                break;
        case RTE_CRYPTO_AUTH_NULL:
                auth_type = 0;
                is_null = 1;
                break;
        case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
        case RTE_CRYPTO_AUTH_AES_CMAC:
        case RTE_CRYPTO_AUTH_AES_CBC_MAC:
                plt_dp_err("Crypto: Unsupported hash algo %u", a_form->algo);
                return -1;
        default:
                plt_dp_err("Crypto: Undefined Hash algo %u specified",
                           a_form->algo);
                return -1;
        }

        sess->zsk_flag = zsk_flag;
        sess->aes_gcm = aes_gcm;
        sess->mac_len = a_form->digest_length;
        sess->is_null = is_null;

        sess->auth_iv_offset = a_form->iv.offset;
        sess->auth_iv_length = a_form->iv.length;

        if (unlikely(roc_se_auth_key_set(&sess->roc_se_ctx, auth_type,
                                         a_form->key.data, a_form->key.length,
                                         a_form->digest_length)))
                return -1;

        if ((auth_type >= ROC_SE_ZUC_EIA3) &&
            (auth_type <= ROC_SE_AES_CMAC_EIA2))
                roc_se_ctx_swap(&sess->roc_se_ctx);

        return 0;
}
static __rte_always_inline int
fill_sess_gmac(struct rte_crypto_sym_xform *xform, struct cnxk_se_sess *sess)
{
        struct rte_crypto_auth_xform *a_form;
        roc_se_cipher_type enc_type = 0; /* NULL Cipher type */
        roc_se_auth_type auth_type = 0;  /* NULL Auth type */

        a_form = &xform->auth;

        if (a_form->op == RTE_CRYPTO_AUTH_OP_GENERATE)
                sess->cpt_op |= ROC_SE_OP_ENCODE;
        else if (a_form->op == RTE_CRYPTO_AUTH_OP_VERIFY)
                sess->cpt_op |= ROC_SE_OP_DECODE;
        else {
                plt_dp_err("Unknown auth operation");
                return -1;
        }

        switch (a_form->algo) {
        case RTE_CRYPTO_AUTH_AES_GMAC:
                enc_type = ROC_SE_AES_GCM;
                auth_type = ROC_SE_GMAC_TYPE;
                break;
        default:
                plt_dp_err("Crypto: Undefined cipher algo %u specified",
                           a_form->algo);
                return -1;
        }

        sess->zsk_flag = 0;
        sess->aes_gcm = 0;
        sess->is_gmac = 1;
        sess->iv_offset = a_form->iv.offset;
        sess->iv_length = a_form->iv.length;
        sess->mac_len = a_form->digest_length;

        if (unlikely(roc_se_ciph_key_set(&sess->roc_se_ctx, enc_type,
                                         a_form->key.data, a_form->key.length,
                                         NULL)))
                return -1;

        if (unlikely(roc_se_auth_key_set(&sess->roc_se_ctx, auth_type, NULL, 0,
                                         a_form->digest_length)))
                return -1;

        return 0;
}
static __rte_always_inline void *
alloc_op_meta(struct roc_se_buf_ptr *buf, int32_t len,
              struct rte_mempool *cpt_meta_pool,
              struct cpt_inflight_req *infl_req)
{
        uint8_t *mdata;

        if (unlikely(rte_mempool_get(cpt_meta_pool, (void **)&mdata) < 0))
                return NULL;

        buf->vaddr = mdata;
        buf->size = len;

        infl_req->mdata = mdata;
        infl_req->op_flags |= CPT_OP_FLAGS_METABUF;

        return mdata;
}
static __rte_always_inline uint32_t
prepare_iov_from_pkt(struct rte_mbuf *pkt, struct roc_se_iov_ptr *iovec,
                     uint32_t start_offset)
{
        uint16_t index = 0;
        void *seg_data = NULL;
        int32_t seg_size = 0;

        if (!pkt) {
                iovec->buf_cnt = 0;
                return 0;
        }

        if (!start_offset) {
                seg_data = rte_pktmbuf_mtod(pkt, void *);
                seg_size = pkt->data_len;
        } else {
                while (start_offset >= pkt->data_len) {
                        start_offset -= pkt->data_len;
                        pkt = pkt->next;
                }

                seg_data = rte_pktmbuf_mtod_offset(pkt, void *, start_offset);
                seg_size = pkt->data_len - start_offset;
                if (!seg_size)
                        return 1;
        }

        /* first seg */
        iovec->bufs[index].vaddr = seg_data;
        iovec->bufs[index].size = seg_size;
        index++;
        pkt = pkt->next;

        while (unlikely(pkt != NULL)) {
                seg_data = rte_pktmbuf_mtod(pkt, void *);
                seg_size = pkt->data_len;
                if (!seg_size)
                        break;

                iovec->bufs[index].vaddr = seg_data;
                iovec->bufs[index].size = seg_size;

                index++;

                pkt = pkt->next;
        }

        iovec->buf_cnt = index;
        return 0;
}
static __rte_always_inline uint32_t
prepare_iov_from_pkt_inplace(struct rte_mbuf *pkt,
                             struct roc_se_fc_params *param, uint32_t *flags)
{
        uint16_t index = 0;
        void *seg_data = NULL;
        uint32_t seg_size = 0;
        struct roc_se_iov_ptr *iovec;

        seg_data = rte_pktmbuf_mtod(pkt, void *);
        seg_size = pkt->data_len;

        /* first seg */
        if (likely(!pkt->next)) {
                uint32_t headroom;

                *flags |= ROC_SE_SINGLE_BUF_INPLACE;
                headroom = rte_pktmbuf_headroom(pkt);
                if (likely(headroom >= 24))
                        *flags |= ROC_SE_SINGLE_BUF_HEADROOM;
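                /*
                 * Headroom note: the 24 B threshold covers the 8 B offset
                 * control word (ROC_SE_OFF_CTRL_LEN) plus a worst-case 16 B
                 * IV that Direct mode writes in front of the packet data.
                 */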
                param->bufs[0].vaddr = seg_data;
                param->bufs[0].size = seg_size;
                return 0;
        }

        iovec = param->src_iov;
        iovec->bufs[index].vaddr = seg_data;
        iovec->bufs[index].size = seg_size;
        index++;
        pkt = pkt->next;

        while (unlikely(pkt != NULL)) {
                seg_data = rte_pktmbuf_mtod(pkt, void *);
                seg_size = pkt->data_len;

                if (!seg_size)
                        break;

                iovec->bufs[index].vaddr = seg_data;
                iovec->bufs[index].size = seg_size;

                index++;

                pkt = pkt->next;
        }

        iovec->buf_cnt = index;
        return 0;
}
static __rte_always_inline int
fill_fc_params(struct rte_crypto_op *cop, struct cnxk_se_sess *sess,
               struct cpt_qp_meta_info *m_info,
               struct cpt_inflight_req *infl_req, struct cpt_inst_s *inst)
{
        struct roc_se_ctx *ctx = &sess->roc_se_ctx;
        uint8_t op_minor = ctx->template_w4.s.opcode_minor;
        struct rte_crypto_sym_op *sym_op = cop->sym;
        void *mdata = NULL;
        uint32_t mc_hash_off;
        uint32_t flags = 0;
        uint64_t d_offs, d_lens;
        struct rte_mbuf *m_src, *m_dst;
        uint8_t cpt_op = sess->cpt_op;
#ifdef CPT_ALWAYS_USE_SG_MODE
        uint8_t inplace = 0;
#else
        uint8_t inplace = 1;
#endif
        struct roc_se_fc_params fc_params;
        char src[SRC_IOV_SIZE];
        char dst[SRC_IOV_SIZE];
        uint32_t iv_buf[4];
        int ret;

        fc_params.cipher_iv_len = sess->iv_length;
        fc_params.auth_iv_len = sess->auth_iv_length;

        if (likely(sess->iv_length)) {
                flags |= ROC_SE_VALID_IV_BUF;
                fc_params.iv_buf = rte_crypto_op_ctod_offset(cop, uint8_t *,
                                                             sess->iv_offset);
                if (sess->aes_ctr && unlikely(sess->iv_length != 16)) {
                        memcpy((uint8_t *)iv_buf,
                               rte_crypto_op_ctod_offset(cop, uint8_t *,
                                                         sess->iv_offset),
                               12);
                        iv_buf[3] = rte_cpu_to_be_32(0x1);
                        fc_params.iv_buf = iv_buf;
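                        /*
                         * Counter-block sketch: for AES-CTR with a shorter
                         * nonce+IV, the 16 B block built above is
                         * { 12 B IV, 32-bit big-endian counter = 1 }, i.e.
                         * the initial counter block the microcode consumes.
                         */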
                }
        }

        if (sess->zsk_flag) {
                fc_params.auth_iv_buf = rte_crypto_op_ctod_offset(
                        cop, uint8_t *, sess->auth_iv_offset);
                if (sess->zsk_flag != ROC_SE_ZS_EA)
                        inplace = 0;
        }
        m_src = sym_op->m_src;
        m_dst = sym_op->m_dst;

        if (sess->aes_gcm || sess->chacha_poly) {
                uint8_t *salt;
                uint8_t *aad_data;
                uint16_t aad_len;

                d_offs = sym_op->aead.data.offset;
                d_lens = sym_op->aead.data.length;
                mc_hash_off =
                        sym_op->aead.data.offset + sym_op->aead.data.length;

                aad_data = sym_op->aead.aad.data;
                aad_len = sess->aad_length;
                if (likely((aad_data + aad_len) ==
                           rte_pktmbuf_mtod_offset(m_src, uint8_t *,
                                                   sym_op->aead.data.offset))) {
                        d_offs = (d_offs - aad_len) | (d_offs << 16);
                        d_lens = (d_lens + aad_len) | (d_lens << 32);
                } else {
                        fc_params.aad_buf.vaddr = sym_op->aead.aad.data;
                        fc_params.aad_buf.size = aad_len;
                        flags |= ROC_SE_VALID_AAD_BUF;
                        inplace = 0;
                        d_offs = d_offs << 16;
                        d_lens = d_lens << 32;
                }

                salt = fc_params.iv_buf;
                if (unlikely(*(uint32_t *)salt != sess->salt)) {
                        cpt_fc_salt_update(&sess->roc_se_ctx, salt);
                        sess->salt = *(uint32_t *)salt;
                }
                fc_params.iv_buf = salt + 4;
                if (likely(sess->mac_len)) {
                        struct rte_mbuf *m =
                                (cpt_op & ROC_SE_OP_ENCODE) ? m_dst : m_src;

                        if (!m)
                                m = m_src;

                        /* hmac immediately following data is best case */
                        if (unlikely(rte_pktmbuf_mtod(m, uint8_t *) +
                                             mc_hash_off !=
                                     (uint8_t *)sym_op->aead.digest.data)) {
                                flags |= ROC_SE_VALID_MAC_BUF;
                                fc_params.mac_buf.size = sess->mac_len;
                                fc_params.mac_buf.vaddr =
                                        sym_op->aead.digest.data;
                                inplace = 0;
                        }
                }
        } else {
                d_offs = sym_op->cipher.data.offset;
                d_lens = sym_op->cipher.data.length;
                mc_hash_off =
                        sym_op->cipher.data.offset + sym_op->cipher.data.length;
                d_offs = (d_offs << 16) | sym_op->auth.data.offset;
                d_lens = (d_lens << 32) | sym_op->auth.data.length;

                if (mc_hash_off <
                    (sym_op->auth.data.offset + sym_op->auth.data.length)) {
                        mc_hash_off = (sym_op->auth.data.offset +
                                       sym_op->auth.data.length);
                }
                /* for gmac, salt should be updated like in gcm */
                if (unlikely(sess->is_gmac)) {
                        uint8_t *salt;
                        salt = fc_params.iv_buf;
                        if (unlikely(*(uint32_t *)salt != sess->salt)) {
                                cpt_fc_salt_update(&sess->roc_se_ctx, salt);
                                sess->salt = *(uint32_t *)salt;
                        }
                        fc_params.iv_buf = salt + 4;
                }
                if (likely(sess->mac_len)) {
                        struct rte_mbuf *m;

                        m = (cpt_op & ROC_SE_OP_ENCODE) ? m_dst : m_src;
                        if (!m)
                                m = m_src;

                        /* hmac immediately following data is best case */
                        if (!(op_minor & ROC_SE_FC_MINOR_OP_HMAC_FIRST) &&
                            (unlikely(rte_pktmbuf_mtod(m, uint8_t *) +
                                              mc_hash_off !=
                                      (uint8_t *)sym_op->auth.digest.data))) {
                                flags |= ROC_SE_VALID_MAC_BUF;
                                fc_params.mac_buf.size = sess->mac_len;
                                fc_params.mac_buf.vaddr =
                                        sym_op->auth.digest.data;
                                inplace = 0;
                        }
                }
        }
        fc_params.ctx_buf.vaddr = &sess->roc_se_ctx;

        if (!(op_minor & ROC_SE_FC_MINOR_OP_HMAC_FIRST) &&
            unlikely(sess->is_null || sess->cpt_op == ROC_SE_OP_DECODE))
                inplace = 0;

        if (likely(!m_dst && inplace)) {
                /* Case of single buffer without AAD buf or
                 * separate mac buf in place and
                 * not air crypto
                 */
                fc_params.dst_iov = fc_params.src_iov = (void *)src;

                if (unlikely(prepare_iov_from_pkt_inplace(m_src, &fc_params,
                                                          &flags))) {
                        plt_dp_err("Prepare inplace src iov failed");
                        ret = -EINVAL;
                        goto err_exit;
                }
        } else {
                /* Out of place processing */
                fc_params.src_iov = (void *)src;
                fc_params.dst_iov = (void *)dst;

                /* Store SG I/O in the api for reuse */
                if (prepare_iov_from_pkt(m_src, fc_params.src_iov, 0)) {
                        plt_dp_err("Prepare src iov failed");
                        ret = -EINVAL;
                        goto err_exit;
                }

                if (unlikely(m_dst != NULL)) {
                        uint32_t pkt_len;

                        /* Try to make as much room as src has */
                        pkt_len = rte_pktmbuf_pkt_len(m_dst);

                        if (unlikely(pkt_len < rte_pktmbuf_pkt_len(m_src))) {
                                pkt_len = rte_pktmbuf_pkt_len(m_src) - pkt_len;
                                if (!rte_pktmbuf_append(m_dst, pkt_len)) {
                                        plt_dp_err("Not enough space in m_dst %p, need %u more",
                                                   m_dst, pkt_len);
                                        ret = -EINVAL;
                                        goto err_exit;
                                }
                        }

                        if (prepare_iov_from_pkt(m_dst, fc_params.dst_iov, 0)) {
                                plt_dp_err("Prepare dst iov failed for m_dst %p",
                                           m_dst);
                                ret = -EINVAL;
                                goto err_exit;
                        }
                } else {
                        fc_params.dst_iov = (void *)src;
                }
        }

        if (unlikely(!((flags & ROC_SE_SINGLE_BUF_INPLACE) &&
                       (flags & ROC_SE_SINGLE_BUF_HEADROOM) &&
                       ((ctx->fc_type == ROC_SE_FC_GEN) ||
                        (ctx->fc_type == ROC_SE_PDCP))))) {
                mdata = alloc_op_meta(&fc_params.meta_buf, m_info->mlen,
                                      m_info->pool, infl_req);
                if (mdata == NULL) {
                        plt_dp_err("Error allocating meta buffer for request");
                        ret = -ENOMEM;
                        goto err_exit;
                }
        }

        /* Finally prepare the instruction */
        if (cpt_op & ROC_SE_OP_ENCODE)
                ret = cpt_fc_enc_hmac_prep(flags, d_offs, d_lens, &fc_params,
                                           inst);
        else
                ret = cpt_fc_dec_hmac_prep(flags, d_offs, d_lens, &fc_params,
                                           inst);

        if (unlikely(ret)) {
                plt_dp_err("Preparing request failed due to bad input arg");
                goto free_mdata_and_exit;
        }

        return 0;

free_mdata_and_exit:
        if (infl_req->op_flags & CPT_OP_FLAGS_METABUF)
                rte_mempool_put(m_info->pool, infl_req->mdata);
err_exit:
        return ret;
}
static __rte_always_inline void
compl_auth_verify(struct rte_crypto_op *op, uint8_t *gen_mac, uint64_t mac_len)
{
        uint8_t *mac;
        struct rte_crypto_sym_op *sym_op = op->sym;

        if (sym_op->auth.digest.data)
                mac = sym_op->auth.digest.data;
        else
                mac = rte_pktmbuf_mtod_offset(sym_op->m_src, uint8_t *,
                                              sym_op->auth.data.length +
                                                      sym_op->auth.data.offset);
        if (!mac) {
                op->status = RTE_CRYPTO_OP_STATUS_ERROR;
                return;
        }

        if (memcmp(mac, gen_mac, mac_len))
                op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
        else
                op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
}
static __rte_always_inline void
find_kasumif9_direction_and_length(uint8_t *src, uint32_t counter_num_bytes,
                                   uint32_t *addr_length_in_bits,
                                   uint8_t *addr_direction)
{
        uint32_t pos;
        uint8_t last_byte;
        uint8_t found = 0;

        while (!found && counter_num_bytes > 0) {
                counter_num_bytes--;
                if (src[counter_num_bytes] == 0x00)
                        continue;
                pos = rte_bsf32(src[counter_num_bytes]);
                if (pos == 7) {
                        if (likely(counter_num_bytes > 0)) {
                                last_byte = src[counter_num_bytes - 1];
                                *addr_direction = last_byte & 0x1;
                                *addr_length_in_bits =
                                        counter_num_bytes * 8 - 1;
                        }
                } else {
                        last_byte = src[counter_num_bytes];
                        *addr_direction = (last_byte >> (pos + 1)) & 0x1;
                        *addr_length_in_bits =
                                counter_num_bytes * 8 + (8 - (pos + 2));
                }
                found = 1;
        }
}
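/*
 * Illustrative example: a KASUMI F9 input ends with
 * <data bits> <direction bit> <1> <0 ... 0 padding>. The scan above
 * walks back over zero bytes, locates the terminating '1' bit, reads
 * the bit just before it as the direction, and reports the length of
 * the data bits preceding it.
 */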
/*
 * This handles all auth-only cases except AES_GMAC.
 */
static __rte_always_inline int
fill_digest_params(struct rte_crypto_op *cop, struct cnxk_se_sess *sess,
                   struct cpt_qp_meta_info *m_info,
                   struct cpt_inflight_req *infl_req, struct cpt_inst_s *inst)
{
        uint32_t space = 0;
        struct rte_crypto_sym_op *sym_op = cop->sym;
        void *mdata;
        uint32_t auth_range_off;
        uint32_t flags = 0;
        uint64_t d_offs = 0, d_lens;
        struct rte_mbuf *m_src, *m_dst;
        uint16_t auth_op = sess->cpt_op & ROC_SE_OP_AUTH_MASK;
        uint16_t mac_len = sess->mac_len;
        struct roc_se_fc_params params;
        char src[SRC_IOV_SIZE];
        uint8_t iv_buf[16];
        int ret;

        memset(&params, 0, sizeof(struct roc_se_fc_params));

        m_src = sym_op->m_src;

        mdata = alloc_op_meta(&params.meta_buf, m_info->mlen, m_info->pool,
                              infl_req);
        if (mdata == NULL) {
                ret = -ENOMEM;
                goto err_exit;
        }

        auth_range_off = sym_op->auth.data.offset;

        flags = ROC_SE_VALID_MAC_BUF;
        params.src_iov = (void *)src;
        if (unlikely(sess->zsk_flag)) {
                /*
                 * Since for Zuc, Kasumi and Snow3g offsets are in bits, we
                 * send pass-through even for the auth-only case and let the
                 * MC handle it.
                 */
                d_offs = auth_range_off;
                auth_range_off = 0;
                params.auth_iv_len = sess->auth_iv_length;
                params.auth_iv_buf = rte_crypto_op_ctod_offset(
                        cop, uint8_t *, sess->auth_iv_offset);
                if (sess->zsk_flag == ROC_SE_K_F9) {
                        uint32_t length_in_bits, num_bytes;
                        uint8_t *src, direction = 0;

                        memcpy(iv_buf,
                               rte_pktmbuf_mtod(cop->sym->m_src, uint8_t *),
                               8);
                        /*
                         * This is kasumi f9; take the direction from
                         * the end of the source data.
                         */
                        length_in_bits = cop->sym->auth.data.length;
                        num_bytes = (length_in_bits >> 3);
                        src = rte_pktmbuf_mtod(cop->sym->m_src, uint8_t *);
                        find_kasumif9_direction_and_length(
                                src, num_bytes, &length_in_bits, &direction);
                        length_in_bits -= 64;
                        cop->sym->auth.data.offset += 64;
                        d_offs = cop->sym->auth.data.offset;
                        auth_range_off = d_offs / 8;
                        cop->sym->auth.data.length = length_in_bits;

                        /* Store it at the end of the auth iv */
                        iv_buf[8] = direction;
                        params.auth_iv_buf = iv_buf;
                }
        }

        d_lens = sym_op->auth.data.length;

        params.ctx_buf.vaddr = &sess->roc_se_ctx;

        if (auth_op == ROC_SE_OP_AUTH_GENERATE) {
                if (sym_op->auth.digest.data) {
                        /*
                         * Digest to be generated
                         * in separate buffer
                         */
                        params.mac_buf.size = sess->mac_len;
                        params.mac_buf.vaddr = sym_op->auth.digest.data;
                } else {
                        uint32_t off = sym_op->auth.data.offset +
                                       sym_op->auth.data.length;
                        int32_t dlen, space;

                        m_dst = sym_op->m_dst ? sym_op->m_dst : sym_op->m_src;
                        dlen = rte_pktmbuf_pkt_len(m_dst);

                        space = off + mac_len - dlen;
                        if (space > 0)
                                if (!rte_pktmbuf_append(m_dst, space)) {
                                        plt_dp_err("Failed to extend mbuf by %uB",
                                                   space);
                                        ret = -EINVAL;
                                        goto free_mdata_and_exit;
                                }

                        params.mac_buf.vaddr =
                                rte_pktmbuf_mtod_offset(m_dst, void *, off);
                        params.mac_buf.size = mac_len;
                }
        } else {
                uint64_t *op = mdata;

                /* Need space for storing generated mac */
                space += 2 * sizeof(uint64_t);

                params.mac_buf.vaddr = (uint8_t *)mdata + space;
                params.mac_buf.size = mac_len;
                space += RTE_ALIGN_CEIL(mac_len, 8);
                op[0] = (uintptr_t)params.mac_buf.vaddr;
                op[1] = mac_len;
                infl_req->op_flags |= CPT_OP_FLAGS_AUTH_VERIFY;
        }

        params.meta_buf.vaddr = (uint8_t *)mdata + space;
        params.meta_buf.size -= space;

        /* Out of place processing */
        params.src_iov = (void *)src;

        /* Store SG I/O in the api for reuse */
        if (prepare_iov_from_pkt(m_src, params.src_iov, auth_range_off)) {
                plt_dp_err("Prepare src iov failed");
                ret = -EINVAL;
                goto free_mdata_and_exit;
        }

        ret = cpt_fc_enc_hmac_prep(flags, d_offs, d_lens, &params, inst);
        if (unlikely(ret))
                goto free_mdata_and_exit;

        return 0;

free_mdata_and_exit:
        if (infl_req->op_flags & CPT_OP_FLAGS_METABUF)
                rte_mempool_put(m_info->pool, infl_req->mdata);
err_exit:
        return ret;
}
#endif /* _CNXK_SE_H_ */