1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(C) 2021 Marvell.
9 #include "cnxk_cryptodev.h"
10 #include "cnxk_cryptodev_ops.h"
12 #define SRC_IOV_SIZE \
13 (sizeof(struct roc_se_iov_ptr) + \
14 (sizeof(struct roc_se_buf_ptr) * ROC_SE_MAX_SG_CNT))
15 #define DST_IOV_SIZE \
16 (sizeof(struct roc_se_iov_ptr) + \
17 (sizeof(struct roc_se_buf_ptr) * ROC_SE_MAX_SG_CNT))
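/*
 * Illustrative usage sketch (not part of the driver): SRC_IOV_SIZE and
 * DST_IOV_SIZE size an on-stack scratch area large enough to hold a
 * struct roc_se_iov_ptr followed by ROC_SE_MAX_SG_CNT buffer pointers,
 * in the way fill_fc_params() below uses it:
 *
 *	char src[SRC_IOV_SIZE];
 *	struct roc_se_iov_ptr *iov = (struct roc_se_iov_ptr *)src;
 *
 *	prepare_iov_from_pkt(m_src, iov, 0);
 */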
21 uint16_t zsk_flag : 4;
24 uint16_t chacha_poly : 1;
31 uint8_t auth_iv_length;
33 uint16_t auth_iv_offset;
36 struct roc_se_ctx roc_se_ctx;
37 } __rte_cache_aligned;
39 static __rte_always_inline int
40 cpt_mac_len_verify(struct rte_crypto_auth_xform *auth)
42 uint16_t mac_len = auth->digest_length;
46 case RTE_CRYPTO_AUTH_MD5:
47 case RTE_CRYPTO_AUTH_MD5_HMAC:
48 ret = (mac_len == 16) ? 0 : -1;
50 case RTE_CRYPTO_AUTH_SHA1:
51 case RTE_CRYPTO_AUTH_SHA1_HMAC:
52 ret = (mac_len == 20) ? 0 : -1;
54 case RTE_CRYPTO_AUTH_SHA224:
55 case RTE_CRYPTO_AUTH_SHA224_HMAC:
56 ret = (mac_len == 28) ? 0 : -1;
58 case RTE_CRYPTO_AUTH_SHA256:
59 case RTE_CRYPTO_AUTH_SHA256_HMAC:
60 ret = (mac_len == 32) ? 0 : -1;
62 case RTE_CRYPTO_AUTH_SHA384:
63 case RTE_CRYPTO_AUTH_SHA384_HMAC:
64 ret = (mac_len == 48) ? 0 : -1;
66 case RTE_CRYPTO_AUTH_SHA512:
67 case RTE_CRYPTO_AUTH_SHA512_HMAC:
68 ret = (mac_len == 64) ? 0 : -1;
70 case RTE_CRYPTO_AUTH_NULL:
80 static __rte_always_inline void
81 cpt_fc_salt_update(struct roc_se_ctx *se_ctx, uint8_t *salt)
83 struct roc_se_context *fctx = &se_ctx->se_ctx.fctx;
84 memcpy(fctx->enc.encr_iv, salt, 4);
87 static __rte_always_inline uint32_t
88 fill_sg_comp(struct roc_se_sglist_comp *list, uint32_t i, phys_addr_t dma_addr,
91 struct roc_se_sglist_comp *to = &list[i >> 2];
93 to->u.s.len[i % 4] = rte_cpu_to_be_16(size);
94 to->ptr[i % 4] = rte_cpu_to_be_64(dma_addr);
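/*
 * Layout note (illustrative): each struct roc_se_sglist_comp packs four
 * SG entries; entry index i selects component list[i >> 2] and slot
 * i % 4 within it, and both the 16-bit length and the 64-bit pointer
 * are stored big-endian. Filling entries 0..4, for example, uses
 * list[0] slots 0-3 and then list[1] slot 0.
 */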
99 static __rte_always_inline uint32_t
100 fill_sg_comp_from_buf(struct roc_se_sglist_comp *list, uint32_t i,
101 struct roc_se_buf_ptr *from)
103 struct roc_se_sglist_comp *to = &list[i >> 2];
105 to->u.s.len[i % 4] = rte_cpu_to_be_16(from->size);
106 to->ptr[i % 4] = rte_cpu_to_be_64((uint64_t)from->vaddr);
111 static __rte_always_inline uint32_t
112 fill_sg_comp_from_buf_min(struct roc_se_sglist_comp *list, uint32_t i,
113 struct roc_se_buf_ptr *from, uint32_t *psize)
115 struct roc_se_sglist_comp *to = &list[i >> 2];
116 uint32_t size = *psize;
119 e_len = (size > from->size) ? from->size : size;
120 to->u.s.len[i % 4] = rte_cpu_to_be_16(e_len);
121 to->ptr[i % 4] = rte_cpu_to_be_64((uint64_t)from->vaddr);
128 * This fills the SGIO list expected by the microcode (MC)
129 * from the IOV provided by the user.
131 static __rte_always_inline uint32_t
132 fill_sg_comp_from_iov(struct roc_se_sglist_comp *list, uint32_t i,
133 struct roc_se_iov_ptr *from, uint32_t from_offset,
134 uint32_t *psize, struct roc_se_buf_ptr *extra_buf,
135 uint32_t extra_offset)
138 uint32_t extra_len = extra_buf ? extra_buf->size : 0;
139 uint32_t size = *psize;
140 struct roc_se_buf_ptr *bufs;
143 for (j = 0; (j < from->buf_cnt) && size; j++) {
146 struct roc_se_sglist_comp *to = &list[i >> 2];
148 if (unlikely(from_offset)) {
149 if (from_offset >= bufs[j].size) {
150 from_offset -= bufs[j].size;
153 e_vaddr = (uint64_t)bufs[j].vaddr + from_offset;
154 e_len = (size > (bufs[j].size - from_offset)) ?
155 (bufs[j].size - from_offset) :
159 e_vaddr = (uint64_t)bufs[j].vaddr;
160 e_len = (size > bufs[j].size) ? bufs[j].size : size;
163 to->u.s.len[i % 4] = rte_cpu_to_be_16(e_len);
164 to->ptr[i % 4] = rte_cpu_to_be_64(e_vaddr);
166 if (extra_len && (e_len >= extra_offset)) {
167 /* Break the data at given offset */
168 uint32_t next_len = e_len - extra_offset;
169 uint64_t next_vaddr = e_vaddr + extra_offset;
174 e_len = extra_offset;
176 to->u.s.len[i % 4] = rte_cpu_to_be_16(e_len);
179 extra_len = RTE_MIN(extra_len, size);
180 /* Insert extra data ptr */
185 rte_cpu_to_be_16(extra_len);
186 to->ptr[i % 4] = rte_cpu_to_be_64(
187 (uint64_t)extra_buf->vaddr);
191 next_len = RTE_MIN(next_len, size);
192 /* insert the rest of the data */
196 to->u.s.len[i % 4] = rte_cpu_to_be_16(next_len);
197 to->ptr[i % 4] = rte_cpu_to_be_64(next_vaddr);
206 extra_offset -= size;
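/*
 * Worked example (illustrative): assuming a single 100-byte source
 * buffer, a 16-byte AAD extra_buf, extra_offset = 20 and sufficient
 * *psize, the loop above emits three SG entries -- 20 bytes of source
 * data, the 16-byte AAD buffer, and the remaining 80 bytes of source
 * data -- so the extra buffer is spliced into the stream at the
 * requested offset without copying.
 */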
214 static __rte_always_inline int
215 cpt_digest_gen_prep(uint32_t flags, uint64_t d_lens,
216 struct roc_se_fc_params *params, struct cpt_inst_s *inst)
218 void *m_vaddr = params->meta_buf.vaddr;
220 uint16_t data_len, mac_len, key_len;
221 roc_se_auth_type hash_type;
222 struct roc_se_ctx *ctx;
223 struct roc_se_sglist_comp *gather_comp;
224 struct roc_se_sglist_comp *scatter_comp;
226 uint32_t g_size_bytes, s_size_bytes;
227 union cpt_inst_w4 cpt_inst_w4;
229 ctx = params->ctx_buf.vaddr;
231 hash_type = ctx->hash_type;
232 mac_len = ctx->mac_len;
233 key_len = ctx->auth_key_len;
234 data_len = ROC_SE_AUTH_DLEN(d_lens);
237 cpt_inst_w4.s.opcode_minor = 0;
238 cpt_inst_w4.s.param2 = ((uint16_t)hash_type << 8);
240 cpt_inst_w4.s.opcode_major =
241 ROC_SE_MAJOR_OP_HMAC | ROC_SE_DMA_MODE;
242 cpt_inst_w4.s.param1 = key_len;
243 cpt_inst_w4.s.dlen = data_len + RTE_ALIGN_CEIL(key_len, 8);
245 cpt_inst_w4.s.opcode_major =
246 ROC_SE_MAJOR_OP_HASH | ROC_SE_DMA_MODE;
247 cpt_inst_w4.s.param1 = 0;
248 cpt_inst_w4.s.dlen = data_len;
251 /* Only the NULL-auth-only (no hash, no cipher) case enters this block */
252 if (unlikely(!hash_type && !ctx->enc_cipher)) {
253 cpt_inst_w4.s.opcode_major = ROC_SE_MAJOR_OP_MISC;
254 /* Minor op is passthrough */
255 cpt_inst_w4.s.opcode_minor = 0x03;
256 /* Send out completion code only */
257 cpt_inst_w4.s.param2 = 0x1;
260 /* DPTR has SG list */
263 ((uint16_t *)in_buffer)[0] = 0;
264 ((uint16_t *)in_buffer)[1] = 0;
266 /* TODO: Add an error check to ensure the available space is sufficient */
267 gather_comp = (struct roc_se_sglist_comp *)((uint8_t *)m_vaddr + 8);
276 uint64_t k_vaddr = (uint64_t)params->ctx_buf.vaddr +
277 offsetof(struct roc_se_ctx, auth_key);
279 i = fill_sg_comp(gather_comp, i, k_vaddr,
280 RTE_ALIGN_CEIL(key_len, 8));
286 i = fill_sg_comp_from_iov(gather_comp, i, params->src_iov, 0,
288 if (unlikely(size)) {
289 plt_dp_err("Insufficient src IOV size, short by %dB",
295 * A zero-length data gather pointer needs to be supported
296 * for the hash & hmac case
300 ((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
301 g_size_bytes = ((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);
308 scatter_comp = (struct roc_se_sglist_comp *)((uint8_t *)gather_comp +
311 if (flags & ROC_SE_VALID_MAC_BUF) {
312 if (unlikely(params->mac_buf.size < mac_len)) {
313 plt_dp_err("Insufficient MAC size");
318 i = fill_sg_comp_from_buf_min(scatter_comp, i, ¶ms->mac_buf,
322 i = fill_sg_comp_from_iov(scatter_comp, i, params->src_iov,
323 data_len, &size, NULL, 0);
324 if (unlikely(size)) {
325 plt_dp_err("Insufficient dst IOV size, short by %dB",
331 ((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
332 s_size_bytes = ((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);
334 size = g_size_bytes + s_size_bytes + ROC_SE_SG_LIST_HDR_SIZE;
336 /* This is DPTR len in case of SG mode */
337 cpt_inst_w4.s.dlen = size;
339 inst->dptr = (uint64_t)in_buffer;
340 inst->w4.u64 = cpt_inst_w4.u64;
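/*
 * DPTR layout sketch for SG mode (illustrative, derived from the code
 * above): the buffer starts with an 8-byte header -- two reserved
 * 16-bit words followed by the big-endian gather and scatter entry
 * counts -- and is followed by the gather components and then the
 * scatter components. cpt_inst_w4.s.dlen therefore covers
 * ROC_SE_SG_LIST_HDR_SIZE + g_size_bytes + s_size_bytes.
 */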
345 static __rte_always_inline int
346 cpt_enc_hmac_prep(uint32_t flags, uint64_t d_offs, uint64_t d_lens,
347 struct roc_se_fc_params *fc_params, struct cpt_inst_s *inst)
349 uint32_t iv_offset = 0;
350 int32_t inputlen, outputlen, enc_dlen, auth_dlen;
351 struct roc_se_ctx *se_ctx;
352 uint32_t cipher_type, hash_type;
353 uint32_t mac_len, size;
355 struct roc_se_buf_ptr *aad_buf = NULL;
356 uint32_t encr_offset, auth_offset;
357 uint32_t encr_data_len, auth_data_len, aad_len = 0;
358 uint32_t passthrough_len = 0;
359 union cpt_inst_w4 cpt_inst_w4;
363 encr_offset = ROC_SE_ENCR_OFFSET(d_offs);
364 auth_offset = ROC_SE_AUTH_OFFSET(d_offs);
365 encr_data_len = ROC_SE_ENCR_DLEN(d_lens);
366 auth_data_len = ROC_SE_AUTH_DLEN(d_lens);
367 if (unlikely(flags & ROC_SE_VALID_AAD_BUF)) {
368 /* We don't support both AAD and auth data separately */
371 aad_len = fc_params->aad_buf.size;
372 aad_buf = &fc_params->aad_buf;
374 se_ctx = fc_params->ctx_buf.vaddr;
375 cipher_type = se_ctx->enc_cipher;
376 hash_type = se_ctx->hash_type;
377 mac_len = se_ctx->mac_len;
378 op_minor = se_ctx->template_w4.s.opcode_minor;
380 if (unlikely(!(flags & ROC_SE_VALID_IV_BUF))) {
382 iv_offset = ROC_SE_ENCR_IV_OFFSET(d_offs);
385 if (unlikely(flags & ROC_SE_VALID_AAD_BUF)) {
387 * When AAD is given, data above encr_offset is passed through.
388 * Since AAD is given as a separate pointer and not as an offset,
389 * this is a special case: the input data must be fragmented
390 * into passthrough + encr_data, with AAD inserted in between.
392 if (hash_type != ROC_SE_GMAC_TYPE) {
393 passthrough_len = encr_offset;
394 auth_offset = passthrough_len + iv_len;
395 encr_offset = passthrough_len + aad_len + iv_len;
396 auth_data_len = aad_len + encr_data_len;
398 passthrough_len = 16 + aad_len;
399 auth_offset = passthrough_len + iv_len;
400 auth_data_len = aad_len;
403 encr_offset += iv_len;
404 auth_offset += iv_len;
408 cpt_inst_w4.s.opcode_major = ROC_SE_MAJOR_OP_FC;
409 cpt_inst_w4.s.opcode_minor = ROC_SE_FC_MINOR_OP_ENCRYPT;
410 cpt_inst_w4.s.opcode_minor |= (uint64_t)op_minor;
412 if (hash_type == ROC_SE_GMAC_TYPE) {
417 auth_dlen = auth_offset + auth_data_len;
418 enc_dlen = encr_data_len + encr_offset;
419 if (unlikely(encr_data_len & 0xf)) {
420 if ((cipher_type == ROC_SE_DES3_CBC) ||
421 (cipher_type == ROC_SE_DES3_ECB))
423 RTE_ALIGN_CEIL(encr_data_len, 8) + encr_offset;
424 else if (likely((cipher_type == ROC_SE_AES_CBC) ||
425 (cipher_type == ROC_SE_AES_ECB)))
427 RTE_ALIGN_CEIL(encr_data_len, 8) + encr_offset;
430 if (unlikely(auth_dlen > enc_dlen)) {
431 inputlen = auth_dlen;
432 outputlen = auth_dlen + mac_len;
435 outputlen = enc_dlen + mac_len;
438 if (op_minor & ROC_SE_FC_MINOR_OP_HMAC_FIRST)
439 outputlen = enc_dlen;
442 cpt_inst_w4.s.param1 = encr_data_len;
443 cpt_inst_w4.s.param2 = auth_data_len;
446 * On cn9k and cn10k, the IV & offset control word cannot be part
447 * of the instruction and must instead be placed in the data
448 * buffer, so Direct mode processing is done only when enough
449 * headroom is available; otherwise the SG list path is used.
451 if (likely((flags & ROC_SE_SINGLE_BUF_INPLACE) &&
452 (flags & ROC_SE_SINGLE_BUF_HEADROOM))) {
453 void *dm_vaddr = fc_params->bufs[0].vaddr;
455 /* Use Direct mode */
458 (uint8_t *)dm_vaddr - ROC_SE_OFF_CTRL_LEN - iv_len;
461 inst->dptr = (uint64_t)offset_vaddr;
463 /* RPTR should just exclude offset control word */
464 inst->rptr = (uint64_t)dm_vaddr - iv_len;
466 cpt_inst_w4.s.dlen = inputlen + ROC_SE_OFF_CTRL_LEN;
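/*
 * Direct mode buffer sketch (illustrative): the offset control word and
 * the IV are written into the mbuf headroom immediately preceding the
 * packet data, i.e.
 *
 *   [ ROC_SE_OFF_CTRL_LEN bytes ][ iv_len bytes IV ][ data ... ]
 *
 * DPTR points at the control word, RPTR excludes only the control word
 * and points at the IV, and dlen covers inputlen + ROC_SE_OFF_CTRL_LEN.
 */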
468 if (likely(iv_len)) {
469 uint64_t *dest = (uint64_t *)((uint8_t *)offset_vaddr +
470 ROC_SE_OFF_CTRL_LEN);
471 uint64_t *src = fc_params->iv_buf;
477 void *m_vaddr = fc_params->meta_buf.vaddr;
478 uint32_t i, g_size_bytes, s_size_bytes;
479 struct roc_se_sglist_comp *gather_comp;
480 struct roc_se_sglist_comp *scatter_comp;
483 /* This falls under strict SG mode */
484 offset_vaddr = m_vaddr;
485 size = ROC_SE_OFF_CTRL_LEN + iv_len;
487 m_vaddr = (uint8_t *)m_vaddr + size;
489 cpt_inst_w4.s.opcode_major |= (uint64_t)ROC_SE_DMA_MODE;
491 if (likely(iv_len)) {
492 uint64_t *dest = (uint64_t *)((uint8_t *)offset_vaddr +
493 ROC_SE_OFF_CTRL_LEN);
494 uint64_t *src = fc_params->iv_buf;
499 /* DPTR has SG list */
502 ((uint16_t *)in_buffer)[0] = 0;
503 ((uint16_t *)in_buffer)[1] = 0;
505 /* TODO: Add an error check to ensure the available space is sufficient */
507 (struct roc_se_sglist_comp *)((uint8_t *)m_vaddr + 8);
515 /* Offset control word that includes iv */
516 i = fill_sg_comp(gather_comp, i, (uint64_t)offset_vaddr,
517 ROC_SE_OFF_CTRL_LEN + iv_len);
520 size = inputlen - iv_len;
522 uint32_t aad_offset = aad_len ? passthrough_len : 0;
524 if (unlikely(flags & ROC_SE_SINGLE_BUF_INPLACE)) {
525 i = fill_sg_comp_from_buf_min(
526 gather_comp, i, fc_params->bufs, &size);
528 i = fill_sg_comp_from_iov(
529 gather_comp, i, fc_params->src_iov, 0,
530 &size, aad_buf, aad_offset);
533 if (unlikely(size)) {
534 plt_dp_err("Insufficient buffer space,"
540 ((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
542 ((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);
545 * Output Scatter list
549 (struct roc_se_sglist_comp *)((uint8_t *)gather_comp +
553 if (likely(iv_len)) {
554 i = fill_sg_comp(scatter_comp, i,
555 (uint64_t)offset_vaddr +
560 /* output data or output data + digest */
561 if (unlikely(flags & ROC_SE_VALID_MAC_BUF)) {
562 size = outputlen - iv_len - mac_len;
564 uint32_t aad_offset =
565 aad_len ? passthrough_len : 0;
568 ROC_SE_SINGLE_BUF_INPLACE)) {
569 i = fill_sg_comp_from_buf_min(
571 fc_params->bufs, &size);
573 i = fill_sg_comp_from_iov(
575 fc_params->dst_iov, 0, &size,
576 aad_buf, aad_offset);
578 if (unlikely(size)) {
579 plt_dp_err("Insufficient buffer"
580 " space, size %d needed",
587 i = fill_sg_comp_from_buf(scatter_comp, i,
588 &fc_params->mac_buf);
591 /* Output including mac */
592 size = outputlen - iv_len;
594 uint32_t aad_offset =
595 aad_len ? passthrough_len : 0;
598 ROC_SE_SINGLE_BUF_INPLACE)) {
599 i = fill_sg_comp_from_buf_min(
601 fc_params->bufs, &size);
603 i = fill_sg_comp_from_iov(
605 fc_params->dst_iov, 0, &size,
606 aad_buf, aad_offset);
608 if (unlikely(size)) {
609 plt_dp_err("Insufficient buffer"
610 " space, size %d needed",
616 ((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
618 ((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);
620 size = g_size_bytes + s_size_bytes + ROC_SE_SG_LIST_HDR_SIZE;
622 /* This is DPTR len in case of SG mode */
623 cpt_inst_w4.s.dlen = size;
625 inst->dptr = (uint64_t)in_buffer;
628 if (unlikely((encr_offset >> 16) || (iv_offset >> 8) ||
629 (auth_offset >> 8))) {
630 plt_dp_err("Offset not supported");
631 plt_dp_err("enc_offset: %d", encr_offset);
632 plt_dp_err("iv_offset : %d", iv_offset);
633 plt_dp_err("auth_offset: %d", auth_offset);
637 *(uint64_t *)offset_vaddr = rte_cpu_to_be_64(
638 ((uint64_t)encr_offset << 16) | ((uint64_t)iv_offset << 8) |
639 ((uint64_t)auth_offset));
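/*
 * Worked example (illustrative): with encr_offset = 24, iv_offset = 8
 * and auth_offset = 16, the 64-bit control word is
 * (24 << 16) | (8 << 8) | 16 = 0x180810, stored big-endian; the range
 * checks above ensure encr_offset fits in 16 bits and the other two
 * offsets fit in 8 bits each.
 */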
641 inst->w4.u64 = cpt_inst_w4.u64;
645 static __rte_always_inline int
646 cpt_dec_hmac_prep(uint32_t flags, uint64_t d_offs, uint64_t d_lens,
647 struct roc_se_fc_params *fc_params, struct cpt_inst_s *inst)
649 uint32_t iv_offset = 0, size;
650 int32_t inputlen, outputlen, enc_dlen, auth_dlen;
651 struct roc_se_ctx *se_ctx;
652 int32_t hash_type, mac_len;
654 struct roc_se_buf_ptr *aad_buf = NULL;
655 uint32_t encr_offset, auth_offset;
656 uint32_t encr_data_len, auth_data_len, aad_len = 0;
657 uint32_t passthrough_len = 0;
658 union cpt_inst_w4 cpt_inst_w4;
662 encr_offset = ROC_SE_ENCR_OFFSET(d_offs);
663 auth_offset = ROC_SE_AUTH_OFFSET(d_offs);
664 encr_data_len = ROC_SE_ENCR_DLEN(d_lens);
665 auth_data_len = ROC_SE_AUTH_DLEN(d_lens);
667 if (unlikely(flags & ROC_SE_VALID_AAD_BUF)) {
668 /* We don't support both AAD and auth data separately */
671 aad_len = fc_params->aad_buf.size;
672 aad_buf = &fc_params->aad_buf;
675 se_ctx = fc_params->ctx_buf.vaddr;
676 hash_type = se_ctx->hash_type;
677 mac_len = se_ctx->mac_len;
678 op_minor = se_ctx->template_w4.s.opcode_minor;
680 if (unlikely(!(flags & ROC_SE_VALID_IV_BUF))) {
682 iv_offset = ROC_SE_ENCR_IV_OFFSET(d_offs);
685 if (unlikely(flags & ROC_SE_VALID_AAD_BUF)) {
687 * When AAD is given, data above encr_offset is passed through.
688 * Since AAD is given as a separate pointer and not as an offset,
689 * this is a special case: the input data must be fragmented
690 * into passthrough + encr_data, with AAD inserted in between.
692 if (hash_type != ROC_SE_GMAC_TYPE) {
693 passthrough_len = encr_offset;
694 auth_offset = passthrough_len + iv_len;
695 encr_offset = passthrough_len + aad_len + iv_len;
696 auth_data_len = aad_len + encr_data_len;
698 passthrough_len = 16 + aad_len;
699 auth_offset = passthrough_len + iv_len;
700 auth_data_len = aad_len;
703 encr_offset += iv_len;
704 auth_offset += iv_len;
708 cpt_inst_w4.s.opcode_major = ROC_SE_MAJOR_OP_FC;
709 cpt_inst_w4.s.opcode_minor = ROC_SE_FC_MINOR_OP_DECRYPT;
710 cpt_inst_w4.s.opcode_minor |= (uint64_t)op_minor;
712 if (hash_type == ROC_SE_GMAC_TYPE) {
717 enc_dlen = encr_offset + encr_data_len;
718 auth_dlen = auth_offset + auth_data_len;
720 if (auth_dlen > enc_dlen) {
721 inputlen = auth_dlen + mac_len;
722 outputlen = auth_dlen;
724 inputlen = enc_dlen + mac_len;
725 outputlen = enc_dlen;
728 if (op_minor & ROC_SE_FC_MINOR_OP_HMAC_FIRST)
729 outputlen = inputlen = enc_dlen;
731 cpt_inst_w4.s.param1 = encr_data_len;
732 cpt_inst_w4.s.param2 = auth_data_len;
735 * On cn9k and cn10k, the IV & offset control word cannot be part
736 * of the instruction and must instead be placed in the data
737 * buffer, so Direct mode processing is done only when enough
738 * headroom is available; otherwise the SG list path is used.
740 if (likely((flags & ROC_SE_SINGLE_BUF_INPLACE) &&
741 (flags & ROC_SE_SINGLE_BUF_HEADROOM))) {
742 void *dm_vaddr = fc_params->bufs[0].vaddr;
744 /* Use Direct mode */
747 (uint8_t *)dm_vaddr - ROC_SE_OFF_CTRL_LEN - iv_len;
748 inst->dptr = (uint64_t)offset_vaddr;
750 /* RPTR should just exclude offset control word */
751 inst->rptr = (uint64_t)dm_vaddr - iv_len;
753 cpt_inst_w4.s.dlen = inputlen + ROC_SE_OFF_CTRL_LEN;
755 if (likely(iv_len)) {
756 uint64_t *dest = (uint64_t *)((uint8_t *)offset_vaddr +
757 ROC_SE_OFF_CTRL_LEN);
758 uint64_t *src = fc_params->iv_buf;
764 void *m_vaddr = fc_params->meta_buf.vaddr;
765 uint32_t g_size_bytes, s_size_bytes;
766 struct roc_se_sglist_comp *gather_comp;
767 struct roc_se_sglist_comp *scatter_comp;
771 /* This falls under strict SG mode */
772 offset_vaddr = m_vaddr;
773 size = ROC_SE_OFF_CTRL_LEN + iv_len;
775 m_vaddr = (uint8_t *)m_vaddr + size;
777 cpt_inst_w4.s.opcode_major |= (uint64_t)ROC_SE_DMA_MODE;
779 if (likely(iv_len)) {
780 uint64_t *dest = (uint64_t *)((uint8_t *)offset_vaddr +
781 ROC_SE_OFF_CTRL_LEN);
782 uint64_t *src = fc_params->iv_buf;
787 /* DPTR has SG list */
790 ((uint16_t *)in_buffer)[0] = 0;
791 ((uint16_t *)in_buffer)[1] = 0;
793 /* TODO: Add an error check to ensure the available space is sufficient */
795 (struct roc_se_sglist_comp *)((uint8_t *)m_vaddr + 8);
802 /* Offset control word that includes iv */
803 i = fill_sg_comp(gather_comp, i, (uint64_t)offset_vaddr,
804 ROC_SE_OFF_CTRL_LEN + iv_len);
807 if (flags & ROC_SE_VALID_MAC_BUF) {
808 size = inputlen - iv_len - mac_len;
810 /* input data only */
812 ROC_SE_SINGLE_BUF_INPLACE)) {
813 i = fill_sg_comp_from_buf_min(
814 gather_comp, i, fc_params->bufs,
817 uint32_t aad_offset =
818 aad_len ? passthrough_len : 0;
820 i = fill_sg_comp_from_iov(
822 fc_params->src_iov, 0, &size,
823 aad_buf, aad_offset);
825 if (unlikely(size)) {
826 plt_dp_err("Insufficient buffer"
827 " space, size %d needed",
835 i = fill_sg_comp_from_buf(gather_comp, i,
836 &fc_params->mac_buf);
839 /* input data + mac */
840 size = inputlen - iv_len;
843 ROC_SE_SINGLE_BUF_INPLACE)) {
844 i = fill_sg_comp_from_buf_min(
845 gather_comp, i, fc_params->bufs,
848 uint32_t aad_offset =
849 aad_len ? passthrough_len : 0;
851 if (unlikely(!fc_params->src_iov)) {
852 plt_dp_err("Bad input args");
856 i = fill_sg_comp_from_iov(
858 fc_params->src_iov, 0, &size,
859 aad_buf, aad_offset);
862 if (unlikely(size)) {
863 plt_dp_err("Insufficient buffer"
864 " space, size %d needed",
870 ((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
872 ((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);
875 * Output Scatter List
880 (struct roc_se_sglist_comp *)((uint8_t *)gather_comp +
885 i = fill_sg_comp(scatter_comp, i,
886 (uint64_t)offset_vaddr +
891 /* Add output data */
892 size = outputlen - iv_len;
894 if (unlikely(flags & ROC_SE_SINGLE_BUF_INPLACE)) {
895 /* handle single buffer here */
896 i = fill_sg_comp_from_buf_min(scatter_comp, i,
900 uint32_t aad_offset =
901 aad_len ? passthrough_len : 0;
903 if (unlikely(!fc_params->dst_iov)) {
904 plt_dp_err("Bad input args");
908 i = fill_sg_comp_from_iov(
909 scatter_comp, i, fc_params->dst_iov, 0,
910 &size, aad_buf, aad_offset);
913 if (unlikely(size)) {
914 plt_dp_err("Insufficient buffer space,"
921 ((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
923 ((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);
925 size = g_size_bytes + s_size_bytes + ROC_SE_SG_LIST_HDR_SIZE;
927 /* This is DPTR len in case of SG mode */
928 cpt_inst_w4.s.dlen = size;
930 inst->dptr = (uint64_t)in_buffer;
933 if (unlikely((encr_offset >> 16) || (iv_offset >> 8) ||
934 (auth_offset >> 8))) {
935 plt_dp_err("Offset not supported");
936 plt_dp_err("enc_offset: %d", encr_offset);
937 plt_dp_err("iv_offset : %d", iv_offset);
938 plt_dp_err("auth_offset: %d", auth_offset);
942 *(uint64_t *)offset_vaddr = rte_cpu_to_be_64(
943 ((uint64_t)encr_offset << 16) | ((uint64_t)iv_offset << 8) |
944 ((uint64_t)auth_offset));
946 inst->w4.u64 = cpt_inst_w4.u64;
950 static __rte_always_inline int
951 cpt_zuc_snow3g_enc_prep(uint32_t req_flags, uint64_t d_offs, uint64_t d_lens,
952 struct roc_se_fc_params *params,
953 struct cpt_inst_s *inst)
956 int32_t inputlen, outputlen;
957 struct roc_se_ctx *se_ctx;
958 uint32_t mac_len = 0;
959 uint8_t pdcp_alg_type, j;
960 uint32_t encr_offset = 0, auth_offset = 0;
961 uint32_t encr_data_len = 0, auth_data_len = 0;
962 int flags, iv_len = 16;
963 uint64_t offset_ctrl;
964 uint64_t *offset_vaddr;
965 uint32_t *iv_s, iv[4];
966 union cpt_inst_w4 cpt_inst_w4;
968 se_ctx = params->ctx_buf.vaddr;
969 flags = se_ctx->zsk_flags;
970 mac_len = se_ctx->mac_len;
971 pdcp_alg_type = se_ctx->pdcp_alg_type;
973 cpt_inst_w4.s.opcode_major = ROC_SE_MAJOR_OP_ZUC_SNOW3G;
975 /* indicates CPTR ctx, operation type, KEY & IV mode from DPTR */
977 cpt_inst_w4.s.opcode_minor = ((1 << 7) | (pdcp_alg_type << 5) |
978 (0 << 4) | (0 << 3) | (flags & 0x7));
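/*
 * Illustrative breakdown of opcode_minor as built above: bit 7 is set,
 * bits 6:5 carry pdcp_alg_type, bits 4:3 are cleared and bits 2:0 carry
 * the zsk_flags (see the comment above for what these fields select).
 * For example, pdcp_alg_type = 1 and flags = 1 gives
 * (1 << 7) | (1 << 5) | 1 = 0xa1.
 */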
982 * Microcode expects offsets in bytes
985 auth_data_len = ROC_SE_AUTH_DLEN(d_lens);
988 auth_offset = ROC_SE_AUTH_OFFSET(d_offs);
989 auth_offset = auth_offset / 8;
991 /* consider iv len */
992 auth_offset += iv_len;
994 inputlen = auth_offset + (RTE_ALIGN(auth_data_len, 8) / 8);
997 offset_ctrl = rte_cpu_to_be_64((uint64_t)auth_offset);
1002 * Microcode expects offsets in bytes
1003 * TODO: Rounding off
1005 encr_data_len = ROC_SE_ENCR_DLEN(d_lens);
1007 encr_offset = ROC_SE_ENCR_OFFSET(d_offs);
1008 encr_offset = encr_offset / 8;
1009 /* consider iv len */
1010 encr_offset += iv_len;
1012 inputlen = encr_offset + (RTE_ALIGN(encr_data_len, 8) / 8);
1013 outputlen = inputlen;
1015 /* iv offset is 0 */
1016 offset_ctrl = rte_cpu_to_be_64((uint64_t)encr_offset << 16);
1019 if (unlikely((encr_offset >> 16) || (auth_offset >> 8))) {
1020 plt_dp_err("Offset not supported");
1021 plt_dp_err("enc_offset: %d", encr_offset);
1022 plt_dp_err("auth_offset: %d", auth_offset);
1027 iv_s = (flags == 0x1) ? params->auth_iv_buf : params->iv_buf;
1029 if (pdcp_alg_type == ROC_SE_PDCP_ALG_TYPE_SNOW3G) {
1031 * DPDK provides the IV as IV3 IV2 IV1 IV0 in big-endian form;
1032 * the microcode (MC) needs it as IV0 IV1 IV2 IV3
1035 for (j = 0; j < 4; j++)
1036 iv[j] = iv_s[3 - j];
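/*
 * Example (illustrative): iv_s = {IV3, IV2, IV1, IV0} as supplied by
 * the application becomes iv = {IV0, IV1, IV2, IV3}, i.e. the loop
 * above reverses the four 32-bit words so the microcode sees IV0 first.
 */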
1038 /* ZUC doesn't need a swap */
1039 for (j = 0; j < 4; j++)
1044 * GP op header, lengths are expected in bits.
1046 cpt_inst_w4.s.param1 = encr_data_len;
1047 cpt_inst_w4.s.param2 = auth_data_len;
1050 * On cn9k and cn10k, the IV & offset control word cannot be part
1051 * of the instruction and must instead be placed in the data
1052 * buffer, so Direct mode processing is done only when enough
1053 * headroom is available; otherwise the SG list path is used.
1055 if (likely((req_flags & ROC_SE_SINGLE_BUF_INPLACE) &&
1056 (req_flags & ROC_SE_SINGLE_BUF_HEADROOM))) {
1057 void *dm_vaddr = params->bufs[0].vaddr;
1059 /* Use Direct mode */
1061 offset_vaddr = (uint64_t *)((uint8_t *)dm_vaddr -
1062 ROC_SE_OFF_CTRL_LEN - iv_len);
1065 inst->dptr = (uint64_t)offset_vaddr;
1066 /* RPTR should just exclude offset control word */
1067 inst->rptr = (uint64_t)dm_vaddr - iv_len;
1069 cpt_inst_w4.s.dlen = inputlen + ROC_SE_OFF_CTRL_LEN;
1071 if (likely(iv_len)) {
1072 uint32_t *iv_d = (uint32_t *)((uint8_t *)offset_vaddr +
1073 ROC_SE_OFF_CTRL_LEN);
1074 memcpy(iv_d, iv, 16);
1077 *offset_vaddr = offset_ctrl;
1079 void *m_vaddr = params->meta_buf.vaddr;
1080 uint32_t i, g_size_bytes, s_size_bytes;
1081 struct roc_se_sglist_comp *gather_comp;
1082 struct roc_se_sglist_comp *scatter_comp;
1086 /* save space for offset control word and iv */
1087 offset_vaddr = m_vaddr;
1089 m_vaddr = (uint8_t *)m_vaddr + ROC_SE_OFF_CTRL_LEN + iv_len;
1091 cpt_inst_w4.s.opcode_major |= (uint64_t)ROC_SE_DMA_MODE;
1093 /* DPTR has SG list */
1094 in_buffer = m_vaddr;
1096 ((uint16_t *)in_buffer)[0] = 0;
1097 ((uint16_t *)in_buffer)[1] = 0;
1099 /* TODO: Add an error check to ensure the available space is sufficient */
1101 (struct roc_se_sglist_comp *)((uint8_t *)m_vaddr + 8);
1108 /* Offset control word followed by iv */
1110 i = fill_sg_comp(gather_comp, i, (uint64_t)offset_vaddr,
1111 ROC_SE_OFF_CTRL_LEN + iv_len);
1113 /* iv offset is 0 */
1114 *offset_vaddr = offset_ctrl;
1116 iv_d = (uint32_t *)((uint8_t *)offset_vaddr +
1117 ROC_SE_OFF_CTRL_LEN);
1118 memcpy(iv_d, iv, 16);
1121 size = inputlen - iv_len;
1123 i = fill_sg_comp_from_iov(gather_comp, i,
1124 params->src_iov, 0, &size,
1126 if (unlikely(size)) {
1127 plt_dp_err("Insufficient buffer space,"
1133 ((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
1135 ((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);
1138 * Output Scatter List
1143 (struct roc_se_sglist_comp *)((uint8_t *)gather_comp +
1147 /* IV in SLIST only for EEA3 & UEA2 */
1152 i = fill_sg_comp(scatter_comp, i,
1153 (uint64_t)offset_vaddr +
1154 ROC_SE_OFF_CTRL_LEN,
1158 /* Add output data */
1159 if (req_flags & ROC_SE_VALID_MAC_BUF) {
1160 size = outputlen - iv_len - mac_len;
1162 i = fill_sg_comp_from_iov(scatter_comp, i,
1166 if (unlikely(size)) {
1167 plt_dp_err("Insufficient buffer space,"
1176 i = fill_sg_comp_from_buf(scatter_comp, i,
1180 /* Output including mac */
1181 size = outputlen - iv_len;
1183 i = fill_sg_comp_from_iov(scatter_comp, i,
1187 if (unlikely(size)) {
1188 plt_dp_err("Insufficient buffer space,"
1195 ((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
1197 ((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);
1199 size = g_size_bytes + s_size_bytes + ROC_SE_SG_LIST_HDR_SIZE;
1201 /* This is DPTR len in case of SG mode */
1202 cpt_inst_w4.s.dlen = size;
1204 inst->dptr = (uint64_t)in_buffer;
1207 inst->w4.u64 = cpt_inst_w4.u64;
1212 static __rte_always_inline int
1213 cpt_zuc_snow3g_dec_prep(uint32_t req_flags, uint64_t d_offs, uint64_t d_lens,
1214 struct roc_se_fc_params *params,
1215 struct cpt_inst_s *inst)
1218 int32_t inputlen = 0, outputlen;
1219 struct roc_se_ctx *se_ctx;
1220 uint8_t pdcp_alg_type, iv_len = 16;
1221 uint32_t encr_offset;
1222 uint32_t encr_data_len;
1224 uint64_t *offset_vaddr;
1225 uint32_t *iv_s, iv[4], j;
1226 union cpt_inst_w4 cpt_inst_w4;
1229 * Microcode expects offsets in bytes
1230 * TODO: Rounding off
1232 encr_offset = ROC_SE_ENCR_OFFSET(d_offs) / 8;
1233 encr_data_len = ROC_SE_ENCR_DLEN(d_lens);
1235 se_ctx = params->ctx_buf.vaddr;
1236 flags = se_ctx->zsk_flags;
1237 pdcp_alg_type = se_ctx->pdcp_alg_type;
1239 cpt_inst_w4.u64 = 0;
1240 cpt_inst_w4.s.opcode_major = ROC_SE_MAJOR_OP_ZUC_SNOW3G;
1242 /* indicates CPTR ctx, operation type, KEY & IV mode from DPTR */
1244 cpt_inst_w4.s.opcode_minor = ((1 << 7) | (pdcp_alg_type << 5) |
1245 (0 << 4) | (0 << 3) | (flags & 0x7));
1247 /* consider iv len */
1248 encr_offset += iv_len;
1250 inputlen = encr_offset + (RTE_ALIGN(encr_data_len, 8) / 8);
1251 outputlen = inputlen;
1254 iv_s = params->iv_buf;
1255 if (pdcp_alg_type == ROC_SE_PDCP_ALG_TYPE_SNOW3G) {
1257 * DPDK provides the IV as IV3 IV2 IV1 IV0 in big-endian form;
1258 * the microcode (MC) needs it as IV0 IV1 IV2 IV3
1261 for (j = 0; j < 4; j++)
1262 iv[j] = iv_s[3 - j];
1264 /* ZUC doesn't need a swap */
1265 for (j = 0; j < 4; j++)
1270 * GP op header, lengths are expected in bits.
1272 cpt_inst_w4.s.param1 = encr_data_len;
1275 * On cn9k and cn10k, the IV & offset control word cannot be part
1276 * of the instruction and must instead be placed in the data
1277 * buffer, so Direct mode processing is done only when enough
1278 * headroom is available; otherwise the SG list path is used.
1280 if (likely((req_flags & ROC_SE_SINGLE_BUF_INPLACE) &&
1281 (req_flags & ROC_SE_SINGLE_BUF_HEADROOM))) {
1282 void *dm_vaddr = params->bufs[0].vaddr;
1284 /* Use Direct mode */
1286 offset_vaddr = (uint64_t *)((uint8_t *)dm_vaddr -
1287 ROC_SE_OFF_CTRL_LEN - iv_len);
1290 inst->dptr = (uint64_t)offset_vaddr;
1292 /* RPTR should just exclude offset control word */
1293 inst->rptr = (uint64_t)dm_vaddr - iv_len;
1295 cpt_inst_w4.s.dlen = inputlen + ROC_SE_OFF_CTRL_LEN;
1297 if (likely(iv_len)) {
1298 uint32_t *iv_d = (uint32_t *)((uint8_t *)offset_vaddr +
1299 ROC_SE_OFF_CTRL_LEN);
1300 memcpy(iv_d, iv, 16);
1303 /* iv offset is 0 */
1304 *offset_vaddr = rte_cpu_to_be_64((uint64_t)encr_offset << 16);
1306 void *m_vaddr = params->meta_buf.vaddr;
1307 uint32_t i, g_size_bytes, s_size_bytes;
1308 struct roc_se_sglist_comp *gather_comp;
1309 struct roc_se_sglist_comp *scatter_comp;
1313 /* save space for offset and iv... */
1314 offset_vaddr = m_vaddr;
1316 m_vaddr = (uint8_t *)m_vaddr + ROC_SE_OFF_CTRL_LEN + iv_len;
1318 cpt_inst_w4.s.opcode_major |= (uint64_t)ROC_SE_DMA_MODE;
1320 /* DPTR has SG list */
1321 in_buffer = m_vaddr;
1323 ((uint16_t *)in_buffer)[0] = 0;
1324 ((uint16_t *)in_buffer)[1] = 0;
1326 /* TODO: Add an error check to ensure the available space is sufficient */
1328 (struct roc_se_sglist_comp *)((uint8_t *)m_vaddr + 8);
1335 /* Offset control word */
1337 /* iv offset is 0 */
1338 *offset_vaddr = rte_cpu_to_be_64((uint64_t)encr_offset << 16);
1340 i = fill_sg_comp(gather_comp, i, (uint64_t)offset_vaddr,
1341 ROC_SE_OFF_CTRL_LEN + iv_len);
1343 iv_d = (uint32_t *)((uint8_t *)offset_vaddr +
1344 ROC_SE_OFF_CTRL_LEN);
1345 memcpy(iv_d, iv, 16);
1347 /* Add input data */
1348 size = inputlen - iv_len;
1350 i = fill_sg_comp_from_iov(gather_comp, i,
1351 params->src_iov, 0, &size,
1353 if (unlikely(size)) {
1354 plt_dp_err("Insufficient buffer space,"
1360 ((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
1362 ((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);
1365 * Output Scatter List
1370 (struct roc_se_sglist_comp *)((uint8_t *)gather_comp +
1374 i = fill_sg_comp(scatter_comp, i,
1375 (uint64_t)offset_vaddr + ROC_SE_OFF_CTRL_LEN,
1378 /* Add output data */
1379 size = outputlen - iv_len;
1381 i = fill_sg_comp_from_iov(scatter_comp, i,
1382 params->dst_iov, 0, &size,
1385 if (unlikely(size)) {
1386 plt_dp_err("Insufficient buffer space,"
1392 ((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
1394 ((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);
1396 size = g_size_bytes + s_size_bytes + ROC_SE_SG_LIST_HDR_SIZE;
1398 /* This is DPTR len in case of SG mode */
1399 cpt_inst_w4.s.dlen = size;
1401 inst->dptr = (uint64_t)in_buffer;
1404 if (unlikely((encr_offset >> 16))) {
1405 plt_dp_err("Offset not supported");
1406 plt_dp_err("enc_offset: %d", encr_offset);
1410 inst->w4.u64 = cpt_inst_w4.u64;
1415 static __rte_always_inline int
1416 cpt_kasumi_enc_prep(uint32_t req_flags, uint64_t d_offs, uint64_t d_lens,
1417 struct roc_se_fc_params *params, struct cpt_inst_s *inst)
1419 void *m_vaddr = params->meta_buf.vaddr;
1421 int32_t inputlen = 0, outputlen = 0;
1422 struct roc_se_ctx *se_ctx;
1423 uint32_t mac_len = 0;
1425 uint32_t encr_offset, auth_offset;
1426 uint32_t encr_data_len, auth_data_len;
1428 uint8_t *iv_s, *iv_d, iv_len = 8;
1430 uint64_t *offset_vaddr;
1431 union cpt_inst_w4 cpt_inst_w4;
1433 uint32_t g_size_bytes, s_size_bytes;
1434 struct roc_se_sglist_comp *gather_comp;
1435 struct roc_se_sglist_comp *scatter_comp;
1437 encr_offset = ROC_SE_ENCR_OFFSET(d_offs) / 8;
1438 auth_offset = ROC_SE_AUTH_OFFSET(d_offs) / 8;
1439 encr_data_len = ROC_SE_ENCR_DLEN(d_lens);
1440 auth_data_len = ROC_SE_AUTH_DLEN(d_lens);
1442 se_ctx = params->ctx_buf.vaddr;
1443 flags = se_ctx->zsk_flags;
1444 mac_len = se_ctx->mac_len;
1447 iv_s = params->iv_buf;
1449 iv_s = params->auth_iv_buf;
1451 dir = iv_s[8] & 0x1;
1453 cpt_inst_w4.s.opcode_major = ROC_SE_MAJOR_OP_KASUMI | ROC_SE_DMA_MODE;
1455 /* indicates ECB/CBC, direction, ctx from cptr, iv from dptr */
1456 cpt_inst_w4.s.opcode_minor = ((1 << 6) | (se_ctx->k_ecb << 5) |
1457 (dir << 4) | (0 << 3) | (flags & 0x7));
1460 * GP op header, lengths are expected in bits.
1462 cpt_inst_w4.s.param1 = encr_data_len;
1463 cpt_inst_w4.s.param2 = auth_data_len;
1465 /* consider iv len */
1467 encr_offset += iv_len;
1468 auth_offset += iv_len;
1471 /* save space for offset ctrl and iv */
1472 offset_vaddr = m_vaddr;
1474 m_vaddr = (uint8_t *)m_vaddr + ROC_SE_OFF_CTRL_LEN + iv_len;
1476 /* DPTR has SG list */
1477 in_buffer = m_vaddr;
1479 ((uint16_t *)in_buffer)[0] = 0;
1480 ((uint16_t *)in_buffer)[1] = 0;
1482 /* TODO: Add an error check to ensure the available space is sufficient */
1483 gather_comp = (struct roc_se_sglist_comp *)((uint8_t *)m_vaddr + 8);
1490 /* Offset control word followed by iv */
1493 inputlen = encr_offset + (RTE_ALIGN(encr_data_len, 8) / 8);
1494 outputlen = inputlen;
1495 /* iv offset is 0 */
1496 *offset_vaddr = rte_cpu_to_be_64((uint64_t)encr_offset << 16);
1497 if (unlikely((encr_offset >> 16))) {
1498 plt_dp_err("Offset not supported");
1499 plt_dp_err("enc_offset: %d", encr_offset);
1503 inputlen = auth_offset + (RTE_ALIGN(auth_data_len, 8) / 8);
1504 outputlen = mac_len;
1505 /* iv offset is 0 */
1506 *offset_vaddr = rte_cpu_to_be_64((uint64_t)auth_offset);
1507 if (unlikely((auth_offset >> 8))) {
1508 plt_dp_err("Offset not supported");
1509 plt_dp_err("auth_offset: %d", auth_offset);
1514 i = fill_sg_comp(gather_comp, i, (uint64_t)offset_vaddr,
1515 ROC_SE_OFF_CTRL_LEN + iv_len);
1518 iv_d = (uint8_t *)offset_vaddr + ROC_SE_OFF_CTRL_LEN;
1519 memcpy(iv_d, iv_s, iv_len);
1522 size = inputlen - iv_len;
1524 i = fill_sg_comp_from_iov(gather_comp, i, params->src_iov, 0,
1527 if (unlikely(size)) {
1528 plt_dp_err("Insufficient buffer space,"
1534 ((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
1535 g_size_bytes = ((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);
1538 * Output Scatter List
1542 scatter_comp = (struct roc_se_sglist_comp *)((uint8_t *)gather_comp +
1546 /* IV in SLIST only for F8 */
1552 i = fill_sg_comp(scatter_comp, i,
1553 (uint64_t)offset_vaddr + ROC_SE_OFF_CTRL_LEN,
1557 /* Add output data */
1558 if (req_flags & ROC_SE_VALID_MAC_BUF) {
1559 size = outputlen - iv_len - mac_len;
1561 i = fill_sg_comp_from_iov(scatter_comp, i,
1562 params->dst_iov, 0, &size,
1565 if (unlikely(size)) {
1566 plt_dp_err("Insufficient buffer space,"
1575 i = fill_sg_comp_from_buf(scatter_comp, i,
1579 /* Output including mac */
1580 size = outputlen - iv_len;
1582 i = fill_sg_comp_from_iov(scatter_comp, i,
1583 params->dst_iov, 0, &size,
1586 if (unlikely(size)) {
1587 plt_dp_err("Insufficient buffer space,"
1594 ((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
1595 s_size_bytes = ((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);
1597 size = g_size_bytes + s_size_bytes + ROC_SE_SG_LIST_HDR_SIZE;
1599 /* This is DPTR len in case of SG mode */
1600 cpt_inst_w4.s.dlen = size;
1602 inst->dptr = (uint64_t)in_buffer;
1603 inst->w4.u64 = cpt_inst_w4.u64;
1608 static __rte_always_inline int
1609 cpt_kasumi_dec_prep(uint64_t d_offs, uint64_t d_lens,
1610 struct roc_se_fc_params *params, struct cpt_inst_s *inst)
1612 void *m_vaddr = params->meta_buf.vaddr;
1614 int32_t inputlen = 0, outputlen;
1615 struct roc_se_ctx *se_ctx;
1616 uint8_t i = 0, iv_len = 8;
1617 uint32_t encr_offset;
1618 uint32_t encr_data_len;
1621 uint64_t *offset_vaddr;
1622 union cpt_inst_w4 cpt_inst_w4;
1624 uint32_t g_size_bytes, s_size_bytes;
1625 struct roc_se_sglist_comp *gather_comp;
1626 struct roc_se_sglist_comp *scatter_comp;
1628 encr_offset = ROC_SE_ENCR_OFFSET(d_offs) / 8;
1629 encr_data_len = ROC_SE_ENCR_DLEN(d_lens);
1631 se_ctx = params->ctx_buf.vaddr;
1632 flags = se_ctx->zsk_flags;
1634 cpt_inst_w4.u64 = 0;
1635 cpt_inst_w4.s.opcode_major = ROC_SE_MAJOR_OP_KASUMI | ROC_SE_DMA_MODE;
1637 /* indicates ECB/CBC, direction, ctx from cptr, iv from dptr */
1638 cpt_inst_w4.s.opcode_minor = ((1 << 6) | (se_ctx->k_ecb << 5) |
1639 (dir << 4) | (0 << 3) | (flags & 0x7));
1642 * GP op header, lengths are expected in bits.
1644 cpt_inst_w4.s.param1 = encr_data_len;
1646 /* consider iv len */
1647 encr_offset += iv_len;
1649 inputlen = iv_len + (RTE_ALIGN(encr_data_len, 8) / 8);
1650 outputlen = inputlen;
1652 /* save space for offset ctrl & iv */
1653 offset_vaddr = m_vaddr;
1655 m_vaddr = (uint8_t *)m_vaddr + ROC_SE_OFF_CTRL_LEN + iv_len;
1657 /* DPTR has SG list */
1658 in_buffer = m_vaddr;
1660 ((uint16_t *)in_buffer)[0] = 0;
1661 ((uint16_t *)in_buffer)[1] = 0;
1663 /* TODO: Add an error check to ensure the available space is sufficient */
1664 gather_comp = (struct roc_se_sglist_comp *)((uint8_t *)m_vaddr + 8);
1671 /* Offset control word followed by iv */
1672 *offset_vaddr = rte_cpu_to_be_64((uint64_t)encr_offset << 16);
1673 if (unlikely((encr_offset >> 16))) {
1674 plt_dp_err("Offset not supported");
1675 plt_dp_err("enc_offset: %d", encr_offset);
1679 i = fill_sg_comp(gather_comp, i, (uint64_t)offset_vaddr,
1680 ROC_SE_OFF_CTRL_LEN + iv_len);
1683 memcpy((uint8_t *)offset_vaddr + ROC_SE_OFF_CTRL_LEN, params->iv_buf,
1686 /* Add input data */
1687 size = inputlen - iv_len;
1689 i = fill_sg_comp_from_iov(gather_comp, i, params->src_iov, 0,
1691 if (unlikely(size)) {
1692 plt_dp_err("Insufficient buffer space,"
1698 ((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
1699 g_size_bytes = ((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);
1702 * Output Scatter List
1706 scatter_comp = (struct roc_se_sglist_comp *)((uint8_t *)gather_comp +
1710 i = fill_sg_comp(scatter_comp, i,
1711 (uint64_t)offset_vaddr + ROC_SE_OFF_CTRL_LEN, iv_len);
1713 /* Add output data */
1714 size = outputlen - iv_len;
1716 i = fill_sg_comp_from_iov(scatter_comp, i, params->dst_iov, 0,
1718 if (unlikely(size)) {
1719 plt_dp_err("Insufficient buffer space,"
1725 ((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
1726 s_size_bytes = ((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);
1728 size = g_size_bytes + s_size_bytes + ROC_SE_SG_LIST_HDR_SIZE;
1730 /* This is DPTR len in case of SG mode */
1731 cpt_inst_w4.s.dlen = size;
1733 inst->dptr = (uint64_t)in_buffer;
1734 inst->w4.u64 = cpt_inst_w4.u64;
1739 static __rte_always_inline int
1740 cpt_fc_dec_hmac_prep(uint32_t flags, uint64_t d_offs, uint64_t d_lens,
1741 struct roc_se_fc_params *fc_params,
1742 struct cpt_inst_s *inst)
1744 struct roc_se_ctx *ctx = fc_params->ctx_buf.vaddr;
1748 fc_type = ctx->fc_type;
1750 if (likely(fc_type == ROC_SE_FC_GEN)) {
1751 ret = cpt_dec_hmac_prep(flags, d_offs, d_lens, fc_params, inst);
1752 } else if (fc_type == ROC_SE_PDCP) {
1753 ret = cpt_zuc_snow3g_dec_prep(flags, d_offs, d_lens, fc_params,
1755 } else if (fc_type == ROC_SE_KASUMI) {
1756 ret = cpt_kasumi_dec_prep(d_offs, d_lens, fc_params, inst);
1760 * For the AUTH_ONLY case,
1761 * MC only supports digest generation; verification
1762 * should be done in software by memcmp()
1768 static __rte_always_inline int
1769 cpt_fc_enc_hmac_prep(uint32_t flags, uint64_t d_offs, uint64_t d_lens,
1770 struct roc_se_fc_params *fc_params,
1771 struct cpt_inst_s *inst)
1773 struct roc_se_ctx *ctx = fc_params->ctx_buf.vaddr;
1777 fc_type = ctx->fc_type;
1779 if (likely(fc_type == ROC_SE_FC_GEN)) {
1780 ret = cpt_enc_hmac_prep(flags, d_offs, d_lens, fc_params, inst);
1781 } else if (fc_type == ROC_SE_PDCP) {
1782 ret = cpt_zuc_snow3g_enc_prep(flags, d_offs, d_lens, fc_params,
1784 } else if (fc_type == ROC_SE_KASUMI) {
1785 ret = cpt_kasumi_enc_prep(flags, d_offs, d_lens, fc_params,
1787 } else if (fc_type == ROC_SE_HASH_HMAC) {
1788 ret = cpt_digest_gen_prep(flags, d_lens, fc_params, inst);
1794 static __rte_always_inline int
1795 fill_sess_aead(struct rte_crypto_sym_xform *xform, struct cnxk_se_sess *sess)
1797 struct rte_crypto_aead_xform *aead_form;
1798 roc_se_cipher_type enc_type = 0; /* NULL Cipher type */
1799 roc_se_auth_type auth_type = 0; /* NULL Auth type */
1800 uint32_t cipher_key_len = 0;
1801 uint8_t aes_gcm = 0;
1802 aead_form = &xform->aead;
1804 if (aead_form->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) {
1805 sess->cpt_op |= ROC_SE_OP_CIPHER_ENCRYPT;
1806 sess->cpt_op |= ROC_SE_OP_AUTH_GENERATE;
1807 } else if (aead_form->op == RTE_CRYPTO_AEAD_OP_DECRYPT) {
1808 sess->cpt_op |= ROC_SE_OP_CIPHER_DECRYPT;
1809 sess->cpt_op |= ROC_SE_OP_AUTH_VERIFY;
1811 plt_dp_err("Unknown aead operation");
1814 switch (aead_form->algo) {
1815 case RTE_CRYPTO_AEAD_AES_GCM:
1816 enc_type = ROC_SE_AES_GCM;
1817 cipher_key_len = 16;
1820 case RTE_CRYPTO_AEAD_AES_CCM:
1821 plt_dp_err("Crypto: Unsupported cipher algo %u",
1824 case RTE_CRYPTO_AEAD_CHACHA20_POLY1305:
1825 enc_type = ROC_SE_CHACHA20;
1826 auth_type = ROC_SE_POLY1305;
1827 cipher_key_len = 32;
1828 sess->chacha_poly = 1;
1831 plt_dp_err("Crypto: Undefined cipher algo %u specified",
1835 if (aead_form->key.length < cipher_key_len) {
1836 plt_dp_err("Invalid cipher params keylen %u",
1837 aead_form->key.length);
1841 sess->aes_gcm = aes_gcm;
1842 sess->mac_len = aead_form->digest_length;
1843 sess->iv_offset = aead_form->iv.offset;
1844 sess->iv_length = aead_form->iv.length;
1845 sess->aad_length = aead_form->aad_length;
1847 if (unlikely(roc_se_ciph_key_set(&sess->roc_se_ctx, enc_type,
1848 aead_form->key.data,
1849 aead_form->key.length, NULL)))
1852 if (unlikely(roc_se_auth_key_set(&sess->roc_se_ctx, auth_type, NULL, 0,
1853 aead_form->digest_length)))
1859 static __rte_always_inline int
1860 fill_sess_cipher(struct rte_crypto_sym_xform *xform, struct cnxk_se_sess *sess)
1862 struct rte_crypto_cipher_xform *c_form;
1863 roc_se_cipher_type enc_type = 0; /* NULL Cipher type */
1864 uint32_t cipher_key_len = 0;
1865 uint8_t zsk_flag = 0, aes_ctr = 0, is_null = 0;
1867 c_form = &xform->cipher;
1869 if (c_form->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
1870 sess->cpt_op |= ROC_SE_OP_CIPHER_ENCRYPT;
1871 else if (c_form->op == RTE_CRYPTO_CIPHER_OP_DECRYPT) {
1872 sess->cpt_op |= ROC_SE_OP_CIPHER_DECRYPT;
1873 if (xform->next != NULL &&
1874 xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
1875 /* Perform decryption followed by auth verify */
1876 sess->roc_se_ctx.template_w4.s.opcode_minor =
1877 ROC_SE_FC_MINOR_OP_HMAC_FIRST;
1880 plt_dp_err("Unknown cipher operation");
1884 switch (c_form->algo) {
1885 case RTE_CRYPTO_CIPHER_AES_CBC:
1886 enc_type = ROC_SE_AES_CBC;
1887 cipher_key_len = 16;
1889 case RTE_CRYPTO_CIPHER_3DES_CBC:
1890 enc_type = ROC_SE_DES3_CBC;
1891 cipher_key_len = 24;
1893 case RTE_CRYPTO_CIPHER_DES_CBC:
1894 /* DES is implemented using 3DES in hardware */
1895 enc_type = ROC_SE_DES3_CBC;
1898 case RTE_CRYPTO_CIPHER_AES_CTR:
1899 enc_type = ROC_SE_AES_CTR;
1900 cipher_key_len = 16;
1903 case RTE_CRYPTO_CIPHER_NULL:
1907 case RTE_CRYPTO_CIPHER_KASUMI_F8:
1908 enc_type = ROC_SE_KASUMI_F8_ECB;
1909 cipher_key_len = 16;
1910 zsk_flag = ROC_SE_K_F8;
1912 case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
1913 enc_type = ROC_SE_SNOW3G_UEA2;
1914 cipher_key_len = 16;
1915 zsk_flag = ROC_SE_ZS_EA;
1917 case RTE_CRYPTO_CIPHER_ZUC_EEA3:
1918 enc_type = ROC_SE_ZUC_EEA3;
1919 cipher_key_len = 16;
1920 zsk_flag = ROC_SE_ZS_EA;
1922 case RTE_CRYPTO_CIPHER_AES_XTS:
1923 enc_type = ROC_SE_AES_XTS;
1924 cipher_key_len = 16;
1926 case RTE_CRYPTO_CIPHER_3DES_ECB:
1927 enc_type = ROC_SE_DES3_ECB;
1928 cipher_key_len = 24;
1930 case RTE_CRYPTO_CIPHER_AES_ECB:
1931 enc_type = ROC_SE_AES_ECB;
1932 cipher_key_len = 16;
1934 case RTE_CRYPTO_CIPHER_3DES_CTR:
1935 case RTE_CRYPTO_CIPHER_AES_F8:
1936 case RTE_CRYPTO_CIPHER_ARC4:
1937 plt_dp_err("Crypto: Unsupported cipher algo %u", c_form->algo);
1940 plt_dp_err("Crypto: Undefined cipher algo %u specified",
1945 if (c_form->key.length < cipher_key_len) {
1946 plt_dp_err("Invalid cipher params keylen %u",
1947 c_form->key.length);
1951 sess->zsk_flag = zsk_flag;
1953 sess->aes_ctr = aes_ctr;
1954 sess->iv_offset = c_form->iv.offset;
1955 sess->iv_length = c_form->iv.length;
1956 sess->is_null = is_null;
1958 if (unlikely(roc_se_ciph_key_set(&sess->roc_se_ctx, enc_type,
1959 c_form->key.data, c_form->key.length,
1966 static __rte_always_inline int
1967 fill_sess_auth(struct rte_crypto_sym_xform *xform, struct cnxk_se_sess *sess)
1969 struct rte_crypto_auth_xform *a_form;
1970 roc_se_auth_type auth_type = 0; /* NULL Auth type */
1971 uint8_t zsk_flag = 0, aes_gcm = 0, is_null = 0;
1973 if (xform->next != NULL &&
1974 xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
1975 xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
1976 /* Perform auth followed by encryption */
1977 sess->roc_se_ctx.template_w4.s.opcode_minor =
1978 ROC_SE_FC_MINOR_OP_HMAC_FIRST;
1981 a_form = &xform->auth;
1983 if (a_form->op == RTE_CRYPTO_AUTH_OP_VERIFY)
1984 sess->cpt_op |= ROC_SE_OP_AUTH_VERIFY;
1985 else if (a_form->op == RTE_CRYPTO_AUTH_OP_GENERATE)
1986 sess->cpt_op |= ROC_SE_OP_AUTH_GENERATE;
1988 plt_dp_err("Unknown auth operation");
1992 switch (a_form->algo) {
1993 case RTE_CRYPTO_AUTH_SHA1_HMAC:
1995 case RTE_CRYPTO_AUTH_SHA1:
1996 auth_type = ROC_SE_SHA1_TYPE;
1998 case RTE_CRYPTO_AUTH_SHA256_HMAC:
1999 case RTE_CRYPTO_AUTH_SHA256:
2000 auth_type = ROC_SE_SHA2_SHA256;
2002 case RTE_CRYPTO_AUTH_SHA512_HMAC:
2003 case RTE_CRYPTO_AUTH_SHA512:
2004 auth_type = ROC_SE_SHA2_SHA512;
2006 case RTE_CRYPTO_AUTH_AES_GMAC:
2007 auth_type = ROC_SE_GMAC_TYPE;
2010 case RTE_CRYPTO_AUTH_SHA224_HMAC:
2011 case RTE_CRYPTO_AUTH_SHA224:
2012 auth_type = ROC_SE_SHA2_SHA224;
2014 case RTE_CRYPTO_AUTH_SHA384_HMAC:
2015 case RTE_CRYPTO_AUTH_SHA384:
2016 auth_type = ROC_SE_SHA2_SHA384;
2018 case RTE_CRYPTO_AUTH_MD5_HMAC:
2019 case RTE_CRYPTO_AUTH_MD5:
2020 auth_type = ROC_SE_MD5_TYPE;
2022 case RTE_CRYPTO_AUTH_KASUMI_F9:
2023 auth_type = ROC_SE_KASUMI_F9_ECB;
2025 * Indicate that direction needs to be taken out
2028 zsk_flag = ROC_SE_K_F9;
2030 case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
2031 auth_type = ROC_SE_SNOW3G_UIA2;
2032 zsk_flag = ROC_SE_ZS_IA;
2034 case RTE_CRYPTO_AUTH_ZUC_EIA3:
2035 auth_type = ROC_SE_ZUC_EIA3;
2036 zsk_flag = ROC_SE_ZS_IA;
2038 case RTE_CRYPTO_AUTH_NULL:
2042 case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
2043 case RTE_CRYPTO_AUTH_AES_CMAC:
2044 case RTE_CRYPTO_AUTH_AES_CBC_MAC:
2045 plt_dp_err("Crypto: Unsupported hash algo %u", a_form->algo);
2048 plt_dp_err("Crypto: Undefined Hash algo %u specified",
2053 sess->zsk_flag = zsk_flag;
2054 sess->aes_gcm = aes_gcm;
2055 sess->mac_len = a_form->digest_length;
2056 sess->is_null = is_null;
2058 sess->auth_iv_offset = a_form->iv.offset;
2059 sess->auth_iv_length = a_form->iv.length;
2061 if (unlikely(roc_se_auth_key_set(&sess->roc_se_ctx, auth_type,
2062 a_form->key.data, a_form->key.length,
2063 a_form->digest_length)))
2069 static __rte_always_inline int
2070 fill_sess_gmac(struct rte_crypto_sym_xform *xform, struct cnxk_se_sess *sess)
2072 struct rte_crypto_auth_xform *a_form;
2073 roc_se_cipher_type enc_type = 0; /* NULL Cipher type */
2074 roc_se_auth_type auth_type = 0; /* NULL Auth type */
2076 a_form = &xform->auth;
2078 if (a_form->op == RTE_CRYPTO_AUTH_OP_GENERATE)
2079 sess->cpt_op |= ROC_SE_OP_ENCODE;
2080 else if (a_form->op == RTE_CRYPTO_AUTH_OP_VERIFY)
2081 sess->cpt_op |= ROC_SE_OP_DECODE;
2083 plt_dp_err("Unknown auth operation");
2087 switch (a_form->algo) {
2088 case RTE_CRYPTO_AUTH_AES_GMAC:
2089 enc_type = ROC_SE_AES_GCM;
2090 auth_type = ROC_SE_GMAC_TYPE;
2093 plt_dp_err("Crypto: Undefined cipher algo %u specified",
2101 sess->iv_offset = a_form->iv.offset;
2102 sess->iv_length = a_form->iv.length;
2103 sess->mac_len = a_form->digest_length;
2105 if (unlikely(roc_se_ciph_key_set(&sess->roc_se_ctx, enc_type,
2106 a_form->key.data, a_form->key.length,
2110 if (unlikely(roc_se_auth_key_set(&sess->roc_se_ctx, auth_type, NULL, 0,
2111 a_form->digest_length)))
2117 static __rte_always_inline void *
2118 alloc_op_meta(struct roc_se_buf_ptr *buf, int32_t len,
2119 struct rte_mempool *cpt_meta_pool,
2120 struct cpt_inflight_req *infl_req)
2124 if (unlikely(rte_mempool_get(cpt_meta_pool, (void **)&mdata) < 0))
2130 infl_req->mdata = mdata;
2131 infl_req->op_flags |= CPT_OP_FLAGS_METABUF;
2136 static __rte_always_inline uint32_t
2137 prepare_iov_from_pkt(struct rte_mbuf *pkt, struct roc_se_iov_ptr *iovec,
2138 uint32_t start_offset)
2141 void *seg_data = NULL;
2142 int32_t seg_size = 0;
2149 if (!start_offset) {
2150 seg_data = rte_pktmbuf_mtod(pkt, void *);
2151 seg_size = pkt->data_len;
2153 while (start_offset >= pkt->data_len) {
2154 start_offset -= pkt->data_len;
2158 seg_data = rte_pktmbuf_mtod_offset(pkt, void *, start_offset);
2159 seg_size = pkt->data_len - start_offset;
2165 iovec->bufs[index].vaddr = seg_data;
2166 iovec->bufs[index].size = seg_size;
2170 while (unlikely(pkt != NULL)) {
2171 seg_data = rte_pktmbuf_mtod(pkt, void *);
2172 seg_size = pkt->data_len;
2176 iovec->bufs[index].vaddr = seg_data;
2177 iovec->bufs[index].size = seg_size;
2184 iovec->buf_cnt = index;
2188 static __rte_always_inline uint32_t
2189 prepare_iov_from_pkt_inplace(struct rte_mbuf *pkt,
2190 struct roc_se_fc_params *param, uint32_t *flags)
2193 void *seg_data = NULL;
2194 uint32_t seg_size = 0;
2195 struct roc_se_iov_ptr *iovec;
2197 seg_data = rte_pktmbuf_mtod(pkt, void *);
2198 seg_size = pkt->data_len;
2201 if (likely(!pkt->next)) {
2204 *flags |= ROC_SE_SINGLE_BUF_INPLACE;
2205 headroom = rte_pktmbuf_headroom(pkt);
2206 if (likely(headroom >= 24))
2207 *flags |= ROC_SE_SINGLE_BUF_HEADROOM;
2209 param->bufs[0].vaddr = seg_data;
2210 param->bufs[0].size = seg_size;
2213 iovec = param->src_iov;
2214 iovec->bufs[index].vaddr = seg_data;
2215 iovec->bufs[index].size = seg_size;
2219 while (unlikely(pkt != NULL)) {
2220 seg_data = rte_pktmbuf_mtod(pkt, void *);
2221 seg_size = pkt->data_len;
2226 iovec->bufs[index].vaddr = seg_data;
2227 iovec->bufs[index].size = seg_size;
2234 iovec->buf_cnt = index;
2238 static __rte_always_inline int
2239 fill_fc_params(struct rte_crypto_op *cop, struct cnxk_se_sess *sess,
2240 struct cpt_qp_meta_info *m_info,
2241 struct cpt_inflight_req *infl_req, struct cpt_inst_s *inst)
2243 struct roc_se_ctx *ctx = &sess->roc_se_ctx;
2244 uint8_t op_minor = ctx->template_w4.s.opcode_minor;
2245 struct rte_crypto_sym_op *sym_op = cop->sym;
2247 uint32_t mc_hash_off;
2249 uint64_t d_offs, d_lens;
2250 struct rte_mbuf *m_src, *m_dst;
2251 uint8_t cpt_op = sess->cpt_op;
2252 #ifdef CPT_ALWAYS_USE_SG_MODE
2253 uint8_t inplace = 0;
2255 uint8_t inplace = 1;
2257 struct roc_se_fc_params fc_params;
2258 char src[SRC_IOV_SIZE];
2259 char dst[SRC_IOV_SIZE];
2263 if (likely(sess->iv_length)) {
2264 flags |= ROC_SE_VALID_IV_BUF;
2265 fc_params.iv_buf = rte_crypto_op_ctod_offset(cop, uint8_t *,
2267 if (sess->aes_ctr && unlikely(sess->iv_length != 16)) {
2268 memcpy((uint8_t *)iv_buf,
2269 rte_crypto_op_ctod_offset(cop, uint8_t *,
2272 iv_buf[3] = rte_cpu_to_be_32(0x1);
2273 fc_params.iv_buf = iv_buf;
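/*
 * Illustrative note: for AES-CTR with a short IV (e.g. the common
 * 12-byte nonce), the application IV is copied into a local 16-byte
 * block and the last 32-bit word is set to big-endian 1, i.e.
 * iv_buf = { nonce bytes, 0x00000001 }, which is then used in place
 * of the application-supplied IV.
 */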
2277 if (sess->zsk_flag) {
2278 fc_params.auth_iv_buf = rte_crypto_op_ctod_offset(
2279 cop, uint8_t *, sess->auth_iv_offset);
2280 if (sess->zsk_flag != ROC_SE_ZS_EA)
2283 m_src = sym_op->m_src;
2284 m_dst = sym_op->m_dst;
2286 if (sess->aes_gcm || sess->chacha_poly) {
2291 d_offs = sym_op->aead.data.offset;
2292 d_lens = sym_op->aead.data.length;
2294 sym_op->aead.data.offset + sym_op->aead.data.length;
2296 aad_data = sym_op->aead.aad.data;
2297 aad_len = sess->aad_length;
2298 if (likely((aad_data + aad_len) ==
2299 rte_pktmbuf_mtod_offset(m_src, uint8_t *,
2300 sym_op->aead.data.offset))) {
2301 d_offs = (d_offs - aad_len) | (d_offs << 16);
2302 d_lens = (d_lens + aad_len) | (d_lens << 32);
2304 fc_params.aad_buf.vaddr = sym_op->aead.aad.data;
2305 fc_params.aad_buf.size = aad_len;
2306 flags |= ROC_SE_VALID_AAD_BUF;
2308 d_offs = d_offs << 16;
2309 d_lens = d_lens << 32;
2312 salt = fc_params.iv_buf;
2313 if (unlikely(*(uint32_t *)salt != sess->salt)) {
2314 cpt_fc_salt_update(&sess->roc_se_ctx, salt);
2315 sess->salt = *(uint32_t *)salt;
2317 fc_params.iv_buf = salt + 4;
2318 if (likely(sess->mac_len)) {
2319 struct rte_mbuf *m =
2320 (cpt_op & ROC_SE_OP_ENCODE) ? m_dst : m_src;
2325 /* hmac immediately following data is best case */
2326 if (unlikely(rte_pktmbuf_mtod(m, uint8_t *) +
2328 (uint8_t *)sym_op->aead.digest.data)) {
2329 flags |= ROC_SE_VALID_MAC_BUF;
2330 fc_params.mac_buf.size = sess->mac_len;
2331 fc_params.mac_buf.vaddr =
2332 sym_op->aead.digest.data;
2337 d_offs = sym_op->cipher.data.offset;
2338 d_lens = sym_op->cipher.data.length;
2340 sym_op->cipher.data.offset + sym_op->cipher.data.length;
2341 d_offs = (d_offs << 16) | sym_op->auth.data.offset;
2342 d_lens = (d_lens << 32) | sym_op->auth.data.length;
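/*
 * Worked example (illustrative): with cipher data at offset 16, length
 * 64 and auth data at offset 0, length 80, this packs
 * d_offs = (16 << 16) | 0 and d_lens = (64 << 32) | 80, which the prep
 * routines above unpack via ROC_SE_ENCR_OFFSET()/ROC_SE_AUTH_OFFSET()
 * and ROC_SE_ENCR_DLEN()/ROC_SE_AUTH_DLEN().
 */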
2345 (sym_op->auth.data.offset + sym_op->auth.data.length)) {
2346 mc_hash_off = (sym_op->auth.data.offset +
2347 sym_op->auth.data.length);
2349 /* for gmac, salt should be updated like in gcm */
2350 if (unlikely(sess->is_gmac)) {
2352 salt = fc_params.iv_buf;
2353 if (unlikely(*(uint32_t *)salt != sess->salt)) {
2354 cpt_fc_salt_update(&sess->roc_se_ctx, salt);
2355 sess->salt = *(uint32_t *)salt;
2357 fc_params.iv_buf = salt + 4;
2359 if (likely(sess->mac_len)) {
2362 m = (cpt_op & ROC_SE_OP_ENCODE) ? m_dst : m_src;
2366 /* hmac immediately following data is best case */
2367 if (!(op_minor & ROC_SE_FC_MINOR_OP_HMAC_FIRST) &&
2368 (unlikely(rte_pktmbuf_mtod(m, uint8_t *) +
2370 (uint8_t *)sym_op->auth.digest.data))) {
2371 flags |= ROC_SE_VALID_MAC_BUF;
2372 fc_params.mac_buf.size = sess->mac_len;
2373 fc_params.mac_buf.vaddr =
2374 sym_op->auth.digest.data;
2379 fc_params.ctx_buf.vaddr = &sess->roc_se_ctx;
2381 if (!(op_minor & ROC_SE_FC_MINOR_OP_HMAC_FIRST) &&
2382 unlikely(sess->is_null || sess->cpt_op == ROC_SE_OP_DECODE))
2385 if (likely(!m_dst && inplace)) {
2386 /* Case of single buffer without AAD buf or
2387 * separate mac buf in place and
2390 fc_params.dst_iov = fc_params.src_iov = (void *)src;
2392 if (unlikely(prepare_iov_from_pkt_inplace(m_src, &fc_params,
2394 plt_dp_err("Prepare inplace src iov failed");
2400 /* Out of place processing */
2401 fc_params.src_iov = (void *)src;
2402 fc_params.dst_iov = (void *)dst;
2404 /* Store SG I/O in the api for reuse */
2405 if (prepare_iov_from_pkt(m_src, fc_params.src_iov, 0)) {
2406 plt_dp_err("Prepare src iov failed");
2411 if (unlikely(m_dst != NULL)) {
2414 /* Try to make as much room as src has */
2415 pkt_len = rte_pktmbuf_pkt_len(m_dst);
2417 if (unlikely(pkt_len < rte_pktmbuf_pkt_len(m_src))) {
2418 pkt_len = rte_pktmbuf_pkt_len(m_src) - pkt_len;
2419 if (!rte_pktmbuf_append(m_dst, pkt_len)) {
2420 plt_dp_err("Not enough space in "
2429 if (prepare_iov_from_pkt(m_dst, fc_params.dst_iov, 0)) {
2430 plt_dp_err("Prepare dst iov failed for "
2437 fc_params.dst_iov = (void *)src;
2441 if (unlikely(!((flags & ROC_SE_SINGLE_BUF_INPLACE) &&
2442 (flags & ROC_SE_SINGLE_BUF_HEADROOM) &&
2443 ((ctx->fc_type == ROC_SE_FC_GEN) ||
2444 (ctx->fc_type == ROC_SE_PDCP))))) {
2445 mdata = alloc_op_meta(&fc_params.meta_buf, m_info->mlen,
2446 m_info->pool, infl_req);
2447 if (mdata == NULL) {
2448 plt_dp_err("Error allocating meta buffer for request");
2453 /* Finally prepare the instruction */
2454 if (cpt_op & ROC_SE_OP_ENCODE)
2455 ret = cpt_fc_enc_hmac_prep(flags, d_offs, d_lens, &fc_params,
2458 ret = cpt_fc_dec_hmac_prep(flags, d_offs, d_lens, &fc_params,
2461 if (unlikely(ret)) {
2462 plt_dp_err("Preparing request failed due to bad input arg");
2463 goto free_mdata_and_exit;
2468 free_mdata_and_exit:
2469 if (infl_req->op_flags & CPT_OP_FLAGS_METABUF)
2470 rte_mempool_put(m_info->pool, infl_req->mdata);
2475 static __rte_always_inline void
2476 compl_auth_verify(struct rte_crypto_op *op, uint8_t *gen_mac, uint64_t mac_len)
2479 struct rte_crypto_sym_op *sym_op = op->sym;
2481 if (sym_op->auth.digest.data)
2482 mac = sym_op->auth.digest.data;
2484 mac = rte_pktmbuf_mtod_offset(sym_op->m_src, uint8_t *,
2485 sym_op->auth.data.length +
2486 sym_op->auth.data.offset);
2488 op->status = RTE_CRYPTO_OP_STATUS_ERROR;
2492 if (memcmp(mac, gen_mac, mac_len))
2493 op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
2495 op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
2498 static __rte_always_inline void
2499 find_kasumif9_direction_and_length(uint8_t *src, uint32_t counter_num_bytes,
2500 uint32_t *addr_length_in_bits,
2501 uint8_t *addr_direction)
2506 while (!found && counter_num_bytes > 0) {
2507 counter_num_bytes--;
2508 if (src[counter_num_bytes] == 0x00)
2510 pos = rte_bsf32(src[counter_num_bytes]);
2512 if (likely(counter_num_bytes > 0)) {
2513 last_byte = src[counter_num_bytes - 1];
2514 *addr_direction = last_byte & 0x1;
2515 *addr_length_in_bits =
2516 counter_num_bytes * 8 - 1;
2519 last_byte = src[counter_num_bytes];
2520 *addr_direction = (last_byte >> (pos + 1)) & 0x1;
2521 *addr_length_in_bits =
2522 counter_num_bytes * 8 + (8 - (pos + 2));
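/*
 * Worked example (illustrative): the KASUMI F9 input is padded as
 * MESSAGE || DIRECTION || 1 || 0...0. If the last non-zero byte is
 * src[7] = 0x60 (binary 0110 0000), rte_bsf32() returns pos = 5 for
 * the stop bit, the direction bit is (0x60 >> 6) & 1 = 1 and the
 * message length is 7 * 8 + (8 - 7) = 57 bits.
 */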
2529 * This handles all auth-only cases except AES_GMAC
2531 static __rte_always_inline int
2532 fill_digest_params(struct rte_crypto_op *cop, struct cnxk_se_sess *sess,
2533 struct cpt_qp_meta_info *m_info,
2534 struct cpt_inflight_req *infl_req, struct cpt_inst_s *inst)
2537 struct rte_crypto_sym_op *sym_op = cop->sym;
2539 uint32_t auth_range_off;
2541 uint64_t d_offs = 0, d_lens;
2542 struct rte_mbuf *m_src, *m_dst;
2543 uint16_t auth_op = sess->cpt_op & ROC_SE_OP_AUTH_MASK;
2544 uint16_t mac_len = sess->mac_len;
2545 struct roc_se_fc_params params;
2546 char src[SRC_IOV_SIZE];
2550 memset(¶ms, 0, sizeof(struct roc_se_fc_params));
2552 m_src = sym_op->m_src;
2554 mdata = alloc_op_meta(¶ms.meta_buf, m_info->mlen, m_info->pool,
2556 if (mdata == NULL) {
2561 auth_range_off = sym_op->auth.data.offset;
2563 flags = ROC_SE_VALID_MAC_BUF;
2564 params.src_iov = (void *)src;
2565 if (unlikely(sess->zsk_flag)) {
2567 * Since for Zuc, Kasumi, Snow3g offsets are in bits
2568 * we will send pass through even for auth only case,
2571 d_offs = auth_range_off;
2573 params.auth_iv_buf = rte_crypto_op_ctod_offset(
2574 cop, uint8_t *, sess->auth_iv_offset);
2575 if (sess->zsk_flag == ROC_SE_K_F9) {
2576 uint32_t length_in_bits, num_bytes;
2577 uint8_t *src, direction = 0;
2580 rte_pktmbuf_mtod(cop->sym->m_src, uint8_t *), 8);
2582 * This is kasumi f9, take direction from
2585 length_in_bits = cop->sym->auth.data.length;
2586 num_bytes = (length_in_bits >> 3);
2587 src = rte_pktmbuf_mtod(cop->sym->m_src, uint8_t *);
2588 find_kasumif9_direction_and_length(
2589 src, num_bytes, &length_in_bits, &direction);
2590 length_in_bits -= 64;
2591 cop->sym->auth.data.offset += 64;
2592 d_offs = cop->sym->auth.data.offset;
2593 auth_range_off = d_offs / 8;
2594 cop->sym->auth.data.length = length_in_bits;
2596 /* Store it at end of auth iv */
2597 iv_buf[8] = direction;
2598 params.auth_iv_buf = iv_buf;
2602 d_lens = sym_op->auth.data.length;
2604 params.ctx_buf.vaddr = &sess->roc_se_ctx;
2606 if (auth_op == ROC_SE_OP_AUTH_GENERATE) {
2607 if (sym_op->auth.digest.data) {
2609 * Digest to be generated
2610 * in separate buffer
2612 params.mac_buf.size = sess->mac_len;
2613 params.mac_buf.vaddr = sym_op->auth.digest.data;
2615 uint32_t off = sym_op->auth.data.offset +
2616 sym_op->auth.data.length;
2617 int32_t dlen, space;
2619 m_dst = sym_op->m_dst ? sym_op->m_dst : sym_op->m_src;
2620 dlen = rte_pktmbuf_pkt_len(m_dst);
2622 space = off + mac_len - dlen;
2624 if (!rte_pktmbuf_append(m_dst, space)) {
2625 plt_dp_err("Failed to extend "
2629 goto free_mdata_and_exit;
2632 params.mac_buf.vaddr =
2633 rte_pktmbuf_mtod_offset(m_dst, void *, off);
2634 params.mac_buf.size = mac_len;
2637 uint64_t *op = mdata;
2639 /* Need space for storing generated mac */
2640 space += 2 * sizeof(uint64_t);
2642 params.mac_buf.vaddr = (uint8_t *)mdata + space;
2643 params.mac_buf.size = mac_len;
2644 space += RTE_ALIGN_CEIL(mac_len, 8);
2645 op[0] = (uintptr_t)params.mac_buf.vaddr;
2647 infl_req->op_flags |= CPT_OP_FLAGS_AUTH_VERIFY;
2650 params.meta_buf.vaddr = (uint8_t *)mdata + space;
2651 params.meta_buf.size -= space;
2653 /* Out of place processing */
2654 params.src_iov = (void *)src;
2656 /* Store SG I/O in the api for reuse */
2657 if (prepare_iov_from_pkt(m_src, params.src_iov, auth_range_off)) {
2658 plt_dp_err("Prepare src iov failed");
2660 goto free_mdata_and_exit;
2663 ret = cpt_fc_enc_hmac_prep(flags, d_offs, d_lens, ¶ms, inst);
2665 goto free_mdata_and_exit;
2669 free_mdata_and_exit:
2670 if (infl_req->op_flags & CPT_OP_FLAGS_METABUF)
2671 rte_mempool_put(m_info->pool, infl_req->mdata);
2675 #endif /*_CNXK_SE_H_ */