/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */

#ifndef _CNXK_SE_H_
#define _CNXK_SE_H_

#include "cnxk_cryptodev.h"
#include "cnxk_cryptodev_ops.h"

#define SRC_IOV_SIZE                                                           \
	(sizeof(struct roc_se_iov_ptr) +                                       \
	 (sizeof(struct roc_se_buf_ptr) * ROC_SE_MAX_SG_CNT))
#define DST_IOV_SIZE                                                           \
	(sizeof(struct roc_se_iov_ptr) +                                       \
	 (sizeof(struct roc_se_buf_ptr) * ROC_SE_MAX_SG_CNT))
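
/* Symmetric crypto session, set up once at session create and read on the
 * datapath. Bit-fields record the op type and algorithm class; the ROC SE
 * context holds the microcode-facing state.
 */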
struct cnxk_se_sess {
	uint16_t cpt_op : 4;
	uint16_t zsk_flag : 4;
	uint16_t aes_gcm : 1;
	uint16_t aes_ctr : 1;
	uint16_t chacha_poly : 1;
	uint16_t is_null : 1;
	uint16_t is_gmac : 1;
	uint16_t rsvd1 : 3;
	uint16_t aad_length;
	uint8_t mac_len;
	uint8_t iv_length;
	uint8_t auth_iv_length;
	uint16_t iv_offset;
	uint16_t auth_iv_offset;
	uint32_t salt;
	struct roc_se_ctx roc_se_ctx;
} __rte_cache_aligned;
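
/* Verify that the requested digest length is the full MAC size for the
 * selected auth algorithm.
 */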
static __rte_always_inline int
cpt_mac_len_verify(struct rte_crypto_auth_xform *auth)
{
	uint16_t mac_len = auth->digest_length;
	int ret;

	switch (auth->algo) {
	case RTE_CRYPTO_AUTH_MD5:
	case RTE_CRYPTO_AUTH_MD5_HMAC:
		ret = (mac_len == 16) ? 0 : -1;
		break;
	case RTE_CRYPTO_AUTH_SHA1:
	case RTE_CRYPTO_AUTH_SHA1_HMAC:
		ret = (mac_len == 20) ? 0 : -1;
		break;
	case RTE_CRYPTO_AUTH_SHA224:
	case RTE_CRYPTO_AUTH_SHA224_HMAC:
		ret = (mac_len == 28) ? 0 : -1;
		break;
	case RTE_CRYPTO_AUTH_SHA256:
	case RTE_CRYPTO_AUTH_SHA256_HMAC:
		ret = (mac_len == 32) ? 0 : -1;
		break;
	case RTE_CRYPTO_AUTH_SHA384:
	case RTE_CRYPTO_AUTH_SHA384_HMAC:
		ret = (mac_len == 48) ? 0 : -1;
		break;
	case RTE_CRYPTO_AUTH_SHA512:
	case RTE_CRYPTO_AUTH_SHA512_HMAC:
		ret = (mac_len == 64) ? 0 : -1;
		break;
	case RTE_CRYPTO_AUTH_NULL:
		ret = 0;
		break;
	default:
		ret = -1;
	}

	return ret;
}
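
/* Refresh the 4-byte salt kept at the head of the context IV (GCM/GMAC). */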
static __rte_always_inline void
cpt_fc_salt_update(struct roc_se_ctx *se_ctx, uint8_t *salt)
{
	struct roc_se_context *fctx = &se_ctx->se_ctx.fctx;

	memcpy(fctx->enc.encr_iv, salt, 4);
}
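
/* Append one (address, length) entry to an SG list. Components are packed
 * four per roc_se_sglist_comp and stored big-endian for the CPT engine;
 * the helpers below return the next free index.
 */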
static __rte_always_inline uint32_t
fill_sg_comp(struct roc_se_sglist_comp *list, uint32_t i, phys_addr_t dma_addr,
	     uint32_t size)
{
	struct roc_se_sglist_comp *to = &list[i >> 2];

	to->u.s.len[i % 4] = rte_cpu_to_be_16(size);
	to->ptr[i % 4] = rte_cpu_to_be_64(dma_addr);

	return ++i;
}

static __rte_always_inline uint32_t
fill_sg_comp_from_buf(struct roc_se_sglist_comp *list, uint32_t i,
		      struct roc_se_buf_ptr *from)
{
	struct roc_se_sglist_comp *to = &list[i >> 2];

	to->u.s.len[i % 4] = rte_cpu_to_be_16(from->size);
	to->ptr[i % 4] = rte_cpu_to_be_64((uint64_t)from->vaddr);

	return ++i;
}

static __rte_always_inline uint32_t
fill_sg_comp_from_buf_min(struct roc_se_sglist_comp *list, uint32_t i,
			  struct roc_se_buf_ptr *from, uint32_t *psize)
{
	struct roc_se_sglist_comp *to = &list[i >> 2];
	uint32_t size = *psize;
	uint32_t e_len;

	e_len = (size > from->size) ? from->size : size;
	to->u.s.len[i % 4] = rte_cpu_to_be_16(e_len);
	to->ptr[i % 4] = rte_cpu_to_be_64((uint64_t)from->vaddr);
	*psize -= e_len;

	return ++i;
}

/*
 * This fills the MC expected SGIO list
 * from the IOV given by the user.
 */
static __rte_always_inline uint32_t
fill_sg_comp_from_iov(struct roc_se_sglist_comp *list, uint32_t i,
		      struct roc_se_iov_ptr *from, uint32_t from_offset,
		      uint32_t *psize, struct roc_se_buf_ptr *extra_buf,
		      uint32_t extra_offset)
{
	int32_t j;
	uint32_t extra_len = extra_buf ? extra_buf->size : 0;
	uint32_t size = *psize;
	struct roc_se_buf_ptr *bufs;

	bufs = from->bufs;
	for (j = 0; (j < from->buf_cnt) && size; j++) {
		uint64_t e_vaddr;
		uint32_t e_len;
		struct roc_se_sglist_comp *to = &list[i >> 2];

		if (unlikely(from_offset)) {
			if (from_offset >= bufs[j].size) {
				from_offset -= bufs[j].size;
				continue;
			}
			e_vaddr = (uint64_t)bufs[j].vaddr + from_offset;
			e_len = (size > (bufs[j].size - from_offset)) ?
					(bufs[j].size - from_offset) :
					size;
			from_offset = 0;
		} else {
			e_vaddr = (uint64_t)bufs[j].vaddr;
			e_len = (size > bufs[j].size) ? bufs[j].size : size;
		}

		to->u.s.len[i % 4] = rte_cpu_to_be_16(e_len);
		to->ptr[i % 4] = rte_cpu_to_be_64(e_vaddr);

		if (extra_len && (e_len >= extra_offset)) {
			/* Break the data at given offset */
			uint32_t next_len = e_len - extra_offset;
			uint64_t next_vaddr = e_vaddr + extra_offset;

			if (!extra_offset) {
				i--;
			} else {
				e_len = extra_offset;
				size -= e_len;
				to->u.s.len[i % 4] = rte_cpu_to_be_16(e_len);
			}

			extra_len = RTE_MIN(extra_len, size);
			/* Insert extra data ptr */
			if (extra_len) {
				i++;
				to = &list[i >> 2];
				to->u.s.len[i % 4] =
					rte_cpu_to_be_16(extra_len);
				to->ptr[i % 4] = rte_cpu_to_be_64(
					(uint64_t)extra_buf->vaddr);
				size -= extra_len;
			}

			next_len = RTE_MIN(next_len, size);
			/* insert the rest of the data */
			if (next_len) {
				i++;
				to = &list[i >> 2];
				to->u.s.len[i % 4] = rte_cpu_to_be_16(next_len);
				to->ptr[i % 4] = rte_cpu_to_be_64(next_vaddr);
				size -= next_len;
			}
			extra_len = 0;
		} else {
			size -= e_len;
		}
		if (extra_offset)
			extra_offset -= size;
		i++;
	}

	*psize = size;
	return (uint32_t)i;
}
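
/*
 * Prepare a CPT flexi-crypto instruction for encrypt and/or hash generate.
 * Direct mode is used when the input is a single in-place buffer with
 * headroom for the offset control word and IV; otherwise an SG list is
 * built in the meta buffer.
 */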
static __rte_always_inline int
cpt_enc_hmac_prep(uint32_t flags, uint64_t d_offs, uint64_t d_lens,
		  struct roc_se_fc_params *fc_params, struct cpt_inst_s *inst)
{
	uint32_t iv_offset = 0;
	int32_t inputlen, outputlen, enc_dlen, auth_dlen;
	struct roc_se_ctx *se_ctx;
	uint32_t cipher_type, hash_type;
	uint32_t mac_len, size;
	uint8_t iv_len = 16;
	struct roc_se_buf_ptr *aad_buf = NULL;
	uint32_t encr_offset, auth_offset;
	uint32_t encr_data_len, auth_data_len, aad_len = 0;
	uint32_t passthrough_len = 0;
	union cpt_inst_w4 cpt_inst_w4;
	void *offset_vaddr;
	uint8_t op_minor;

	encr_offset = ROC_SE_ENCR_OFFSET(d_offs);
	auth_offset = ROC_SE_AUTH_OFFSET(d_offs);
	encr_data_len = ROC_SE_ENCR_DLEN(d_lens);
	auth_data_len = ROC_SE_AUTH_DLEN(d_lens);
	if (unlikely(flags & ROC_SE_VALID_AAD_BUF)) {
		/* We don't support both AAD and auth data separately */
		auth_data_len = 0;
		auth_offset = 0;
		aad_len = fc_params->aad_buf.size;
		aad_buf = &fc_params->aad_buf;
	}
	se_ctx = fc_params->ctx_buf.vaddr;
	cipher_type = se_ctx->enc_cipher;
	hash_type = se_ctx->hash_type;
	mac_len = se_ctx->mac_len;
	op_minor = se_ctx->template_w4.s.opcode_minor;

	if (unlikely(!(flags & ROC_SE_VALID_IV_BUF))) {
		iv_len = 0;
		iv_offset = ROC_SE_ENCR_IV_OFFSET(d_offs);
	}

	if (unlikely(flags & ROC_SE_VALID_AAD_BUF)) {
		/*
		 * When AAD is given, data above encr_offset is pass through
		 * Since AAD is given as separate pointer and not as offset,
		 * this is a special case as we need to fragment input data
		 * into passthrough + encr_data and then insert AAD in between.
		 */
		if (hash_type != ROC_SE_GMAC_TYPE) {
			passthrough_len = encr_offset;
			auth_offset = passthrough_len + iv_len;
			encr_offset = passthrough_len + aad_len + iv_len;
			auth_data_len = aad_len + encr_data_len;
		} else {
			passthrough_len = 16 + aad_len;
			auth_offset = passthrough_len + iv_len;
			auth_data_len = aad_len;
		}
	} else {
		encr_offset += iv_len;
		auth_offset += iv_len;
	}

	/* Encryption */
	cpt_inst_w4.s.opcode_major = ROC_SE_MAJOR_OP_FC;
	cpt_inst_w4.s.opcode_minor = ROC_SE_FC_MINOR_OP_ENCRYPT;
	cpt_inst_w4.s.opcode_minor |= (uint64_t)op_minor;

	if (hash_type == ROC_SE_GMAC_TYPE) {
		encr_offset = 0;
		encr_data_len = 0;
	}

	auth_dlen = auth_offset + auth_data_len;
	enc_dlen = encr_data_len + encr_offset;
	/* Align to the cipher block size: 8B for 3DES, 16B for AES */
	if (unlikely(encr_data_len & 0xf)) {
		if ((cipher_type == ROC_SE_DES3_CBC) ||
		    (cipher_type == ROC_SE_DES3_ECB))
			enc_dlen =
				RTE_ALIGN_CEIL(encr_data_len, 8) + encr_offset;
		else if (likely((cipher_type == ROC_SE_AES_CBC) ||
				(cipher_type == ROC_SE_AES_ECB)))
			enc_dlen = RTE_ALIGN_CEIL(encr_data_len, 16) +
				   encr_offset;
	}

	if (unlikely(auth_dlen > enc_dlen)) {
		inputlen = auth_dlen;
		outputlen = auth_dlen + mac_len;
	} else {
		inputlen = enc_dlen;
		outputlen = enc_dlen + mac_len;
	}

	if (op_minor & ROC_SE_FC_MINOR_OP_HMAC_FIRST)
		outputlen = enc_dlen;

	/* GP op header */
	cpt_inst_w4.s.param1 = encr_data_len;
	cpt_inst_w4.s.param2 = auth_data_len;

	/*
	 * In cn9k, cn10k since we have a limitation of
	 * IV & Offset control word not part of instruction
	 * and need to be part of Data Buffer, we check if
	 * head room is there and then only do the Direct mode processing
	 */
	if (likely((flags & ROC_SE_SINGLE_BUF_INPLACE) &&
		   (flags & ROC_SE_SINGLE_BUF_HEADROOM))) {
		void *dm_vaddr = fc_params->bufs[0].vaddr;

		/* Use Direct mode */

		offset_vaddr =
			(uint8_t *)dm_vaddr - ROC_SE_OFF_CTRL_LEN - iv_len;

		/* DPTR */
		inst->dptr = (uint64_t)offset_vaddr;

		/* RPTR should just exclude offset control word */
		inst->rptr = (uint64_t)dm_vaddr - iv_len;

		cpt_inst_w4.s.dlen = inputlen + ROC_SE_OFF_CTRL_LEN;

		if (likely(iv_len)) {
			uint64_t *dest = (uint64_t *)((uint8_t *)offset_vaddr +
						      ROC_SE_OFF_CTRL_LEN);
			uint64_t *src = fc_params->iv_buf;
			dest[0] = src[0];
			dest[1] = src[1];
		}
	} else {
		void *m_vaddr = fc_params->meta_buf.vaddr;
		uint32_t i, g_size_bytes, s_size_bytes;
		struct roc_se_sglist_comp *gather_comp;
		struct roc_se_sglist_comp *scatter_comp;
		uint8_t *in_buffer;

		/* This falls under strict SG mode */
		offset_vaddr = m_vaddr;
		size = ROC_SE_OFF_CTRL_LEN + iv_len;

		m_vaddr = (uint8_t *)m_vaddr + size;

		cpt_inst_w4.s.opcode_major |= (uint64_t)ROC_SE_DMA_MODE;

		if (likely(iv_len)) {
			uint64_t *dest = (uint64_t *)((uint8_t *)offset_vaddr +
						      ROC_SE_OFF_CTRL_LEN);
			uint64_t *src = fc_params->iv_buf;
			dest[0] = src[0];
			dest[1] = src[1];
		}

		/* DPTR has SG list */
		in_buffer = m_vaddr;

		((uint16_t *)in_buffer)[0] = 0;
		((uint16_t *)in_buffer)[1] = 0;

		/* TODO Add error check if space will be sufficient */
		gather_comp =
			(struct roc_se_sglist_comp *)((uint8_t *)m_vaddr + 8);

		/*
		 * Input Gather List
		 */
		i = 0;

		/* Offset control word that includes iv */
		i = fill_sg_comp(gather_comp, i, (uint64_t)offset_vaddr,
				 ROC_SE_OFF_CTRL_LEN + iv_len);

		/* Add input data */
		size = inputlen - iv_len;
		if (size) {
			uint32_t aad_offset = aad_len ? passthrough_len : 0;

			if (unlikely(flags & ROC_SE_SINGLE_BUF_INPLACE)) {
				i = fill_sg_comp_from_buf_min(
					gather_comp, i, fc_params->bufs, &size);
			} else {
				i = fill_sg_comp_from_iov(
					gather_comp, i, fc_params->src_iov, 0,
					&size, aad_buf, aad_offset);
			}

			if (unlikely(size)) {
				plt_dp_err("Insufficient buffer space,"
					   " size %d needed",
					   size);
				return -1;
			}
		}
		((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
		g_size_bytes =
			((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);

		/*
		 * Output Scatter list
		 */
		i = 0;
		scatter_comp =
			(struct roc_se_sglist_comp *)((uint8_t *)gather_comp +
						      g_size_bytes);

		/* Add IV */
		if (likely(iv_len)) {
			i = fill_sg_comp(scatter_comp, i,
					 (uint64_t)offset_vaddr +
						 ROC_SE_OFF_CTRL_LEN,
					 iv_len);
		}

		/* output data or output data + digest*/
		if (unlikely(flags & ROC_SE_VALID_MAC_BUF)) {
			size = outputlen - iv_len - mac_len;
			if (size) {
				uint32_t aad_offset =
					aad_len ? passthrough_len : 0;

				if (unlikely(flags &
					     ROC_SE_SINGLE_BUF_INPLACE)) {
					i = fill_sg_comp_from_buf_min(
						scatter_comp, i,
						fc_params->bufs, &size);
				} else {
					i = fill_sg_comp_from_iov(
						scatter_comp, i,
						fc_params->dst_iov, 0, &size,
						aad_buf, aad_offset);
				}
				if (unlikely(size)) {
					plt_dp_err("Insufficient buffer"
						   " space, size %d needed",
						   size);
					return -1;
				}
			}
			/* mac_data */
			if (mac_len) {
				i = fill_sg_comp_from_buf(scatter_comp, i,
							  &fc_params->mac_buf);
			}
		} else {
			/* Output including mac */
			size = outputlen - iv_len;
			if (size) {
				uint32_t aad_offset =
					aad_len ? passthrough_len : 0;

				if (unlikely(flags &
					     ROC_SE_SINGLE_BUF_INPLACE)) {
					i = fill_sg_comp_from_buf_min(
						scatter_comp, i,
						fc_params->bufs, &size);
				} else {
					i = fill_sg_comp_from_iov(
						scatter_comp, i,
						fc_params->dst_iov, 0, &size,
						aad_buf, aad_offset);
				}
				if (unlikely(size)) {
					plt_dp_err("Insufficient buffer"
						   " space, size %d needed",
						   size);
					return -1;
				}
			}
		}
		((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
		s_size_bytes =
			((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);

		size = g_size_bytes + s_size_bytes + ROC_SE_SG_LIST_HDR_SIZE;

		/* This is DPTR len in case of SG mode */
		cpt_inst_w4.s.dlen = size;

		inst->dptr = (uint64_t)in_buffer;
	}

	if (unlikely((encr_offset >> 16) || (iv_offset >> 8) ||
		     (auth_offset >> 8))) {
		plt_dp_err("Offset not supported");
		plt_dp_err("enc_offset: %d", encr_offset);
		plt_dp_err("iv_offset : %d", iv_offset);
		plt_dp_err("auth_offset: %d", auth_offset);
		return -1;
	}

	*(uint64_t *)offset_vaddr = rte_cpu_to_be_64(
		((uint64_t)encr_offset << 16) | ((uint64_t)iv_offset << 8) |
		((uint64_t)auth_offset));

	inst->w4.u64 = cpt_inst_w4.u64;
	return 0;
}
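
/*
 * Prepare a CPT flexi-crypto instruction for decrypt and/or hash verify.
 * Mirrors cpt_enc_hmac_prep(), except the MAC is consumed from the input
 * rather than produced into the output.
 */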
static __rte_always_inline int
cpt_dec_hmac_prep(uint32_t flags, uint64_t d_offs, uint64_t d_lens,
		  struct roc_se_fc_params *fc_params, struct cpt_inst_s *inst)
{
	uint32_t iv_offset = 0, size;
	int32_t inputlen, outputlen, enc_dlen, auth_dlen;
	struct roc_se_ctx *se_ctx;
	int32_t hash_type, mac_len;
	uint8_t iv_len = 16;
	struct roc_se_buf_ptr *aad_buf = NULL;
	uint32_t encr_offset, auth_offset;
	uint32_t encr_data_len, auth_data_len, aad_len = 0;
	uint32_t passthrough_len = 0;
	union cpt_inst_w4 cpt_inst_w4;
	void *offset_vaddr;
	uint8_t op_minor;

	encr_offset = ROC_SE_ENCR_OFFSET(d_offs);
	auth_offset = ROC_SE_AUTH_OFFSET(d_offs);
	encr_data_len = ROC_SE_ENCR_DLEN(d_lens);
	auth_data_len = ROC_SE_AUTH_DLEN(d_lens);

	if (unlikely(flags & ROC_SE_VALID_AAD_BUF)) {
		/* We don't support both AAD and auth data separately */
		auth_data_len = 0;
		auth_offset = 0;
		aad_len = fc_params->aad_buf.size;
		aad_buf = &fc_params->aad_buf;
	}

	se_ctx = fc_params->ctx_buf.vaddr;
	hash_type = se_ctx->hash_type;
	mac_len = se_ctx->mac_len;
	op_minor = se_ctx->template_w4.s.opcode_minor;

	if (unlikely(!(flags & ROC_SE_VALID_IV_BUF))) {
		iv_len = 0;
		iv_offset = ROC_SE_ENCR_IV_OFFSET(d_offs);
	}

	if (unlikely(flags & ROC_SE_VALID_AAD_BUF)) {
		/*
		 * When AAD is given, data above encr_offset is pass through
		 * Since AAD is given as separate pointer and not as offset,
		 * this is a special case as we need to fragment input data
		 * into passthrough + encr_data and then insert AAD in between.
		 */
		if (hash_type != ROC_SE_GMAC_TYPE) {
			passthrough_len = encr_offset;
			auth_offset = passthrough_len + iv_len;
			encr_offset = passthrough_len + aad_len + iv_len;
			auth_data_len = aad_len + encr_data_len;
		} else {
			passthrough_len = 16 + aad_len;
			auth_offset = passthrough_len + iv_len;
			auth_data_len = aad_len;
		}
	} else {
		encr_offset += iv_len;
		auth_offset += iv_len;
	}

	/* Decryption */
	cpt_inst_w4.s.opcode_major = ROC_SE_MAJOR_OP_FC;
	cpt_inst_w4.s.opcode_minor = ROC_SE_FC_MINOR_OP_DECRYPT;
	cpt_inst_w4.s.opcode_minor |= (uint64_t)op_minor;

	if (hash_type == ROC_SE_GMAC_TYPE) {
		encr_offset = 0;
		encr_data_len = 0;
	}

	enc_dlen = encr_offset + encr_data_len;
	auth_dlen = auth_offset + auth_data_len;

	if (auth_dlen > enc_dlen) {
		inputlen = auth_dlen + mac_len;
		outputlen = auth_dlen;
	} else {
		inputlen = enc_dlen + mac_len;
		outputlen = enc_dlen;
	}

	if (op_minor & ROC_SE_FC_MINOR_OP_HMAC_FIRST)
		outputlen = inputlen = enc_dlen;

	cpt_inst_w4.s.param1 = encr_data_len;
	cpt_inst_w4.s.param2 = auth_data_len;

	/*
	 * In cn9k, cn10k since we have a limitation of
	 * IV & Offset control word not part of instruction
	 * and need to be part of Data Buffer, we check if
	 * head room is there and then only do the Direct mode processing
	 */
	if (likely((flags & ROC_SE_SINGLE_BUF_INPLACE) &&
		   (flags & ROC_SE_SINGLE_BUF_HEADROOM))) {
		void *dm_vaddr = fc_params->bufs[0].vaddr;

		/* Use Direct mode */

		offset_vaddr =
			(uint8_t *)dm_vaddr - ROC_SE_OFF_CTRL_LEN - iv_len;
		inst->dptr = (uint64_t)offset_vaddr;

		/* RPTR should just exclude offset control word */
		inst->rptr = (uint64_t)dm_vaddr - iv_len;

		cpt_inst_w4.s.dlen = inputlen + ROC_SE_OFF_CTRL_LEN;

		if (likely(iv_len)) {
			uint64_t *dest = (uint64_t *)((uint8_t *)offset_vaddr +
						      ROC_SE_OFF_CTRL_LEN);
			uint64_t *src = fc_params->iv_buf;
			dest[0] = src[0];
			dest[1] = src[1];
		}
	} else {
		void *m_vaddr = fc_params->meta_buf.vaddr;
		uint32_t g_size_bytes, s_size_bytes;
		struct roc_se_sglist_comp *gather_comp;
		struct roc_se_sglist_comp *scatter_comp;
		uint8_t *in_buffer;
		uint32_t i = 0;

		/* This falls under strict SG mode */
		offset_vaddr = m_vaddr;
		size = ROC_SE_OFF_CTRL_LEN + iv_len;

		m_vaddr = (uint8_t *)m_vaddr + size;

		cpt_inst_w4.s.opcode_major |= (uint64_t)ROC_SE_DMA_MODE;

		if (likely(iv_len)) {
			uint64_t *dest = (uint64_t *)((uint8_t *)offset_vaddr +
						      ROC_SE_OFF_CTRL_LEN);
			uint64_t *src = fc_params->iv_buf;
			dest[0] = src[0];
			dest[1] = src[1];
		}

		/* DPTR has SG list */
		in_buffer = m_vaddr;

		((uint16_t *)in_buffer)[0] = 0;
		((uint16_t *)in_buffer)[1] = 0;

		/* TODO Add error check if space will be sufficient */
		gather_comp =
			(struct roc_se_sglist_comp *)((uint8_t *)m_vaddr + 8);

		/*
		 * Input Gather List
		 */

		/* Offset control word that includes iv */
		i = fill_sg_comp(gather_comp, i, (uint64_t)offset_vaddr,
				 ROC_SE_OFF_CTRL_LEN + iv_len);

		/* Add input data */
		if (flags & ROC_SE_VALID_MAC_BUF) {
			size = inputlen - iv_len - mac_len;
			if (size) {
				/* input data only */
				if (unlikely(flags &
					     ROC_SE_SINGLE_BUF_INPLACE)) {
					i = fill_sg_comp_from_buf_min(
						gather_comp, i, fc_params->bufs,
						&size);
				} else {
					uint32_t aad_offset =
						aad_len ? passthrough_len : 0;

					i = fill_sg_comp_from_iov(
						gather_comp, i,
						fc_params->src_iov, 0, &size,
						aad_buf, aad_offset);
				}
				if (unlikely(size)) {
					plt_dp_err("Insufficient buffer"
						   " space, size %d needed",
						   size);
					return -1;
				}
			}

			/* mac data */
			if (mac_len) {
				i = fill_sg_comp_from_buf(gather_comp, i,
							  &fc_params->mac_buf);
			}
		} else {
			/* input data + mac */
			size = inputlen - iv_len;
			if (size) {
				if (unlikely(flags &
					     ROC_SE_SINGLE_BUF_INPLACE)) {
					i = fill_sg_comp_from_buf_min(
						gather_comp, i, fc_params->bufs,
						&size);
				} else {
					uint32_t aad_offset =
						aad_len ? passthrough_len : 0;

					if (unlikely(!fc_params->src_iov)) {
						plt_dp_err("Bad input args");
						return -1;
					}

					i = fill_sg_comp_from_iov(
						gather_comp, i,
						fc_params->src_iov, 0, &size,
						aad_buf, aad_offset);
				}

				if (unlikely(size)) {
					plt_dp_err("Insufficient buffer"
						   " space, size %d needed",
						   size);
					return -1;
				}
			}
		}
		((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
		g_size_bytes =
			((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);

		/*
		 * Output Scatter List
		 */

		i = 0;
		scatter_comp =
			(struct roc_se_sglist_comp *)((uint8_t *)gather_comp +
						      g_size_bytes);

		/* Add iv */
		if (iv_len) {
			i = fill_sg_comp(scatter_comp, i,
					 (uint64_t)offset_vaddr +
						 ROC_SE_OFF_CTRL_LEN,
					 iv_len);
		}

		/* Add output data */
		size = outputlen - iv_len;
		if (size) {
			if (unlikely(flags & ROC_SE_SINGLE_BUF_INPLACE)) {
				/* handle single buffer here */
				i = fill_sg_comp_from_buf_min(scatter_comp, i,
							      fc_params->bufs,
							      &size);
			} else {
				uint32_t aad_offset =
					aad_len ? passthrough_len : 0;

				if (unlikely(!fc_params->dst_iov)) {
					plt_dp_err("Bad input args");
					return -1;
				}

				i = fill_sg_comp_from_iov(
					scatter_comp, i, fc_params->dst_iov, 0,
					&size, aad_buf, aad_offset);
			}

			if (unlikely(size)) {
				plt_dp_err("Insufficient buffer space,"
					   " size %d needed",
					   size);
				return -1;
			}
		}

		((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
		s_size_bytes =
			((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);

		size = g_size_bytes + s_size_bytes + ROC_SE_SG_LIST_HDR_SIZE;

		/* This is DPTR len in case of SG mode */
		cpt_inst_w4.s.dlen = size;

		inst->dptr = (uint64_t)in_buffer;
	}

	if (unlikely((encr_offset >> 16) || (iv_offset >> 8) ||
		     (auth_offset >> 8))) {
		plt_dp_err("Offset not supported");
		plt_dp_err("enc_offset: %d", encr_offset);
		plt_dp_err("iv_offset : %d", iv_offset);
		plt_dp_err("auth_offset: %d", auth_offset);
		return -1;
	}

	*(uint64_t *)offset_vaddr = rte_cpu_to_be_64(
		((uint64_t)encr_offset << 16) | ((uint64_t)iv_offset << 8) |
		((uint64_t)auth_offset));

	inst->w4.u64 = cpt_inst_w4.u64;
	return 0;
}
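
/* Thin dispatchers on the context fc_type; only the flexi-crypto (FC_GEN)
 * path is handled in this header.
 */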
static __rte_always_inline int
cpt_fc_dec_hmac_prep(uint32_t flags, uint64_t d_offs, uint64_t d_lens,
		     struct roc_se_fc_params *fc_params,
		     struct cpt_inst_s *inst)
{
	struct roc_se_ctx *ctx = fc_params->ctx_buf.vaddr;
	uint8_t fc_type;
	int ret = -1;

	fc_type = ctx->fc_type;

	if (likely(fc_type == ROC_SE_FC_GEN))
		ret = cpt_dec_hmac_prep(flags, d_offs, d_lens, fc_params, inst);

	return ret;
}

static __rte_always_inline int
cpt_fc_enc_hmac_prep(uint32_t flags, uint64_t d_offs, uint64_t d_lens,
		     struct roc_se_fc_params *fc_params,
		     struct cpt_inst_s *inst)
{
	struct roc_se_ctx *ctx = fc_params->ctx_buf.vaddr;
	uint8_t fc_type;
	int ret = -1;

	fc_type = ctx->fc_type;

	if (likely(fc_type == ROC_SE_FC_GEN))
		ret = cpt_enc_hmac_prep(flags, d_offs, d_lens, fc_params, inst);

	return ret;
}
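
/* Parse an AEAD xform (AES-GCM or ChaCha20-Poly1305) into the session. */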
static __rte_always_inline int
fill_sess_aead(struct rte_crypto_sym_xform *xform, struct cnxk_se_sess *sess)
{
	struct rte_crypto_aead_xform *aead_form;
	roc_se_cipher_type enc_type = 0; /* NULL Cipher type */
	roc_se_auth_type auth_type = 0; /* NULL Auth type */
	uint32_t cipher_key_len = 0;
	uint8_t aes_gcm = 0;

	aead_form = &xform->aead;

	if (aead_form->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) {
		sess->cpt_op |= ROC_SE_OP_CIPHER_ENCRYPT;
		sess->cpt_op |= ROC_SE_OP_AUTH_GENERATE;
	} else if (aead_form->op == RTE_CRYPTO_AEAD_OP_DECRYPT) {
		sess->cpt_op |= ROC_SE_OP_CIPHER_DECRYPT;
		sess->cpt_op |= ROC_SE_OP_AUTH_VERIFY;
	} else {
		plt_dp_err("Unknown aead operation");
		return -1;
	}
	switch (aead_form->algo) {
	case RTE_CRYPTO_AEAD_AES_GCM:
		enc_type = ROC_SE_AES_GCM;
		cipher_key_len = 16;
		aes_gcm = 1;
		break;
	case RTE_CRYPTO_AEAD_AES_CCM:
		plt_dp_err("Crypto: Unsupported cipher algo %u",
			   aead_form->algo);
		return -1;
	case RTE_CRYPTO_AEAD_CHACHA20_POLY1305:
		enc_type = ROC_SE_CHACHA20;
		auth_type = ROC_SE_POLY1305;
		cipher_key_len = 32;
		sess->chacha_poly = 1;
		break;
	default:
		plt_dp_err("Crypto: Undefined cipher algo %u specified",
			   aead_form->algo);
		return -1;
	}
	if (aead_form->key.length < cipher_key_len) {
		plt_dp_err("Invalid cipher params keylen %u",
			   aead_form->key.length);
		return -1;
	}

	sess->aes_gcm = aes_gcm;
	sess->mac_len = aead_form->digest_length;
	sess->iv_offset = aead_form->iv.offset;
	sess->iv_length = aead_form->iv.length;
	sess->aad_length = aead_form->aad_length;

	if (unlikely(roc_se_ciph_key_set(&sess->roc_se_ctx, enc_type,
					 aead_form->key.data,
					 aead_form->key.length, NULL)))
		return -1;

	if (unlikely(roc_se_auth_key_set(&sess->roc_se_ctx, auth_type, NULL, 0,
					 aead_form->digest_length)))
		return -1;

	return 0;
}
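
/* Parse a cipher-only xform into the session, mapping DPDK algos to ROC SE
 * cipher types and validating the minimum key length.
 */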
static __rte_always_inline int
fill_sess_cipher(struct rte_crypto_sym_xform *xform, struct cnxk_se_sess *sess)
{
	struct rte_crypto_cipher_xform *c_form;
	roc_se_cipher_type enc_type = 0; /* NULL Cipher type */
	uint32_t cipher_key_len = 0;
	uint8_t zsk_flag = 0, aes_ctr = 0, is_null = 0;

	c_form = &xform->cipher;

	if (c_form->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
		sess->cpt_op |= ROC_SE_OP_CIPHER_ENCRYPT;
	else if (c_form->op == RTE_CRYPTO_CIPHER_OP_DECRYPT) {
		sess->cpt_op |= ROC_SE_OP_CIPHER_DECRYPT;
		if (xform->next != NULL &&
		    xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
			/* Perform decryption followed by auth verify */
			sess->roc_se_ctx.template_w4.s.opcode_minor =
				ROC_SE_FC_MINOR_OP_HMAC_FIRST;
		}
	} else {
		plt_dp_err("Unknown cipher operation");
		return -1;
	}

	switch (c_form->algo) {
	case RTE_CRYPTO_CIPHER_AES_CBC:
		enc_type = ROC_SE_AES_CBC;
		cipher_key_len = 16;
		break;
	case RTE_CRYPTO_CIPHER_3DES_CBC:
		enc_type = ROC_SE_DES3_CBC;
		cipher_key_len = 24;
		break;
	case RTE_CRYPTO_CIPHER_DES_CBC:
		/* DES is implemented using 3DES in hardware */
		enc_type = ROC_SE_DES3_CBC;
		cipher_key_len = 8;
		break;
	case RTE_CRYPTO_CIPHER_AES_CTR:
		enc_type = ROC_SE_AES_CTR;
		cipher_key_len = 16;
		aes_ctr = 1;
		break;
	case RTE_CRYPTO_CIPHER_NULL:
		enc_type = 0;
		is_null = 1;
		break;
	case RTE_CRYPTO_CIPHER_KASUMI_F8:
		enc_type = ROC_SE_KASUMI_F8_ECB;
		cipher_key_len = 16;
		zsk_flag = ROC_SE_K_F8;
		break;
	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
		enc_type = ROC_SE_SNOW3G_UEA2;
		cipher_key_len = 16;
		zsk_flag = ROC_SE_ZS_EA;
		break;
	case RTE_CRYPTO_CIPHER_ZUC_EEA3:
		enc_type = ROC_SE_ZUC_EEA3;
		cipher_key_len = 16;
		zsk_flag = ROC_SE_ZS_EA;
		break;
	case RTE_CRYPTO_CIPHER_AES_XTS:
		enc_type = ROC_SE_AES_XTS;
		cipher_key_len = 16;
		break;
	case RTE_CRYPTO_CIPHER_3DES_ECB:
		enc_type = ROC_SE_DES3_ECB;
		cipher_key_len = 24;
		break;
	case RTE_CRYPTO_CIPHER_AES_ECB:
		enc_type = ROC_SE_AES_ECB;
		cipher_key_len = 16;
		break;
	case RTE_CRYPTO_CIPHER_3DES_CTR:
	case RTE_CRYPTO_CIPHER_AES_F8:
	case RTE_CRYPTO_CIPHER_ARC4:
		plt_dp_err("Crypto: Unsupported cipher algo %u", c_form->algo);
		return -1;
	default:
		plt_dp_err("Crypto: Undefined cipher algo %u specified",
			   c_form->algo);
		return -1;
	}

	if (c_form->key.length < cipher_key_len) {
		plt_dp_err("Invalid cipher params keylen %u",
			   c_form->key.length);
		return -1;
	}

	sess->zsk_flag = zsk_flag;
	sess->aes_gcm = 0;
	sess->aes_ctr = aes_ctr;
	sess->iv_offset = c_form->iv.offset;
	sess->iv_length = c_form->iv.length;
	sess->is_null = is_null;

	if (unlikely(roc_se_ciph_key_set(&sess->roc_se_ctx, enc_type,
					 c_form->key.data, c_form->key.length,
					 NULL)))
		return -1;

	return 0;
}
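
/* Parse an auth xform into the session. When auth is chained with a
 * following cipher-encrypt xform, flag HMAC-first processing.
 */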
static __rte_always_inline int
fill_sess_auth(struct rte_crypto_sym_xform *xform, struct cnxk_se_sess *sess)
{
	struct rte_crypto_auth_xform *a_form;
	roc_se_auth_type auth_type = 0; /* NULL Auth type */
	uint8_t zsk_flag = 0, aes_gcm = 0, is_null = 0;

	if (xform->next != NULL &&
	    xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
	    xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
		/* Perform auth followed by encryption */
		sess->roc_se_ctx.template_w4.s.opcode_minor =
			ROC_SE_FC_MINOR_OP_HMAC_FIRST;
	}

	a_form = &xform->auth;

	if (a_form->op == RTE_CRYPTO_AUTH_OP_VERIFY)
		sess->cpt_op |= ROC_SE_OP_AUTH_VERIFY;
	else if (a_form->op == RTE_CRYPTO_AUTH_OP_GENERATE)
		sess->cpt_op |= ROC_SE_OP_AUTH_GENERATE;
	else {
		plt_dp_err("Unknown auth operation");
		return -1;
	}

	switch (a_form->algo) {
	case RTE_CRYPTO_AUTH_SHA1_HMAC:
		/* Fall through */
	case RTE_CRYPTO_AUTH_SHA1:
		auth_type = ROC_SE_SHA1_TYPE;
		break;
	case RTE_CRYPTO_AUTH_SHA256_HMAC:
	case RTE_CRYPTO_AUTH_SHA256:
		auth_type = ROC_SE_SHA2_SHA256;
		break;
	case RTE_CRYPTO_AUTH_SHA512_HMAC:
	case RTE_CRYPTO_AUTH_SHA512:
		auth_type = ROC_SE_SHA2_SHA512;
		break;
	case RTE_CRYPTO_AUTH_AES_GMAC:
		auth_type = ROC_SE_GMAC_TYPE;
		aes_gcm = 1;
		break;
	case RTE_CRYPTO_AUTH_SHA224_HMAC:
	case RTE_CRYPTO_AUTH_SHA224:
		auth_type = ROC_SE_SHA2_SHA224;
		break;
	case RTE_CRYPTO_AUTH_SHA384_HMAC:
	case RTE_CRYPTO_AUTH_SHA384:
		auth_type = ROC_SE_SHA2_SHA384;
		break;
	case RTE_CRYPTO_AUTH_MD5_HMAC:
	case RTE_CRYPTO_AUTH_MD5:
		auth_type = ROC_SE_MD5_TYPE;
		break;
	case RTE_CRYPTO_AUTH_KASUMI_F9:
		auth_type = ROC_SE_KASUMI_F9_ECB;
		/*
		 * Indicate that direction needs to be taken out
		 * from end of src
		 */
		zsk_flag = ROC_SE_K_F9;
		break;
	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
		auth_type = ROC_SE_SNOW3G_UIA2;
		zsk_flag = ROC_SE_ZS_IA;
		break;
	case RTE_CRYPTO_AUTH_ZUC_EIA3:
		auth_type = ROC_SE_ZUC_EIA3;
		zsk_flag = ROC_SE_ZS_IA;
		break;
	case RTE_CRYPTO_AUTH_NULL:
		auth_type = 0;
		is_null = 1;
		break;
	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
	case RTE_CRYPTO_AUTH_AES_CMAC:
	case RTE_CRYPTO_AUTH_AES_CBC_MAC:
		plt_dp_err("Crypto: Unsupported hash algo %u", a_form->algo);
		return -1;
	default:
		plt_dp_err("Crypto: Undefined Hash algo %u specified",
			   a_form->algo);
		return -1;
	}

	sess->zsk_flag = zsk_flag;
	sess->aes_gcm = aes_gcm;
	sess->mac_len = a_form->digest_length;
	sess->is_null = is_null;
	if (zsk_flag) {
		sess->auth_iv_offset = a_form->iv.offset;
		sess->auth_iv_length = a_form->iv.length;
	}
	if (unlikely(roc_se_auth_key_set(&sess->roc_se_ctx, auth_type,
					 a_form->key.data, a_form->key.length,
					 a_form->digest_length)))
		return -1;

	return 0;
}
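
/* AES-GMAC is modelled as an AES-GCM cipher with GMAC auth type. */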
static __rte_always_inline int
fill_sess_gmac(struct rte_crypto_sym_xform *xform, struct cnxk_se_sess *sess)
{
	struct rte_crypto_auth_xform *a_form;
	roc_se_cipher_type enc_type = 0; /* NULL Cipher type */
	roc_se_auth_type auth_type = 0; /* NULL Auth type */

	a_form = &xform->auth;

	if (a_form->op == RTE_CRYPTO_AUTH_OP_GENERATE)
		sess->cpt_op |= ROC_SE_OP_ENCODE;
	else if (a_form->op == RTE_CRYPTO_AUTH_OP_VERIFY)
		sess->cpt_op |= ROC_SE_OP_DECODE;
	else {
		plt_dp_err("Unknown auth operation");
		return -1;
	}

	switch (a_form->algo) {
	case RTE_CRYPTO_AUTH_AES_GMAC:
		enc_type = ROC_SE_AES_GCM;
		auth_type = ROC_SE_GMAC_TYPE;
		break;
	default:
		plt_dp_err("Crypto: Undefined cipher algo %u specified",
			   a_form->algo);
		return -1;
	}

	sess->zsk_flag = 0;
	sess->aes_gcm = 0;
	sess->is_gmac = 1;
	sess->iv_offset = a_form->iv.offset;
	sess->iv_length = a_form->iv.length;
	sess->mac_len = a_form->digest_length;

	if (unlikely(roc_se_ciph_key_set(&sess->roc_se_ctx, enc_type,
					 a_form->key.data, a_form->key.length,
					 NULL)))
		return -1;

	if (unlikely(roc_se_auth_key_set(&sess->roc_se_ctx, auth_type, NULL, 0,
					 a_form->digest_length)))
		return -1;

	return 0;
}
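
/* Take a metadata buffer from the queue pair mempool and record it in the
 * inflight request so it can be freed on completion.
 */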
static __rte_always_inline void *
alloc_op_meta(struct roc_se_buf_ptr *buf, int32_t len,
	      struct rte_mempool *cpt_meta_pool,
	      struct cpt_inflight_req *infl_req)
{
	uint8_t *mdata;

	if (unlikely(rte_mempool_get(cpt_meta_pool, (void **)&mdata) < 0))
		return NULL;

	buf->vaddr = mdata;
	buf->size = len;

	infl_req->mdata = mdata;
	infl_req->op_flags |= CPT_OP_FLAGS_METABUF;

	return mdata;
}
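
/* Build an IOV from the mbuf chain, starting at start_offset bytes into
 * the packet.
 */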
static __rte_always_inline uint32_t
prepare_iov_from_pkt(struct rte_mbuf *pkt, struct roc_se_iov_ptr *iovec,
		     uint32_t start_offset)
{
	uint16_t index = 0;
	void *seg_data = NULL;
	int32_t seg_size = 0;

	if (!pkt) {
		iovec->buf_cnt = 0;
		return 0;
	}

	if (!start_offset) {
		seg_data = rte_pktmbuf_mtod(pkt, void *);
		seg_size = pkt->data_len;
	} else {
		while (start_offset >= pkt->data_len) {
			start_offset -= pkt->data_len;
			pkt = pkt->next;
		}

		seg_data = rte_pktmbuf_mtod_offset(pkt, void *, start_offset);
		seg_size = pkt->data_len - start_offset;
		if (!seg_size)
			return 1;
	}

	/* first seg */
	iovec->bufs[index].vaddr = seg_data;
	iovec->bufs[index].size = seg_size;
	index++;
	pkt = pkt->next;

	while (unlikely(pkt != NULL)) {
		seg_data = rte_pktmbuf_mtod(pkt, void *);
		seg_size = pkt->data_len;
		if (!seg_size)
			break;

		iovec->bufs[index].vaddr = seg_data;
		iovec->bufs[index].size = seg_size;
		index++;

		pkt = pkt->next;
	}

	iovec->buf_cnt = index;
	return 0;
}
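
/* In-place variant: a single-segment mbuf with enough headroom qualifies
 * for Direct mode; otherwise an IOV is built for SG mode.
 */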
static __rte_always_inline uint32_t
prepare_iov_from_pkt_inplace(struct rte_mbuf *pkt,
			     struct roc_se_fc_params *param, uint32_t *flags)
{
	uint16_t index = 0;
	void *seg_data = NULL;
	uint32_t seg_size = 0;
	struct roc_se_iov_ptr *iovec;

	seg_data = rte_pktmbuf_mtod(pkt, void *);
	seg_size = pkt->data_len;

	/* first seg */
	if (likely(!pkt->next)) {
		uint32_t headroom;

		*flags |= ROC_SE_SINGLE_BUF_INPLACE;
		headroom = rte_pktmbuf_headroom(pkt);
		if (likely(headroom >= 24))
			*flags |= ROC_SE_SINGLE_BUF_HEADROOM;

		param->bufs[0].vaddr = seg_data;
		param->bufs[0].size = seg_size;
		return 0;
	}
	iovec = param->src_iov;
	iovec->bufs[index].vaddr = seg_data;
	iovec->bufs[index].size = seg_size;
	index++;
	pkt = pkt->next;

	while (unlikely(pkt != NULL)) {
		seg_data = rte_pktmbuf_mtod(pkt, void *);
		seg_size = pkt->data_len;

		if (!seg_size)
			break;

		iovec->bufs[index].vaddr = seg_data;
		iovec->bufs[index].size = seg_size;
		index++;

		pkt = pkt->next;
	}

	iovec->buf_cnt = index;
	return 0;
}
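
/*
 * Translate a symmetric crypto op into ROC SE flexi-crypto params: locate
 * IV/AAD/digest buffers, pack offsets and lengths into d_offs/d_lens, pick
 * in-place vs out-of-place processing and build the CPT instruction.
 */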
static __rte_always_inline int
fill_fc_params(struct rte_crypto_op *cop, struct cnxk_se_sess *sess,
	       struct cpt_qp_meta_info *m_info,
	       struct cpt_inflight_req *infl_req, struct cpt_inst_s *inst)
{
	struct roc_se_ctx *ctx = &sess->roc_se_ctx;
	uint8_t op_minor = ctx->template_w4.s.opcode_minor;
	struct rte_crypto_sym_op *sym_op = cop->sym;
	void *mdata = NULL;
	uint32_t mc_hash_off;
	uint32_t flags = 0;
	uint64_t d_offs, d_lens;
	struct rte_mbuf *m_src, *m_dst;
	uint8_t cpt_op = sess->cpt_op;
#ifdef CPT_ALWAYS_USE_SG_MODE
	uint8_t inplace = 0;
#else
	uint8_t inplace = 1;
#endif
	struct roc_se_fc_params fc_params;
	char src[SRC_IOV_SIZE];
	char dst[SRC_IOV_SIZE];
	uint32_t iv_buf[4];
	int ret;

	if (likely(sess->iv_length)) {
		flags |= ROC_SE_VALID_IV_BUF;
		fc_params.iv_buf = rte_crypto_op_ctod_offset(cop, uint8_t *,
							     sess->iv_offset);
		if (sess->aes_ctr && unlikely(sess->iv_length != 16)) {
			memcpy((uint8_t *)iv_buf,
			       rte_crypto_op_ctod_offset(cop, uint8_t *,
							 sess->iv_offset),
			       12);
			iv_buf[3] = rte_cpu_to_be_32(0x1);
			fc_params.iv_buf = iv_buf;
		}
	}

	if (sess->zsk_flag) {
		fc_params.auth_iv_buf = rte_crypto_op_ctod_offset(
			cop, uint8_t *, sess->auth_iv_offset);
		if (sess->zsk_flag != ROC_SE_ZS_EA)
			inplace = 0;
	}
	m_src = sym_op->m_src;
	m_dst = sym_op->m_dst;

	if (sess->aes_gcm || sess->chacha_poly) {
		uint8_t *salt;
		uint8_t *aad_data;
		uint16_t aad_len;

		d_offs = sym_op->aead.data.offset;
		d_lens = sym_op->aead.data.length;
		mc_hash_off =
			sym_op->aead.data.offset + sym_op->aead.data.length;

		aad_data = sym_op->aead.aad.data;
		aad_len = sess->aad_length;
		if (likely((aad_data + aad_len) ==
			   rte_pktmbuf_mtod_offset(m_src, uint8_t *,
						   sym_op->aead.data.offset))) {
			d_offs = (d_offs - aad_len) | (d_offs << 16);
			d_lens = (d_lens + aad_len) | (d_lens << 32);
		} else {
			fc_params.aad_buf.vaddr = sym_op->aead.aad.data;
			fc_params.aad_buf.size = aad_len;
			flags |= ROC_SE_VALID_AAD_BUF;
			inplace = 0;
			d_offs = d_offs << 16;
			d_lens = d_lens << 32;
		}

		salt = fc_params.iv_buf;
		if (unlikely(*(uint32_t *)salt != sess->salt)) {
			cpt_fc_salt_update(&sess->roc_se_ctx, salt);
			sess->salt = *(uint32_t *)salt;
		}
		fc_params.iv_buf = salt + 4;
		if (likely(sess->mac_len)) {
			struct rte_mbuf *m =
				(cpt_op & ROC_SE_OP_ENCODE) ? m_dst : m_src;

			if (!m)
				m = m_src;

			/* hmac immediately following data is best case */
			if (unlikely(rte_pktmbuf_mtod(m, uint8_t *) +
					     mc_hash_off !=
				     (uint8_t *)sym_op->aead.digest.data)) {
				flags |= ROC_SE_VALID_MAC_BUF;
				fc_params.mac_buf.size = sess->mac_len;
				fc_params.mac_buf.vaddr =
					sym_op->aead.digest.data;
				inplace = 0;
			}
		}
	} else {
		d_offs = sym_op->cipher.data.offset;
		d_lens = sym_op->cipher.data.length;
		mc_hash_off =
			sym_op->cipher.data.offset + sym_op->cipher.data.length;
		d_offs = (d_offs << 16) | sym_op->auth.data.offset;
		d_lens = (d_lens << 32) | sym_op->auth.data.length;

		if (mc_hash_off <
		    (sym_op->auth.data.offset + sym_op->auth.data.length)) {
			mc_hash_off = (sym_op->auth.data.offset +
				       sym_op->auth.data.length);
		}
		/* for gmac, salt should be updated like in gcm */
		if (unlikely(sess->is_gmac)) {
			uint8_t *salt;
			salt = fc_params.iv_buf;
			if (unlikely(*(uint32_t *)salt != sess->salt)) {
				cpt_fc_salt_update(&sess->roc_se_ctx, salt);
				sess->salt = *(uint32_t *)salt;
			}
			fc_params.iv_buf = salt + 4;
		}
		if (likely(sess->mac_len)) {
			struct rte_mbuf *m;

			m = (cpt_op & ROC_SE_OP_ENCODE) ? m_dst : m_src;
			if (!m)
				m = m_src;

			/* hmac immediately following data is best case */
			if (!(op_minor & ROC_SE_FC_MINOR_OP_HMAC_FIRST) &&
			    (unlikely(rte_pktmbuf_mtod(m, uint8_t *) +
					      mc_hash_off !=
				      (uint8_t *)sym_op->auth.digest.data))) {
				flags |= ROC_SE_VALID_MAC_BUF;
				fc_params.mac_buf.size = sess->mac_len;
				fc_params.mac_buf.vaddr =
					sym_op->auth.digest.data;
				inplace = 0;
			}
		}
	}
	fc_params.ctx_buf.vaddr = &sess->roc_se_ctx;

	if (!(op_minor & ROC_SE_FC_MINOR_OP_HMAC_FIRST) &&
	    unlikely(sess->is_null || sess->cpt_op == ROC_SE_OP_DECODE))
		inplace = 0;

	if (likely(!m_dst && inplace)) {
		/* Case of single buffer without AAD buf or
		 * separate mac buf in place and
		 * not air crypto
		 */
		fc_params.dst_iov = fc_params.src_iov = (void *)src;

		if (unlikely(prepare_iov_from_pkt_inplace(m_src, &fc_params,
							  &flags))) {
			plt_dp_err("Prepare inplace src iov failed");
			ret = -EINVAL;
			goto err_exit;
		}
	} else {
		/* Out of place processing */
		fc_params.src_iov = (void *)src;
		fc_params.dst_iov = (void *)dst;

		/* Store SG I/O in the api for reuse */
		if (prepare_iov_from_pkt(m_src, fc_params.src_iov, 0)) {
			plt_dp_err("Prepare src iov failed");
			ret = -EINVAL;
			goto err_exit;
		}

		if (unlikely(m_dst != NULL)) {
			uint32_t pkt_len;

			/* Try to make room as much as src has */
			pkt_len = rte_pktmbuf_pkt_len(m_dst);

			if (unlikely(pkt_len < rte_pktmbuf_pkt_len(m_src))) {
				pkt_len = rte_pktmbuf_pkt_len(m_src) - pkt_len;
				if (!rte_pktmbuf_append(m_dst, pkt_len)) {
					plt_dp_err("Not enough space in "
						   "m_dst %p, need %u"
						   " more",
						   m_dst, pkt_len);
					ret = -EINVAL;
					goto err_exit;
				}
			}

			if (prepare_iov_from_pkt(m_dst, fc_params.dst_iov, 0)) {
				plt_dp_err("Prepare dst iov failed for "
					   "m_dst %p",
					   m_dst);
				ret = -EINVAL;
				goto err_exit;
			}
		} else {
			fc_params.dst_iov = (void *)src;
		}
	}

	if (unlikely(!((flags & ROC_SE_SINGLE_BUF_INPLACE) &&
		       (flags & ROC_SE_SINGLE_BUF_HEADROOM) &&
		       ((ctx->fc_type == ROC_SE_FC_GEN) ||
			(ctx->fc_type == ROC_SE_PDCP))))) {
		mdata = alloc_op_meta(&fc_params.meta_buf, m_info->mlen,
				      m_info->pool, infl_req);
		if (mdata == NULL) {
			plt_dp_err("Error allocating meta buffer for request");
			return -ENOMEM;
		}
	}

	/* Finally prepare the instruction */
	if (cpt_op & ROC_SE_OP_ENCODE)
		ret = cpt_fc_enc_hmac_prep(flags, d_offs, d_lens, &fc_params,
					   inst);
	else
		ret = cpt_fc_dec_hmac_prep(flags, d_offs, d_lens, &fc_params,
					   inst);

	if (unlikely(ret)) {
		plt_dp_err("Preparing request failed due to bad input arg");
		goto free_mdata_and_exit;
	}

	return 0;

free_mdata_and_exit:
	if (infl_req->op_flags & CPT_OP_FLAGS_METABUF)
		rte_mempool_put(m_info->pool, infl_req->mdata);
err_exit:
	return ret;
}

#endif /*_CNXK_SE_H_ */