1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018 Cavium, Inc
9 #include "cpt_common.h"
10 #include "cpt_hw_types.h"
11 #include "cpt_mcode_defines.h"
14 * This file defines functions that are interfaces to the microcode spec.
18 static uint8_t zuc_d[32] = {
19 0x44, 0xD7, 0x26, 0xBC, 0x62, 0x6B, 0x13, 0x5E,
20 0x57, 0x89, 0x35, 0xE2, 0x71, 0x35, 0x09, 0xAF,
21 0x4D, 0x78, 0x2F, 0x13, 0x6B, 0xC4, 0x1A, 0xF1,
22 0x5E, 0x26, 0x3C, 0x4D, 0x78, 0x9A, 0x47, 0xAC
25 static __rte_always_inline int
26 cpt_is_algo_supported(struct rte_crypto_sym_xform *xform)
29 * Microcode only supports the following combinations:
30 * Encryption followed by authentication
31 * Authentication followed by decryption
34 if ((xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) &&
35 (xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) &&
36 (xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)) {
37 /* Unsupported as of now by microcode */
38 CPT_LOG_DP_ERR("Unsupported combination");
41 if ((xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) &&
42 (xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) &&
43 (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT)) {
44 /* For GMAC auth there is no cipher operation */
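/* Note: xform is the cipher xform here; aead.algo is read through
 * the rte_crypto_sym_xform union to spot the GCM/GMAC pairing.
 */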
45 if (xform->aead.algo != RTE_CRYPTO_AEAD_AES_GCM ||
46 xform->next->auth.algo !=
47 RTE_CRYPTO_AUTH_AES_GMAC) {
48 /* Unsupported as of now by microcode */
49 CPT_LOG_DP_ERR("Unsupported combination");
57 static __rte_always_inline void
58 gen_key_snow3g(const uint8_t *ck, uint32_t *keyx)
62 for (i = 0; i < 4; i++) {
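/* Pack each 4-byte group of CK into one 32-bit word; the words are
 * stored in reverse order, as the microcode expects.
 */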
64 keyx[3 - i] = (ck[base] << 24) | (ck[base + 1] << 16) |
65 (ck[base + 2] << 8) | (ck[base + 3]);
66 keyx[3 - i] = rte_cpu_to_be_32(keyx[3 - i]);
70 static __rte_always_inline void
71 cpt_fc_salt_update(void *ctx,
74 struct cpt_ctx *cpt_ctx = ctx;
75 memcpy(&cpt_ctx->fctx.enc.encr_iv, salt, 4);
78 static __rte_always_inline int
79 cpt_fc_ciph_validate_key_aes(uint16_t key_len)
91 static __rte_always_inline int
92 cpt_fc_ciph_set_type(cipher_type_t type, struct cpt_ctx *ctx, uint16_t key_len)
108 if (unlikely(cpt_fc_ciph_validate_key_aes(key_len) != 0))
113 key_len = key_len / 2;
114 if (unlikely(key_len == CPT_BYTE_24)) {
115 CPT_LOG_DP_ERR("Invalid AES key len for XTS");
118 if (unlikely(cpt_fc_ciph_validate_key_aes(key_len) != 0))
124 if (unlikely(key_len != 16))
126 /* No support for AEAD yet */
127 if (unlikely(ctx->hash_type))
129 fc_type = ZUC_SNOW3G;
133 if (unlikely(key_len != 16))
135 /* No support for AEAD yet */
136 if (unlikely(ctx->hash_type))
144 ctx->fc_type = fc_type;
148 static __rte_always_inline void
149 cpt_fc_ciph_set_key_passthrough(struct cpt_ctx *cpt_ctx, mc_fc_context_t *fctx)
151 cpt_ctx->enc_cipher = 0;
152 fctx->enc.enc_cipher = 0;
155 static __rte_always_inline void
156 cpt_fc_ciph_set_key_set_aes_key_type(mc_fc_context_t *fctx, uint16_t key_len)
158 mc_aes_type_t aes_key_type = 0;
161 aes_key_type = AES_128_BIT;
164 aes_key_type = AES_192_BIT;
167 aes_key_type = AES_256_BIT;
170 /* This should not happen */
171 CPT_LOG_DP_ERR("Invalid AES key len");
174 fctx->enc.aes_key = aes_key_type;
177 static __rte_always_inline void
178 cpt_fc_ciph_set_key_snow3g_uea2(struct cpt_ctx *cpt_ctx, const uint8_t *key,
183 gen_key_snow3g(key, keyx);
184 memcpy(cpt_ctx->zs_ctx.ci_key, keyx, key_len);
185 cpt_ctx->zsk_flags = 0;
188 static __rte_always_inline void
189 cpt_fc_ciph_set_key_zuc_eea3(struct cpt_ctx *cpt_ctx, const uint8_t *key,
193 memcpy(cpt_ctx->zs_ctx.ci_key, key, key_len);
194 memcpy(cpt_ctx->zs_ctx.zuc_const, zuc_d, 32);
195 cpt_ctx->zsk_flags = 0;
198 static __rte_always_inline void
199 cpt_fc_ciph_set_key_kasumi_f8_ecb(struct cpt_ctx *cpt_ctx, const uint8_t *key,
203 memcpy(cpt_ctx->k_ctx.ci_key, key, key_len);
204 cpt_ctx->zsk_flags = 0;
207 static __rte_always_inline void
208 cpt_fc_ciph_set_key_kasumi_f8_cbc(struct cpt_ctx *cpt_ctx, const uint8_t *key,
211 memcpy(cpt_ctx->k_ctx.ci_key, key, key_len);
212 cpt_ctx->zsk_flags = 0;
215 static __rte_always_inline int
216 cpt_fc_ciph_set_key(void *ctx, cipher_type_t type, const uint8_t *key,
217 uint16_t key_len, uint8_t *salt)
219 struct cpt_ctx *cpt_ctx = ctx;
220 mc_fc_context_t *fctx = &cpt_ctx->fctx;
223 ret = cpt_fc_ciph_set_type(type, cpt_ctx, key_len);
227 if (cpt_ctx->fc_type == FC_GEN) {
229 * We always need to say the IV is from DPTR, as the user can
230 * sometimes override the IV per operation.
232 fctx->enc.iv_source = CPT_FROM_DPTR;
237 cpt_fc_ciph_set_key_passthrough(cpt_ctx, fctx);
240 /* CPT performs DES using 3DES with the 8B DES key
241 * replicated twice more to match the 24B 3DES key.
242 * E.g. if the original key is "0x0a 0x0b", the new key is
243 * "0x0a 0x0b 0x0a 0x0b 0x0a 0x0b"
246 /* Skipping the first 8B as it will be copied
247 * in the regular code flow
249 memcpy(fctx->enc.encr_key+key_len, key, key_len);
250 memcpy(fctx->enc.encr_key+2*key_len, key, key_len);
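/* With all three 8B keys equal, 3DES (EDE) degenerates to plain
 * single DES, which is the intent here.
 */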
254 /* For DES3_ECB, the IV needs to come from CTX. */
255 fctx->enc.iv_source = CPT_FROM_CTX;
261 cpt_fc_ciph_set_key_set_aes_key_type(fctx, key_len);
264 /* Even though the IV source is from DPTR,
265 * the AES-GCM salt is taken from CTX
268 memcpy(fctx->enc.encr_iv, salt, 4);
269 /* Assuming it was just salt update
275 cpt_fc_ciph_set_key_set_aes_key_type(fctx, key_len);
278 key_len = key_len / 2;
279 cpt_fc_ciph_set_key_set_aes_key_type(fctx, key_len);
281 /* Copy key2 for XTS into ipad */
282 memset(fctx->hmac.ipad, 0, sizeof(fctx->hmac.ipad));
283 memcpy(fctx->hmac.ipad, &key[key_len], key_len);
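/* XTS has no HMAC, so key2 rides in the otherwise unused ipad
 * field; key1 is copied into encr_key by the common code below.
 */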
286 cpt_fc_ciph_set_key_snow3g_uea2(cpt_ctx, key, key_len);
289 cpt_fc_ciph_set_key_zuc_eea3(cpt_ctx, key, key_len);
292 cpt_fc_ciph_set_key_kasumi_f8_ecb(cpt_ctx, key, key_len);
295 cpt_fc_ciph_set_key_kasumi_f8_cbc(cpt_ctx, key, key_len);
301 /* Only for FC_GEN case */
303 /* For GMAC auth, cipher must be NULL */
304 if (cpt_ctx->hash_type != GMAC_TYPE)
305 fctx->enc.enc_cipher = type;
307 memcpy(fctx->enc.encr_key, key, key_len);
310 cpt_ctx->enc_cipher = type;
315 static __rte_always_inline uint32_t
316 fill_sg_comp(sg_comp_t *list,
318 phys_addr_t dma_addr,
321 sg_comp_t *to = &list[i>>2];
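/* Each sg_comp_t packs four {len, ptr} slots: i >> 2 selects the
 * component and i % 4 the slot; both fields are big-endian, as the
 * microcode expects.
 */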
323 to->u.s.len[i%4] = rte_cpu_to_be_16(size);
324 to->ptr[i%4] = rte_cpu_to_be_64(dma_addr);
329 static __rte_always_inline uint32_t
330 fill_sg_comp_from_buf(sg_comp_t *list,
334 sg_comp_t *to = &list[i>>2];
336 to->u.s.len[i%4] = rte_cpu_to_be_16(from->size);
337 to->ptr[i%4] = rte_cpu_to_be_64(from->dma_addr);
342 static __rte_always_inline uint32_t
343 fill_sg_comp_from_buf_min(sg_comp_t *list,
348 sg_comp_t *to = &list[i >> 2];
349 uint32_t size = *psize;
352 e_len = (size > from->size) ? from->size : size;
353 to->u.s.len[i % 4] = rte_cpu_to_be_16(e_len);
354 to->ptr[i % 4] = rte_cpu_to_be_64(from->dma_addr);
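/* e_len is capped at the buffer size; any shortfall remains in
 * *psize so the caller can detect a short buffer.
 */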
361 * This fills the SGIO list expected by the MC
362 * from the IOV given by the user.
364 static __rte_always_inline uint32_t
365 fill_sg_comp_from_iov(sg_comp_t *list,
367 iov_ptr_t *from, uint32_t from_offset,
368 uint32_t *psize, buf_ptr_t *extra_buf,
369 uint32_t extra_offset)
372 uint32_t extra_len = extra_buf ? extra_buf->size : 0;
373 uint32_t size = *psize - extra_len;
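/* extra_buf (the AAD buffer in the callers) is spliced into the
 * list extra_offset bytes into the IOV data; its bytes are not
 * drawn from the IOV, hence the exclusion from size.
 */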
377 for (j = 0; (j < from->buf_cnt) && size; j++) {
378 phys_addr_t e_dma_addr;
380 sg_comp_t *to = &list[i >> 2];
385 if (unlikely(from_offset)) {
386 if (from_offset >= bufs[j].size) {
387 from_offset -= bufs[j].size;
390 e_dma_addr = bufs[j].dma_addr + from_offset;
391 e_len = (size > (bufs[j].size - from_offset)) ?
392 (bufs[j].size - from_offset) : size;
395 e_dma_addr = bufs[j].dma_addr;
396 e_len = (size > bufs[j].size) ?
400 to->u.s.len[i % 4] = rte_cpu_to_be_16(e_len);
401 to->ptr[i % 4] = rte_cpu_to_be_64(e_dma_addr);
403 if (extra_len && (e_len >= extra_offset)) {
404 /* Break the data at given offset */
405 uint32_t next_len = e_len - extra_offset;
406 phys_addr_t next_dma = e_dma_addr + extra_offset;
411 e_len = extra_offset;
413 to->u.s.len[i % 4] = rte_cpu_to_be_16(e_len);
416 /* Insert extra data ptr */
421 rte_cpu_to_be_16(extra_buf->size);
423 rte_cpu_to_be_64(extra_buf->dma_addr);
425 /* size already decremented by extra len */
428 /* insert the rest of the data */
432 to->u.s.len[i % 4] = rte_cpu_to_be_16(next_len);
433 to->ptr[i % 4] = rte_cpu_to_be_64(next_dma);
442 extra_offset -= size;
450 static __rte_always_inline void
451 cpt_digest_gen_prep(uint32_t flags,
453 digest_params_t *params,
457 struct cpt_request_info *req;
459 uint16_t data_len, mac_len, key_len;
460 auth_type_t hash_type;
463 sg_comp_t *gather_comp;
464 sg_comp_t *scatter_comp;
466 uint32_t g_size_bytes, s_size_bytes;
467 uint64_t dptr_dma, rptr_dma;
468 vq_cmd_word0_t vq_cmd_w0;
469 vq_cmd_word3_t vq_cmd_w3;
470 void *c_vaddr, *m_vaddr;
471 uint64_t c_dma, m_dma;
472 opcode_info_t opcode;
474 ctx = params->ctx_buf.vaddr;
475 meta_p = &params->meta_buf;
477 m_vaddr = meta_p->vaddr;
478 m_dma = meta_p->dma_addr;
481 * Save initial space following the app data so that the completion code &
482 * alternate completion code fall in the same cache line as the app data
484 m_vaddr = (uint8_t *)m_vaddr + COMPLETION_CODE_SIZE;
485 m_dma += COMPLETION_CODE_SIZE;
486 size = (uint8_t *)RTE_PTR_ALIGN((uint8_t *)m_vaddr, 16) -
488 c_vaddr = (uint8_t *)m_vaddr + size;
489 c_dma = m_dma + size;
490 size += sizeof(cpt_res_s_t);
492 m_vaddr = (uint8_t *)m_vaddr + size;
497 size = sizeof(struct cpt_request_info);
498 m_vaddr = (uint8_t *)m_vaddr + size;
501 hash_type = ctx->hash_type;
502 mac_len = ctx->mac_len;
503 key_len = ctx->auth_key_len;
504 data_len = AUTH_DLEN(d_lens);
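/* d_offs/d_lens pack the cipher and auth offsets/lengths into
 * single 64-bit words; the ENCR_*()/AUTH_*() macros from
 * cpt_common.h extract the respective halves.
 */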
508 vq_cmd_w0.s.param2 = ((uint16_t)hash_type << 8);
510 opcode.s.major = CPT_MAJOR_OP_HMAC | CPT_DMA_MODE;
511 vq_cmd_w0.s.param1 = key_len;
512 vq_cmd_w0.s.dlen = data_len + ROUNDUP8(key_len);
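/* The HMAC key is DMA-ed inline ahead of the data (see the
 * gather entry for auth_key below), so DLEN also covers the
 * 8B-rounded key.
 */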
514 opcode.s.major = CPT_MAJOR_OP_HASH | CPT_DMA_MODE;
515 vq_cmd_w0.s.param1 = 0;
516 vq_cmd_w0.s.dlen = data_len;
521 /* Only the NULL-auth, NULL-cipher case enters this branch */
522 if (unlikely(!hash_type && !ctx->enc_cipher)) {
523 opcode.s.major = CPT_MAJOR_OP_MISC;
524 /* Minor op is passthrough */
525 opcode.s.minor = 0x03;
526 /* Send out completion code only */
527 vq_cmd_w0.s.param2 = 0x1;
530 vq_cmd_w0.s.opcode = opcode.flags;
532 /* DPTR has SG list */
536 ((uint16_t *)in_buffer)[0] = 0;
537 ((uint16_t *)in_buffer)[1] = 0;
539 /* TODO Add error check if space will be sufficient */
540 gather_comp = (sg_comp_t *)((uint8_t *)m_vaddr + 8);
549 uint64_t k_dma = params->ctx_buf.dma_addr +
550 offsetof(struct cpt_ctx, auth_key);
552 i = fill_sg_comp(gather_comp, i, k_dma, ROUNDUP8(key_len));
558 i = fill_sg_comp_from_iov(gather_comp, i, params->src_iov,
560 if (unlikely(size)) {
561 CPT_LOG_DP_DEBUG("Insufficient src IOV size, short"
567 * Looks like we need to support a zero-data
568 * gather ptr in case of hash & HMAC
572 ((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
573 g_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
580 scatter_comp = (sg_comp_t *)((uint8_t *)gather_comp + g_size_bytes);
582 if (flags & VALID_MAC_BUF) {
583 if (unlikely(params->mac_buf.size < mac_len)) {
584 CPT_LOG_DP_ERR("Insufficient MAC size");
589 i = fill_sg_comp_from_buf_min(scatter_comp, i,
590 &params->mac_buf, &size);
593 i = fill_sg_comp_from_iov(scatter_comp, i,
594 params->src_iov, data_len,
596 if (unlikely(size)) {
597 CPT_LOG_DP_ERR("Insufficient dst IOV size, short by"
603 ((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
604 s_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
606 size = g_size_bytes + s_size_bytes + SG_LIST_HDR_SIZE;
608 /* This is the DPTR len in case of SG mode */
609 vq_cmd_w0.s.dlen = size;
611 m_vaddr = (uint8_t *)m_vaddr + size;
614 /* cpt alternate completion address saved earlier */
615 req->alternate_caddr = (uint64_t *)((uint8_t *)c_vaddr - 8);
616 *req->alternate_caddr = ~((uint64_t)COMPLETION_CODE_INIT);
617 rptr_dma = c_dma - 8;
619 req->ist.ei1 = dptr_dma;
620 req->ist.ei2 = rptr_dma;
625 /* 16 byte aligned cpt res address */
626 req->completion_addr = (uint64_t *)((uint8_t *)c_vaddr);
627 *req->completion_addr = COMPLETION_CODE_INIT;
628 req->comp_baddr = c_dma;
630 /* Fill microcode part of instruction */
631 req->ist.ei0 = vq_cmd_w0.u64;
632 req->ist.ei3 = vq_cmd_w3.u64;
640 static __rte_always_inline void
641 cpt_enc_hmac_prep(uint32_t flags,
644 fc_params_t *fc_params,
648 uint32_t iv_offset = 0;
649 int32_t inputlen, outputlen, enc_dlen, auth_dlen;
650 struct cpt_ctx *cpt_ctx;
651 uint32_t cipher_type, hash_type;
652 uint32_t mac_len, size;
654 struct cpt_request_info *req;
655 buf_ptr_t *meta_p, *aad_buf = NULL;
656 uint32_t encr_offset, auth_offset;
657 uint32_t encr_data_len, auth_data_len, aad_len = 0;
658 uint32_t passthrough_len = 0;
659 void *m_vaddr, *offset_vaddr;
660 uint64_t m_dma, offset_dma, ctx_dma;
661 vq_cmd_word0_t vq_cmd_w0;
662 vq_cmd_word3_t vq_cmd_w3;
665 opcode_info_t opcode;
667 meta_p = &fc_params->meta_buf;
668 m_vaddr = meta_p->vaddr;
669 m_dma = meta_p->dma_addr;
671 encr_offset = ENCR_OFFSET(d_offs);
672 auth_offset = AUTH_OFFSET(d_offs);
673 encr_data_len = ENCR_DLEN(d_lens);
674 auth_data_len = AUTH_DLEN(d_lens);
675 if (unlikely(flags & VALID_AAD_BUF)) {
677 * We don't support both AAD
678 * and auth data separately
682 aad_len = fc_params->aad_buf.size;
683 aad_buf = &fc_params->aad_buf;
685 cpt_ctx = fc_params->ctx_buf.vaddr;
686 cipher_type = cpt_ctx->enc_cipher;
687 hash_type = cpt_ctx->hash_type;
688 mac_len = cpt_ctx->mac_len;
691 * Save initial space following the app data so that the completion code &
692 * alternate completion code fall in the same cache line as the app data
694 m_vaddr = (uint8_t *)m_vaddr + COMPLETION_CODE_SIZE;
695 m_dma += COMPLETION_CODE_SIZE;
696 size = (uint8_t *)RTE_PTR_ALIGN((uint8_t *)m_vaddr, 16) -
699 c_vaddr = (uint8_t *)m_vaddr + size;
700 c_dma = m_dma + size;
701 size += sizeof(cpt_res_s_t);
703 m_vaddr = (uint8_t *)m_vaddr + size;
706 /* start cpt request info struct at 8 byte boundary */
707 size = (uint8_t *)RTE_PTR_ALIGN(m_vaddr, 8) -
710 req = (struct cpt_request_info *)((uint8_t *)m_vaddr + size);
712 size += sizeof(struct cpt_request_info);
713 m_vaddr = (uint8_t *)m_vaddr + size;
716 if (hash_type == GMAC_TYPE)
719 if (unlikely(!(flags & VALID_IV_BUF))) {
721 iv_offset = ENCR_IV_OFFSET(d_offs);
724 if (unlikely(flags & VALID_AAD_BUF)) {
726 * When AAD is given, data above encr_offset is passed through.
727 * Since AAD is given as a separate pointer and not as an offset,
728 * this is a special case: we need to fragment the input data
729 * into passthrough + encr_data and then insert AAD in between.
731 if (hash_type != GMAC_TYPE) {
732 passthrough_len = encr_offset;
733 auth_offset = passthrough_len + iv_len;
734 encr_offset = passthrough_len + aad_len + iv_len;
735 auth_data_len = aad_len + encr_data_len;
737 passthrough_len = 16 + aad_len;
738 auth_offset = passthrough_len + iv_len;
739 auth_data_len = aad_len;
742 encr_offset += iv_len;
743 auth_offset += iv_len;
747 opcode.s.major = CPT_MAJOR_OP_FC;
750 auth_dlen = auth_offset + auth_data_len;
751 enc_dlen = encr_data_len + encr_offset;
752 if (unlikely(encr_data_len & 0xf)) {
753 if ((cipher_type == DES3_CBC) || (cipher_type == DES3_ECB))
754 enc_dlen = ROUNDUP8(encr_data_len) + encr_offset;
755 else if (likely((cipher_type == AES_CBC) ||
756 (cipher_type == AES_ECB)))
757 enc_dlen = ROUNDUP16(encr_data_len) + encr_offset;
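/* Non-block-multiple input: the DMA length is rounded up to the
 * cipher block size (8B for 3DES, 16B for AES) so the microcode
 * processes whole blocks.
 */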
760 if (unlikely(hash_type == GMAC_TYPE)) {
761 encr_offset = auth_dlen;
765 if (unlikely(auth_dlen > enc_dlen)) {
766 inputlen = auth_dlen;
767 outputlen = auth_dlen + mac_len;
770 outputlen = enc_dlen + mac_len;
775 vq_cmd_w0.s.param1 = encr_data_len;
776 vq_cmd_w0.s.param2 = auth_data_len;
778 * In 83XX we have a limitation: the IV & offset control word are
779 * not part of the instruction and need to be part of the data
780 * buffer, so we check whether head room is available and only
781 * then do the direct mode processing
783 if (likely((flags & SINGLE_BUF_INPLACE) &&
784 (flags & SINGLE_BUF_HEADTAILROOM))) {
785 void *dm_vaddr = fc_params->bufs[0].vaddr;
786 uint64_t dm_dma_addr = fc_params->bufs[0].dma_addr;
788 * This flag indicates that 24 bytes of head room and 8 bytes of
789 * tail room are available, so we get to do DIRECT MODE within
790 * that limitation
793 offset_vaddr = (uint8_t *)dm_vaddr - OFF_CTRL_LEN - iv_len;
794 offset_dma = dm_dma_addr - OFF_CTRL_LEN - iv_len;
797 req->ist.ei1 = offset_dma;
798 /* RPTR should just exclude offset control word */
799 req->ist.ei2 = dm_dma_addr - iv_len;
800 req->alternate_caddr = (uint64_t *)((uint8_t *)dm_vaddr
801 + outputlen - iv_len);
803 vq_cmd_w0.s.dlen = inputlen + OFF_CTRL_LEN;
805 vq_cmd_w0.s.opcode = opcode.flags;
807 if (likely(iv_len)) {
808 uint64_t *dest = (uint64_t *)((uint8_t *)offset_vaddr
810 uint64_t *src = fc_params->iv_buf;
815 *(uint64_t *)offset_vaddr =
816 rte_cpu_to_be_64(((uint64_t)encr_offset << 16) |
817 ((uint64_t)iv_offset << 8) |
818 ((uint64_t)auth_offset));
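/* Offset control word: encr_offset, iv_offset and auth_offset
 * packed into a single big-endian 64-bit word for the microcode.
 */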
821 uint32_t i, g_size_bytes, s_size_bytes;
822 uint64_t dptr_dma, rptr_dma;
823 sg_comp_t *gather_comp;
824 sg_comp_t *scatter_comp;
827 /* This falls under strict SG mode */
828 offset_vaddr = m_vaddr;
830 size = OFF_CTRL_LEN + iv_len;
832 m_vaddr = (uint8_t *)m_vaddr + size;
835 opcode.s.major |= CPT_DMA_MODE;
837 vq_cmd_w0.s.opcode = opcode.flags;
839 if (likely(iv_len)) {
840 uint64_t *dest = (uint64_t *)((uint8_t *)offset_vaddr
842 uint64_t *src = fc_params->iv_buf;
847 *(uint64_t *)offset_vaddr =
848 rte_cpu_to_be_64(((uint64_t)encr_offset << 16) |
849 ((uint64_t)iv_offset << 8) |
850 ((uint64_t)auth_offset));
852 /* DPTR has SG list */
856 ((uint16_t *)in_buffer)[0] = 0;
857 ((uint16_t *)in_buffer)[1] = 0;
859 /* TODO Add error check if space will be sufficient */
860 gather_comp = (sg_comp_t *)((uint8_t *)m_vaddr + 8);
868 /* Offset control word that includes iv */
869 i = fill_sg_comp(gather_comp, i, offset_dma,
870 OFF_CTRL_LEN + iv_len);
873 size = inputlen - iv_len;
875 uint32_t aad_offset = aad_len ? passthrough_len : 0;
877 if (unlikely(flags & SINGLE_BUF_INPLACE)) {
878 i = fill_sg_comp_from_buf_min(gather_comp, i,
882 i = fill_sg_comp_from_iov(gather_comp, i,
885 aad_buf, aad_offset);
888 if (unlikely(size)) {
889 CPT_LOG_DP_ERR("Insufficient buffer space,"
890 " size %d needed", size);
894 ((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
895 g_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
898 * Output Scatter list
902 (sg_comp_t *)((uint8_t *)gather_comp + g_size_bytes);
905 if (likely(iv_len)) {
906 i = fill_sg_comp(scatter_comp, i,
907 offset_dma + OFF_CTRL_LEN,
911 /* output data or output data + digest */
912 if (unlikely(flags & VALID_MAC_BUF)) {
913 size = outputlen - iv_len - mac_len;
915 uint32_t aad_offset =
916 aad_len ? passthrough_len : 0;
918 if (unlikely(flags & SINGLE_BUF_INPLACE)) {
919 i = fill_sg_comp_from_buf_min(
925 i = fill_sg_comp_from_iov(scatter_comp,
933 if (unlikely(size)) {
934 CPT_LOG_DP_ERR("Insufficient buffer"
935 " space, size %d needed",
942 i = fill_sg_comp_from_buf(scatter_comp, i,
943 &fc_params->mac_buf);
946 /* Output including mac */
947 size = outputlen - iv_len;
949 uint32_t aad_offset =
950 aad_len ? passthrough_len : 0;
952 if (unlikely(flags & SINGLE_BUF_INPLACE)) {
953 i = fill_sg_comp_from_buf_min(
959 i = fill_sg_comp_from_iov(scatter_comp,
967 if (unlikely(size)) {
968 CPT_LOG_DP_ERR("Insufficient buffer"
969 " space, size %d needed",
975 ((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
976 s_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
978 size = g_size_bytes + s_size_bytes + SG_LIST_HDR_SIZE;
980 /* This is the DPTR len in case of SG mode */
981 vq_cmd_w0.s.dlen = size;
983 m_vaddr = (uint8_t *)m_vaddr + size;
986 /* cpt alternate completion address saved earlier */
987 req->alternate_caddr = (uint64_t *)((uint8_t *)c_vaddr - 8);
988 *req->alternate_caddr = ~((uint64_t)COMPLETION_CODE_INIT);
989 rptr_dma = c_dma - 8;
991 req->ist.ei1 = dptr_dma;
992 req->ist.ei2 = rptr_dma;
995 ctx_dma = fc_params->ctx_buf.dma_addr +
996 offsetof(struct cpt_ctx, fctx);
1000 vq_cmd_w3.s.cptr = ctx_dma;
1002 /* 16 byte aligned cpt res address */
1003 req->completion_addr = (uint64_t *)((uint8_t *)c_vaddr);
1004 *req->completion_addr = COMPLETION_CODE_INIT;
1005 req->comp_baddr = c_dma;
1007 /* Fill microcode part of instruction */
1008 req->ist.ei0 = vq_cmd_w0.u64;
1009 req->ist.ei3 = vq_cmd_w3.u64;
1017 static __rte_always_inline void
1018 cpt_dec_hmac_prep(uint32_t flags,
1021 fc_params_t *fc_params,
1025 uint32_t iv_offset = 0, size;
1026 int32_t inputlen, outputlen, enc_dlen, auth_dlen;
1027 struct cpt_ctx *cpt_ctx;
1028 int32_t hash_type, mac_len;
1029 uint8_t iv_len = 16;
1030 struct cpt_request_info *req;
1031 buf_ptr_t *meta_p, *aad_buf = NULL;
1032 uint32_t encr_offset, auth_offset;
1033 uint32_t encr_data_len, auth_data_len, aad_len = 0;
1034 uint32_t passthrough_len = 0;
1035 void *m_vaddr, *offset_vaddr;
1036 uint64_t m_dma, offset_dma, ctx_dma;
1037 opcode_info_t opcode;
1038 vq_cmd_word0_t vq_cmd_w0;
1039 vq_cmd_word3_t vq_cmd_w3;
1043 meta_p = &fc_params->meta_buf;
1044 m_vaddr = meta_p->vaddr;
1045 m_dma = meta_p->dma_addr;
1047 encr_offset = ENCR_OFFSET(d_offs);
1048 auth_offset = AUTH_OFFSET(d_offs);
1049 encr_data_len = ENCR_DLEN(d_lens);
1050 auth_data_len = AUTH_DLEN(d_lens);
1052 if (unlikely(flags & VALID_AAD_BUF)) {
1054 * We don't support both AAD
1055 * and auth data separately
1059 aad_len = fc_params->aad_buf.size;
1060 aad_buf = &fc_params->aad_buf;
1063 cpt_ctx = fc_params->ctx_buf.vaddr;
1064 hash_type = cpt_ctx->hash_type;
1065 mac_len = cpt_ctx->mac_len;
1067 if (hash_type == GMAC_TYPE)
1070 if (unlikely(!(flags & VALID_IV_BUF))) {
1072 iv_offset = ENCR_IV_OFFSET(d_offs);
1075 if (unlikely(flags & VALID_AAD_BUF)) {
1077 * When AAD is given, data above encr_offset is passed through.
1078 * Since AAD is given as a separate pointer and not as an offset,
1079 * this is a special case: we need to fragment the input data
1080 * into passthrough + encr_data and then insert AAD in between.
1082 if (hash_type != GMAC_TYPE) {
1083 passthrough_len = encr_offset;
1084 auth_offset = passthrough_len + iv_len;
1085 encr_offset = passthrough_len + aad_len + iv_len;
1086 auth_data_len = aad_len + encr_data_len;
1088 passthrough_len = 16 + aad_len;
1089 auth_offset = passthrough_len + iv_len;
1090 auth_data_len = aad_len;
1093 encr_offset += iv_len;
1094 auth_offset += iv_len;
1098 * Save initial space following the app data so that the completion code &
1099 * alternate completion code fall in the same cache line as the app data
1101 m_vaddr = (uint8_t *)m_vaddr + COMPLETION_CODE_SIZE;
1102 m_dma += COMPLETION_CODE_SIZE;
1103 size = (uint8_t *)RTE_PTR_ALIGN((uint8_t *)m_vaddr, 16) -
1105 c_vaddr = (uint8_t *)m_vaddr + size;
1106 c_dma = m_dma + size;
1107 size += sizeof(cpt_res_s_t);
1109 m_vaddr = (uint8_t *)m_vaddr + size;
1112 /* start cpt request info structure at 8 byte alignment */
1113 size = (uint8_t *)RTE_PTR_ALIGN(m_vaddr, 8) -
1116 req = (struct cpt_request_info *)((uint8_t *)m_vaddr + size);
1118 size += sizeof(struct cpt_request_info);
1119 m_vaddr = (uint8_t *)m_vaddr + size;
1123 opcode.s.major = CPT_MAJOR_OP_FC;
1126 enc_dlen = encr_offset + encr_data_len;
1127 auth_dlen = auth_offset + auth_data_len;
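/* Decryption consumes the MAC from the input, so inputlen
 * includes mac_len while outputlen excludes it -- the mirror of
 * the encrypt path.
 */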
1129 if (auth_dlen > enc_dlen) {
1130 inputlen = auth_dlen + mac_len;
1131 outputlen = auth_dlen;
1133 inputlen = enc_dlen + mac_len;
1134 outputlen = enc_dlen;
1137 if (hash_type == GMAC_TYPE)
1138 encr_offset = inputlen;
1141 vq_cmd_w0.s.param1 = encr_data_len;
1142 vq_cmd_w0.s.param2 = auth_data_len;
1145 * In 83XX we have a limitation: the IV & offset control word are
1146 * not part of the instruction and need to be part of the data
1147 * buffer, so we check whether head room is available and only
1148 * then do the direct mode processing
1150 if (likely((flags & SINGLE_BUF_INPLACE) &&
1151 (flags & SINGLE_BUF_HEADTAILROOM))) {
1152 void *dm_vaddr = fc_params->bufs[0].vaddr;
1153 uint64_t dm_dma_addr = fc_params->bufs[0].dma_addr;
1155 * This flag indicates that 24 bytes of head room and 8 bytes of
1156 * tail room are available, so we get to do DIRECT MODE within
1157 * that limitation
1160 offset_vaddr = (uint8_t *)dm_vaddr - OFF_CTRL_LEN - iv_len;
1161 offset_dma = dm_dma_addr - OFF_CTRL_LEN - iv_len;
1162 req->ist.ei1 = offset_dma;
1164 /* RPTR should just exclude offset control word */
1165 req->ist.ei2 = dm_dma_addr - iv_len;
1167 req->alternate_caddr = (uint64_t *)((uint8_t *)dm_vaddr +
1168 outputlen - iv_len);
1169 /* Since this is decryption,
1170 * don't touch the content of the
1171 * alternate completion code space as it contains
1175 vq_cmd_w0.s.dlen = inputlen + OFF_CTRL_LEN;
1177 vq_cmd_w0.s.opcode = opcode.flags;
1179 if (likely(iv_len)) {
1180 uint64_t *dest = (uint64_t *)((uint8_t *)offset_vaddr +
1182 uint64_t *src = fc_params->iv_buf;
1187 *(uint64_t *)offset_vaddr =
1188 rte_cpu_to_be_64(((uint64_t)encr_offset << 16) |
1189 ((uint64_t)iv_offset << 8) |
1190 ((uint64_t)auth_offset));
1193 uint64_t dptr_dma, rptr_dma;
1194 uint32_t g_size_bytes, s_size_bytes;
1195 sg_comp_t *gather_comp;
1196 sg_comp_t *scatter_comp;
1200 /* This falls under strict SG mode */
1201 offset_vaddr = m_vaddr;
1203 size = OFF_CTRL_LEN + iv_len;
1205 m_vaddr = (uint8_t *)m_vaddr + size;
1208 opcode.s.major |= CPT_DMA_MODE;
1210 vq_cmd_w0.s.opcode = opcode.flags;
1212 if (likely(iv_len)) {
1213 uint64_t *dest = (uint64_t *)((uint8_t *)offset_vaddr +
1215 uint64_t *src = fc_params->iv_buf;
1220 *(uint64_t *)offset_vaddr =
1221 rte_cpu_to_be_64(((uint64_t)encr_offset << 16) |
1222 ((uint64_t)iv_offset << 8) |
1223 ((uint64_t)auth_offset));
1225 /* DPTR has SG list */
1226 in_buffer = m_vaddr;
1229 ((uint16_t *)in_buffer)[0] = 0;
1230 ((uint16_t *)in_buffer)[1] = 0;
1232 /* TODO Add error check if space will be sufficient */
1233 gather_comp = (sg_comp_t *)((uint8_t *)m_vaddr + 8);
1240 /* Offset control word that includes iv */
1241 i = fill_sg_comp(gather_comp, i, offset_dma,
1242 OFF_CTRL_LEN + iv_len);
1244 /* Add input data */
1245 if (flags & VALID_MAC_BUF) {
1246 size = inputlen - iv_len - mac_len;
1248 /* input data only */
1249 if (unlikely(flags & SINGLE_BUF_INPLACE)) {
1250 i = fill_sg_comp_from_buf_min(
1255 uint32_t aad_offset = aad_len ?
1256 passthrough_len : 0;
1258 i = fill_sg_comp_from_iov(gather_comp,
1265 if (unlikely(size)) {
1266 CPT_LOG_DP_ERR("Insufficient buffer"
1267 " space, size %d needed",
1275 i = fill_sg_comp_from_buf(gather_comp, i,
1276 &fc_params->mac_buf);
1279 /* input data + mac */
1280 size = inputlen - iv_len;
1282 if (unlikely(flags & SINGLE_BUF_INPLACE)) {
1283 i = fill_sg_comp_from_buf_min(
1288 uint32_t aad_offset = aad_len ?
1289 passthrough_len : 0;
1291 if (unlikely(!fc_params->src_iov)) {
1292 CPT_LOG_DP_ERR("Bad input args");
1296 i = fill_sg_comp_from_iov(
1304 if (unlikely(size)) {
1305 CPT_LOG_DP_ERR("Insufficient buffer"
1306 " space, size %d needed",
1312 ((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
1313 g_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
1316 * Output Scatter List
1321 (sg_comp_t *)((uint8_t *)gather_comp + g_size_bytes);
1325 i = fill_sg_comp(scatter_comp, i,
1326 offset_dma + OFF_CTRL_LEN,
1330 /* Add output data */
1331 size = outputlen - iv_len;
1333 if (unlikely(flags & SINGLE_BUF_INPLACE)) {
1334 /* handle single buffer here */
1335 i = fill_sg_comp_from_buf_min(scatter_comp, i,
1339 uint32_t aad_offset = aad_len ?
1340 passthrough_len : 0;
1342 if (unlikely(!fc_params->dst_iov)) {
1343 CPT_LOG_DP_ERR("Bad input args");
1347 i = fill_sg_comp_from_iov(scatter_comp, i,
1348 fc_params->dst_iov, 0,
1353 if (unlikely(size)) {
1354 CPT_LOG_DP_ERR("Insufficient buffer space,"
1355 " size %d needed", size);
1360 ((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
1361 s_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
1363 size = g_size_bytes + s_size_bytes + SG_LIST_HDR_SIZE;
1365 /* This is the DPTR len in case of SG mode */
1366 vq_cmd_w0.s.dlen = size;
1368 m_vaddr = (uint8_t *)m_vaddr + size;
1371 /* cpt alternate completion address saved earlier */
1372 req->alternate_caddr = (uint64_t *)((uint8_t *)c_vaddr - 8);
1373 *req->alternate_caddr = ~((uint64_t)COMPLETION_CODE_INIT);
1374 rptr_dma = c_dma - 8;
1375 size += COMPLETION_CODE_SIZE;
1377 req->ist.ei1 = dptr_dma;
1378 req->ist.ei2 = rptr_dma;
1381 ctx_dma = fc_params->ctx_buf.dma_addr +
1382 offsetof(struct cpt_ctx, fctx);
1385 vq_cmd_w3.s.grp = 0;
1386 vq_cmd_w3.s.cptr = ctx_dma;
1388 /* 16 byte aligned cpt res address */
1389 req->completion_addr = (uint64_t *)((uint8_t *)c_vaddr);
1390 *req->completion_addr = COMPLETION_CODE_INIT;
1391 req->comp_baddr = c_dma;
1393 /* Fill microcode part of instruction */
1394 req->ist.ei0 = vq_cmd_w0.u64;
1395 req->ist.ei3 = vq_cmd_w3.u64;
1403 static __rte_always_inline void
1404 cpt_zuc_snow3g_enc_prep(uint32_t req_flags,
1407 fc_params_t *params,
1412 int32_t inputlen, outputlen;
1413 struct cpt_ctx *cpt_ctx;
1414 uint32_t mac_len = 0;
1416 struct cpt_request_info *req;
1418 uint32_t encr_offset = 0, auth_offset = 0;
1419 uint32_t encr_data_len = 0, auth_data_len = 0;
1420 int flags, iv_len = 16;
1421 void *m_vaddr, *c_vaddr;
1422 uint64_t m_dma, c_dma, offset_ctrl;
1423 uint64_t *offset_vaddr, offset_dma;
1424 uint32_t *iv_s, iv[4];
1425 vq_cmd_word0_t vq_cmd_w0;
1426 vq_cmd_word3_t vq_cmd_w3;
1427 opcode_info_t opcode;
1429 buf_p = &params->meta_buf;
1430 m_vaddr = buf_p->vaddr;
1431 m_dma = buf_p->dma_addr;
1433 cpt_ctx = params->ctx_buf.vaddr;
1434 flags = cpt_ctx->zsk_flags;
1435 mac_len = cpt_ctx->mac_len;
1436 snow3g = cpt_ctx->snow3g;
1439 * Save initial space following the app data so that the completion code &
1440 * alternate completion code fall in the same cache line as the app data
1442 m_vaddr = (uint8_t *)m_vaddr + COMPLETION_CODE_SIZE;
1443 m_dma += COMPLETION_CODE_SIZE;
1444 size = (uint8_t *)RTE_PTR_ALIGN((uint8_t *)m_vaddr, 16) -
1447 c_vaddr = (uint8_t *)m_vaddr + size;
1448 c_dma = m_dma + size;
1449 size += sizeof(cpt_res_s_t);
1451 m_vaddr = (uint8_t *)m_vaddr + size;
1454 /* Reserve memory for cpt request info */
1457 size = sizeof(struct cpt_request_info);
1458 m_vaddr = (uint8_t *)m_vaddr + size;
1461 opcode.s.major = CPT_MAJOR_OP_ZUC_SNOW3G;
1463 /* indicates CPTR ctx, operation type, KEY & IV mode from DPTR */
1465 opcode.s.minor = ((1 << 7) | (snow3g << 5) | (0 << 4) |
1466 (0 << 3) | (flags & 0x7));
1470 * Microcode expects offsets in bytes
1471 * TODO: Rounding off
1473 auth_data_len = AUTH_DLEN(d_lens);
1476 auth_offset = AUTH_OFFSET(d_offs);
1477 auth_offset = auth_offset / 8;
1479 /* consider iv len */
1480 auth_offset += iv_len;
1482 inputlen = auth_offset + (RTE_ALIGN(auth_data_len, 8) / 8);
1483 outputlen = mac_len;
1485 offset_ctrl = rte_cpu_to_be_64((uint64_t)auth_offset);
1490 * Microcode expects offsets in bytes
1491 * TODO: Rounding off
1493 encr_data_len = ENCR_DLEN(d_lens);
1495 encr_offset = ENCR_OFFSET(d_offs);
1496 encr_offset = encr_offset / 8;
1497 /* consider iv len */
1498 encr_offset += iv_len;
1500 inputlen = encr_offset + (RTE_ALIGN(encr_data_len, 8) / 8);
1501 outputlen = inputlen;
1503 /* iv offset is 0 */
1504 offset_ctrl = rte_cpu_to_be_64((uint64_t)encr_offset << 16);
1508 iv_s = (flags == 0x1) ? params->auth_iv_buf :
1513 * DPDK seems to provide the IV in the form IV3 IV2 IV1 IV0,
1514 * big-endian; the MC needs it as IV0 IV1 IV2 IV3
1517 for (j = 0; j < 4; j++)
1518 iv[j] = iv_s[3 - j];
1520 /* ZUC doesn't need a swap */
1521 for (j = 0; j < 4; j++)
1526 * GP op header, lengths are expected in bits.
1529 vq_cmd_w0.s.param1 = encr_data_len;
1530 vq_cmd_w0.s.param2 = auth_data_len;
1533 * In 83XX we have a limitation: the IV & offset control word are
1534 * not part of the instruction and need to be part of the data
1535 * buffer, so we check whether head room is available and only
1536 * then do the direct mode processing
1538 if (likely((req_flags & SINGLE_BUF_INPLACE) &&
1539 (req_flags & SINGLE_BUF_HEADTAILROOM))) {
1540 void *dm_vaddr = params->bufs[0].vaddr;
1541 uint64_t dm_dma_addr = params->bufs[0].dma_addr;
1543 * This flag indicates that 24 bytes of head room and 8 bytes of
1544 * tail room are available, so we get to do DIRECT MODE within
1545 * that limitation
1548 offset_vaddr = (uint64_t *)((uint8_t *)dm_vaddr -
1549 OFF_CTRL_LEN - iv_len);
1550 offset_dma = dm_dma_addr - OFF_CTRL_LEN - iv_len;
1553 req->ist.ei1 = offset_dma;
1554 /* RPTR should just exclude offset control word */
1555 req->ist.ei2 = dm_dma_addr - iv_len;
1556 req->alternate_caddr = (uint64_t *)((uint8_t *)dm_vaddr
1557 + outputlen - iv_len);
1559 vq_cmd_w0.s.dlen = inputlen + OFF_CTRL_LEN;
1561 vq_cmd_w0.s.opcode = opcode.flags;
1563 if (likely(iv_len)) {
1564 uint32_t *iv_d = (uint32_t *)((uint8_t *)offset_vaddr
1566 memcpy(iv_d, iv, 16);
1569 *offset_vaddr = offset_ctrl;
1571 uint32_t i, g_size_bytes, s_size_bytes;
1572 uint64_t dptr_dma, rptr_dma;
1573 sg_comp_t *gather_comp;
1574 sg_comp_t *scatter_comp;
1578 /* save space for offset ctrl and iv */
1579 offset_vaddr = m_vaddr;
1582 m_vaddr = (uint8_t *)m_vaddr + OFF_CTRL_LEN + iv_len;
1583 m_dma += OFF_CTRL_LEN + iv_len;
1585 opcode.s.major |= CPT_DMA_MODE;
1587 vq_cmd_w0.s.opcode = opcode.flags;
1589 /* DPTR has SG list */
1590 in_buffer = m_vaddr;
1593 ((uint16_t *)in_buffer)[0] = 0;
1594 ((uint16_t *)in_buffer)[1] = 0;
1596 /* TODO Add error check if space will be sufficient */
1597 gather_comp = (sg_comp_t *)((uint8_t *)m_vaddr + 8);
1604 /* Offset control word followed by iv */
1606 i = fill_sg_comp(gather_comp, i, offset_dma,
1607 OFF_CTRL_LEN + iv_len);
1609 /* iv offset is 0 */
1610 *offset_vaddr = offset_ctrl;
1612 iv_d = (uint32_t *)((uint8_t *)offset_vaddr + OFF_CTRL_LEN);
1613 memcpy(iv_d, iv, 16);
1616 size = inputlen - iv_len;
1618 i = fill_sg_comp_from_iov(gather_comp, i,
1621 if (unlikely(size)) {
1622 CPT_LOG_DP_ERR("Insufficient buffer space,"
1623 " size %d needed", size);
1627 ((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
1628 g_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
1631 * Output Scatter List
1636 (sg_comp_t *)((uint8_t *)gather_comp + g_size_bytes);
1639 /* IV in SLIST only for EEA3 & UEA2 */
1644 i = fill_sg_comp(scatter_comp, i,
1645 offset_dma + OFF_CTRL_LEN, iv_len);
1648 /* Add output data */
1649 if (req_flags & VALID_MAC_BUF) {
1650 size = outputlen - iv_len - mac_len;
1652 i = fill_sg_comp_from_iov(scatter_comp, i,
1656 if (unlikely(size)) {
1657 CPT_LOG_DP_ERR("Insufficient buffer space,"
1658 " size %d needed", size);
1665 i = fill_sg_comp_from_buf(scatter_comp, i,
1669 /* Output including mac */
1670 size = outputlen - iv_len;
1672 i = fill_sg_comp_from_iov(scatter_comp, i,
1676 if (unlikely(size)) {
1677 CPT_LOG_DP_ERR("Insufficient buffer space,"
1678 " size %d needed", size);
1683 ((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
1684 s_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
1686 size = g_size_bytes + s_size_bytes + SG_LIST_HDR_SIZE;
1688 /* This is the DPTR len in case of SG mode */
1689 vq_cmd_w0.s.dlen = size;
1691 m_vaddr = (uint8_t *)m_vaddr + size;
1694 /* cpt alternate completion address saved earlier */
1695 req->alternate_caddr = (uint64_t *)((uint8_t *)c_vaddr - 8);
1696 *req->alternate_caddr = ~((uint64_t)COMPLETION_CODE_INIT);
1697 rptr_dma = c_dma - 8;
1699 req->ist.ei1 = dptr_dma;
1700 req->ist.ei2 = rptr_dma;
1705 vq_cmd_w3.s.grp = 0;
1706 vq_cmd_w3.s.cptr = params->ctx_buf.dma_addr +
1707 offsetof(struct cpt_ctx, zs_ctx);
1709 /* 16 byte aligned cpt res address */
1710 req->completion_addr = (uint64_t *)((uint8_t *)c_vaddr);
1711 *req->completion_addr = COMPLETION_CODE_INIT;
1712 req->comp_baddr = c_dma;
1714 /* Fill microcode part of instruction */
1715 req->ist.ei0 = vq_cmd_w0.u64;
1716 req->ist.ei3 = vq_cmd_w3.u64;
1724 static __rte_always_inline void
1725 cpt_zuc_snow3g_dec_prep(uint32_t req_flags,
1728 fc_params_t *params,
1733 int32_t inputlen = 0, outputlen;
1734 struct cpt_ctx *cpt_ctx;
1735 uint8_t snow3g, iv_len = 16;
1736 struct cpt_request_info *req;
1738 uint32_t encr_offset;
1739 uint32_t encr_data_len;
1741 void *m_vaddr, *c_vaddr;
1742 uint64_t m_dma, c_dma;
1743 uint64_t *offset_vaddr, offset_dma;
1744 uint32_t *iv_s, iv[4], j;
1745 vq_cmd_word0_t vq_cmd_w0;
1746 vq_cmd_word3_t vq_cmd_w3;
1747 opcode_info_t opcode;
1749 buf_p = &params->meta_buf;
1750 m_vaddr = buf_p->vaddr;
1751 m_dma = buf_p->dma_addr;
1754 * Microcode expects offsets in bytes
1755 * TODO: Rounding off
1757 encr_offset = ENCR_OFFSET(d_offs) / 8;
1758 encr_data_len = ENCR_DLEN(d_lens);
1760 cpt_ctx = params->ctx_buf.vaddr;
1761 flags = cpt_ctx->zsk_flags;
1762 snow3g = cpt_ctx->snow3g;
1764 * Save initial space following the app data so that the completion code &
1765 * alternate completion code fall in the same cache line as the app data
1767 m_vaddr = (uint8_t *)m_vaddr + COMPLETION_CODE_SIZE;
1768 m_dma += COMPLETION_CODE_SIZE;
1769 size = (uint8_t *)RTE_PTR_ALIGN((uint8_t *)m_vaddr, 16) -
1772 c_vaddr = (uint8_t *)m_vaddr + size;
1773 c_dma = m_dma + size;
1774 size += sizeof(cpt_res_s_t);
1776 m_vaddr = (uint8_t *)m_vaddr + size;
1779 /* Reserve memory for cpt request info */
1782 size = sizeof(struct cpt_request_info);
1783 m_vaddr = (uint8_t *)m_vaddr + size;
1786 opcode.s.major = CPT_MAJOR_OP_ZUC_SNOW3G;
1788 /* indicates CPTR ctx, operation type, KEY & IV mode from DPTR */
1790 opcode.s.minor = ((1 << 7) | (snow3g << 5) | (0 << 4) |
1791 (0 << 3) | (flags & 0x7));
1793 /* consider iv len */
1794 encr_offset += iv_len;
1796 inputlen = encr_offset +
1797 (RTE_ALIGN(encr_data_len, 8) / 8);
1798 outputlen = inputlen;
1801 iv_s = params->iv_buf;
1804 * DPDK seems to provide the IV in the form IV3 IV2 IV1 IV0,
1805 * big-endian; the MC needs it as IV0 IV1 IV2 IV3
1808 for (j = 0; j < 4; j++)
1809 iv[j] = iv_s[3 - j];
1811 /* ZUC doesn't need a swap */
1812 for (j = 0; j < 4; j++)
1817 * GP op header, lengths are expected in bits.
1820 vq_cmd_w0.s.param1 = encr_data_len;
1823 * In 83XX we have a limitation: the IV & offset control word are
1824 * not part of the instruction and need to be part of the data
1825 * buffer, so we check whether head room is available and only
1826 * then do the direct mode processing
1828 if (likely((req_flags & SINGLE_BUF_INPLACE) &&
1829 (req_flags & SINGLE_BUF_HEADTAILROOM))) {
1830 void *dm_vaddr = params->bufs[0].vaddr;
1831 uint64_t dm_dma_addr = params->bufs[0].dma_addr;
1833 * This flag indicates that 24 bytes of head room and 8 bytes of
1834 * tail room are available, so we get to do DIRECT MODE within
1835 * that limitation
1838 offset_vaddr = (uint64_t *)((uint8_t *)dm_vaddr -
1839 OFF_CTRL_LEN - iv_len);
1840 offset_dma = dm_dma_addr - OFF_CTRL_LEN - iv_len;
1843 req->ist.ei1 = offset_dma;
1844 /* RPTR should just exclude offset control word */
1845 req->ist.ei2 = dm_dma_addr - iv_len;
1846 req->alternate_caddr = (uint64_t *)((uint8_t *)dm_vaddr
1847 + outputlen - iv_len);
1849 vq_cmd_w0.s.dlen = inputlen + OFF_CTRL_LEN;
1851 vq_cmd_w0.s.opcode = opcode.flags;
1853 if (likely(iv_len)) {
1854 uint32_t *iv_d = (uint32_t *)((uint8_t *)offset_vaddr
1856 memcpy(iv_d, iv, 16);
1859 /* iv offset is 0 */
1860 *offset_vaddr = rte_cpu_to_be_64((uint64_t)encr_offset << 16);
1862 uint32_t i, g_size_bytes, s_size_bytes;
1863 uint64_t dptr_dma, rptr_dma;
1864 sg_comp_t *gather_comp;
1865 sg_comp_t *scatter_comp;
1869 /* save space for offset and iv... */
1870 offset_vaddr = m_vaddr;
1873 m_vaddr = (uint8_t *)m_vaddr + OFF_CTRL_LEN + iv_len;
1874 m_dma += OFF_CTRL_LEN + iv_len;
1876 opcode.s.major |= CPT_DMA_MODE;
1878 vq_cmd_w0.s.opcode = opcode.flags;
1880 /* DPTR has SG list */
1881 in_buffer = m_vaddr;
1884 ((uint16_t *)in_buffer)[0] = 0;
1885 ((uint16_t *)in_buffer)[1] = 0;
1887 /* TODO Add error check if space will be sufficient */
1888 gather_comp = (sg_comp_t *)((uint8_t *)m_vaddr + 8);
1895 /* Offset control word */
1897 /* iv offset is 0 */
1898 *offset_vaddr = rte_cpu_to_be_64((uint64_t)encr_offset << 16);
1900 i = fill_sg_comp(gather_comp, i, offset_dma,
1901 OFF_CTRL_LEN + iv_len);
1903 iv_d = (uint32_t *)((uint8_t *)offset_vaddr + OFF_CTRL_LEN);
1904 memcpy(iv_d, iv, 16);
1906 /* Add input data */
1907 size = inputlen - iv_len;
1909 i = fill_sg_comp_from_iov(gather_comp, i,
1912 if (unlikely(size)) {
1913 CPT_LOG_DP_ERR("Insufficient buffer space,"
1914 " size %d needed", size);
1918 ((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
1919 g_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
1922 * Output Scatter List
1927 (sg_comp_t *)((uint8_t *)gather_comp + g_size_bytes);
1930 i = fill_sg_comp(scatter_comp, i,
1931 offset_dma + OFF_CTRL_LEN,
1934 /* Add output data */
1935 size = outputlen - iv_len;
1937 i = fill_sg_comp_from_iov(scatter_comp, i,
1941 if (unlikely(size)) {
1942 CPT_LOG_DP_ERR("Insufficient buffer space,"
1943 " size %d needed", size);
1947 ((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
1948 s_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
1950 size = g_size_bytes + s_size_bytes + SG_LIST_HDR_SIZE;
1952 /* This is the DPTR len in case of SG mode */
1953 vq_cmd_w0.s.dlen = size;
1955 m_vaddr = (uint8_t *)m_vaddr + size;
1958 /* cpt alternate completion address saved earlier */
1959 req->alternate_caddr = (uint64_t *)((uint8_t *)c_vaddr - 8);
1960 *req->alternate_caddr = ~((uint64_t)COMPLETION_CODE_INIT);
1961 rptr_dma = c_dma - 8;
1963 req->ist.ei1 = dptr_dma;
1964 req->ist.ei2 = rptr_dma;
1969 vq_cmd_w3.s.grp = 0;
1970 vq_cmd_w3.s.cptr = params->ctx_buf.dma_addr +
1971 offsetof(struct cpt_ctx, zs_ctx);
1973 /* 16 byte aligned cpt res address */
1974 req->completion_addr = (uint64_t *)((uint8_t *)c_vaddr);
1975 *req->completion_addr = COMPLETION_CODE_INIT;
1976 req->comp_baddr = c_dma;
1978 /* Fill microcode part of instruction */
1979 req->ist.ei0 = vq_cmd_w0.u64;
1980 req->ist.ei3 = vq_cmd_w3.u64;
1988 static __rte_always_inline void
1989 cpt_kasumi_enc_prep(uint32_t req_flags,
1992 fc_params_t *params,
1997 int32_t inputlen = 0, outputlen = 0;
1998 struct cpt_ctx *cpt_ctx;
1999 uint32_t mac_len = 0;
2001 struct cpt_request_info *req;
2003 uint32_t encr_offset, auth_offset;
2004 uint32_t encr_data_len, auth_data_len;
2006 uint8_t *iv_s, *iv_d, iv_len = 8;
2008 void *m_vaddr, *c_vaddr;
2009 uint64_t m_dma, c_dma;
2010 uint64_t *offset_vaddr, offset_dma;
2011 vq_cmd_word0_t vq_cmd_w0;
2012 vq_cmd_word3_t vq_cmd_w3;
2013 opcode_info_t opcode;
2015 uint32_t g_size_bytes, s_size_bytes;
2016 uint64_t dptr_dma, rptr_dma;
2017 sg_comp_t *gather_comp;
2018 sg_comp_t *scatter_comp;
2020 buf_p = &params->meta_buf;
2021 m_vaddr = buf_p->vaddr;
2022 m_dma = buf_p->dma_addr;
2024 encr_offset = ENCR_OFFSET(d_offs) / 8;
2025 auth_offset = AUTH_OFFSET(d_offs) / 8;
2026 encr_data_len = ENCR_DLEN(d_lens);
2027 auth_data_len = AUTH_DLEN(d_lens);
2029 cpt_ctx = params->ctx_buf.vaddr;
2030 flags = cpt_ctx->zsk_flags;
2031 mac_len = cpt_ctx->mac_len;
2034 iv_s = params->iv_buf;
2036 iv_s = params->auth_iv_buf;
2038 dir = iv_s[8] & 0x1;
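/* The direction bit travels as bit 0 of the 9th IV byte. */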
2041 * Save initial space following the app data so that the completion code &
2042 * alternate completion code fall in the same cache line as the app data
2044 m_vaddr = (uint8_t *)m_vaddr + COMPLETION_CODE_SIZE;
2045 m_dma += COMPLETION_CODE_SIZE;
2046 size = (uint8_t *)RTE_PTR_ALIGN((uint8_t *)m_vaddr, 16) -
2049 c_vaddr = (uint8_t *)m_vaddr + size;
2050 c_dma = m_dma + size;
2051 size += sizeof(cpt_res_s_t);
2053 m_vaddr = (uint8_t *)m_vaddr + size;
2056 /* Reserve memory for cpt request info */
2059 size = sizeof(struct cpt_request_info);
2060 m_vaddr = (uint8_t *)m_vaddr + size;
2063 opcode.s.major = CPT_MAJOR_OP_KASUMI | CPT_DMA_MODE;
2065 /* indicates ECB/CBC, direction, ctx from cptr, iv from dptr */
2066 opcode.s.minor = ((1 << 6) | (cpt_ctx->k_ecb << 5) |
2067 (dir << 4) | (0 << 3) | (flags & 0x7));
2070 * GP op header, lengths are expected in bits.
2073 vq_cmd_w0.s.param1 = encr_data_len;
2074 vq_cmd_w0.s.param2 = auth_data_len;
2075 vq_cmd_w0.s.opcode = opcode.flags;
2077 /* consider iv len */
2079 encr_offset += iv_len;
2080 auth_offset += iv_len;
2083 /* save space for offset ctrl and iv */
2084 offset_vaddr = m_vaddr;
2087 m_vaddr = (uint8_t *)m_vaddr + OFF_CTRL_LEN + iv_len;
2088 m_dma += OFF_CTRL_LEN + iv_len;
2090 /* DPTR has SG list */
2091 in_buffer = m_vaddr;
2094 ((uint16_t *)in_buffer)[0] = 0;
2095 ((uint16_t *)in_buffer)[1] = 0;
2097 /* TODO Add error check if space will be sufficient */
2098 gather_comp = (sg_comp_t *)((uint8_t *)m_vaddr + 8);
2105 /* Offset control word followed by iv */
2108 inputlen = encr_offset + (RTE_ALIGN(encr_data_len, 8) / 8);
2109 outputlen = inputlen;
2110 /* iv offset is 0 */
2111 *offset_vaddr = rte_cpu_to_be_64((uint64_t)encr_offset << 16);
2113 inputlen = auth_offset + (RTE_ALIGN(auth_data_len, 8) / 8);
2114 outputlen = mac_len;
2115 /* iv offset is 0 */
2116 *offset_vaddr = rte_cpu_to_be_64((uint64_t)auth_offset);
2119 i = fill_sg_comp(gather_comp, i, offset_dma, OFF_CTRL_LEN + iv_len);
2122 iv_d = (uint8_t *)offset_vaddr + OFF_CTRL_LEN;
2123 memcpy(iv_d, iv_s, iv_len);
2126 size = inputlen - iv_len;
2128 i = fill_sg_comp_from_iov(gather_comp, i,
2132 if (unlikely(size)) {
2133 CPT_LOG_DP_ERR("Insufficient buffer space,"
2134 " size %d needed", size);
2138 ((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
2139 g_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
2142 * Output Scatter List
2146 scatter_comp = (sg_comp_t *)((uint8_t *)gather_comp + g_size_bytes);
2149 /* IV in SLIST only for F8 */
2155 i = fill_sg_comp(scatter_comp, i,
2156 offset_dma + OFF_CTRL_LEN,
2160 /* Add output data */
2161 if (req_flags & VALID_MAC_BUF) {
2162 size = outputlen - iv_len - mac_len;
2164 i = fill_sg_comp_from_iov(scatter_comp, i,
2168 if (unlikely(size)) {
2169 CPT_LOG_DP_ERR("Insufficient buffer space,"
2170 " size %d needed", size);
2177 i = fill_sg_comp_from_buf(scatter_comp, i,
2181 /* Output including mac */
2182 size = outputlen - iv_len;
2184 i = fill_sg_comp_from_iov(scatter_comp, i,
2188 if (unlikely(size)) {
2189 CPT_LOG_DP_ERR("Insufficient buffer space,"
2190 " size %d needed", size);
2195 ((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
2196 s_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
2198 size = g_size_bytes + s_size_bytes + SG_LIST_HDR_SIZE;
2200 /* This is the DPTR len in case of SG mode */
2201 vq_cmd_w0.s.dlen = size;
2203 m_vaddr = (uint8_t *)m_vaddr + size;
2206 /* cpt alternate completion address saved earlier */
2207 req->alternate_caddr = (uint64_t *)((uint8_t *)c_vaddr - 8);
2208 *req->alternate_caddr = ~((uint64_t)COMPLETION_CODE_INIT);
2209 rptr_dma = c_dma - 8;
2211 req->ist.ei1 = dptr_dma;
2212 req->ist.ei2 = rptr_dma;
2216 vq_cmd_w3.s.grp = 0;
2217 vq_cmd_w3.s.cptr = params->ctx_buf.dma_addr +
2218 offsetof(struct cpt_ctx, k_ctx);
2220 /* 16 byte aligned cpt res address */
2221 req->completion_addr = (uint64_t *)((uint8_t *)c_vaddr);
2222 *req->completion_addr = COMPLETION_CODE_INIT;
2223 req->comp_baddr = c_dma;
2225 /* Fill microcode part of instruction */
2226 req->ist.ei0 = vq_cmd_w0.u64;
2227 req->ist.ei3 = vq_cmd_w3.u64;
2235 static __rte_always_inline void
2236 cpt_kasumi_dec_prep(uint64_t d_offs,
2238 fc_params_t *params,
2243 int32_t inputlen = 0, outputlen;
2244 struct cpt_ctx *cpt_ctx;
2245 uint8_t i = 0, iv_len = 8;
2246 struct cpt_request_info *req;
2248 uint32_t encr_offset;
2249 uint32_t encr_data_len;
2252 void *m_vaddr, *c_vaddr;
2253 uint64_t m_dma, c_dma;
2254 uint64_t *offset_vaddr, offset_dma;
2255 vq_cmd_word0_t vq_cmd_w0;
2256 vq_cmd_word3_t vq_cmd_w3;
2257 opcode_info_t opcode;
2259 uint32_t g_size_bytes, s_size_bytes;
2260 uint64_t dptr_dma, rptr_dma;
2261 sg_comp_t *gather_comp;
2262 sg_comp_t *scatter_comp;
2264 buf_p = &params->meta_buf;
2265 m_vaddr = buf_p->vaddr;
2266 m_dma = buf_p->dma_addr;
2268 encr_offset = ENCR_OFFSET(d_offs) / 8;
2269 encr_data_len = ENCR_DLEN(d_lens);
2271 cpt_ctx = params->ctx_buf.vaddr;
2272 flags = cpt_ctx->zsk_flags;
2274 * Save initial space following the app data so that the completion code &
2275 * alternate completion code fall in the same cache line as the app data
2277 m_vaddr = (uint8_t *)m_vaddr + COMPLETION_CODE_SIZE;
2278 m_dma += COMPLETION_CODE_SIZE;
2279 size = (uint8_t *)RTE_PTR_ALIGN((uint8_t *)m_vaddr, 16) -
2282 c_vaddr = (uint8_t *)m_vaddr + size;
2283 c_dma = m_dma + size;
2284 size += sizeof(cpt_res_s_t);
2286 m_vaddr = (uint8_t *)m_vaddr + size;
2289 /* Reserve memory for cpt request info */
2292 size = sizeof(struct cpt_request_info);
2293 m_vaddr = (uint8_t *)m_vaddr + size;
2296 opcode.s.major = CPT_MAJOR_OP_KASUMI | CPT_DMA_MODE;
2298 /* indicates ECB/CBC, direction, ctx from cptr, iv from dptr */
2299 opcode.s.minor = ((1 << 6) | (cpt_ctx->k_ecb << 5) |
2300 (dir << 4) | (0 << 3) | (flags & 0x7));
2303 * GP op header, lengths are expected in bits.
2306 vq_cmd_w0.s.param1 = encr_data_len;
2307 vq_cmd_w0.s.opcode = opcode.flags;
2309 /* consider iv len */
2310 encr_offset += iv_len;
2312 inputlen = iv_len + (RTE_ALIGN(encr_data_len, 8) / 8);
2313 outputlen = inputlen;
2315 /* save space for offset ctrl & iv */
2316 offset_vaddr = m_vaddr;
2319 m_vaddr = (uint8_t *)m_vaddr + OFF_CTRL_LEN + iv_len;
2320 m_dma += OFF_CTRL_LEN + iv_len;
2322 /* DPTR has SG list */
2323 in_buffer = m_vaddr;
2326 ((uint16_t *)in_buffer)[0] = 0;
2327 ((uint16_t *)in_buffer)[1] = 0;
2329 /* TODO Add error check if space will be sufficient */
2330 gather_comp = (sg_comp_t *)((uint8_t *)m_vaddr + 8);
2337 /* Offset control word followed by iv */
2338 *offset_vaddr = rte_cpu_to_be_64((uint64_t)encr_offset << 16);
2340 i = fill_sg_comp(gather_comp, i, offset_dma, OFF_CTRL_LEN + iv_len);
2343 memcpy((uint8_t *)offset_vaddr + OFF_CTRL_LEN,
2344 params->iv_buf, iv_len);
2346 /* Add input data */
2347 size = inputlen - iv_len;
2349 i = fill_sg_comp_from_iov(gather_comp, i,
2352 if (unlikely(size)) {
2353 CPT_LOG_DP_ERR("Insufficient buffer space,"
2354 " size %d needed", size);
2358 ((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
2359 g_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
2362 * Output Scatter List
2366 scatter_comp = (sg_comp_t *)((uint8_t *)gather_comp + g_size_bytes);
2369 i = fill_sg_comp(scatter_comp, i,
2370 offset_dma + OFF_CTRL_LEN,
2373 /* Add output data */
2374 size = outputlen - iv_len;
2376 i = fill_sg_comp_from_iov(scatter_comp, i,
2379 if (unlikely(size)) {
2380 CPT_LOG_DP_ERR("Insufficient buffer space,"
2381 " size %d needed", size);
2385 ((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
2386 s_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
2388 size = g_size_bytes + s_size_bytes + SG_LIST_HDR_SIZE;
2390 /* This is the DPTR len in case of SG mode */
2391 vq_cmd_w0.s.dlen = size;
2393 m_vaddr = (uint8_t *)m_vaddr + size;
2396 /* cpt alternate completion address saved earlier */
2397 req->alternate_caddr = (uint64_t *)((uint8_t *)c_vaddr - 8);
2398 *req->alternate_caddr = ~((uint64_t)COMPLETION_CODE_INIT);
2399 rptr_dma = c_dma - 8;
2401 req->ist.ei1 = dptr_dma;
2402 req->ist.ei2 = rptr_dma;
2406 vq_cmd_w3.s.grp = 0;
2407 vq_cmd_w3.s.cptr = params->ctx_buf.dma_addr +
2408 offsetof(struct cpt_ctx, k_ctx);
2410 /* 16 byte aligned cpt res address */
2411 req->completion_addr = (uint64_t *)((uint8_t *)c_vaddr);
2412 *req->completion_addr = COMPLETION_CODE_INIT;
2413 req->comp_baddr = c_dma;
2415 /* Fill microcode part of instruction */
2416 req->ist.ei0 = vq_cmd_w0.u64;
2417 req->ist.ei3 = vq_cmd_w3.u64;
2425 static __rte_always_inline void *
2426 cpt_fc_dec_hmac_prep(uint32_t flags,
2429 fc_params_t *fc_params,
2432 struct cpt_ctx *ctx = fc_params->ctx_buf.vaddr;
2434 void *prep_req = NULL;
2436 fc_type = ctx->fc_type;
2438 if (likely(fc_type == FC_GEN)) {
2439 cpt_dec_hmac_prep(flags, d_offs, d_lens, fc_params, op,
2441 } else if (fc_type == ZUC_SNOW3G) {
2442 cpt_zuc_snow3g_dec_prep(flags, d_offs, d_lens, fc_params, op,
2444 } else if (fc_type == KASUMI) {
2445 cpt_kasumi_dec_prep(d_offs, d_lens, fc_params, op, &prep_req);
2449 * For the AUTH_ONLY case,
2450 * MC only supports digest generation; verification
2451 * should be done in software via memcmp()
2457 static __rte_always_inline void *__hot
2458 cpt_fc_enc_hmac_prep(uint32_t flags, uint64_t d_offs, uint64_t d_lens,
2459 fc_params_t *fc_params, void *op)
2461 struct cpt_ctx *ctx = fc_params->ctx_buf.vaddr;
2463 void *prep_req = NULL;
2465 fc_type = ctx->fc_type;
2467 /* Common API for the rest of the ops */
2468 if (likely(fc_type == FC_GEN)) {
2469 cpt_enc_hmac_prep(flags, d_offs, d_lens, fc_params, op,
2471 } else if (fc_type == ZUC_SNOW3G) {
2472 cpt_zuc_snow3g_enc_prep(flags, d_offs, d_lens, fc_params, op,
2474 } else if (fc_type == KASUMI) {
2475 cpt_kasumi_enc_prep(flags, d_offs, d_lens, fc_params, op,
2477 } else if (fc_type == HASH_HMAC) {
2478 cpt_digest_gen_prep(flags, d_lens, fc_params, op, &prep_req);
2484 static __rte_always_inline int
2485 cpt_fc_auth_set_key(void *ctx, auth_type_t type, const uint8_t *key,
2486 uint16_t key_len, uint16_t mac_len)
2488 struct cpt_ctx *cpt_ctx = ctx;
2489 mc_fc_context_t *fctx = &cpt_ctx->fctx;
2491 if ((type >= ZUC_EIA3) && (type <= KASUMI_F9_ECB)) {
2496 /* No support for AEAD yet */
2497 if (cpt_ctx->enc_cipher)
2499 /* For ZUC/SNOW3G/Kasumi */
2502 cpt_ctx->snow3g = 1;
2503 gen_key_snow3g(key, keyx);
2504 memcpy(cpt_ctx->zs_ctx.ci_key, keyx, key_len);
2505 cpt_ctx->fc_type = ZUC_SNOW3G;
2506 cpt_ctx->zsk_flags = 0x1;
2509 cpt_ctx->snow3g = 0;
2510 memcpy(cpt_ctx->zs_ctx.ci_key, key, key_len);
2511 memcpy(cpt_ctx->zs_ctx.zuc_const, zuc_d, 32);
2512 cpt_ctx->fc_type = ZUC_SNOW3G;
2513 cpt_ctx->zsk_flags = 0x1;
2516 /* Kasumi ECB mode */
2518 memcpy(cpt_ctx->k_ctx.ci_key, key, key_len);
2519 cpt_ctx->fc_type = KASUMI;
2520 cpt_ctx->zsk_flags = 0x1;
2523 memcpy(cpt_ctx->k_ctx.ci_key, key, key_len);
2524 cpt_ctx->fc_type = KASUMI;
2525 cpt_ctx->zsk_flags = 0x1;
2530 cpt_ctx->mac_len = 4;
2531 cpt_ctx->hash_type = type;
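/* ZUC-EIA3, SNOW3G-UIA2 and KASUMI-F9 all produce a fixed
 * 4-byte MAC.
 */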
2535 if (!(cpt_ctx->fc_type == FC_GEN && !type)) {
2536 if (!cpt_ctx->fc_type || !cpt_ctx->enc_cipher)
2537 cpt_ctx->fc_type = HASH_HMAC;
2540 /* For GMAC auth, cipher must be NULL */
2541 if (type == GMAC_TYPE)
2542 fctx->enc.enc_cipher = 0;
2544 fctx->enc.hash_type = cpt_ctx->hash_type = type;
2545 fctx->enc.mac_len = cpt_ctx->mac_len = mac_len;
2549 memset(cpt_ctx->auth_key, 0, sizeof(cpt_ctx->auth_key));
2550 memcpy(cpt_ctx->auth_key, key, key_len);
2551 cpt_ctx->auth_key_len = key_len;
2552 memset(fctx->hmac.ipad, 0, sizeof(fctx->hmac.ipad));
2553 memset(fctx->hmac.opad, 0, sizeof(fctx->hmac.opad));
2554 memcpy(fctx->hmac.opad, key, key_len);
2555 fctx->enc.auth_input_type = 1;
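/* The raw key sits in opad; auth_input_type signals the
 * microcode to derive the HMAC ipad/opad itself (assumption
 * based on the flag's use here).
 */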
2560 static __rte_always_inline int
2561 fill_sess_aead(struct rte_crypto_sym_xform *xform,
2562 struct cpt_sess_misc *sess)
2564 struct rte_crypto_aead_xform *aead_form;
2565 cipher_type_t enc_type = 0; /* NULL Cipher type */
2566 auth_type_t auth_type = 0; /* NULL Auth type */
2567 uint32_t cipher_key_len = 0;
2568 uint8_t aes_gcm = 0;
2569 void *ctx = SESS_PRIV(sess);
2570 aead_form = &xform->aead;
2572 if (aead_form->op == RTE_CRYPTO_AEAD_OP_ENCRYPT &&
2573 aead_form->algo == RTE_CRYPTO_AEAD_AES_GCM) {
2574 sess->cpt_op |= CPT_OP_CIPHER_ENCRYPT;
2575 sess->cpt_op |= CPT_OP_AUTH_GENERATE;
2576 } else if (aead_form->op == RTE_CRYPTO_AEAD_OP_DECRYPT &&
2577 aead_form->algo == RTE_CRYPTO_AEAD_AES_GCM) {
2578 sess->cpt_op |= CPT_OP_CIPHER_DECRYPT;
2579 sess->cpt_op |= CPT_OP_AUTH_VERIFY;
2581 CPT_LOG_DP_ERR("Unknown cipher operation");
2584 switch (aead_form->algo) {
2585 case RTE_CRYPTO_AEAD_AES_GCM:
2587 cipher_key_len = 16;
2590 case RTE_CRYPTO_AEAD_AES_CCM:
2591 CPT_LOG_DP_ERR("Crypto: Unsupported cipher algo %u",
2595 CPT_LOG_DP_ERR("Crypto: Undefined cipher algo %u specified",
2599 if (aead_form->key.length < cipher_key_len) {
2600 CPT_LOG_DP_ERR("Invalid cipher params keylen %lu",
2601 (unsigned long)aead_form->key.length);
2605 sess->aes_gcm = aes_gcm;
2606 sess->mac_len = aead_form->digest_length;
2607 sess->iv_offset = aead_form->iv.offset;
2608 sess->iv_length = aead_form->iv.length;
2609 sess->aad_length = aead_form->aad_length;
2611 cpt_fc_ciph_set_key(ctx, enc_type, aead_form->key.data,
2612 aead_form->key.length, NULL);
2614 cpt_fc_auth_set_key(ctx, auth_type, NULL, 0, aead_form->digest_length);
2619 static __rte_always_inline int
2620 fill_sess_cipher(struct rte_crypto_sym_xform *xform,
2621 struct cpt_sess_misc *sess)
2623 struct rte_crypto_cipher_xform *c_form;
2624 cipher_type_t enc_type = 0; /* NULL Cipher type */
2625 uint32_t cipher_key_len = 0;
2626 uint8_t zsk_flag = 0, aes_ctr = 0, is_null = 0;
2628 c_form = &xform->cipher;
2630 if (c_form->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
2631 sess->cpt_op |= CPT_OP_CIPHER_ENCRYPT;
2632 else if (c_form->op == RTE_CRYPTO_CIPHER_OP_DECRYPT)
2633 sess->cpt_op |= CPT_OP_CIPHER_DECRYPT;
2635 CPT_LOG_DP_ERR("Unknown cipher operation");
2639 switch (c_form->algo) {
2640 case RTE_CRYPTO_CIPHER_AES_CBC:
2642 cipher_key_len = 16;
2644 case RTE_CRYPTO_CIPHER_3DES_CBC:
2645 enc_type = DES3_CBC;
2646 cipher_key_len = 24;
2648 case RTE_CRYPTO_CIPHER_DES_CBC:
2649 /* DES is implemented using 3DES in hardware */
2650 enc_type = DES3_CBC;
2653 case RTE_CRYPTO_CIPHER_AES_CTR:
2655 cipher_key_len = 16;
2658 case RTE_CRYPTO_CIPHER_NULL:
2662 case RTE_CRYPTO_CIPHER_KASUMI_F8:
2663 enc_type = KASUMI_F8_ECB;
2664 cipher_key_len = 16;
2667 case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
2668 enc_type = SNOW3G_UEA2;
2669 cipher_key_len = 16;
2672 case RTE_CRYPTO_CIPHER_ZUC_EEA3:
2673 enc_type = ZUC_EEA3;
2674 cipher_key_len = 16;
2677 case RTE_CRYPTO_CIPHER_AES_XTS:
2679 cipher_key_len = 16;
2681 case RTE_CRYPTO_CIPHER_3DES_ECB:
2682 enc_type = DES3_ECB;
2683 cipher_key_len = 24;
2685 case RTE_CRYPTO_CIPHER_AES_ECB:
2687 cipher_key_len = 16;
2689 case RTE_CRYPTO_CIPHER_3DES_CTR:
2690 case RTE_CRYPTO_CIPHER_AES_F8:
2691 case RTE_CRYPTO_CIPHER_ARC4:
2692 CPT_LOG_DP_ERR("Crypto: Unsupported cipher algo %u",
2696 CPT_LOG_DP_ERR("Crypto: Undefined cipher algo %u specified",
2701 if (c_form->key.length < cipher_key_len) {
2702 CPT_LOG_DP_ERR("Invalid cipher params keylen %lu",
2703 (unsigned long) c_form->key.length);
2707 sess->zsk_flag = zsk_flag;
2709 sess->aes_ctr = aes_ctr;
2710 sess->iv_offset = c_form->iv.offset;
2711 sess->iv_length = c_form->iv.length;
2712 sess->is_null = is_null;
2714 cpt_fc_ciph_set_key(SESS_PRIV(sess), enc_type, c_form->key.data,
2715 c_form->key.length, NULL);
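/*
 * The zsk_flag recorded above (K_F8/ZS_EA) is consumed later by
 * fill_fc_params()/fill_digest_params(): SNOW3G/ZUC/KASUMI offsets and
 * lengths are expressed in bits, so those sessions are steered away
 * from the plain in-place fast path.
 */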
static __rte_always_inline int
fill_sess_auth(struct rte_crypto_sym_xform *xform,
	       struct cpt_sess_misc *sess)
{
	struct rte_crypto_auth_xform *a_form;
	auth_type_t auth_type = 0; /* NULL Auth type */
	uint8_t zsk_flag = 0, aes_gcm = 0, is_null = 0;

	a_form = &xform->auth;

	if (a_form->op == RTE_CRYPTO_AUTH_OP_VERIFY)
		sess->cpt_op |= CPT_OP_AUTH_VERIFY;
	else if (a_form->op == RTE_CRYPTO_AUTH_OP_GENERATE)
		sess->cpt_op |= CPT_OP_AUTH_GENERATE;
	else {
		CPT_LOG_DP_ERR("Unknown auth operation");
		return -1;
	}

	if (a_form->key.length > 64) {
		CPT_LOG_DP_ERR("Auth key length is too big");
		return -1;
	}

	switch (a_form->algo) {
	case RTE_CRYPTO_AUTH_SHA1_HMAC:
		/* Fall through */
	case RTE_CRYPTO_AUTH_SHA1:
		auth_type = SHA1_TYPE;
		break;
	case RTE_CRYPTO_AUTH_SHA256_HMAC:
	case RTE_CRYPTO_AUTH_SHA256:
		auth_type = SHA2_SHA256;
		break;
	case RTE_CRYPTO_AUTH_SHA512_HMAC:
	case RTE_CRYPTO_AUTH_SHA512:
		auth_type = SHA2_SHA512;
		break;
	case RTE_CRYPTO_AUTH_AES_GMAC:
		auth_type = GMAC_TYPE;
		aes_gcm = 1;
		break;
	case RTE_CRYPTO_AUTH_SHA224_HMAC:
	case RTE_CRYPTO_AUTH_SHA224:
		auth_type = SHA2_SHA224;
		break;
	case RTE_CRYPTO_AUTH_SHA384_HMAC:
	case RTE_CRYPTO_AUTH_SHA384:
		auth_type = SHA2_SHA384;
		break;
	case RTE_CRYPTO_AUTH_MD5_HMAC:
	case RTE_CRYPTO_AUTH_MD5:
		auth_type = MD5_TYPE;
		break;
	case RTE_CRYPTO_AUTH_KASUMI_F9:
		auth_type = KASUMI_F9_ECB;
		/*
		 * Indicate that direction needs to be taken out
		 * from the end of the source buffer
		 */
		zsk_flag = K_F9;
		break;
	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
		auth_type = SNOW3G_UIA2;
		zsk_flag = ZS_IA;
		break;
	case RTE_CRYPTO_AUTH_ZUC_EIA3:
		auth_type = ZUC_EIA3;
		zsk_flag = ZS_IA;
		break;
	case RTE_CRYPTO_AUTH_NULL:
		auth_type = 0;
		is_null = 1;
		break;
	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
	case RTE_CRYPTO_AUTH_AES_CMAC:
	case RTE_CRYPTO_AUTH_AES_CBC_MAC:
		CPT_LOG_DP_ERR("Crypto: Unsupported hash algo %u",
			       a_form->algo);
		return -1;
	default:
		CPT_LOG_DP_ERR("Crypto: Undefined hash algo %u specified",
			       a_form->algo);
		return -1;
	}

	sess->zsk_flag = zsk_flag;
	sess->aes_gcm = aes_gcm;
	sess->mac_len = a_form->digest_length;
	sess->is_null = is_null;
	if (zsk_flag) {
		sess->auth_iv_offset = a_form->iv.offset;
		sess->auth_iv_length = a_form->iv.length;
	}
	cpt_fc_auth_set_key(SESS_PRIV(sess), auth_type, a_form->key.data,
			    a_form->key.length, a_form->digest_length);

	return 0;
}
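/*
 * The plain and HMAC variants of each hash map to the same auth_type
 * above; whether the session actually behaves as HMAC is decided by
 * the key material handed to cpt_fc_auth_set_key() (keyed mode is
 * enabled only for a non-zero key length).
 */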
static __rte_always_inline int
fill_sess_gmac(struct rte_crypto_sym_xform *xform,
	       struct cpt_sess_misc *sess)
{
	struct rte_crypto_auth_xform *a_form;
	cipher_type_t enc_type = 0; /* NULL Cipher type */
	auth_type_t auth_type = 0; /* NULL Auth type */
	void *ctx = SESS_PRIV(sess);

	a_form = &xform->auth;

	if (a_form->op == RTE_CRYPTO_AUTH_OP_GENERATE)
		sess->cpt_op |= CPT_OP_ENCODE;
	else if (a_form->op == RTE_CRYPTO_AUTH_OP_VERIFY)
		sess->cpt_op |= CPT_OP_DECODE;
	else {
		CPT_LOG_DP_ERR("Unknown auth operation");
		return -1;
	}

	switch (a_form->algo) {
	case RTE_CRYPTO_AUTH_AES_GMAC:
		enc_type = AES_GCM;
		auth_type = GMAC_TYPE;
		break;
	default:
		CPT_LOG_DP_ERR("Crypto: Undefined cipher algo %u specified",
			       a_form->algo);
		return -1;
	}

	sess->zsk_flag = 0;
	sess->aes_gcm = 0;
	sess->is_gmac = 1;
	sess->iv_offset = a_form->iv.offset;
	sess->iv_length = a_form->iv.length;
	sess->mac_len = a_form->digest_length;

	cpt_fc_ciph_set_key(ctx, enc_type, a_form->key.data,
			    a_form->key.length, NULL);
	cpt_fc_auth_set_key(ctx, auth_type, NULL, 0, a_form->digest_length);

	return 0;
}
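/*
 * GMAC rides on the GCM cipher context: the AES key is programmed via
 * cpt_fc_ciph_set_key() while the auth side carries no key of its own,
 * and fill_fc_params() later applies the same salt handling to GMAC as
 * to full GCM.
 */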
static __rte_always_inline void *
alloc_op_meta(struct rte_mbuf *m_src,
	      buf_ptr_t *buf,
	      int32_t len,
	      struct rte_mempool *cpt_meta_pool)
{
	uint8_t *mdata = NULL;

#ifndef CPT_ALWAYS_USE_SEPARATE_BUF
	if (likely(m_src && (m_src->nb_segs == 1))) {
		int32_t tailroom;
		phys_addr_t mphys;

		/* Check if tailroom is sufficient to hold meta data */
		tailroom = rte_pktmbuf_tailroom(m_src);
		if (likely(tailroom > len + 8)) {
			mdata = (uint8_t *)m_src->buf_addr + m_src->buf_len;
			mphys = m_src->buf_physaddr + m_src->buf_len;
			mdata -= len;
			mphys -= len;
			buf->vaddr = mdata;
			buf->dma_addr = mphys;
			buf->size = len;
			/* Indicate that this is a mbuf allocated mdata */
			mdata = (uint8_t *)((uint64_t)mdata | 1ull);
			return mdata;
		}
	}
#else
	RTE_SET_USED(m_src);
#endif

	if (unlikely(rte_mempool_get(cpt_meta_pool, (void **)&mdata) < 0))
		return NULL;

	buf->vaddr = mdata;
	buf->dma_addr = rte_mempool_virt2iova(mdata);
	buf->size = len;

	return mdata;
}
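/*
 * The pointer returned for mbuf-resident metadata is tagged in bit 0
 * (mbuf data buffers are at least 2-byte aligned, so that bit is
 * otherwise clear); e.g. metadata at 0x...1000 is returned as
 * 0x...1001. free_op_meta() below keys off this bit to skip the
 * mempool put.
 */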
/**
 * free_op_meta - free metabuf back to mempool.
 * @param mdata: pointer to the metabuf (bit 0 set if mbuf-resident).
 * @param cpt_meta_pool: mempool the buffer was allocated from.
 */
static __rte_always_inline void
free_op_meta(void *mdata, struct rte_mempool *cpt_meta_pool)
{
	bool nofree = ((uintptr_t)mdata & 1ull);

	if (likely(nofree))
		return;
	rte_mempool_put(cpt_meta_pool, mdata);
}
static __rte_always_inline uint32_t
prepare_iov_from_pkt(struct rte_mbuf *pkt,
		     iov_ptr_t *iovec, uint32_t start_offset)
{
	uint16_t index = 0;
	void *seg_data = NULL;
	phys_addr_t seg_phys;
	int32_t seg_size = 0;

	if (!pkt) {
		iovec->buf_cnt = 0;
		return 0;
	}

	if (!start_offset) {
		seg_data = rte_pktmbuf_mtod(pkt, void *);
		seg_phys = rte_pktmbuf_mtophys(pkt);
		seg_size = pkt->data_len;
	} else {
		while (start_offset >= pkt->data_len) {
			start_offset -= pkt->data_len;
			pkt = pkt->next;
		}

		seg_data = rte_pktmbuf_mtod_offset(pkt, void *, start_offset);
		seg_phys = rte_pktmbuf_mtophys_offset(pkt, start_offset);
		seg_size = pkt->data_len - start_offset;
		if (!seg_size)
			return 1;
	}

	/* First segment */
	iovec->bufs[index].vaddr = seg_data;
	iovec->bufs[index].dma_addr = seg_phys;
	iovec->bufs[index].size = seg_size;
	index++;
	pkt = pkt->next;

	while (unlikely(pkt != NULL)) {
		seg_data = rte_pktmbuf_mtod(pkt, void *);
		seg_phys = rte_pktmbuf_mtophys(pkt);
		seg_size = pkt->data_len;
		if (!seg_size)
			break;

		iovec->bufs[index].vaddr = seg_data;
		iovec->bufs[index].dma_addr = seg_phys;
		iovec->bufs[index].size = seg_size;
		index++;
		pkt = pkt->next;
	}

	iovec->buf_cnt = index;
	return 0;
}
static __rte_always_inline uint32_t
prepare_iov_from_pkt_inplace(struct rte_mbuf *pkt,
			     fc_params_t *param,
			     uint32_t *flags)
{
	uint16_t index = 0;
	void *seg_data = NULL;
	phys_addr_t seg_phys;
	uint32_t seg_size = 0;
	iov_ptr_t *iovec;

	seg_data = rte_pktmbuf_mtod(pkt, void *);
	seg_phys = rte_pktmbuf_mtophys(pkt);
	seg_size = pkt->data_len;

	/* 1st seg */
	if (likely(!pkt->next)) {
		uint32_t headroom, tailroom;

		*flags |= SINGLE_BUF_INPLACE;
		headroom = rte_pktmbuf_headroom(pkt);
		tailroom = rte_pktmbuf_tailroom(pkt);
		if (likely((headroom >= 24) &&
			   (tailroom >= 8))) {
			/* In 83XX this is a prerequisite for Direct mode */
			*flags |= SINGLE_BUF_HEADTAILROOM;
		}
		param->bufs[0].vaddr = seg_data;
		param->bufs[0].dma_addr = seg_phys;
		param->bufs[0].size = seg_size;
		return 0;
	}
	iovec = param->src_iov;
	iovec->bufs[index].vaddr = seg_data;
	iovec->bufs[index].dma_addr = seg_phys;
	iovec->bufs[index].size = seg_size;
	index++;
	pkt = pkt->next;

	while (unlikely(pkt != NULL)) {
		seg_data = rte_pktmbuf_mtod(pkt, void *);
		seg_phys = rte_pktmbuf_mtophys(pkt);
		seg_size = pkt->data_len;
		if (!seg_size)
			break;

		iovec->bufs[index].vaddr = seg_data;
		iovec->bufs[index].dma_addr = seg_phys;
		iovec->bufs[index].size = seg_size;
		index++;
		pkt = pkt->next;
	}

	iovec->buf_cnt = index;
	return 0;
}
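/*
 * SINGLE_BUF_HEADTAILROOM is what later lets fill_fc_params() request
 * the smaller linear-buffer metadata length (lb_mlen) and stay in
 * direct mode; without the 24B/8B head/tailroom reserve the request
 * falls back to scatter-gather with sg_mlen metadata.
 */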
static __rte_always_inline int
fill_fc_params(struct rte_crypto_op *cop,
	       struct cpt_sess_misc *sess_misc,
	       struct cpt_qp_meta_info *m_info,
	       void **mdata_ptr,
	       void **prep_req)
{
	uint32_t space = 0;
	struct rte_crypto_sym_op *sym_op = cop->sym;
	void *mdata = NULL;
	uintptr_t *op;
	uint32_t mc_hash_off;
	uint32_t flags = 0;
	uint64_t d_offs, d_lens;
	struct rte_mbuf *m_src, *m_dst;
	uint8_t cpt_op = sess_misc->cpt_op;
#ifdef CPT_ALWAYS_USE_SG_MODE
	uint8_t inplace = 0;
#else
	uint8_t inplace = 1;
#endif
	fc_params_t fc_params;
	char src[SRC_IOV_SIZE];
	char dst[SRC_IOV_SIZE];
	uint32_t iv_buf[4];
	int ret;

	if (likely(sess_misc->iv_length)) {
		flags |= VALID_IV_BUF;
		fc_params.iv_buf = rte_crypto_op_ctod_offset(cop,
				   uint8_t *, sess_misc->iv_offset);
		if (sess_misc->aes_ctr &&
		    unlikely(sess_misc->iv_length != 16)) {
			memcpy((uint8_t *)iv_buf,
			       rte_crypto_op_ctod_offset(cop,
			       uint8_t *, sess_misc->iv_offset), 12);
			iv_buf[3] = rte_cpu_to_be_32(0x1);
			fc_params.iv_buf = iv_buf;
		}
	}
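	/*
	 * Note on the AES-CTR special case above: with a 12-byte IV the
	 * engine is handed a full 16-byte counter block, the nonce in
	 * words 0-2 and word 3 seeded with a big-endian 1 (the usual
	 * initial CTR block layout); e.g. nonce 00..0b becomes the
	 * block 00..0b 00 00 00 01.
	 */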
	if (sess_misc->zsk_flag) {
		fc_params.auth_iv_buf = rte_crypto_op_ctod_offset(cop,
					uint8_t *,
					sess_misc->auth_iv_offset);
		if (sess_misc->zsk_flag != ZS_EA)
			inplace = 0;
	}
	m_src = sym_op->m_src;
	m_dst = sym_op->m_dst;

	if (sess_misc->aes_gcm) {
		uint8_t *salt;
		uint8_t *aad_data;
		uint16_t aad_len;

		d_offs = sym_op->aead.data.offset;
		d_lens = sym_op->aead.data.length;
		mc_hash_off = sym_op->aead.data.offset +
			      sym_op->aead.data.length;

		aad_data = sym_op->aead.aad.data;
		aad_len = sess_misc->aad_length;
		if (likely((aad_data + aad_len) ==
			   rte_pktmbuf_mtod_offset(m_src,
						   uint8_t *,
						   sym_op->aead.data.offset))) {
			d_offs = (d_offs - aad_len) | (d_offs << 16);
			d_lens = (d_lens + aad_len) | (d_lens << 32);
		} else {
			fc_params.aad_buf.vaddr = sym_op->aead.aad.data;
			fc_params.aad_buf.dma_addr = sym_op->aead.aad.phys_addr;
			fc_params.aad_buf.size = aad_len;
			flags |= VALID_AAD_BUF;
			inplace = 0;
			d_offs = d_offs << 16;
			d_lens = d_lens << 32;
		}

		salt = fc_params.iv_buf;
		if (unlikely(*(uint32_t *)salt != sess_misc->salt)) {
			cpt_fc_salt_update(SESS_PRIV(sess_misc), salt);
			sess_misc->salt = *(uint32_t *)salt;
		}
		fc_params.iv_buf = salt + 4;
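		/*
		 * The first 4 IV bytes act as the GCM salt: they are
		 * cached in the session and pushed to the context via
		 * cpt_fc_salt_update() only when they change, and the
		 * per-op IV handed onward starts after them (salt + 4).
		 */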
		if (likely(sess_misc->mac_len)) {
			struct rte_mbuf *m = (cpt_op & CPT_OP_ENCODE) ? m_dst :
					     m_src;

			if (!m)
				m = m_src;

			/* hmac immediately following data is best case */
			if (unlikely(rte_pktmbuf_mtod(m, uint8_t *) +
				     mc_hash_off !=
				     (uint8_t *)sym_op->aead.digest.data)) {
				flags |= VALID_MAC_BUF;
				fc_params.mac_buf.size = sess_misc->mac_len;
				fc_params.mac_buf.vaddr =
					sym_op->aead.digest.data;
				fc_params.mac_buf.dma_addr =
					sym_op->aead.digest.phys_addr;
				inplace = 0;
			}
		}
	} else {
		d_offs = sym_op->cipher.data.offset;
		d_lens = sym_op->cipher.data.length;
		mc_hash_off = sym_op->cipher.data.offset +
			      sym_op->cipher.data.length;
		d_offs = (d_offs << 16) | sym_op->auth.data.offset;
		d_lens = (d_lens << 32) | sym_op->auth.data.length;
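		/*
		 * Worked example of the packed encoding: cipher offset
		 * 16/length 64 with auth offset 0/length 80 yields
		 * d_offs = (16 << 16) | 0 = 0x100000 and
		 * d_lens = (64 << 32) | 80; cipher fields ride in the
		 * upper halves, auth fields in the lower ones.
		 */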
		if (mc_hash_off < (sym_op->auth.data.offset +
				   sym_op->auth.data.length)) {
			mc_hash_off = (sym_op->auth.data.offset +
				       sym_op->auth.data.length);
		}
		/* for gmac, salt should be updated like in gcm */
		if (unlikely(sess_misc->is_gmac)) {
			uint8_t *salt;

			salt = fc_params.iv_buf;
			if (unlikely(*(uint32_t *)salt != sess_misc->salt)) {
				cpt_fc_salt_update(SESS_PRIV(sess_misc), salt);
				sess_misc->salt = *(uint32_t *)salt;
			}
			fc_params.iv_buf = salt + 4;
		}
		if (likely(sess_misc->mac_len)) {
			struct rte_mbuf *m;

			m = (cpt_op & CPT_OP_ENCODE) ? m_dst : m_src;
			if (!m)
				m = m_src;

			/* hmac immediately following data is best case */
			if (unlikely(rte_pktmbuf_mtod(m, uint8_t *) +
				     mc_hash_off !=
				     (uint8_t *)sym_op->auth.digest.data)) {
				flags |= VALID_MAC_BUF;
				fc_params.mac_buf.size =
					sess_misc->mac_len;
				fc_params.mac_buf.vaddr =
					sym_op->auth.digest.data;
				fc_params.mac_buf.dma_addr =
					sym_op->auth.digest.phys_addr;
				inplace = 0;
			}
		}
	}

	fc_params.ctx_buf.vaddr = SESS_PRIV(sess_misc);
	fc_params.ctx_buf.dma_addr = sess_misc->ctx_dma_addr;

	if (unlikely(sess_misc->is_null || sess_misc->cpt_op == CPT_OP_DECODE))
		inplace = 0;

	if (likely(!m_dst && inplace)) {
		/*
		 * Case of single buffer without AAD buf or separate mac
		 * buf, in place, and no air (wireless) crypto
		 */
		fc_params.dst_iov = fc_params.src_iov = (void *)src;

		if (unlikely(prepare_iov_from_pkt_inplace(m_src,
							  &fc_params,
							  &flags))) {
			CPT_LOG_DP_ERR("Prepare inplace src iov failed");
			ret = -EINVAL;
			goto err_exit;
		}
	} else {
		/* Out of place processing */
		fc_params.src_iov = (void *)src;
		fc_params.dst_iov = (void *)dst;

		/* Store SG I/O in the api for reuse */
		if (prepare_iov_from_pkt(m_src, fc_params.src_iov, 0)) {
			CPT_LOG_DP_ERR("Prepare src iov failed");
			ret = -EINVAL;
			goto err_exit;
		}

		if (unlikely(m_dst != NULL)) {
			/* Try to make room as much as src has */
			uint32_t pkt_len = rte_pktmbuf_pkt_len(m_dst);

			if (unlikely(pkt_len < rte_pktmbuf_pkt_len(m_src))) {
				pkt_len = rte_pktmbuf_pkt_len(m_src) - pkt_len;
				if (!rte_pktmbuf_append(m_dst, pkt_len)) {
					CPT_LOG_DP_ERR("Not enough space in "
						       "m_dst %p, need %u more",
						       m_dst, pkt_len);
					ret = -EINVAL;
					goto err_exit;
				}
			}

			if (prepare_iov_from_pkt(m_dst, fc_params.dst_iov, 0)) {
				CPT_LOG_DP_ERR("Prepare dst iov failed for "
					       "m_dst %p", m_dst);
				ret = -EINVAL;
				goto err_exit;
			}
		} else {
			fc_params.dst_iov = (void *)src;
		}
	}
	if (likely(flags & SINGLE_BUF_HEADTAILROOM))
		mdata = alloc_op_meta(m_src, &fc_params.meta_buf,
				      m_info->lb_mlen, m_info->pool);
	else
		mdata = alloc_op_meta(NULL, &fc_params.meta_buf,
				      m_info->sg_mlen, m_info->pool);

	if (unlikely(mdata == NULL)) {
		CPT_LOG_DP_ERR("Error allocating meta buffer for request");
		ret = -ENOMEM;
		goto err_exit;
	}

	op = (uintptr_t *)((uintptr_t)mdata & (uintptr_t)~1ull);
	op[0] = (uintptr_t)mdata;
	op[1] = (uintptr_t)cop;
	op[2] = op[3] = 0; /* Used to indicate auth verify */
	space += 4 * sizeof(uint64_t);

	fc_params.meta_buf.vaddr = (uint8_t *)op + space;
	fc_params.meta_buf.dma_addr += space;
	fc_params.meta_buf.size -= space;
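	/*
	 * The first four words of the metadata form a small header:
	 * op[0] holds the tagged mdata pointer for free_op_meta(),
	 * op[1] the rte_crypto_op, and op[2]/op[3] stay zero unless a
	 * digest must be verified at completion (see
	 * fill_digest_params() and compl_auth_verify() below).
	 */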
	/* Finally prepare the instruction */
	if (cpt_op & CPT_OP_ENCODE)
		*prep_req = cpt_fc_enc_hmac_prep(flags, d_offs, d_lens,
						 &fc_params, op);
	else
		*prep_req = cpt_fc_dec_hmac_prep(flags, d_offs, d_lens,
						 &fc_params, op);

	if (unlikely(*prep_req == NULL)) {
		CPT_LOG_DP_ERR("Preparing request failed due to bad input arg");
		ret = -EINVAL;
		goto free_mdata_and_exit;
	}

	*mdata_ptr = mdata;

	return 0;

free_mdata_and_exit:
	free_op_meta(mdata, m_info->pool);
err_exit:
	return ret;
}
static __rte_always_inline void
compl_auth_verify(struct rte_crypto_op *op,
		  uint8_t *gen_mac,
		  uint64_t mac_len)
{
	uint8_t *mac;
	struct rte_crypto_sym_op *sym_op = op->sym;

	if (sym_op->auth.digest.data)
		mac = sym_op->auth.digest.data;
	else
		mac = rte_pktmbuf_mtod_offset(sym_op->m_src,
					      uint8_t *,
					      sym_op->auth.data.length +
					      sym_op->auth.data.offset);
	if (!mac) {
		op->status = RTE_CRYPTO_OP_STATUS_ERROR;
		return;
	}

	if (memcmp(mac, gen_mac, mac_len))
		op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
	else
		op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
}
static __rte_always_inline int
instance_session_cfg(struct rte_crypto_sym_xform *xform, void *sess)
{
	struct rte_crypto_sym_xform *chain;

	CPT_PMD_INIT_FUNC_TRACE();

	if (cpt_is_algo_supported(xform))
		goto err;

	chain = xform;
	while (chain) {
		switch (chain->type) {
		case RTE_CRYPTO_SYM_XFORM_AEAD:
			if (fill_sess_aead(chain, sess))
				goto err;
			break;
		case RTE_CRYPTO_SYM_XFORM_CIPHER:
			if (fill_sess_cipher(chain, sess))
				goto err;
			break;
		case RTE_CRYPTO_SYM_XFORM_AUTH:
			if (chain->auth.algo == RTE_CRYPTO_AUTH_AES_GMAC) {
				if (fill_sess_gmac(chain, sess))
					goto err;
			} else {
				if (fill_sess_auth(chain, sess))
					goto err;
			}
			break;
		default:
			CPT_LOG_DP_ERR("Invalid crypto xform type");
			goto err;
		}
		chain = chain->next;
	}

	return 0;

err:
	return -1;
}
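/*
 * Illustrative only (hypothetical variables): a cipher-then-auth chain
 * walked by the loop above would be expressed as two linked xforms:
 *
 *	cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
 *	cipher_xform.next = &auth_xform;
 *	auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
 *	auth_xform.next = NULL;
 *	instance_session_cfg(&cipher_xform, sess_priv);
 */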
static __rte_always_inline void
find_kasumif9_direction_and_length(uint8_t *src,
				   uint32_t counter_num_bytes,
				   uint32_t *addr_length_in_bits,
				   uint8_t *addr_direction)
{
	uint8_t found = 0;
	uint32_t pos;
	uint8_t last_byte;

	while (!found && counter_num_bytes > 0) {
		counter_num_bytes--;
		if (src[counter_num_bytes] == 0x00)
			continue;
		pos = rte_bsf32(src[counter_num_bytes]);
		if (pos == 7) {
			if (likely(counter_num_bytes > 0)) {
				last_byte = src[counter_num_bytes - 1];
				*addr_direction = last_byte & 0x1;
				*addr_length_in_bits = counter_num_bytes * 8
							- 1;
			}
		} else {
			last_byte = src[counter_num_bytes];
			*addr_direction = (last_byte >> (pos + 1)) & 0x1;
			*addr_length_in_bits = counter_num_bytes * 8
						+ (8 - (pos + 2));
		}
		found = 1;
	}
}
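/*
 * Background, inferred from the code above and the KASUMI F9 padding
 * scheme: the bitstream ends with the 1-bit direction flag, a single
 * '1' stop bit, then zero padding, so rte_bsf32() finds the stop bit
 * as the least significant set bit of the last non-zero byte and the
 * direction bit sits just before it. Example: a trailer byte of 0x40
 * (0b01000000) gives pos = 6, direction = bit 7 = 0, and a message
 * length that is a whole number of bytes.
 */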
/*
 * This handles all auth-only cases except AES_GMAC
 */
static __rte_always_inline int
fill_digest_params(struct rte_crypto_op *cop,
		   struct cpt_sess_misc *sess,
		   struct cpt_qp_meta_info *m_info,
		   void **mdata_ptr,
		   void **prep_req)
{
	uint32_t space = 0;
	struct rte_crypto_sym_op *sym_op = cop->sym;
	void *mdata;
	phys_addr_t mphys;
	uint64_t *op;
	uint32_t auth_range_off;
	uint32_t flags = 0;
	uint64_t d_offs = 0, d_lens;
	struct rte_mbuf *m_src, *m_dst;
	uint16_t auth_op = sess->cpt_op & CPT_OP_AUTH_MASK;
	uint16_t mac_len = sess->mac_len;
	fc_params_t params;
	char src[SRC_IOV_SIZE];
	uint8_t iv_buf[16];
	int ret;

	memset(&params, 0, sizeof(fc_params_t));

	m_src = sym_op->m_src;

	/* For digest-only ops, force a mempool allocation */
	mdata = alloc_op_meta(NULL, &params.meta_buf, m_info->sg_mlen,
			      m_info->pool);
	if (mdata == NULL) {
		ret = -ENOMEM;
		goto err_exit;
	}

	mphys = params.meta_buf.dma_addr;

	op = mdata;
	op[0] = (uintptr_t)mdata;
	op[1] = (uintptr_t)cop;
	op[2] = op[3] = 0; /* Used to indicate auth verify */
	space += 4 * sizeof(uint64_t);

	auth_range_off = sym_op->auth.data.offset;

	flags = VALID_MAC_BUF;
	params.src_iov = (void *)src;
	if (unlikely(sess->zsk_flag)) {
		/*
		 * Since for Zuc, Kasumi, Snow3g offsets are in bits, we
		 * send a pass-through even for the auth-only case and
		 * let the microcode handle it
		 */
		d_offs = auth_range_off;
		auth_range_off = 0;
		params.auth_iv_buf = rte_crypto_op_ctod_offset(cop,
					uint8_t *, sess->auth_iv_offset);
		if (sess->zsk_flag == K_F9) {
			uint32_t length_in_bits, num_bytes;
			uint8_t *src, direction = 0;

			memcpy(iv_buf, rte_pktmbuf_mtod(cop->sym->m_src,
							uint8_t *), 8);
			/*
			 * This is kasumi f9, take direction from
			 * the source buffer
			 */
			length_in_bits = cop->sym->auth.data.length;
			num_bytes = (length_in_bits >> 3);
			src = rte_pktmbuf_mtod(cop->sym->m_src, uint8_t *);
			find_kasumif9_direction_and_length(src,
						num_bytes,
						&length_in_bits,
						&direction);
			length_in_bits -= 64;
			cop->sym->auth.data.offset += 64;
			d_offs = cop->sym->auth.data.offset;
			auth_range_off = d_offs / 8;
			cop->sym->auth.data.length = length_in_bits;

			/* Store it at end of auth iv */
			iv_buf[8] = direction;
			params.auth_iv_buf = iv_buf;
		}
	}
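	/*
	 * For KASUMI F9 the leading 8 bytes of the source (presumably
	 * the COUNT/FRESH words of the F9 input) are moved into iv_buf
	 * and the corresponding 64 bits are trimmed off the
	 * authenticated range; the direction bit recovered from the
	 * trailer is stashed right after them at iv_buf[8].
	 */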
	d_lens = sym_op->auth.data.length;

	params.ctx_buf.vaddr = SESS_PRIV(sess);
	params.ctx_buf.dma_addr = sess->ctx_dma_addr;

	if (auth_op == CPT_OP_AUTH_GENERATE) {
		if (sym_op->auth.digest.data) {
			/*
			 * Digest to be generated
			 * in separate buffer
			 */
			params.mac_buf.size =
				sess->mac_len;
			params.mac_buf.vaddr =
				sym_op->auth.digest.data;
			params.mac_buf.dma_addr =
				sym_op->auth.digest.phys_addr;
		} else {
			uint32_t off = sym_op->auth.data.offset +
				       sym_op->auth.data.length;
			int32_t dlen, space;

			m_dst = sym_op->m_dst ?
				sym_op->m_dst : sym_op->m_src;
			dlen = rte_pktmbuf_pkt_len(m_dst);

			space = off + mac_len - dlen;
			if (space > 0)
				if (!rte_pktmbuf_append(m_dst, space)) {
					CPT_LOG_DP_ERR("Failed to extend "
						       "mbuf by %dB", space);
					ret = -EINVAL;
					goto free_mdata_and_exit;
				}

			params.mac_buf.vaddr =
				rte_pktmbuf_mtod_offset(m_dst, void *, off);
			params.mac_buf.dma_addr =
				rte_pktmbuf_mtophys_offset(m_dst, off);
			params.mac_buf.size = mac_len;
		}
	} else {
		/* Need space for storing generated mac */
		params.mac_buf.vaddr = (uint8_t *)mdata + space;
		params.mac_buf.dma_addr = mphys + space;
		params.mac_buf.size = mac_len;
		space += RTE_ALIGN_CEIL(mac_len, 8);
		op[2] = (uintptr_t)params.mac_buf.vaddr;
		op[3] = mac_len;
	}
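	/*
	 * In the verify path the digest is generated into the metadata
	 * area and op[2]/op[3] record its address and length; the
	 * dequeue side is then expected to hand these to
	 * compl_auth_verify() above to compare against the digest that
	 * came with the op (an assumption about the caller, not shown
	 * here).
	 */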
	params.meta_buf.vaddr = (uint8_t *)mdata + space;
	params.meta_buf.dma_addr = mphys + space;
	params.meta_buf.size -= space;

	/* Out of place processing */
	params.src_iov = (void *)src;

	/* Store SG I/O in the api for reuse */
	if (prepare_iov_from_pkt(m_src, params.src_iov, auth_range_off)) {
		CPT_LOG_DP_ERR("Prepare src iov failed");
		ret = -EINVAL;
		goto free_mdata_and_exit;
	}

	*prep_req = cpt_fc_enc_hmac_prep(flags, d_offs, d_lens, &params, op);
	if (unlikely(*prep_req == NULL)) {
		ret = -EINVAL;
		goto free_mdata_and_exit;
	}

	*mdata_ptr = mdata;

	return 0;

free_mdata_and_exit:
	free_op_meta(mdata, m_info->pool);
err_exit:
	return ret;
}
3569 #endif /*_CPT_UCODE_H_ */