1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018 Cavium, Inc
9 #include "cpt_common.h"
10 #include "cpt_hw_types.h"
11 #include "cpt_mcode_defines.h"
14 * This file defines functions that interface with the microcode spec.
18 static uint8_t zuc_d[32] = {
19 0x44, 0xD7, 0x26, 0xBC, 0x62, 0x6B, 0x13, 0x5E,
20 0x57, 0x89, 0x35, 0xE2, 0x71, 0x35, 0x09, 0xAF,
21 0x4D, 0x78, 0x2F, 0x13, 0x6B, 0xC4, 0x1A, 0xF1,
22 0x5E, 0x26, 0x3C, 0x4D, 0x78, 0x9A, 0x47, 0xAC
25 static __rte_always_inline int
26 cpt_is_algo_supported(struct rte_crypto_sym_xform *xform)
29 * Microcode only supports the following combinations:
30 * encryption followed by authentication,
31 * authentication followed by decryption.
34 if ((xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) &&
35 (xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) &&
36 (xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)) {
37 /* Unsupported as of now by microcode */
38 CPT_LOG_DP_ERR("Unsupported combination");
41 if ((xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) &&
42 (xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) &&
43 (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT)) {
44 /* For GMAC auth there is no cipher operation */
45 if (xform->aead.algo != RTE_CRYPTO_AEAD_AES_GCM ||
46 xform->next->auth.algo !=
47 RTE_CRYPTO_AUTH_AES_GMAC) {
48 /* Unsupported as of now by microcode */
49 CPT_LOG_DP_ERR("Unsupported combination");
57 static __rte_always_inline void
58 gen_key_snow3g(const uint8_t *ck, uint32_t *keyx)
62 for (i = 0; i < 4; i++) {
64 keyx[3 - i] = (ck[base] << 24) | (ck[base + 1] << 16) |
65 (ck[base + 2] << 8) | (ck[base + 3]);
66 keyx[3 - i] = rte_cpu_to_be_32(keyx[3 - i]);
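
#ifdef CPT_UCODE_EXAMPLE
/*
 * Illustrative sketch, not part of the driver (CPT_UCODE_EXAMPLE is a
 * hypothetical guard): gen_key_snow3g() splits a 16-byte cipher key
 * into four 32-bit words in reverse word order, each stored big-endian
 * as the microcode expects.
 */
static void
example_gen_key_snow3g(void)
{
	const uint8_t ck[16] = {
		0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
		0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f
	};
	uint32_t keyx[4];

	/* keyx[3] <- ck[0..3], ..., keyx[0] <- ck[12..15] */
	gen_key_snow3g(ck, keyx);
}
#endif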
70 static __rte_always_inline void
71 cpt_fc_salt_update(void *ctx,
74 struct cpt_ctx *cpt_ctx = ctx;
75 memcpy(&cpt_ctx->fctx.enc.encr_iv, salt, 4);
78 static __rte_always_inline int
79 cpt_fc_ciph_validate_key_aes(uint16_t key_len)
91 static __rte_always_inline int
92 cpt_fc_ciph_set_type(cipher_type_t type, struct cpt_ctx *ctx, uint16_t key_len)
108 if (unlikely(cpt_fc_ciph_validate_key_aes(key_len) != 0))
113 key_len = key_len / 2;
114 if (unlikely(key_len == CPT_BYTE_24)) {
115 CPT_LOG_DP_ERR("Invalid AES key len for XTS");
118 if (unlikely(cpt_fc_ciph_validate_key_aes(key_len) != 0))
124 if (unlikely(key_len != 16))
126 /* No support for AEAD yet */
127 if (unlikely(ctx->hash_type))
129 fc_type = ZUC_SNOW3G;
133 if (unlikely(key_len != 16))
135 /* No support for AEAD yet */
136 if (unlikely(ctx->hash_type))
144 ctx->fc_type = fc_type;
148 static __rte_always_inline void
149 cpt_fc_ciph_set_key_passthrough(struct cpt_ctx *cpt_ctx, mc_fc_context_t *fctx)
151 cpt_ctx->enc_cipher = 0;
152 fctx->enc.enc_cipher = 0;
155 static __rte_always_inline void
156 cpt_fc_ciph_set_key_set_aes_key_type(mc_fc_context_t *fctx, uint16_t key_len)
158 mc_aes_type_t aes_key_type = 0;
161 aes_key_type = AES_128_BIT;
164 aes_key_type = AES_192_BIT;
167 aes_key_type = AES_256_BIT;
170 /* This should not happen */
171 CPT_LOG_DP_ERR("Invalid AES key len");
174 fctx->enc.aes_key = aes_key_type;
177 static __rte_always_inline void
178 cpt_fc_ciph_set_key_snow3g_uea2(struct cpt_ctx *cpt_ctx, const uint8_t *key,
183 gen_key_snow3g(key, keyx);
184 memcpy(cpt_ctx->zs_ctx.ci_key, keyx, key_len);
185 cpt_ctx->zsk_flags = 0;
188 static __rte_always_inline void
189 cpt_fc_ciph_set_key_zuc_eea3(struct cpt_ctx *cpt_ctx, const uint8_t *key,
193 memcpy(cpt_ctx->zs_ctx.ci_key, key, key_len);
194 memcpy(cpt_ctx->zs_ctx.zuc_const, zuc_d, 32);
195 cpt_ctx->zsk_flags = 0;
198 static __rte_always_inline void
199 cpt_fc_ciph_set_key_kasumi_f8_ecb(struct cpt_ctx *cpt_ctx, const uint8_t *key,
203 memcpy(cpt_ctx->k_ctx.ci_key, key, key_len);
204 cpt_ctx->zsk_flags = 0;
207 static __rte_always_inline void
208 cpt_fc_ciph_set_key_kasumi_f8_cbc(struct cpt_ctx *cpt_ctx, const uint8_t *key,
211 memcpy(cpt_ctx->k_ctx.ci_key, key, key_len);
212 cpt_ctx->zsk_flags = 0;
215 static __rte_always_inline int
216 cpt_fc_ciph_set_key(void *ctx, cipher_type_t type, const uint8_t *key,
217 uint16_t key_len, uint8_t *salt)
219 struct cpt_ctx *cpt_ctx = ctx;
220 mc_fc_context_t *fctx = &cpt_ctx->fctx;
223 ret = cpt_fc_ciph_set_type(type, cpt_ctx, key_len);
227 if (cpt_ctx->fc_type == FC_GEN) {
229 * We must always report the IV as coming from DPTR, as the user can
230 * override the IV per operation.
232 fctx->enc.iv_source = CPT_FROM_DPTR;
234 if (cpt_ctx->auth_key_len > 64)
240 cpt_fc_ciph_set_key_passthrough(cpt_ctx, fctx);
243 /* CPT performs DES using 3DES with the 8B DES key
244 * replicated twice more to match the 24B 3DES key.
245 * E.g., if the original key is "0x0a 0x0b", the new key is
246 * "0x0a 0x0b 0x0a 0x0b 0x0a 0x0b"
249 /* Skipping the first 8B as it will be copied
250 * in the regular code flow
252 memcpy(fctx->enc.encr_key+key_len, key, key_len);
253 memcpy(fctx->enc.encr_key+2*key_len, key, key_len);
257 /* For DES3_ECB the IV needs to come from CTX. */
258 fctx->enc.iv_source = CPT_FROM_CTX;
264 cpt_fc_ciph_set_key_set_aes_key_type(fctx, key_len);
267 /* Even though the IV source is DPTR,
268 * the AES-GCM salt is taken from CTX
271 memcpy(fctx->enc.encr_iv, salt, 4);
272 /* Assuming it was just salt update
278 cpt_fc_ciph_set_key_set_aes_key_type(fctx, key_len);
281 key_len = key_len / 2;
282 cpt_fc_ciph_set_key_set_aes_key_type(fctx, key_len);
284 /* Copy key2 for XTS into ipad */
285 memset(fctx->hmac.ipad, 0, sizeof(fctx->hmac.ipad));
286 memcpy(fctx->hmac.ipad, &key[key_len], key_len);
289 cpt_fc_ciph_set_key_snow3g_uea2(cpt_ctx, key, key_len);
292 cpt_fc_ciph_set_key_zuc_eea3(cpt_ctx, key, key_len);
295 cpt_fc_ciph_set_key_kasumi_f8_ecb(cpt_ctx, key, key_len);
298 cpt_fc_ciph_set_key_kasumi_f8_cbc(cpt_ctx, key, key_len);
304 /* Only for FC_GEN case */
306 /* For GMAC auth, cipher must be NULL */
307 if (cpt_ctx->hash_type != GMAC_TYPE)
308 fctx->enc.enc_cipher = type;
310 memcpy(fctx->enc.encr_key, key, key_len);
313 cpt_ctx->enc_cipher = type;
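
#ifdef CPT_UCODE_EXAMPLE
/*
 * Illustrative sketch (hypothetical guard): programming an AES-128-CBC
 * key into a context. The salt argument is only consumed by ciphers
 * that keep it in CTX (e.g. AES-GCM), so NULL is fine here.
 */
static int
example_ciph_set_key(struct cpt_ctx *cpt_ctx)
{
	const uint8_t key[16] = { 0 };

	return cpt_fc_ciph_set_key(cpt_ctx, AES_CBC, key, sizeof(key),
				   NULL);
}
#endif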
318 static __rte_always_inline uint32_t
319 fill_sg_comp(sg_comp_t *list,
321 phys_addr_t dma_addr,
324 sg_comp_t *to = &list[i >> 2];
326 to->u.s.len[i % 4] = rte_cpu_to_be_16(size);
327 to->ptr[i % 4] = rte_cpu_to_be_64(dma_addr);
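
#ifdef CPT_UCODE_EXAMPLE
/*
 * Illustrative sketch (hypothetical guard): each sg_comp_t holds four
 * {len, ptr} pairs, written big-endian; fill_sg_comp() returns the
 * index of the next free slot.
 */
static uint32_t
example_fill_one_comp(sg_comp_t *list, phys_addr_t dma, uint32_t len)
{
	uint32_t i = 0;

	i = fill_sg_comp(list, i, dma, len);
	return i; /* 1: the next free slot */
}
#endif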
332 static __rte_always_inline uint32_t
333 fill_sg_comp_from_buf(sg_comp_t *list,
337 sg_comp_t *to = &list[i >> 2];
339 to->u.s.len[i % 4] = rte_cpu_to_be_16(from->size);
340 to->ptr[i % 4] = rte_cpu_to_be_64(from->dma_addr);
345 static __rte_always_inline uint32_t
346 fill_sg_comp_from_buf_min(sg_comp_t *list,
351 sg_comp_t *to = &list[i >> 2];
352 uint32_t size = *psize;
355 e_len = (size > from->size) ? from->size : size;
356 to->u.s.len[i % 4] = rte_cpu_to_be_16(e_len);
357 to->ptr[i % 4] = rte_cpu_to_be_64(from->dma_addr);
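
#ifdef CPT_UCODE_EXAMPLE
/*
 * Illustrative sketch (hypothetical guard): the _min variant clamps the
 * component to the buffer size and decrements *psize by the bytes it
 * actually consumed, letting the caller detect a short buffer.
 */
static uint32_t
example_fill_min(sg_comp_t *list, buf_ptr_t *buf, uint32_t want)
{
	uint32_t i = 0;
	uint32_t size = want;

	i = fill_sg_comp_from_buf_min(list, i, buf, &size);
	/* size holds the bytes that did not fit; 0 on success */
	return i;
}
#endif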
364 * This fills the MC expected SGIO list
365 * from IOV given by user.
367 static __rte_always_inline uint32_t
368 fill_sg_comp_from_iov(sg_comp_t *list,
370 iov_ptr_t *from, uint32_t from_offset,
371 uint32_t *psize, buf_ptr_t *extra_buf,
372 uint32_t extra_offset)
375 uint32_t extra_len = extra_buf ? extra_buf->size : 0;
376 uint32_t size = *psize - extra_len;
380 for (j = 0; (j < from->buf_cnt) && size; j++) {
381 phys_addr_t e_dma_addr;
383 sg_comp_t *to = &list[i >> 2];
388 if (unlikely(from_offset)) {
389 if (from_offset >= bufs[j].size) {
390 from_offset -= bufs[j].size;
393 e_dma_addr = bufs[j].dma_addr + from_offset;
394 e_len = (size > (bufs[j].size - from_offset)) ?
395 (bufs[j].size - from_offset) : size;
398 e_dma_addr = bufs[j].dma_addr;
399 e_len = (size > bufs[j].size) ?
403 to->u.s.len[i % 4] = rte_cpu_to_be_16(e_len);
404 to->ptr[i % 4] = rte_cpu_to_be_64(e_dma_addr);
406 if (extra_len && (e_len >= extra_offset)) {
407 /* Break the data at given offset */
408 uint32_t next_len = e_len - extra_offset;
409 phys_addr_t next_dma = e_dma_addr + extra_offset;
414 e_len = extra_offset;
416 to->u.s.len[i % 4] = rte_cpu_to_be_16(e_len);
419 /* Insert extra data ptr */
424 rte_cpu_to_be_16(extra_buf->size);
426 rte_cpu_to_be_64(extra_buf->dma_addr);
428 /* size already decremented by extra len */
431 /* insert the rest of the data */
435 to->u.s.len[i % 4] = rte_cpu_to_be_16(next_len);
436 to->ptr[i % 4] = rte_cpu_to_be_64(next_dma);
445 extra_offset -= size;
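
#ifdef CPT_UCODE_EXAMPLE
/*
 * Illustrative sketch (hypothetical guard): gather data_len bytes from
 * an IOV and splice an extra buffer (e.g. AAD) in at a given offset.
 * *psize must include the extra buffer's size; a non-zero residue on
 * return means the IOV was too short.
 */
static uint32_t
example_gather_with_aad(sg_comp_t *list, iov_ptr_t *src, buf_ptr_t *aad,
			uint32_t data_len, uint32_t aad_offset)
{
	uint32_t i = 0;
	uint32_t size = data_len + aad->size;

	i = fill_sg_comp_from_iov(list, i, src, 0 /* from_offset */,
				  &size, aad, aad_offset);
	return size ? 0 : i;
}
#endif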
453 static __rte_always_inline void
454 cpt_digest_gen_prep(uint32_t flags,
456 digest_params_t *params,
460 struct cpt_request_info *req;
462 uint16_t data_len, mac_len, key_len;
463 auth_type_t hash_type;
466 sg_comp_t *gather_comp;
467 sg_comp_t *scatter_comp;
469 uint32_t g_size_bytes, s_size_bytes;
470 uint64_t dptr_dma, rptr_dma;
471 vq_cmd_word0_t vq_cmd_w0;
472 vq_cmd_word3_t vq_cmd_w3;
473 void *c_vaddr, *m_vaddr;
474 uint64_t c_dma, m_dma;
475 opcode_info_t opcode;
477 ctx = params->ctx_buf.vaddr;
478 meta_p = ¶ms->meta_buf;
480 m_vaddr = meta_p->vaddr;
481 m_dma = meta_p->dma_addr;
484 * Reserve the initial space that follows the app data, so that the completion
485 * code and alternate completion code fall in the same cache line as the app data
487 m_vaddr = (uint8_t *)m_vaddr + COMPLETION_CODE_SIZE;
488 m_dma += COMPLETION_CODE_SIZE;
489 size = (uint8_t *)RTE_PTR_ALIGN((uint8_t *)m_vaddr, 16) -
491 c_vaddr = (uint8_t *)m_vaddr + size;
492 c_dma = m_dma + size;
493 size += sizeof(cpt_res_s_t);
495 m_vaddr = (uint8_t *)m_vaddr + size;
500 size = sizeof(struct cpt_request_info);
501 m_vaddr = (uint8_t *)m_vaddr + size;
504 hash_type = ctx->hash_type;
505 mac_len = ctx->mac_len;
506 key_len = ctx->auth_key_len;
507 data_len = AUTH_DLEN(d_lens);
511 vq_cmd_w0.s.param2 = ((uint16_t)hash_type << 8);
513 opcode.s.major = CPT_MAJOR_OP_HMAC | CPT_DMA_MODE;
514 vq_cmd_w0.s.param1 = key_len;
515 vq_cmd_w0.s.dlen = data_len + ROUNDUP8(key_len);
517 opcode.s.major = CPT_MAJOR_OP_HASH | CPT_DMA_MODE;
518 vq_cmd_w0.s.param1 = 0;
519 vq_cmd_w0.s.dlen = data_len;
524 /* Only the NULL-cipher, NULL-auth case enters this branch */
525 if (unlikely(!hash_type && !ctx->enc_cipher)) {
526 opcode.s.major = CPT_MAJOR_OP_MISC;
527 /* Minor op is passthrough */
528 opcode.s.minor = 0x03;
529 /* Send out completion code only */
530 vq_cmd_w0.s.param2 = 0x1;
533 vq_cmd_w0.s.opcode = opcode.flags;
535 /* DPTR has SG list */
539 ((uint16_t *)in_buffer)[0] = 0;
540 ((uint16_t *)in_buffer)[1] = 0;
542 /* TODO Add error check if space will be sufficient */
543 gather_comp = (sg_comp_t *)((uint8_t *)m_vaddr + 8);
552 uint64_t k_dma = params->ctx_buf.dma_addr +
553 offsetof(struct cpt_ctx, auth_key);
555 i = fill_sg_comp(gather_comp, i, k_dma, ROUNDUP8(key_len));
561 i = fill_sg_comp_from_iov(gather_comp, i, params->src_iov,
563 if (unlikely(size)) {
564 CPT_LOG_DP_DEBUG("Insufficient dst IOV size, short"
570 * We need to support a zero-data
571 * gather pointer in case of hash & hmac
575 ((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
576 g_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
583 scatter_comp = (sg_comp_t *)((uint8_t *)gather_comp + g_size_bytes);
585 if (flags & VALID_MAC_BUF) {
586 if (unlikely(params->mac_buf.size < mac_len)) {
587 CPT_LOG_DP_ERR("Insufficient MAC size");
592 i = fill_sg_comp_from_buf_min(scatter_comp, i,
593 ¶ms->mac_buf, &size);
596 i = fill_sg_comp_from_iov(scatter_comp, i,
597 params->src_iov, data_len,
599 if (unlikely(size)) {
600 CPT_LOG_DP_ERR("Insufficient dst IOV size, short by"
606 ((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
607 s_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
609 size = g_size_bytes + s_size_bytes + SG_LIST_HDR_SIZE;
611 /* This is the DPTR length in case of SG mode */
612 vq_cmd_w0.s.dlen = size;
614 m_vaddr = (uint8_t *)m_vaddr + size;
617 /* cpt alternate completion address saved earlier */
618 req->alternate_caddr = (uint64_t *)((uint8_t *)c_vaddr - 8);
619 *req->alternate_caddr = ~((uint64_t)COMPLETION_CODE_INIT);
620 rptr_dma = c_dma - 8;
622 req->ist.ei1 = dptr_dma;
623 req->ist.ei2 = rptr_dma;
628 /* 16 byte aligned cpt res address */
629 req->completion_addr = (uint64_t *)((uint8_t *)c_vaddr);
630 *req->completion_addr = COMPLETION_CODE_INIT;
631 req->comp_baddr = c_dma;
633 /* Fill microcode part of instruction */
634 req->ist.ei0 = vq_cmd_w0.u64;
635 req->ist.ei3 = vq_cmd_w3.u64;
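
#ifdef CPT_UCODE_EXAMPLE
/*
 * Illustrative sketch (hypothetical guard): the HMAC direct dlen above
 * is the data length plus the auth key rounded up to 8 bytes, since
 * the key is DMA'd in ahead of the data.
 */
static uint16_t
example_hmac_dlen(uint16_t data_len, uint16_t key_len)
{
	return data_len + ROUNDUP8(key_len);
}
#endif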
643 static __rte_always_inline void
644 cpt_enc_hmac_prep(uint32_t flags,
647 fc_params_t *fc_params,
651 uint32_t iv_offset = 0;
652 int32_t inputlen, outputlen, enc_dlen, auth_dlen;
653 struct cpt_ctx *cpt_ctx;
654 uint32_t cipher_type, hash_type;
655 uint32_t mac_len, size;
657 struct cpt_request_info *req;
658 buf_ptr_t *meta_p, *aad_buf = NULL;
659 uint32_t encr_offset, auth_offset;
660 uint32_t encr_data_len, auth_data_len, aad_len = 0;
661 uint32_t passthrough_len = 0;
662 void *m_vaddr, *offset_vaddr;
663 uint64_t m_dma, offset_dma, ctx_dma;
664 vq_cmd_word0_t vq_cmd_w0;
665 vq_cmd_word3_t vq_cmd_w3;
668 opcode_info_t opcode;
670 meta_p = &fc_params->meta_buf;
671 m_vaddr = meta_p->vaddr;
672 m_dma = meta_p->dma_addr;
674 encr_offset = ENCR_OFFSET(d_offs);
675 auth_offset = AUTH_OFFSET(d_offs);
676 encr_data_len = ENCR_DLEN(d_lens);
677 auth_data_len = AUTH_DLEN(d_lens);
678 if (unlikely(flags & VALID_AAD_BUF)) {
680 * We don't support both AAD
681 * and auth data supplied separately
685 aad_len = fc_params->aad_buf.size;
686 aad_buf = &fc_params->aad_buf;
688 cpt_ctx = fc_params->ctx_buf.vaddr;
689 cipher_type = cpt_ctx->enc_cipher;
690 hash_type = cpt_ctx->hash_type;
691 mac_len = cpt_ctx->mac_len;
694 * Reserve the initial space that follows the app data, so that the completion
695 * code and alternate completion code fall in the same cache line as the app data
697 m_vaddr = (uint8_t *)m_vaddr + COMPLETION_CODE_SIZE;
698 m_dma += COMPLETION_CODE_SIZE;
699 size = (uint8_t *)RTE_PTR_ALIGN((uint8_t *)m_vaddr, 16) -
702 c_vaddr = (uint8_t *)m_vaddr + size;
703 c_dma = m_dma + size;
704 size += sizeof(cpt_res_s_t);
706 m_vaddr = (uint8_t *)m_vaddr + size;
709 /* start cpt request info struct at 8 byte boundary */
710 size = (uint8_t *)RTE_PTR_ALIGN(m_vaddr, 8) -
713 req = (struct cpt_request_info *)((uint8_t *)m_vaddr + size);
715 size += sizeof(struct cpt_request_info);
716 m_vaddr = (uint8_t *)m_vaddr + size;
719 if (hash_type == GMAC_TYPE)
722 if (unlikely(!(flags & VALID_IV_BUF))) {
724 iv_offset = ENCR_IV_OFFSET(d_offs);
727 if (unlikely(flags & VALID_AAD_BUF)) {
729 * When AAD is given, data above encr_offset is passed through.
730 * Since AAD is supplied as a separate pointer and not as an offset,
731 * this is a special case: we need to fragment the input data
732 * into passthrough + encr_data and then insert the AAD in between.
734 if (hash_type != GMAC_TYPE) {
735 passthrough_len = encr_offset;
736 auth_offset = passthrough_len + iv_len;
737 encr_offset = passthrough_len + aad_len + iv_len;
738 auth_data_len = aad_len + encr_data_len;
740 passthrough_len = 16 + aad_len;
741 auth_offset = passthrough_len + iv_len;
742 auth_data_len = aad_len;
745 encr_offset += iv_len;
746 auth_offset += iv_len;
750 opcode.s.major = CPT_MAJOR_OP_FC;
753 auth_dlen = auth_offset + auth_data_len;
754 enc_dlen = encr_data_len + encr_offset;
755 if (unlikely(encr_data_len & 0xf)) {
756 if ((cipher_type == DES3_CBC) || (cipher_type == DES3_ECB))
757 enc_dlen = ROUNDUP8(encr_data_len) + encr_offset;
758 else if (likely((cipher_type == AES_CBC) ||
759 (cipher_type == AES_ECB)))
760 enc_dlen = ROUNDUP16(encr_data_len) + encr_offset;
763 if (unlikely(hash_type == GMAC_TYPE)) {
764 encr_offset = auth_dlen;
768 if (unlikely(auth_dlen > enc_dlen)) {
769 inputlen = auth_dlen;
770 outputlen = auth_dlen + mac_len;
773 outputlen = enc_dlen + mac_len;
778 vq_cmd_w0.s.param1 = encr_data_len;
779 vq_cmd_w0.s.param2 = auth_data_len;
781 * On 83XX we have the limitation that the
782 * IV & offset control word cannot be part of the instruction
783 * and must be placed in the data buffer, so we check for
784 * headroom and only then do the direct-mode processing
786 if (likely((flags & SINGLE_BUF_INPLACE) &&
787 (flags & SINGLE_BUF_HEADTAILROOM))) {
788 void *dm_vaddr = fc_params->bufs[0].vaddr;
789 uint64_t dm_dma_addr = fc_params->bufs[0].dma_addr;
791 * These flags indicate that 24 bytes of headroom and
792 * 8 bytes of tailroom are available, so we get to do
793 * DIRECT MODE within this limitation
796 offset_vaddr = (uint8_t *)dm_vaddr - OFF_CTRL_LEN - iv_len;
797 offset_dma = dm_dma_addr - OFF_CTRL_LEN - iv_len;
800 req->ist.ei1 = offset_dma;
801 /* RPTR should just exclude offset control word */
802 req->ist.ei2 = dm_dma_addr - iv_len;
803 req->alternate_caddr = (uint64_t *)((uint8_t *)dm_vaddr
804 + outputlen - iv_len);
806 vq_cmd_w0.s.dlen = inputlen + OFF_CTRL_LEN;
808 vq_cmd_w0.s.opcode = opcode.flags;
810 if (likely(iv_len)) {
811 uint64_t *dest = (uint64_t *)((uint8_t *)offset_vaddr
813 uint64_t *src = fc_params->iv_buf;
818 *(uint64_t *)offset_vaddr =
819 rte_cpu_to_be_64(((uint64_t)encr_offset << 16) |
820 ((uint64_t)iv_offset << 8) |
821 ((uint64_t)auth_offset));
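/*
 * Worked example: encr_offset = 16, iv_offset = 0, auth_offset = 0
 * packs as 0x100000 before the byte swap; encr_offset lands in bits
 * [31:16], iv_offset in bits [15:8] and auth_offset in bits [7:0].
 */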
824 uint32_t i, g_size_bytes, s_size_bytes;
825 uint64_t dptr_dma, rptr_dma;
826 sg_comp_t *gather_comp;
827 sg_comp_t *scatter_comp;
830 /* This falls under strict SG mode */
831 offset_vaddr = m_vaddr;
833 size = OFF_CTRL_LEN + iv_len;
835 m_vaddr = (uint8_t *)m_vaddr + size;
838 opcode.s.major |= CPT_DMA_MODE;
840 vq_cmd_w0.s.opcode = opcode.flags;
842 if (likely(iv_len)) {
843 uint64_t *dest = (uint64_t *)((uint8_t *)offset_vaddr
845 uint64_t *src = fc_params->iv_buf;
850 *(uint64_t *)offset_vaddr =
851 rte_cpu_to_be_64(((uint64_t)encr_offset << 16) |
852 ((uint64_t)iv_offset << 8) |
853 ((uint64_t)auth_offset));
855 /* DPTR has SG list */
859 ((uint16_t *)in_buffer)[0] = 0;
860 ((uint16_t *)in_buffer)[1] = 0;
862 /* TODO Add error check if space will be sufficient */
863 gather_comp = (sg_comp_t *)((uint8_t *)m_vaddr + 8);
871 /* Offset control word that includes iv */
872 i = fill_sg_comp(gather_comp, i, offset_dma,
873 OFF_CTRL_LEN + iv_len);
876 size = inputlen - iv_len;
878 uint32_t aad_offset = aad_len ? passthrough_len : 0;
880 if (unlikely(flags & SINGLE_BUF_INPLACE)) {
881 i = fill_sg_comp_from_buf_min(gather_comp, i,
885 i = fill_sg_comp_from_iov(gather_comp, i,
888 aad_buf, aad_offset);
891 if (unlikely(size)) {
892 CPT_LOG_DP_ERR("Insufficient buffer space,"
893 " size %d needed", size);
897 ((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
898 g_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
901 * Output Scatter list
905 (sg_comp_t *)((uint8_t *)gather_comp + g_size_bytes);
908 if (likely(iv_len)) {
909 i = fill_sg_comp(scatter_comp, i,
910 offset_dma + OFF_CTRL_LEN,
914 /* Output data, or output data + digest */
915 if (unlikely(flags & VALID_MAC_BUF)) {
916 size = outputlen - iv_len - mac_len;
918 uint32_t aad_offset =
919 aad_len ? passthrough_len : 0;
921 if (unlikely(flags & SINGLE_BUF_INPLACE)) {
922 i = fill_sg_comp_from_buf_min(
928 i = fill_sg_comp_from_iov(scatter_comp,
936 if (unlikely(size)) {
937 CPT_LOG_DP_ERR("Insufficient buffer"
938 " space, size %d needed",
945 i = fill_sg_comp_from_buf(scatter_comp, i,
946 &fc_params->mac_buf);
949 /* Output including mac */
950 size = outputlen - iv_len;
952 uint32_t aad_offset =
953 aad_len ? passthrough_len : 0;
955 if (unlikely(flags & SINGLE_BUF_INPLACE)) {
956 i = fill_sg_comp_from_buf_min(
962 i = fill_sg_comp_from_iov(scatter_comp,
970 if (unlikely(size)) {
971 CPT_LOG_DP_ERR("Insufficient buffer"
972 " space, size %d needed",
978 ((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
979 s_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
981 size = g_size_bytes + s_size_bytes + SG_LIST_HDR_SIZE;
983 /* This is the DPTR length in case of SG mode */
984 vq_cmd_w0.s.dlen = size;
986 m_vaddr = (uint8_t *)m_vaddr + size;
989 /* cpt alternate completion address saved earlier */
990 req->alternate_caddr = (uint64_t *)((uint8_t *)c_vaddr - 8);
991 *req->alternate_caddr = ~((uint64_t)COMPLETION_CODE_INIT);
992 rptr_dma = c_dma - 8;
994 req->ist.ei1 = dptr_dma;
995 req->ist.ei2 = rptr_dma;
998 ctx_dma = fc_params->ctx_buf.dma_addr +
999 offsetof(struct cpt_ctx, fctx);
1002 vq_cmd_w3.s.grp = 0;
1003 vq_cmd_w3.s.cptr = ctx_dma;
1005 /* 16 byte aligned cpt res address */
1006 req->completion_addr = (uint64_t *)((uint8_t *)c_vaddr);
1007 *req->completion_addr = COMPLETION_CODE_INIT;
1008 req->comp_baddr = c_dma;
1010 /* Fill microcode part of instruction */
1011 req->ist.ei0 = vq_cmd_w0.u64;
1012 req->ist.ei3 = vq_cmd_w3.u64;
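
#ifdef CPT_UCODE_EXAMPLE
/*
 * Illustrative sketch (hypothetical guard) of the encrypt-direction
 * length math above: a 30-byte AES-CBC payload at encr_offset 16 pads
 * to the 16-byte block size, so enc_dlen = 32 + 16 = 48 and the output
 * additionally carries mac_len digest bytes.
 */
static int32_t
example_enc_outputlen(int32_t encr_data_len, int32_t encr_offset,
		      int32_t mac_len)
{
	int32_t enc_dlen = ROUNDUP16(encr_data_len) + encr_offset;

	return enc_dlen + mac_len;
}
#endif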
1020 static __rte_always_inline void
1021 cpt_dec_hmac_prep(uint32_t flags,
1024 fc_params_t *fc_params,
1028 uint32_t iv_offset = 0, size;
1029 int32_t inputlen, outputlen, enc_dlen, auth_dlen;
1030 struct cpt_ctx *cpt_ctx;
1031 int32_t hash_type, mac_len;
1032 uint8_t iv_len = 16;
1033 struct cpt_request_info *req;
1034 buf_ptr_t *meta_p, *aad_buf = NULL;
1035 uint32_t encr_offset, auth_offset;
1036 uint32_t encr_data_len, auth_data_len, aad_len = 0;
1037 uint32_t passthrough_len = 0;
1038 void *m_vaddr, *offset_vaddr;
1039 uint64_t m_dma, offset_dma, ctx_dma;
1040 opcode_info_t opcode;
1041 vq_cmd_word0_t vq_cmd_w0;
1042 vq_cmd_word3_t vq_cmd_w3;
1046 meta_p = &fc_params->meta_buf;
1047 m_vaddr = meta_p->vaddr;
1048 m_dma = meta_p->dma_addr;
1050 encr_offset = ENCR_OFFSET(d_offs);
1051 auth_offset = AUTH_OFFSET(d_offs);
1052 encr_data_len = ENCR_DLEN(d_lens);
1053 auth_data_len = AUTH_DLEN(d_lens);
1055 if (unlikely(flags & VALID_AAD_BUF)) {
1057 * We don't support both AAD
1058 * and auth data supplied separately
1062 aad_len = fc_params->aad_buf.size;
1063 aad_buf = &fc_params->aad_buf;
1066 cpt_ctx = fc_params->ctx_buf.vaddr;
1067 hash_type = cpt_ctx->hash_type;
1068 mac_len = cpt_ctx->mac_len;
1070 if (hash_type == GMAC_TYPE)
1073 if (unlikely(!(flags & VALID_IV_BUF))) {
1075 iv_offset = ENCR_IV_OFFSET(d_offs);
1078 if (unlikely(flags & VALID_AAD_BUF)) {
1080 * When AAD is given, data above encr_offset is passed through.
1081 * Since AAD is supplied as a separate pointer and not as an offset,
1082 * this is a special case: we need to fragment the input data
1083 * into passthrough + encr_data and then insert the AAD in between.
1085 if (hash_type != GMAC_TYPE) {
1086 passthrough_len = encr_offset;
1087 auth_offset = passthrough_len + iv_len;
1088 encr_offset = passthrough_len + aad_len + iv_len;
1089 auth_data_len = aad_len + encr_data_len;
1091 passthrough_len = 16 + aad_len;
1092 auth_offset = passthrough_len + iv_len;
1093 auth_data_len = aad_len;
1096 encr_offset += iv_len;
1097 auth_offset += iv_len;
1101 * Reserve the initial space that follows the app data, so that the completion
1102 * code and alternate completion code fall in the same cache line as the app data
1104 m_vaddr = (uint8_t *)m_vaddr + COMPLETION_CODE_SIZE;
1105 m_dma += COMPLETION_CODE_SIZE;
1106 size = (uint8_t *)RTE_PTR_ALIGN((uint8_t *)m_vaddr, 16) -
1108 c_vaddr = (uint8_t *)m_vaddr + size;
1109 c_dma = m_dma + size;
1110 size += sizeof(cpt_res_s_t);
1112 m_vaddr = (uint8_t *)m_vaddr + size;
1115 /* start cpt request info structure at 8 byte alignment */
1116 size = (uint8_t *)RTE_PTR_ALIGN(m_vaddr, 8) -
1119 req = (struct cpt_request_info *)((uint8_t *)m_vaddr + size);
1121 size += sizeof(struct cpt_request_info);
1122 m_vaddr = (uint8_t *)m_vaddr + size;
1126 opcode.s.major = CPT_MAJOR_OP_FC;
1129 enc_dlen = encr_offset + encr_data_len;
1130 auth_dlen = auth_offset + auth_data_len;
1132 if (auth_dlen > enc_dlen) {
1133 inputlen = auth_dlen + mac_len;
1134 outputlen = auth_dlen;
1136 inputlen = enc_dlen + mac_len;
1137 outputlen = enc_dlen;
1140 if (hash_type == GMAC_TYPE)
1141 encr_offset = inputlen;
1144 vq_cmd_w0.s.param1 = encr_data_len;
1145 vq_cmd_w0.s.param2 = auth_data_len;
1148 * On 83XX we have the limitation that the
1149 * IV & offset control word cannot be part of the instruction
1150 * and must be placed in the data buffer, so we check for
1151 * headroom and only then do the direct-mode processing
1153 if (likely((flags & SINGLE_BUF_INPLACE) &&
1154 (flags & SINGLE_BUF_HEADTAILROOM))) {
1155 void *dm_vaddr = fc_params->bufs[0].vaddr;
1156 uint64_t dm_dma_addr = fc_params->bufs[0].dma_addr;
1158 * These flags indicate that 24 bytes of headroom and
1159 * 8 bytes of tailroom are available, so we get to do
1160 * DIRECT MODE within this limitation
1163 offset_vaddr = (uint8_t *)dm_vaddr - OFF_CTRL_LEN - iv_len;
1164 offset_dma = dm_dma_addr - OFF_CTRL_LEN - iv_len;
1165 req->ist.ei1 = offset_dma;
1167 /* RPTR should just exclude offset control word */
1168 req->ist.ei2 = dm_dma_addr - iv_len;
1170 req->alternate_caddr = (uint64_t *)((uint8_t *)dm_vaddr +
1171 outputlen - iv_len);
1172 /* since this is decryption,
1173 * don't touch the content of
1174 * alternate ccode space as it contains
1178 vq_cmd_w0.s.dlen = inputlen + OFF_CTRL_LEN;
1180 vq_cmd_w0.s.opcode = opcode.flags;
1182 if (likely(iv_len)) {
1183 uint64_t *dest = (uint64_t *)((uint8_t *)offset_vaddr +
1185 uint64_t *src = fc_params->iv_buf;
1190 *(uint64_t *)offset_vaddr =
1191 rte_cpu_to_be_64(((uint64_t)encr_offset << 16) |
1192 ((uint64_t)iv_offset << 8) |
1193 ((uint64_t)auth_offset));
1196 uint64_t dptr_dma, rptr_dma;
1197 uint32_t g_size_bytes, s_size_bytes;
1198 sg_comp_t *gather_comp;
1199 sg_comp_t *scatter_comp;
1203 /* This falls under strict SG mode */
1204 offset_vaddr = m_vaddr;
1206 size = OFF_CTRL_LEN + iv_len;
1208 m_vaddr = (uint8_t *)m_vaddr + size;
1211 opcode.s.major |= CPT_DMA_MODE;
1213 vq_cmd_w0.s.opcode = opcode.flags;
1215 if (likely(iv_len)) {
1216 uint64_t *dest = (uint64_t *)((uint8_t *)offset_vaddr +
1218 uint64_t *src = fc_params->iv_buf;
1223 *(uint64_t *)offset_vaddr =
1224 rte_cpu_to_be_64(((uint64_t)encr_offset << 16) |
1225 ((uint64_t)iv_offset << 8) |
1226 ((uint64_t)auth_offset));
1228 /* DPTR has SG list */
1229 in_buffer = m_vaddr;
1232 ((uint16_t *)in_buffer)[0] = 0;
1233 ((uint16_t *)in_buffer)[1] = 0;
1235 /* TODO Add error check if space will be sufficient */
1236 gather_comp = (sg_comp_t *)((uint8_t *)m_vaddr + 8);
1243 /* Offset control word that includes iv */
1244 i = fill_sg_comp(gather_comp, i, offset_dma,
1245 OFF_CTRL_LEN + iv_len);
1247 /* Add input data */
1248 if (flags & VALID_MAC_BUF) {
1249 size = inputlen - iv_len - mac_len;
1251 /* input data only */
1252 if (unlikely(flags & SINGLE_BUF_INPLACE)) {
1253 i = fill_sg_comp_from_buf_min(
1258 uint32_t aad_offset = aad_len ?
1259 passthrough_len : 0;
1261 i = fill_sg_comp_from_iov(gather_comp,
1268 if (unlikely(size)) {
1269 CPT_LOG_DP_ERR("Insufficient buffer"
1270 " space, size %d needed",
1278 i = fill_sg_comp_from_buf(gather_comp, i,
1279 &fc_params->mac_buf);
1282 /* input data + mac */
1283 size = inputlen - iv_len;
1285 if (unlikely(flags & SINGLE_BUF_INPLACE)) {
1286 i = fill_sg_comp_from_buf_min(
1291 uint32_t aad_offset = aad_len ?
1292 passthrough_len : 0;
1294 if (unlikely(!fc_params->src_iov)) {
1295 CPT_LOG_DP_ERR("Bad input args");
1299 i = fill_sg_comp_from_iov(
1307 if (unlikely(size)) {
1308 CPT_LOG_DP_ERR("Insufficient buffer"
1309 " space, size %d needed",
1315 ((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
1316 g_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
1319 * Output Scatter List
1324 (sg_comp_t *)((uint8_t *)gather_comp + g_size_bytes);
1328 i = fill_sg_comp(scatter_comp, i,
1329 offset_dma + OFF_CTRL_LEN,
1333 /* Add output data */
1334 size = outputlen - iv_len;
1336 if (unlikely(flags & SINGLE_BUF_INPLACE)) {
1337 /* handle single buffer here */
1338 i = fill_sg_comp_from_buf_min(scatter_comp, i,
1342 uint32_t aad_offset = aad_len ?
1343 passthrough_len : 0;
1345 if (unlikely(!fc_params->dst_iov)) {
1346 CPT_LOG_DP_ERR("Bad input args");
1350 i = fill_sg_comp_from_iov(scatter_comp, i,
1351 fc_params->dst_iov, 0,
1356 if (unlikely(size)) {
1357 CPT_LOG_DP_ERR("Insufficient buffer space,"
1358 " size %d needed", size);
1363 ((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
1364 s_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
1366 size = g_size_bytes + s_size_bytes + SG_LIST_HDR_SIZE;
1368 /* This is the DPTR length in case of SG mode */
1369 vq_cmd_w0.s.dlen = size;
1371 m_vaddr = (uint8_t *)m_vaddr + size;
1374 /* cpt alternate completion address saved earlier */
1375 req->alternate_caddr = (uint64_t *)((uint8_t *)c_vaddr - 8);
1376 *req->alternate_caddr = ~((uint64_t)COMPLETION_CODE_INIT);
1377 rptr_dma = c_dma - 8;
1378 size += COMPLETION_CODE_SIZE;
1380 req->ist.ei1 = dptr_dma;
1381 req->ist.ei2 = rptr_dma;
1384 ctx_dma = fc_params->ctx_buf.dma_addr +
1385 offsetof(struct cpt_ctx, fctx);
1388 vq_cmd_w3.s.grp = 0;
1389 vq_cmd_w3.s.cptr = ctx_dma;
1391 /* 16 byte aligned cpt res address */
1392 req->completion_addr = (uint64_t *)((uint8_t *)c_vaddr);
1393 *req->completion_addr = COMPLETION_CODE_INIT;
1394 req->comp_baddr = c_dma;
1396 /* Fill microcode part of instruction */
1397 req->ist.ei0 = vq_cmd_w0.u64;
1398 req->ist.ei3 = vq_cmd_w3.u64;
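
#ifdef CPT_UCODE_EXAMPLE
/*
 * Illustrative sketch (hypothetical guard) of the decrypt-direction
 * length math above: the digest is consumed and verified, so it counts
 * toward inputlen but not outputlen.
 */
static int32_t
example_dec_inputlen(int32_t encr_offset, int32_t encr_data_len,
		     int32_t mac_len)
{
	int32_t enc_dlen = encr_offset + encr_data_len;

	return enc_dlen + mac_len;
}
#endif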
1406 static __rte_always_inline void
1407 cpt_zuc_snow3g_enc_prep(uint32_t req_flags,
1410 fc_params_t *params,
1415 int32_t inputlen, outputlen;
1416 struct cpt_ctx *cpt_ctx;
1417 uint32_t mac_len = 0;
1419 struct cpt_request_info *req;
1421 uint32_t encr_offset = 0, auth_offset = 0;
1422 uint32_t encr_data_len = 0, auth_data_len = 0;
1423 int flags, iv_len = 16;
1424 void *m_vaddr, *c_vaddr;
1425 uint64_t m_dma, c_dma, offset_ctrl;
1426 uint64_t *offset_vaddr, offset_dma;
1427 uint32_t *iv_s, iv[4];
1428 vq_cmd_word0_t vq_cmd_w0;
1429 vq_cmd_word3_t vq_cmd_w3;
1430 opcode_info_t opcode;
1432 buf_p = ¶ms->meta_buf;
1433 m_vaddr = buf_p->vaddr;
1434 m_dma = buf_p->dma_addr;
1436 cpt_ctx = params->ctx_buf.vaddr;
1437 flags = cpt_ctx->zsk_flags;
1438 mac_len = cpt_ctx->mac_len;
1439 snow3g = cpt_ctx->snow3g;
1442 * Reserve the initial space that follows the app data, so that the completion
1443 * code and alternate completion code fall in the same cache line as the app data
1445 m_vaddr = (uint8_t *)m_vaddr + COMPLETION_CODE_SIZE;
1446 m_dma += COMPLETION_CODE_SIZE;
1447 size = (uint8_t *)RTE_PTR_ALIGN((uint8_t *)m_vaddr, 16) -
1450 c_vaddr = (uint8_t *)m_vaddr + size;
1451 c_dma = m_dma + size;
1452 size += sizeof(cpt_res_s_t);
1454 m_vaddr = (uint8_t *)m_vaddr + size;
1457 /* Reserve memory for cpt request info */
1460 size = sizeof(struct cpt_request_info);
1461 m_vaddr = (uint8_t *)m_vaddr + size;
1464 opcode.s.major = CPT_MAJOR_OP_ZUC_SNOW3G;
1466 /* indicates CPTR ctx, operation type, KEY & IV mode from DPTR */
1468 opcode.s.minor = ((1 << 7) | (snow3g << 5) | (0 << 4) |
1469 (0 << 3) | (flags & 0x7));
1473 * Microcode expects offsets in bytes
1474 * TODO: Rounding off
1476 auth_data_len = AUTH_DLEN(d_lens);
1479 auth_offset = AUTH_OFFSET(d_offs);
1480 auth_offset = auth_offset / 8;
1482 /* consider iv len */
1483 auth_offset += iv_len;
1485 inputlen = auth_offset + (RTE_ALIGN(auth_data_len, 8) / 8);
1486 outputlen = mac_len;
1488 offset_ctrl = rte_cpu_to_be_64((uint64_t)auth_offset);
1493 * Microcode expects offsets in bytes
1494 * TODO: Rounding off
1496 encr_data_len = ENCR_DLEN(d_lens);
1498 encr_offset = ENCR_OFFSET(d_offs);
1499 encr_offset = encr_offset / 8;
1500 /* consider iv len */
1501 encr_offset += iv_len;
1503 inputlen = encr_offset + (RTE_ALIGN(encr_data_len, 8) / 8);
1504 outputlen = inputlen;
1506 /* iv offset is 0 */
1507 offset_ctrl = rte_cpu_to_be_64((uint64_t)encr_offset << 16);
1511 iv_s = (flags == 0x1) ? params->auth_iv_buf :
1516 * DPDK provides the IV in the form IV3 IV2 IV1 IV0,
1517 * big-endian; the MC needs it as IV0 IV1 IV2 IV3
1520 for (j = 0; j < 4; j++)
1521 iv[j] = iv_s[3 - j];
1523 /* ZUC doesn't need a swap */
1524 for (j = 0; j < 4; j++)
1529 * GP op header, lengths are expected in bits.
1532 vq_cmd_w0.s.param1 = encr_data_len;
1533 vq_cmd_w0.s.param2 = auth_data_len;
1536 * On 83XX we have the limitation that the
1537 * IV & offset control word cannot be part of the instruction
1538 * and must be placed in the data buffer, so we check for
1539 * headroom and only then do the direct-mode processing
1541 if (likely((req_flags & SINGLE_BUF_INPLACE) &&
1542 (req_flags & SINGLE_BUF_HEADTAILROOM))) {
1543 void *dm_vaddr = params->bufs[0].vaddr;
1544 uint64_t dm_dma_addr = params->bufs[0].dma_addr;
1546 * These flags indicate that 24 bytes of headroom and
1547 * 8 bytes of tailroom are available, so we get to do
1548 * DIRECT MODE within this limitation
1551 offset_vaddr = (uint64_t *)((uint8_t *)dm_vaddr -
1552 OFF_CTRL_LEN - iv_len);
1553 offset_dma = dm_dma_addr - OFF_CTRL_LEN - iv_len;
1556 req->ist.ei1 = offset_dma;
1557 /* RPTR should just exclude offset control word */
1558 req->ist.ei2 = dm_dma_addr - iv_len;
1559 req->alternate_caddr = (uint64_t *)((uint8_t *)dm_vaddr
1560 + outputlen - iv_len);
1562 vq_cmd_w0.s.dlen = inputlen + OFF_CTRL_LEN;
1564 vq_cmd_w0.s.opcode = opcode.flags;
1566 if (likely(iv_len)) {
1567 uint32_t *iv_d = (uint32_t *)((uint8_t *)offset_vaddr
1569 memcpy(iv_d, iv, 16);
1572 *offset_vaddr = offset_ctrl;
1574 uint32_t i, g_size_bytes, s_size_bytes;
1575 uint64_t dptr_dma, rptr_dma;
1576 sg_comp_t *gather_comp;
1577 sg_comp_t *scatter_comp;
1581 /* save space for iv */
1582 offset_vaddr = m_vaddr;
1585 m_vaddr = (uint8_t *)m_vaddr + OFF_CTRL_LEN + iv_len;
1586 m_dma += OFF_CTRL_LEN + iv_len;
1588 opcode.s.major |= CPT_DMA_MODE;
1590 vq_cmd_w0.s.opcode = opcode.flags;
1592 /* DPTR has SG list */
1593 in_buffer = m_vaddr;
1596 ((uint16_t *)in_buffer)[0] = 0;
1597 ((uint16_t *)in_buffer)[1] = 0;
1599 /* TODO Add error check if space will be sufficient */
1600 gather_comp = (sg_comp_t *)((uint8_t *)m_vaddr + 8);
1607 /* Offset control word followed by iv */
1609 i = fill_sg_comp(gather_comp, i, offset_dma,
1610 OFF_CTRL_LEN + iv_len);
1612 /* iv offset is 0 */
1613 *offset_vaddr = offset_ctrl;
1615 iv_d = (uint32_t *)((uint8_t *)offset_vaddr + OFF_CTRL_LEN);
1616 memcpy(iv_d, iv, 16);
1619 size = inputlen - iv_len;
1621 i = fill_sg_comp_from_iov(gather_comp, i,
1624 if (unlikely(size)) {
1625 CPT_LOG_DP_ERR("Insufficient buffer space,"
1626 " size %d needed", size);
1630 ((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
1631 g_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
1634 * Output Scatter List
1639 (sg_comp_t *)((uint8_t *)gather_comp + g_size_bytes);
1642 /* IV in SLIST only for EEA3 & UEA2 */
1647 i = fill_sg_comp(scatter_comp, i,
1648 offset_dma + OFF_CTRL_LEN, iv_len);
1651 /* Add output data */
1652 if (req_flags & VALID_MAC_BUF) {
1653 size = outputlen - iv_len - mac_len;
1655 i = fill_sg_comp_from_iov(scatter_comp, i,
1659 if (unlikely(size)) {
1660 CPT_LOG_DP_ERR("Insufficient buffer space,"
1661 " size %d needed", size);
1668 i = fill_sg_comp_from_buf(scatter_comp, i,
1672 /* Output including mac */
1673 size = outputlen - iv_len;
1675 i = fill_sg_comp_from_iov(scatter_comp, i,
1679 if (unlikely(size)) {
1680 CPT_LOG_DP_ERR("Insufficient buffer space,"
1681 " size %d needed", size);
1686 ((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
1687 s_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
1689 size = g_size_bytes + s_size_bytes + SG_LIST_HDR_SIZE;
1691 /* This is the DPTR length in case of SG mode */
1692 vq_cmd_w0.s.dlen = size;
1694 m_vaddr = (uint8_t *)m_vaddr + size;
1697 /* cpt alternate completion address saved earlier */
1698 req->alternate_caddr = (uint64_t *)((uint8_t *)c_vaddr - 8);
1699 *req->alternate_caddr = ~((uint64_t)COMPLETION_CODE_INIT);
1700 rptr_dma = c_dma - 8;
1702 req->ist.ei1 = dptr_dma;
1703 req->ist.ei2 = rptr_dma;
1708 vq_cmd_w3.s.grp = 0;
1709 vq_cmd_w3.s.cptr = params->ctx_buf.dma_addr +
1710 offsetof(struct cpt_ctx, zs_ctx);
1712 /* 16 byte aligned cpt res address */
1713 req->completion_addr = (uint64_t *)((uint8_t *)c_vaddr);
1714 *req->completion_addr = COMPLETION_CODE_INIT;
1715 req->comp_baddr = c_dma;
1717 /* Fill microcode part of instruction */
1718 req->ist.ei0 = vq_cmd_w0.u64;
1719 req->ist.ei3 = vq_cmd_w3.u64;
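
#ifdef CPT_UCODE_EXAMPLE
/*
 * Illustrative sketch (hypothetical guard): ZUC/SNOW 3G offsets and
 * lengths arrive in bits; the microcode wants bytes, with the data
 * length rounded up to a whole byte and the IV counted in the offset.
 */
static int32_t
example_zs_enc_inputlen(uint64_t d_offs, uint64_t d_lens, uint8_t iv_len)
{
	uint32_t encr_offset = ENCR_OFFSET(d_offs) / 8 + iv_len;
	uint32_t encr_data_len = ENCR_DLEN(d_lens);

	return encr_offset + (RTE_ALIGN(encr_data_len, 8) / 8);
}
#endif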
1727 static __rte_always_inline void
1728 cpt_zuc_snow3g_dec_prep(uint32_t req_flags,
1731 fc_params_t *params,
1736 int32_t inputlen = 0, outputlen;
1737 struct cpt_ctx *cpt_ctx;
1738 uint8_t snow3g, iv_len = 16;
1739 struct cpt_request_info *req;
1741 uint32_t encr_offset;
1742 uint32_t encr_data_len;
1744 void *m_vaddr, *c_vaddr;
1745 uint64_t m_dma, c_dma;
1746 uint64_t *offset_vaddr, offset_dma;
1747 uint32_t *iv_s, iv[4], j;
1748 vq_cmd_word0_t vq_cmd_w0;
1749 vq_cmd_word3_t vq_cmd_w3;
1750 opcode_info_t opcode;
1752 buf_p = ¶ms->meta_buf;
1753 m_vaddr = buf_p->vaddr;
1754 m_dma = buf_p->dma_addr;
1757 * Microcode expects offsets in bytes
1758 * TODO: Rounding off
1760 encr_offset = ENCR_OFFSET(d_offs) / 8;
1761 encr_data_len = ENCR_DLEN(d_lens);
1763 cpt_ctx = params->ctx_buf.vaddr;
1764 flags = cpt_ctx->zsk_flags;
1765 snow3g = cpt_ctx->snow3g;
1767 * Reserve the initial space that follows the app data, so that the completion
1768 * code and alternate completion code fall in the same cache line as the app data
1770 m_vaddr = (uint8_t *)m_vaddr + COMPLETION_CODE_SIZE;
1771 m_dma += COMPLETION_CODE_SIZE;
1772 size = (uint8_t *)RTE_PTR_ALIGN((uint8_t *)m_vaddr, 16) -
1775 c_vaddr = (uint8_t *)m_vaddr + size;
1776 c_dma = m_dma + size;
1777 size += sizeof(cpt_res_s_t);
1779 m_vaddr = (uint8_t *)m_vaddr + size;
1782 /* Reserve memory for cpt request info */
1785 size = sizeof(struct cpt_request_info);
1786 m_vaddr = (uint8_t *)m_vaddr + size;
1789 opcode.s.major = CPT_MAJOR_OP_ZUC_SNOW3G;
1791 /* indicates CPTR ctx, operation type, KEY & IV mode from DPTR */
1793 opcode.s.minor = ((1 << 7) | (snow3g << 5) | (0 << 4) |
1794 (0 << 3) | (flags & 0x7));
1796 /* consider iv len */
1797 encr_offset += iv_len;
1799 inputlen = encr_offset +
1800 (RTE_ALIGN(encr_data_len, 8) / 8);
1801 outputlen = inputlen;
1804 iv_s = params->iv_buf;
1807 * DPDK provides the IV in the form IV3 IV2 IV1 IV0,
1808 * big-endian; the MC needs it as IV0 IV1 IV2 IV3
1811 for (j = 0; j < 4; j++)
1812 iv[j] = iv_s[3 - j];
1814 /* ZUC doesn't need a swap */
1815 for (j = 0; j < 4; j++)
1820 * GP op header, lengths are expected in bits.
1823 vq_cmd_w0.s.param1 = encr_data_len;
1826 * On 83XX we have the limitation that the
1827 * IV & offset control word cannot be part of the instruction
1828 * and must be placed in the data buffer, so we check for
1829 * headroom and only then do the direct-mode processing
1831 if (likely((req_flags & SINGLE_BUF_INPLACE) &&
1832 (req_flags & SINGLE_BUF_HEADTAILROOM))) {
1833 void *dm_vaddr = params->bufs[0].vaddr;
1834 uint64_t dm_dma_addr = params->bufs[0].dma_addr;
1836 * These flags indicate that 24 bytes of headroom and
1837 * 8 bytes of tailroom are available, so we get to do
1838 * DIRECT MODE within this limitation
1841 offset_vaddr = (uint64_t *)((uint8_t *)dm_vaddr -
1842 OFF_CTRL_LEN - iv_len);
1843 offset_dma = dm_dma_addr - OFF_CTRL_LEN - iv_len;
1846 req->ist.ei1 = offset_dma;
1847 /* RPTR should just exclude offset control word */
1848 req->ist.ei2 = dm_dma_addr - iv_len;
1849 req->alternate_caddr = (uint64_t *)((uint8_t *)dm_vaddr
1850 + outputlen - iv_len);
1852 vq_cmd_w0.s.dlen = inputlen + OFF_CTRL_LEN;
1854 vq_cmd_w0.s.opcode = opcode.flags;
1856 if (likely(iv_len)) {
1857 uint32_t *iv_d = (uint32_t *)((uint8_t *)offset_vaddr
1859 memcpy(iv_d, iv, 16);
1862 /* iv offset is 0 */
1863 *offset_vaddr = rte_cpu_to_be_64((uint64_t)encr_offset << 16);
1865 uint32_t i, g_size_bytes, s_size_bytes;
1866 uint64_t dptr_dma, rptr_dma;
1867 sg_comp_t *gather_comp;
1868 sg_comp_t *scatter_comp;
1872 /* save space for offset and iv... */
1873 offset_vaddr = m_vaddr;
1876 m_vaddr = (uint8_t *)m_vaddr + OFF_CTRL_LEN + iv_len;
1877 m_dma += OFF_CTRL_LEN + iv_len;
1879 opcode.s.major |= CPT_DMA_MODE;
1881 vq_cmd_w0.s.opcode = opcode.flags;
1883 /* DPTR has SG list */
1884 in_buffer = m_vaddr;
1887 ((uint16_t *)in_buffer)[0] = 0;
1888 ((uint16_t *)in_buffer)[1] = 0;
1890 /* TODO Add error check if space will be sufficient */
1891 gather_comp = (sg_comp_t *)((uint8_t *)m_vaddr + 8);
1898 /* Offset control word */
1900 /* iv offset is 0 */
1901 *offset_vaddr = rte_cpu_to_be_64((uint64_t)encr_offset << 16);
1903 i = fill_sg_comp(gather_comp, i, offset_dma,
1904 OFF_CTRL_LEN + iv_len);
1906 iv_d = (uint32_t *)((uint8_t *)offset_vaddr + OFF_CTRL_LEN);
1907 memcpy(iv_d, iv, 16);
1909 /* Add input data */
1910 size = inputlen - iv_len;
1912 i = fill_sg_comp_from_iov(gather_comp, i,
1915 if (unlikely(size)) {
1916 CPT_LOG_DP_ERR("Insufficient buffer space,"
1917 " size %d needed", size);
1921 ((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
1922 g_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
1925 * Output Scatter List
1930 (sg_comp_t *)((uint8_t *)gather_comp + g_size_bytes);
1933 i = fill_sg_comp(scatter_comp, i,
1934 offset_dma + OFF_CTRL_LEN,
1937 /* Add output data */
1938 size = outputlen - iv_len;
1940 i = fill_sg_comp_from_iov(scatter_comp, i,
1944 if (unlikely(size)) {
1945 CPT_LOG_DP_ERR("Insufficient buffer space,"
1946 " size %d needed", size);
1950 ((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
1951 s_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
1953 size = g_size_bytes + s_size_bytes + SG_LIST_HDR_SIZE;
1955 /* This is the DPTR length in case of SG mode */
1956 vq_cmd_w0.s.dlen = size;
1958 m_vaddr = (uint8_t *)m_vaddr + size;
1961 /* cpt alternate completion address saved earlier */
1962 req->alternate_caddr = (uint64_t *)((uint8_t *)c_vaddr - 8);
1963 *req->alternate_caddr = ~((uint64_t)COMPLETION_CODE_INIT);
1964 rptr_dma = c_dma - 8;
1966 req->ist.ei1 = dptr_dma;
1967 req->ist.ei2 = rptr_dma;
1972 vq_cmd_w3.s.grp = 0;
1973 vq_cmd_w3.s.cptr = params->ctx_buf.dma_addr +
1974 offsetof(struct cpt_ctx, zs_ctx);
1976 /* 16 byte aligned cpt res address */
1977 req->completion_addr = (uint64_t *)((uint8_t *)c_vaddr);
1978 *req->completion_addr = COMPLETION_CODE_INIT;
1979 req->comp_baddr = c_dma;
1981 /* Fill microcode part of instruction */
1982 req->ist.ei0 = vq_cmd_w0.u64;
1983 req->ist.ei3 = vq_cmd_w3.u64;
1991 static __rte_always_inline void
1992 cpt_kasumi_enc_prep(uint32_t req_flags,
1995 fc_params_t *params,
2000 int32_t inputlen = 0, outputlen = 0;
2001 struct cpt_ctx *cpt_ctx;
2002 uint32_t mac_len = 0;
2004 struct cpt_request_info *req;
2006 uint32_t encr_offset, auth_offset;
2007 uint32_t encr_data_len, auth_data_len;
2009 uint8_t *iv_s, *iv_d, iv_len = 8;
2011 void *m_vaddr, *c_vaddr;
2012 uint64_t m_dma, c_dma;
2013 uint64_t *offset_vaddr, offset_dma;
2014 vq_cmd_word0_t vq_cmd_w0;
2015 vq_cmd_word3_t vq_cmd_w3;
2016 opcode_info_t opcode;
2018 uint32_t g_size_bytes, s_size_bytes;
2019 uint64_t dptr_dma, rptr_dma;
2020 sg_comp_t *gather_comp;
2021 sg_comp_t *scatter_comp;
2023 buf_p = ¶ms->meta_buf;
2024 m_vaddr = buf_p->vaddr;
2025 m_dma = buf_p->dma_addr;
2027 encr_offset = ENCR_OFFSET(d_offs) / 8;
2028 auth_offset = AUTH_OFFSET(d_offs) / 8;
2029 encr_data_len = ENCR_DLEN(d_lens);
2030 auth_data_len = AUTH_DLEN(d_lens);
2032 cpt_ctx = params->ctx_buf.vaddr;
2033 flags = cpt_ctx->zsk_flags;
2034 mac_len = cpt_ctx->mac_len;
2037 iv_s = params->iv_buf;
2039 iv_s = params->auth_iv_buf;
2041 dir = iv_s[8] & 0x1;
2044 * Reserve the initial space that follows the app data, so that the completion
2045 * code and alternate completion code fall in the same cache line as the app data
2047 m_vaddr = (uint8_t *)m_vaddr + COMPLETION_CODE_SIZE;
2048 m_dma += COMPLETION_CODE_SIZE;
2049 size = (uint8_t *)RTE_PTR_ALIGN((uint8_t *)m_vaddr, 16) -
2052 c_vaddr = (uint8_t *)m_vaddr + size;
2053 c_dma = m_dma + size;
2054 size += sizeof(cpt_res_s_t);
2056 m_vaddr = (uint8_t *)m_vaddr + size;
2059 /* Reserve memory for cpt request info */
2062 size = sizeof(struct cpt_request_info);
2063 m_vaddr = (uint8_t *)m_vaddr + size;
2066 opcode.s.major = CPT_MAJOR_OP_KASUMI | CPT_DMA_MODE;
2068 /* indicates ECB/CBC, direction, ctx from cptr, iv from dptr */
2069 opcode.s.minor = ((1 << 6) | (cpt_ctx->k_ecb << 5) |
2070 (dir << 4) | (0 << 3) | (flags & 0x7));
2073 * GP op header, lengths are expected in bits.
2076 vq_cmd_w0.s.param1 = encr_data_len;
2077 vq_cmd_w0.s.param2 = auth_data_len;
2078 vq_cmd_w0.s.opcode = opcode.flags;
2080 /* consider iv len */
2082 encr_offset += iv_len;
2083 auth_offset += iv_len;
2086 /* save space for offset ctrl and iv */
2087 offset_vaddr = m_vaddr;
2090 m_vaddr = (uint8_t *)m_vaddr + OFF_CTRL_LEN + iv_len;
2091 m_dma += OFF_CTRL_LEN + iv_len;
2093 /* DPTR has SG list */
2094 in_buffer = m_vaddr;
2097 ((uint16_t *)in_buffer)[0] = 0;
2098 ((uint16_t *)in_buffer)[1] = 0;
2100 /* TODO Add error check if space will be sufficient */
2101 gather_comp = (sg_comp_t *)((uint8_t *)m_vaddr + 8);
2108 /* Offset control word followed by iv */
2111 inputlen = encr_offset + (RTE_ALIGN(encr_data_len, 8) / 8);
2112 outputlen = inputlen;
2113 /* iv offset is 0 */
2114 *offset_vaddr = rte_cpu_to_be_64((uint64_t)encr_offset << 16);
2116 inputlen = auth_offset + (RTE_ALIGN(auth_data_len, 8) / 8);
2117 outputlen = mac_len;
2118 /* iv offset is 0 */
2119 *offset_vaddr = rte_cpu_to_be_64((uint64_t)auth_offset);
2122 i = fill_sg_comp(gather_comp, i, offset_dma, OFF_CTRL_LEN + iv_len);
2125 iv_d = (uint8_t *)offset_vaddr + OFF_CTRL_LEN;
2126 memcpy(iv_d, iv_s, iv_len);
2129 size = inputlen - iv_len;
2131 i = fill_sg_comp_from_iov(gather_comp, i,
2135 if (unlikely(size)) {
2136 CPT_LOG_DP_ERR("Insufficient buffer space,"
2137 " size %d needed", size);
2141 ((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
2142 g_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
2145 * Output Scatter List
2149 scatter_comp = (sg_comp_t *)((uint8_t *)gather_comp + g_size_bytes);
2152 /* IV in SLIST only for F8 */
2158 i = fill_sg_comp(scatter_comp, i,
2159 offset_dma + OFF_CTRL_LEN,
2163 /* Add output data */
2164 if (req_flags & VALID_MAC_BUF) {
2165 size = outputlen - iv_len - mac_len;
2167 i = fill_sg_comp_from_iov(scatter_comp, i,
2171 if (unlikely(size)) {
2172 CPT_LOG_DP_ERR("Insufficient buffer space,"
2173 " size %d needed", size);
2180 i = fill_sg_comp_from_buf(scatter_comp, i,
2184 /* Output including mac */
2185 size = outputlen - iv_len;
2187 i = fill_sg_comp_from_iov(scatter_comp, i,
2191 if (unlikely(size)) {
2192 CPT_LOG_DP_ERR("Insufficient buffer space,"
2193 " size %d needed", size);
2198 ((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
2199 s_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
2201 size = g_size_bytes + s_size_bytes + SG_LIST_HDR_SIZE;
2203 /* This is the DPTR length in case of SG mode */
2204 vq_cmd_w0.s.dlen = size;
2206 m_vaddr = (uint8_t *)m_vaddr + size;
2209 /* cpt alternate completion address saved earlier */
2210 req->alternate_caddr = (uint64_t *)((uint8_t *)c_vaddr - 8);
2211 *req->alternate_caddr = ~((uint64_t)COMPLETION_CODE_INIT);
2212 rptr_dma = c_dma - 8;
2214 req->ist.ei1 = dptr_dma;
2215 req->ist.ei2 = rptr_dma;
2219 vq_cmd_w3.s.grp = 0;
2220 vq_cmd_w3.s.cptr = params->ctx_buf.dma_addr +
2221 offsetof(struct cpt_ctx, k_ctx);
2223 /* 16 byte aligned cpt res address */
2224 req->completion_addr = (uint64_t *)((uint8_t *)c_vaddr);
2225 *req->completion_addr = COMPLETION_CODE_INIT;
2226 req->comp_baddr = c_dma;
2228 /* Fill microcode part of instruction */
2229 req->ist.ei0 = vq_cmd_w0.u64;
2230 req->ist.ei3 = vq_cmd_w3.u64;
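
#ifdef CPT_UCODE_EXAMPLE
/*
 * Illustrative sketch (hypothetical guard) of the KASUMI minor opcode
 * packing above: bit 6 selects IV from DPTR, bit 5 ECB vs CBC, bit 4
 * the direction bit pulled from the IV, bits 2:0 the zsk_flags.
 */
static uint8_t
example_kasumi_minor(uint8_t k_ecb, uint8_t dir, uint8_t flags)
{
	return (1 << 6) | (k_ecb << 5) | (dir << 4) | (0 << 3) |
	       (flags & 0x7);
}
#endif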
2238 static __rte_always_inline void
2239 cpt_kasumi_dec_prep(uint64_t d_offs,
2241 fc_params_t *params,
2246 int32_t inputlen = 0, outputlen;
2247 struct cpt_ctx *cpt_ctx;
2248 uint8_t i = 0, iv_len = 8;
2249 struct cpt_request_info *req;
2251 uint32_t encr_offset;
2252 uint32_t encr_data_len;
2255 void *m_vaddr, *c_vaddr;
2256 uint64_t m_dma, c_dma;
2257 uint64_t *offset_vaddr, offset_dma;
2258 vq_cmd_word0_t vq_cmd_w0;
2259 vq_cmd_word3_t vq_cmd_w3;
2260 opcode_info_t opcode;
2262 uint32_t g_size_bytes, s_size_bytes;
2263 uint64_t dptr_dma, rptr_dma;
2264 sg_comp_t *gather_comp;
2265 sg_comp_t *scatter_comp;
2267 buf_p = ¶ms->meta_buf;
2268 m_vaddr = buf_p->vaddr;
2269 m_dma = buf_p->dma_addr;
2271 encr_offset = ENCR_OFFSET(d_offs) / 8;
2272 encr_data_len = ENCR_DLEN(d_lens);
2274 cpt_ctx = params->ctx_buf.vaddr;
2275 flags = cpt_ctx->zsk_flags;
2277 * Reserve the initial space that follows the app data, so that the completion
2278 * code and alternate completion code fall in the same cache line as the app data
2280 m_vaddr = (uint8_t *)m_vaddr + COMPLETION_CODE_SIZE;
2281 m_dma += COMPLETION_CODE_SIZE;
2282 size = (uint8_t *)RTE_PTR_ALIGN((uint8_t *)m_vaddr, 16) -
2285 c_vaddr = (uint8_t *)m_vaddr + size;
2286 c_dma = m_dma + size;
2287 size += sizeof(cpt_res_s_t);
2289 m_vaddr = (uint8_t *)m_vaddr + size;
2292 /* Reserve memory for cpt request info */
2295 size = sizeof(struct cpt_request_info);
2296 m_vaddr = (uint8_t *)m_vaddr + size;
2299 opcode.s.major = CPT_MAJOR_OP_KASUMI | CPT_DMA_MODE;
2301 /* indicates ECB/CBC, direction, ctx from cptr, iv from dptr */
2302 opcode.s.minor = ((1 << 6) | (cpt_ctx->k_ecb << 5) |
2303 (dir << 4) | (0 << 3) | (flags & 0x7));
2306 * GP op header, lengths are expected in bits.
2309 vq_cmd_w0.s.param1 = encr_data_len;
2310 vq_cmd_w0.s.opcode = opcode.flags;
2312 /* consider iv len */
2313 encr_offset += iv_len;
2315 inputlen = iv_len + (RTE_ALIGN(encr_data_len, 8) / 8);
2316 outputlen = inputlen;
2318 /* save space for offset ctrl & iv */
2319 offset_vaddr = m_vaddr;
2322 m_vaddr = (uint8_t *)m_vaddr + OFF_CTRL_LEN + iv_len;
2323 m_dma += OFF_CTRL_LEN + iv_len;
2325 /* DPTR has SG list */
2326 in_buffer = m_vaddr;
2329 ((uint16_t *)in_buffer)[0] = 0;
2330 ((uint16_t *)in_buffer)[1] = 0;
2332 /* TODO Add error check if space will be sufficient */
2333 gather_comp = (sg_comp_t *)((uint8_t *)m_vaddr + 8);
2340 /* Offset control word followed by iv */
2341 *offset_vaddr = rte_cpu_to_be_64((uint64_t)encr_offset << 16);
2343 i = fill_sg_comp(gather_comp, i, offset_dma, OFF_CTRL_LEN + iv_len);
2346 memcpy((uint8_t *)offset_vaddr + OFF_CTRL_LEN,
2347 params->iv_buf, iv_len);
2349 /* Add input data */
2350 size = inputlen - iv_len;
2352 i = fill_sg_comp_from_iov(gather_comp, i,
2355 if (unlikely(size)) {
2356 CPT_LOG_DP_ERR("Insufficient buffer space,"
2357 " size %d needed", size);
2361 ((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
2362 g_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
2365 * Output Scatter List
2369 scatter_comp = (sg_comp_t *)((uint8_t *)gather_comp + g_size_bytes);
2372 i = fill_sg_comp(scatter_comp, i,
2373 offset_dma + OFF_CTRL_LEN,
2376 /* Add output data */
2377 size = outputlen - iv_len;
2379 i = fill_sg_comp_from_iov(scatter_comp, i,
2382 if (unlikely(size)) {
2383 CPT_LOG_DP_ERR("Insufficient buffer space,"
2384 " size %d needed", size);
2388 ((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
2389 s_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
2391 size = g_size_bytes + s_size_bytes + SG_LIST_HDR_SIZE;
2393 /* This is the DPTR length in case of SG mode */
2394 vq_cmd_w0.s.dlen = size;
2396 m_vaddr = (uint8_t *)m_vaddr + size;
2399 /* cpt alternate completion address saved earlier */
2400 req->alternate_caddr = (uint64_t *)((uint8_t *)c_vaddr - 8);
2401 *req->alternate_caddr = ~((uint64_t)COMPLETION_CODE_INIT);
2402 rptr_dma = c_dma - 8;
2404 req->ist.ei1 = dptr_dma;
2405 req->ist.ei2 = rptr_dma;
2409 vq_cmd_w3.s.grp = 0;
2410 vq_cmd_w3.s.cptr = params->ctx_buf.dma_addr +
2411 offsetof(struct cpt_ctx, k_ctx);
2413 /* 16 byte aligned cpt res address */
2414 req->completion_addr = (uint64_t *)((uint8_t *)c_vaddr);
2415 *req->completion_addr = COMPLETION_CODE_INIT;
2416 req->comp_baddr = c_dma;
2418 /* Fill microcode part of instruction */
2419 req->ist.ei0 = vq_cmd_w0.u64;
2420 req->ist.ei3 = vq_cmd_w3.u64;
2428 static __rte_always_inline void *
2429 cpt_fc_dec_hmac_prep(uint32_t flags,
2432 fc_params_t *fc_params,
2435 struct cpt_ctx *ctx = fc_params->ctx_buf.vaddr;
2437 void *prep_req = NULL;
2439 fc_type = ctx->fc_type;
2441 if (likely(fc_type == FC_GEN)) {
2442 cpt_dec_hmac_prep(flags, d_offs, d_lens, fc_params, op,
2444 } else if (fc_type == ZUC_SNOW3G) {
2445 cpt_zuc_snow3g_dec_prep(flags, d_offs, d_lens, fc_params, op,
2447 } else if (fc_type == KASUMI) {
2448 cpt_kasumi_dec_prep(d_offs, d_lens, fc_params, op, &prep_req);
2452 * For the AUTH_ONLY case,
2453 * the MC supports only digest generation; verification
2454 * must be done in software via memcmp()
2460 static __rte_always_inline void *__hot
2461 cpt_fc_enc_hmac_prep(uint32_t flags, uint64_t d_offs, uint64_t d_lens,
2462 fc_params_t *fc_params, void *op)
2464 struct cpt_ctx *ctx = fc_params->ctx_buf.vaddr;
2466 void *prep_req = NULL;
2468 fc_type = ctx->fc_type;
2470 /* Common API for the rest of the ops */
2471 if (likely(fc_type == FC_GEN)) {
2472 cpt_enc_hmac_prep(flags, d_offs, d_lens, fc_params, op,
2474 } else if (fc_type == ZUC_SNOW3G) {
2475 cpt_zuc_snow3g_enc_prep(flags, d_offs, d_lens, fc_params, op,
2477 } else if (fc_type == KASUMI) {
2478 cpt_kasumi_enc_prep(flags, d_offs, d_lens, fc_params, op,
2480 } else if (fc_type == HASH_HMAC) {
2481 cpt_digest_gen_prep(flags, d_lens, fc_params, op, &prep_req);
2487 static __rte_always_inline int
2488 cpt_fc_auth_set_key(void *ctx, auth_type_t type, const uint8_t *key,
2489 uint16_t key_len, uint16_t mac_len)
2491 struct cpt_ctx *cpt_ctx = ctx;
2492 mc_fc_context_t *fctx = &cpt_ctx->fctx;
2494 if ((type >= ZUC_EIA3) && (type <= KASUMI_F9_ECB)) {
2499 /* No support for AEAD yet */
2500 if (cpt_ctx->enc_cipher)
2502 /* For ZUC/SNOW3G/Kasumi */
2505 cpt_ctx->snow3g = 1;
2506 gen_key_snow3g(key, keyx);
2507 memcpy(cpt_ctx->zs_ctx.ci_key, keyx, key_len);
2508 cpt_ctx->fc_type = ZUC_SNOW3G;
2509 cpt_ctx->zsk_flags = 0x1;
2512 cpt_ctx->snow3g = 0;
2513 memcpy(cpt_ctx->zs_ctx.ci_key, key, key_len);
2514 memcpy(cpt_ctx->zs_ctx.zuc_const, zuc_d, 32);
2515 cpt_ctx->fc_type = ZUC_SNOW3G;
2516 cpt_ctx->zsk_flags = 0x1;
2519 /* Kasumi ECB mode */
2521 memcpy(cpt_ctx->k_ctx.ci_key, key, key_len);
2522 cpt_ctx->fc_type = KASUMI;
2523 cpt_ctx->zsk_flags = 0x1;
2526 memcpy(cpt_ctx->k_ctx.ci_key, key, key_len);
2527 cpt_ctx->fc_type = KASUMI;
2528 cpt_ctx->zsk_flags = 0x1;
2533 cpt_ctx->mac_len = 4;
2534 cpt_ctx->hash_type = type;
2538 if (!(cpt_ctx->fc_type == FC_GEN && !type)) {
2539 if (!cpt_ctx->fc_type || !cpt_ctx->enc_cipher)
2540 cpt_ctx->fc_type = HASH_HMAC;
2543 if (cpt_ctx->fc_type == FC_GEN && key_len > 64)
2546 /* For GMAC auth, cipher must be NULL */
2547 if (type == GMAC_TYPE)
2548 fctx->enc.enc_cipher = 0;
2550 fctx->enc.hash_type = cpt_ctx->hash_type = type;
2551 fctx->enc.mac_len = cpt_ctx->mac_len = mac_len;
2555 memset(cpt_ctx->auth_key, 0, sizeof(cpt_ctx->auth_key));
2556 memcpy(cpt_ctx->auth_key, key, key_len);
2557 cpt_ctx->auth_key_len = key_len;
2558 memset(fctx->hmac.ipad, 0, sizeof(fctx->hmac.ipad));
2559 memset(fctx->hmac.opad, 0, sizeof(fctx->hmac.opad));
2562 memcpy(fctx->hmac.opad, key, key_len);
2563 fctx->enc.auth_input_type = 1;
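
#ifdef CPT_UCODE_EXAMPLE
/*
 * Illustrative sketch (hypothetical guard): programming a SHA1-HMAC
 * authentication key with a 20-byte digest into a context.
 */
static int
example_auth_set_key(struct cpt_ctx *cpt_ctx)
{
	const uint8_t key[20] = { 0 };

	return cpt_fc_auth_set_key(cpt_ctx, SHA1_TYPE, key, sizeof(key),
				   20);
}
#endif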
2568 static __rte_always_inline int
2569 fill_sess_aead(struct rte_crypto_sym_xform *xform,
2570 struct cpt_sess_misc *sess)
2572 struct rte_crypto_aead_xform *aead_form;
2573 cipher_type_t enc_type = 0; /* NULL Cipher type */
2574 auth_type_t auth_type = 0; /* NULL Auth type */
2575 uint32_t cipher_key_len = 0;
2576 uint8_t aes_gcm = 0;
2577 void *ctx = SESS_PRIV(sess);
2578 aead_form = &xform->aead;
2580 if (aead_form->op == RTE_CRYPTO_AEAD_OP_ENCRYPT &&
2581 aead_form->algo == RTE_CRYPTO_AEAD_AES_GCM) {
2582 sess->cpt_op |= CPT_OP_CIPHER_ENCRYPT;
2583 sess->cpt_op |= CPT_OP_AUTH_GENERATE;
2584 } else if (aead_form->op == RTE_CRYPTO_AEAD_OP_DECRYPT &&
2585 aead_form->algo == RTE_CRYPTO_AEAD_AES_GCM) {
2586 sess->cpt_op |= CPT_OP_CIPHER_DECRYPT;
2587 sess->cpt_op |= CPT_OP_AUTH_VERIFY;
2589 CPT_LOG_DP_ERR("Unknown aead operation");
2592 switch (aead_form->algo) {
2593 case RTE_CRYPTO_AEAD_AES_GCM:
2595 cipher_key_len = 16;
2598 case RTE_CRYPTO_AEAD_AES_CCM:
2599 CPT_LOG_DP_ERR("Crypto: Unsupported cipher algo %u",
2603 CPT_LOG_DP_ERR("Crypto: Undefined cipher algo %u specified",
2607 if (aead_form->key.length < cipher_key_len) {
2608 CPT_LOG_DP_ERR("Invalid cipher params keylen %lu",
2609 (unsigned long)aead_form->key.length);
2613 sess->aes_gcm = aes_gcm;
2614 sess->mac_len = aead_form->digest_length;
2615 sess->iv_offset = aead_form->iv.offset;
2616 sess->iv_length = aead_form->iv.length;
2617 sess->aad_length = aead_form->aad_length;
2619 if (unlikely(cpt_fc_ciph_set_key(ctx, enc_type, aead_form->key.data,
2620 aead_form->key.length, NULL)))
2623 if (unlikely(cpt_fc_auth_set_key(ctx, auth_type, NULL, 0,
2624 aead_form->digest_length)))
2630 static __rte_always_inline int
2631 fill_sess_cipher(struct rte_crypto_sym_xform *xform,
2632 struct cpt_sess_misc *sess)
2634 struct rte_crypto_cipher_xform *c_form;
2635 cipher_type_t enc_type = 0; /* NULL Cipher type */
2636 uint32_t cipher_key_len = 0;
2637 uint8_t zsk_flag = 0, aes_ctr = 0, is_null = 0;
2639 c_form = &xform->cipher;
2641 if (c_form->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
2642 sess->cpt_op |= CPT_OP_CIPHER_ENCRYPT;
2643 else if (c_form->op == RTE_CRYPTO_CIPHER_OP_DECRYPT)
2644 sess->cpt_op |= CPT_OP_CIPHER_DECRYPT;
2646 CPT_LOG_DP_ERR("Unknown cipher operation");
2650 switch (c_form->algo) {
2651 case RTE_CRYPTO_CIPHER_AES_CBC:
2653 cipher_key_len = 16;
2655 case RTE_CRYPTO_CIPHER_3DES_CBC:
2656 enc_type = DES3_CBC;
2657 cipher_key_len = 24;
2659 case RTE_CRYPTO_CIPHER_DES_CBC:
2660 /* DES is implemented using 3DES in hardware */
2661 enc_type = DES3_CBC;
2664 case RTE_CRYPTO_CIPHER_AES_CTR:
2666 cipher_key_len = 16;
2669 case RTE_CRYPTO_CIPHER_NULL:
2673 case RTE_CRYPTO_CIPHER_KASUMI_F8:
2674 enc_type = KASUMI_F8_ECB;
2675 cipher_key_len = 16;
2678 case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
2679 enc_type = SNOW3G_UEA2;
2680 cipher_key_len = 16;
2683 case RTE_CRYPTO_CIPHER_ZUC_EEA3:
2684 enc_type = ZUC_EEA3;
2685 cipher_key_len = 16;
2688 case RTE_CRYPTO_CIPHER_AES_XTS:
2690 cipher_key_len = 16;
2692 case RTE_CRYPTO_CIPHER_3DES_ECB:
2693 enc_type = DES3_ECB;
2694 cipher_key_len = 24;
2696 case RTE_CRYPTO_CIPHER_AES_ECB:
2698 cipher_key_len = 16;
2700 case RTE_CRYPTO_CIPHER_3DES_CTR:
2701 case RTE_CRYPTO_CIPHER_AES_F8:
2702 case RTE_CRYPTO_CIPHER_ARC4:
2703 CPT_LOG_DP_ERR("Crypto: Unsupported cipher algo %u",
2707 CPT_LOG_DP_ERR("Crypto: Undefined cipher algo %u specified",
2712 if (c_form->key.length < cipher_key_len) {
2713 CPT_LOG_DP_ERR("Invalid cipher params keylen %lu",
2714 (unsigned long) c_form->key.length);
2718 sess->zsk_flag = zsk_flag;
2720 sess->aes_ctr = aes_ctr;
2721 sess->iv_offset = c_form->iv.offset;
2722 sess->iv_length = c_form->iv.length;
2723 sess->is_null = is_null;
2725 if (unlikely(cpt_fc_ciph_set_key(SESS_PRIV(sess), enc_type,
2726 c_form->key.data, c_form->key.length, NULL)))
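
/*
 * Illustrative sketch (hypothetical field values): a cipher-only xform as
 * consumed by fill_sess_cipher(). For AES-CTR with a 12-byte nonce, the
 * datapath below rebuilds the 16-byte counter block itself, so a 16-byte
 * per-op IV is the straightforward layout.
 */
static __rte_always_inline void
example_aes_ctr_xform(struct rte_crypto_sym_xform *xform, uint8_t *key)
{
	xform->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
	xform->next = NULL;
	xform->cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
	xform->cipher.algo = RTE_CRYPTO_CIPHER_AES_CTR;
	xform->cipher.key.data = key;
	xform->cipher.key.length = 16;	/* AES-128; 24/32 also accepted */
	xform->cipher.iv.offset = sizeof(struct rte_crypto_op);
	xform->cipher.iv.length = 16;
}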

static __rte_always_inline int
fill_sess_auth(struct rte_crypto_sym_xform *xform,
	       struct cpt_sess_misc *sess)
{
	struct rte_crypto_auth_xform *a_form;
	auth_type_t auth_type = 0; /* NULL Auth type */
	uint8_t zsk_flag = 0, aes_gcm = 0, is_null = 0;

	a_form = &xform->auth;

	if (a_form->op == RTE_CRYPTO_AUTH_OP_VERIFY)
		sess->cpt_op |= CPT_OP_AUTH_VERIFY;
	else if (a_form->op == RTE_CRYPTO_AUTH_OP_GENERATE)
		sess->cpt_op |= CPT_OP_AUTH_GENERATE;
	else {
		CPT_LOG_DP_ERR("Unknown auth operation");
		return -1;
	}

	switch (a_form->algo) {
	case RTE_CRYPTO_AUTH_SHA1_HMAC:
	case RTE_CRYPTO_AUTH_SHA1:
		auth_type = SHA1_TYPE;
		break;
	case RTE_CRYPTO_AUTH_SHA256_HMAC:
	case RTE_CRYPTO_AUTH_SHA256:
		auth_type = SHA2_SHA256;
		break;
	case RTE_CRYPTO_AUTH_SHA512_HMAC:
	case RTE_CRYPTO_AUTH_SHA512:
		auth_type = SHA2_SHA512;
		break;
	case RTE_CRYPTO_AUTH_AES_GMAC:
		auth_type = GMAC_TYPE;
		aes_gcm = 1;
		break;
	case RTE_CRYPTO_AUTH_SHA224_HMAC:
	case RTE_CRYPTO_AUTH_SHA224:
		auth_type = SHA2_SHA224;
		break;
	case RTE_CRYPTO_AUTH_SHA384_HMAC:
	case RTE_CRYPTO_AUTH_SHA384:
		auth_type = SHA2_SHA384;
		break;
	case RTE_CRYPTO_AUTH_MD5_HMAC:
	case RTE_CRYPTO_AUTH_MD5:
		auth_type = MD5_TYPE;
		break;
	case RTE_CRYPTO_AUTH_KASUMI_F9:
		auth_type = KASUMI_F9_ECB;
		/*
		 * Indicate that direction needs to be taken out
		 * from end of src
		 */
		zsk_flag = K_F9;
		break;
	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
		auth_type = SNOW3G_UIA2;
		zsk_flag = ZS_IA;
		break;
	case RTE_CRYPTO_AUTH_ZUC_EIA3:
		auth_type = ZUC_EIA3;
		zsk_flag = ZS_IA;
		break;
	case RTE_CRYPTO_AUTH_NULL:
		auth_type = 0;
		is_null = 1;
		break;
	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
	case RTE_CRYPTO_AUTH_AES_CMAC:
	case RTE_CRYPTO_AUTH_AES_CBC_MAC:
		CPT_LOG_DP_ERR("Crypto: Unsupported hash algo %u",
			       a_form->algo);
		return -1;
	default:
		CPT_LOG_DP_ERR("Crypto: Undefined Hash algo %u specified",
			       a_form->algo);
		return -1;
	}

	sess->zsk_flag = zsk_flag;
	sess->aes_gcm = aes_gcm;
	sess->mac_len = a_form->digest_length;
	sess->is_null = is_null;
	if (zsk_flag) {
		sess->auth_iv_offset = a_form->iv.offset;
		sess->auth_iv_length = a_form->iv.length;
	}
	if (unlikely(cpt_fc_auth_set_key(SESS_PRIV(sess), auth_type,
					 a_form->key.data, a_form->key.length,
					 a_form->digest_length)))
		return -1;

	return 0;
}
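
/*
 * Illustrative sketch (hypothetical field values): SNOW3G UIA2 auth xform.
 * Only the wireless algorithms set zsk_flag, which is why the auth IV
 * offset/length are recorded for them alone in fill_sess_auth().
 */
static __rte_always_inline void
example_snow3g_uia2_xform(struct rte_crypto_sym_xform *xform, uint8_t *key)
{
	xform->type = RTE_CRYPTO_SYM_XFORM_AUTH;
	xform->next = NULL;
	xform->auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
	xform->auth.algo = RTE_CRYPTO_AUTH_SNOW3G_UIA2;
	xform->auth.key.data = key;
	xform->auth.key.length = 16;
	xform->auth.iv.offset = sizeof(struct rte_crypto_op);
	xform->auth.iv.length = 16;
	xform->auth.digest_length = 4;	/* 32-bit MAC-I */
}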

static __rte_always_inline int
fill_sess_gmac(struct rte_crypto_sym_xform *xform,
	       struct cpt_sess_misc *sess)
{
	struct rte_crypto_auth_xform *a_form;
	cipher_type_t enc_type = 0; /* NULL Cipher type */
	auth_type_t auth_type = 0; /* NULL Auth type */
	void *ctx = SESS_PRIV(sess);

	a_form = &xform->auth;

	if (a_form->op == RTE_CRYPTO_AUTH_OP_GENERATE)
		sess->cpt_op |= CPT_OP_ENCODE;
	else if (a_form->op == RTE_CRYPTO_AUTH_OP_VERIFY)
		sess->cpt_op |= CPT_OP_DECODE;
	else {
		CPT_LOG_DP_ERR("Unknown auth operation");
		return -1;
	}

	switch (a_form->algo) {
	case RTE_CRYPTO_AUTH_AES_GMAC:
		enc_type = AES_GCM;
		auth_type = GMAC_TYPE;
		break;
	default:
		CPT_LOG_DP_ERR("Crypto: Undefined cipher algo %u specified",
			       a_form->algo);
		return -1;
	}

	sess->zsk_flag = 0;
	sess->aes_gcm = 0;
	sess->is_gmac = 1;
	sess->iv_offset = a_form->iv.offset;
	sess->iv_length = a_form->iv.length;
	sess->mac_len = a_form->digest_length;

	if (unlikely(cpt_fc_ciph_set_key(ctx, enc_type, a_form->key.data,
					 a_form->key.length, NULL)))
		return -1;

	if (unlikely(cpt_fc_auth_set_key(ctx, auth_type, NULL, 0,
					 a_form->digest_length)))
		return -1;

	return 0;
}

static __rte_always_inline void *
alloc_op_meta(struct rte_mbuf *m_src,
	      buf_ptr_t *buf,
	      int32_t len,
	      struct rte_mempool *cpt_meta_pool)
{
	uint8_t *mdata = NULL;

#ifndef CPT_ALWAYS_USE_SEPARATE_BUF
	if (likely(m_src && (m_src->nb_segs == 1))) {
		int32_t tailroom;
		phys_addr_t mphys;

		/* Check if tailroom is sufficient to hold meta data */
		tailroom = rte_pktmbuf_tailroom(m_src);
		if (likely(tailroom > len + 8)) {
			mdata = (uint8_t *)m_src->buf_addr + m_src->buf_len;
			mphys = m_src->buf_physaddr + m_src->buf_len;
			mdata -= len;
			mphys -= len;
			buf->vaddr = mdata;
			buf->dma_addr = mphys;
			buf->size = len;
			/* Indicate that this is a mbuf allocated mdata */
			mdata = (uint8_t *)((uint64_t)mdata | 1ull);
			return mdata;
		}
	}
#else
	RTE_SET_USED(m_src);
#endif

	if (unlikely(rte_mempool_get(cpt_meta_pool, (void **)&mdata) < 0))
		return NULL;

	buf->vaddr = mdata;
	buf->dma_addr = rte_mempool_virt2iova(mdata);
	buf->size = len;

	return mdata;
}
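
/*
 * The returned pointer is tagged in bit 0 when the metadata was carved out
 * of the mbuf tailroom; buffer alignment keeps that bit clear otherwise.
 * A minimal restatement of the convention, for illustration only:
 */
static __rte_always_inline int
example_is_mbuf_mdata(const void *mdata)
{
	return ((uintptr_t)mdata & 1ull) != 0;
}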

/**
 * free_op_meta - free metabuf back to mempool.
 * @param mdata: pointer to the metabuf (tag bit set if mbuf-resident).
 * @param cpt_meta_pool: mempool the metabuf was allocated from.
 */
static __rte_always_inline void
free_op_meta(void *mdata, struct rte_mempool *cpt_meta_pool)
{
	bool nofree = ((uintptr_t)mdata & 1ull);

	if (likely(nofree))
		return;
	rte_mempool_put(cpt_meta_pool, mdata);
}

static __rte_always_inline uint32_t
prepare_iov_from_pkt(struct rte_mbuf *pkt,
		     iov_ptr_t *iovec, uint32_t start_offset)
{
	uint16_t index = 0;
	void *seg_data = NULL;
	phys_addr_t seg_phys;
	int32_t seg_size = 0;

	if (!pkt) {
		iovec->buf_cnt = 0;
		return 0;
	}

	if (!start_offset) {
		seg_data = rte_pktmbuf_mtod(pkt, void *);
		seg_phys = rte_pktmbuf_mtophys(pkt);
		seg_size = pkt->data_len;
	} else {
		while (start_offset >= pkt->data_len) {
			start_offset -= pkt->data_len;
			pkt = pkt->next;
		}

		seg_data = rte_pktmbuf_mtod_offset(pkt, void *, start_offset);
		seg_phys = rte_pktmbuf_mtophys_offset(pkt, start_offset);
		seg_size = pkt->data_len - start_offset;
		if (!seg_size)
			return 1;
	}

	/* first seg */
	iovec->bufs[index].vaddr = seg_data;
	iovec->bufs[index].dma_addr = seg_phys;
	iovec->bufs[index].size = seg_size;
	index++;
	pkt = pkt->next;

	while (unlikely(pkt != NULL)) {
		seg_data = rte_pktmbuf_mtod(pkt, void *);
		seg_phys = rte_pktmbuf_mtophys(pkt);
		seg_size = pkt->data_len;
		if (!seg_size)
			break;

		iovec->bufs[index].vaddr = seg_data;
		iovec->bufs[index].dma_addr = seg_phys;
		iovec->bufs[index].size = seg_size;

		index++;
		pkt = pkt->next;
	}

	iovec->buf_cnt = index;
	return 0;
}
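
/*
 * Illustrative caller sketch (hypothetical): the datapath below reserves
 * SRC_IOV_SIZE raw bytes on the stack and treats them as an iov_ptr_t,
 * exactly as fill_fc_params() does with its src[]/dst[] arrays.
 */
static __rte_always_inline int
example_gather_from_mbuf(struct rte_mbuf *m, uint32_t off)
{
	char raw[SRC_IOV_SIZE];
	iov_ptr_t *iov = (iov_ptr_t *)raw;

	if (prepare_iov_from_pkt(m, iov, off))
		return -EINVAL;
	return iov->buf_cnt;	/* number of gather segments filled */
}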

static __rte_always_inline uint32_t
prepare_iov_from_pkt_inplace(struct rte_mbuf *pkt,
			     fc_params_t *param,
			     uint32_t *flags)
{
	uint16_t index = 0;
	void *seg_data = NULL;
	phys_addr_t seg_phys;
	uint32_t seg_size = 0;
	iov_ptr_t *iovec;

	seg_data = rte_pktmbuf_mtod(pkt, void *);
	seg_phys = rte_pktmbuf_mtophys(pkt);
	seg_size = pkt->data_len;

	/* first seg */
	if (likely(!pkt->next)) {
		uint32_t headroom, tailroom;

		*flags |= SINGLE_BUF_INPLACE;
		headroom = rte_pktmbuf_headroom(pkt);
		tailroom = rte_pktmbuf_tailroom(pkt);
		if (likely((headroom >= 24) &&
			   (tailroom >= 8))) {
			/* In 83XX this is a prerequisite for Direct mode */
			*flags |= SINGLE_BUF_HEADTAILROOM;
		}

		param->bufs[0].vaddr = seg_data;
		param->bufs[0].dma_addr = seg_phys;
		param->bufs[0].size = seg_size;
		return 0;
	}

	iovec = param->src_iov;
	iovec->bufs[index].vaddr = seg_data;
	iovec->bufs[index].dma_addr = seg_phys;
	iovec->bufs[index].size = seg_size;
	index++;
	pkt = pkt->next;

	while (unlikely(pkt != NULL)) {
		seg_data = rte_pktmbuf_mtod(pkt, void *);
		seg_phys = rte_pktmbuf_mtophys(pkt);
		seg_size = pkt->data_len;

		if (!seg_size)
			break;

		iovec->bufs[index].vaddr = seg_data;
		iovec->bufs[index].dma_addr = seg_phys;
		iovec->bufs[index].size = seg_size;

		index++;
		pkt = pkt->next;
	}

	iovec->buf_cnt = index;
	return 0;
}
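
/*
 * Illustrative check (thresholds copied from the code above): a packet can
 * take the 83XX direct path only when it is a single segment with at least
 * 24 bytes of headroom and 8 bytes of tailroom.
 */
static __rte_always_inline int
example_can_use_direct_mode(const struct rte_mbuf *m)
{
	return m->next == NULL &&
	       rte_pktmbuf_headroom(m) >= 24 &&
	       rte_pktmbuf_tailroom(m) >= 8;
}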

static __rte_always_inline int
fill_fc_params(struct rte_crypto_op *cop,
	       struct cpt_sess_misc *sess_misc,
	       struct cpt_qp_meta_info *m_info,
	       void **mdata_ptr,
	       void **prep_req)
{
	uint32_t space = 0;
	struct rte_crypto_sym_op *sym_op = cop->sym;
	void *mdata = NULL;
	uintptr_t *op;
	uint32_t mc_hash_off;
	uint32_t flags = 0;
	uint64_t d_offs, d_lens;
	struct rte_mbuf *m_src, *m_dst;
	uint8_t cpt_op = sess_misc->cpt_op;
#ifdef CPT_ALWAYS_USE_SG_MODE
	uint8_t inplace = 0;
#else
	uint8_t inplace = 1;
#endif
	fc_params_t fc_params;
	char src[SRC_IOV_SIZE];
	char dst[SRC_IOV_SIZE];
	uint32_t iv_buf[4];
	int ret;

	if (likely(sess_misc->iv_length)) {
		flags |= VALID_IV_BUF;
		fc_params.iv_buf = rte_crypto_op_ctod_offset(cop,
				   uint8_t *, sess_misc->iv_offset);
		if (sess_misc->aes_ctr &&
		    unlikely(sess_misc->iv_length != 16)) {
			memcpy((uint8_t *)iv_buf,
			       rte_crypto_op_ctod_offset(cop,
			       uint8_t *, sess_misc->iv_offset), 12);
			iv_buf[3] = rte_cpu_to_be_32(0x1);
			fc_params.iv_buf = iv_buf;
		}
	}

	if (sess_misc->zsk_flag) {
		fc_params.auth_iv_buf = rte_crypto_op_ctod_offset(cop,
					uint8_t *,
					sess_misc->auth_iv_offset);
		if (sess_misc->zsk_flag != ZS_EA)
			inplace = 0;
	}

	m_src = sym_op->m_src;
	m_dst = sym_op->m_dst;

	if (sess_misc->aes_gcm) {
		uint8_t *salt;
		uint8_t *aad_data;
		uint16_t aad_len;

		d_offs = sym_op->aead.data.offset;
		d_lens = sym_op->aead.data.length;
		mc_hash_off = sym_op->aead.data.offset +
			      sym_op->aead.data.length;

		aad_data = sym_op->aead.aad.data;
		aad_len = sess_misc->aad_length;
		if (likely((aad_data + aad_len) ==
			   rte_pktmbuf_mtod_offset(m_src,
						   uint8_t *,
						   sym_op->aead.data.offset))) {
			d_offs = (d_offs - aad_len) | (d_offs << 16);
			d_lens = (d_lens + aad_len) | (d_lens << 32);
		} else {
			fc_params.aad_buf.vaddr = sym_op->aead.aad.data;
			fc_params.aad_buf.dma_addr = sym_op->aead.aad.phys_addr;
			fc_params.aad_buf.size = aad_len;
			flags |= VALID_AAD_BUF;
			inplace = 0;
			d_offs = d_offs << 16;
			d_lens = d_lens << 32;
		}

		salt = fc_params.iv_buf;
		if (unlikely(*(uint32_t *)salt != sess_misc->salt)) {
			cpt_fc_salt_update(SESS_PRIV(sess_misc), salt);
			sess_misc->salt = *(uint32_t *)salt;
		}
		fc_params.iv_buf = salt + 4;

		if (likely(sess_misc->mac_len)) {
			struct rte_mbuf *m = (cpt_op & CPT_OP_ENCODE) ? m_dst :
					     m_src;

			if (!m)
				m = m_src;

			/* hmac immediately following data is best case */
			if (unlikely(rte_pktmbuf_mtod(m, uint8_t *) +
				     mc_hash_off !=
				     (uint8_t *)sym_op->aead.digest.data)) {
				flags |= VALID_MAC_BUF;
				fc_params.mac_buf.size = sess_misc->mac_len;
				fc_params.mac_buf.vaddr =
					sym_op->aead.digest.data;
				fc_params.mac_buf.dma_addr =
					sym_op->aead.digest.phys_addr;
				inplace = 0;
			}
		}
	} else {
		d_offs = sym_op->cipher.data.offset;
		d_lens = sym_op->cipher.data.length;
		mc_hash_off = sym_op->cipher.data.offset +
			      sym_op->cipher.data.length;
		d_offs = (d_offs << 16) | sym_op->auth.data.offset;
		d_lens = (d_lens << 32) | sym_op->auth.data.length;

		if (mc_hash_off < (sym_op->auth.data.offset +
				   sym_op->auth.data.length)) {
			mc_hash_off = (sym_op->auth.data.offset +
				       sym_op->auth.data.length);
		}
		/* For GMAC, salt should be updated as in GCM */
		if (unlikely(sess_misc->is_gmac)) {
			uint8_t *salt;

			salt = fc_params.iv_buf;
			if (unlikely(*(uint32_t *)salt != sess_misc->salt)) {
				cpt_fc_salt_update(SESS_PRIV(sess_misc), salt);
				sess_misc->salt = *(uint32_t *)salt;
			}
			fc_params.iv_buf = salt + 4;
		}
		if (likely(sess_misc->mac_len)) {
			struct rte_mbuf *m;

			m = (cpt_op & CPT_OP_ENCODE) ? m_dst : m_src;
			if (!m)
				m = m_src;

			/* hmac immediately following data is best case */
			if (unlikely(rte_pktmbuf_mtod(m, uint8_t *) +
				     mc_hash_off !=
				     (uint8_t *)sym_op->auth.digest.data)) {
				flags |= VALID_MAC_BUF;
				fc_params.mac_buf.size =
					sess_misc->mac_len;
				fc_params.mac_buf.vaddr =
					sym_op->auth.digest.data;
				fc_params.mac_buf.dma_addr =
					sym_op->auth.digest.phys_addr;
				inplace = 0;
			}
		}
	}

	fc_params.ctx_buf.vaddr = SESS_PRIV(sess_misc);
	fc_params.ctx_buf.dma_addr = sess_misc->ctx_dma_addr;

	if (unlikely(sess_misc->is_null || sess_misc->cpt_op == CPT_OP_DECODE))
		inplace = 0;

	if (likely(!m_dst && inplace)) {
		/* Case of single buffer without AAD buf or
		 * separate mac buf in place and
		 * not air crypto
		 */
		fc_params.dst_iov = fc_params.src_iov = (void *)src;

		if (unlikely(prepare_iov_from_pkt_inplace(m_src,
							  &fc_params,
							  &flags))) {
			CPT_LOG_DP_ERR("Prepare inplace src iov failed");
			ret = -EINVAL;
			goto err_exit;
		}
	} else {
		/* Out of place processing */
		fc_params.src_iov = (void *)src;
		fc_params.dst_iov = (void *)dst;

		/* Store SG I/O in the api for reuse */
		if (prepare_iov_from_pkt(m_src, fc_params.src_iov, 0)) {
			CPT_LOG_DP_ERR("Prepare src iov failed");
			ret = -EINVAL;
			goto err_exit;
		}

		if (unlikely(m_dst != NULL)) {
			uint32_t pkt_len;

			/* Try to make room as much as src has */
			pkt_len = rte_pktmbuf_pkt_len(m_dst);

			if (unlikely(pkt_len < rte_pktmbuf_pkt_len(m_src))) {
				pkt_len = rte_pktmbuf_pkt_len(m_src) - pkt_len;
				if (!rte_pktmbuf_append(m_dst, pkt_len)) {
					CPT_LOG_DP_ERR("Not enough space in "
						       "m_dst %p, need %u"
						       " more",
						       m_dst, pkt_len);
					ret = -EINVAL;
					goto err_exit;
				}
			}

			if (prepare_iov_from_pkt(m_dst, fc_params.dst_iov, 0)) {
				CPT_LOG_DP_ERR("Prepare dst iov failed for "
					       "m_dst %p", m_dst);
				ret = -EINVAL;
				goto err_exit;
			}
		} else {
			fc_params.dst_iov = (void *)src;
		}
	}

	if (likely(flags & SINGLE_BUF_HEADTAILROOM))
		mdata = alloc_op_meta(m_src, &fc_params.meta_buf,
				      m_info->lb_mlen, m_info->pool);
	else
		mdata = alloc_op_meta(NULL, &fc_params.meta_buf,
				      m_info->sg_mlen, m_info->pool);

	if (unlikely(mdata == NULL)) {
		CPT_LOG_DP_ERR("Error allocating meta buffer for request");
		ret = -ENOMEM;
		goto err_exit;
	}

	op = (uintptr_t *)((uintptr_t)mdata & (uintptr_t)~1ull);
	op[0] = (uintptr_t)mdata;
	op[1] = (uintptr_t)cop;
	op[2] = op[3] = 0; /* Used to indicate auth verify */
	space += 4 * sizeof(uint64_t);

	fc_params.meta_buf.vaddr = (uint8_t *)op + space;
	fc_params.meta_buf.dma_addr += space;
	fc_params.meta_buf.size -= space;

	/* Finally prepare the instruction */
	if (cpt_op & CPT_OP_ENCODE)
		*prep_req = cpt_fc_enc_hmac_prep(flags, d_offs, d_lens,
						 &fc_params, op);
	else
		*prep_req = cpt_fc_dec_hmac_prep(flags, d_offs, d_lens,
						 &fc_params, op);

	if (unlikely(*prep_req == NULL)) {
		CPT_LOG_DP_ERR("Preparing request failed due to bad input arg");
		ret = -EINVAL;
		goto free_mdata_and_exit;
	}

	*mdata_ptr = mdata;
	return 0;

free_mdata_and_exit:
	free_op_meta(mdata, m_info->pool);
err_exit:
	return ret;
}
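
/*
 * Worked example (hypothetical numbers) of the packing fill_fc_params()
 * hands to the microcode: cipher offset/length in the upper halves, auth
 * offset/length in the lower halves of the two 64-bit words.
 */
static __rte_always_inline void
example_pack_offsets(uint64_t *d_offs, uint64_t *d_lens)
{
	uint32_t cipher_off = 16, cipher_len = 64;
	uint32_t auth_off = 0, auth_len = 80;

	*d_offs = ((uint64_t)cipher_off << 16) | auth_off;
	*d_lens = ((uint64_t)cipher_len << 32) | auth_len;
}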

static __rte_always_inline void
compl_auth_verify(struct rte_crypto_op *op,
		  uint8_t *gen_mac,
		  uint64_t mac_len)
{
	uint8_t *mac;
	struct rte_crypto_sym_op *sym_op = op->sym;

	if (sym_op->auth.digest.data)
		mac = sym_op->auth.digest.data;
	else
		mac = rte_pktmbuf_mtod_offset(sym_op->m_src,
					      uint8_t *,
					      sym_op->auth.data.length +
					      sym_op->auth.data.offset);
	if (!mac) {
		op->status = RTE_CRYPTO_OP_STATUS_ERROR;
		return;
	}

	if (memcmp(mac, gen_mac, mac_len))
		op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
	else
		op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
}

static __rte_always_inline int
instance_session_cfg(struct rte_crypto_sym_xform *xform, void *sess)
{
	struct rte_crypto_sym_xform *chain;

	CPT_PMD_INIT_FUNC_TRACE();

	if (cpt_is_algo_supported(xform))
		goto err;

	chain = xform;
	while (chain) {
		switch (chain->type) {
		case RTE_CRYPTO_SYM_XFORM_AEAD:
			if (fill_sess_aead(chain, sess))
				goto err;
			break;
		case RTE_CRYPTO_SYM_XFORM_CIPHER:
			if (fill_sess_cipher(chain, sess))
				goto err;
			break;
		case RTE_CRYPTO_SYM_XFORM_AUTH:
			if (chain->auth.algo == RTE_CRYPTO_AUTH_AES_GMAC) {
				if (fill_sess_gmac(chain, sess))
					goto err;
			} else {
				if (fill_sess_auth(chain, sess))
					goto err;
			}
			break;
		default:
			CPT_LOG_DP_ERR("Invalid crypto xform type");
			goto err;
		}
		chain = chain->next;
	}

	return 0;

err:
	return -1;
}
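
/*
 * Illustrative sketch (hypothetical values): an encrypt-then-authenticate
 * chain in the xform order instance_session_cfg() walks.
 */
static __rte_always_inline void
example_chain_cipher_auth(struct rte_crypto_sym_xform *cipher,
			  struct rte_crypto_sym_xform *auth)
{
	cipher->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
	cipher->cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
	cipher->next = auth;

	auth->type = RTE_CRYPTO_SYM_XFORM_AUTH;
	auth->auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
	auth->next = NULL;
}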

static __rte_always_inline void
find_kasumif9_direction_and_length(uint8_t *src,
				   uint32_t counter_num_bytes,
				   uint32_t *addr_length_in_bits,
				   uint8_t *addr_direction)
{
	uint8_t found = 0;
	uint32_t pos;
	uint8_t last_byte;

	while (!found && counter_num_bytes > 0) {
		counter_num_bytes--;
		if (src[counter_num_bytes] == 0x00)
			continue;
		pos = rte_bsf32(src[counter_num_bytes]);
		if (pos == 7) {
			if (likely(counter_num_bytes > 0)) {
				last_byte = src[counter_num_bytes - 1];
				*addr_direction = last_byte & 0x1;
				*addr_length_in_bits = counter_num_bytes * 8
						       - 1;
			}
		} else {
			last_byte = src[counter_num_bytes];
			*addr_direction = (last_byte >> (pos + 1)) & 0x1;
			*addr_length_in_bits = counter_num_bytes * 8
					       + (8 - (pos + 2));
		}
		found = 1;
	}
}
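
/*
 * Worked example (hypothetical input) for the scan above. KASUMI F9 input
 * ends with DIRECTION || '1' || zero padding; for a final byte of 0x40
 * (0100 0000b), rte_bsf32() returns 6 for the '1' marker, the direction
 * bit right above it is 0, and the message length works out to 56 bits.
 */
static __rte_always_inline void
example_kasumi_f9_scan(void)
{
	uint8_t buf[8] = { 0xde, 0xad, 0xbe, 0xef, 0xca, 0xfe, 0xbe, 0x40 };
	uint32_t length_in_bits = 0;
	uint8_t direction = 0xff;

	find_kasumif9_direction_and_length(buf, sizeof(buf),
					   &length_in_bits, &direction);
	/* Here direction == 0 and length_in_bits == 56 */
	RTE_SET_USED(length_in_bits);
	RTE_SET_USED(direction);
}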

/*
 * This handles all auth-only cases except AES_GMAC
 */
static __rte_always_inline int
fill_digest_params(struct rte_crypto_op *cop,
		   struct cpt_sess_misc *sess,
		   struct cpt_qp_meta_info *m_info,
		   void **mdata_ptr,
		   void **prep_req)
{
	uint32_t space = 0;
	struct rte_crypto_sym_op *sym_op = cop->sym;
	void *mdata;
	phys_addr_t mphys;
	uintptr_t *op;
	uint32_t auth_range_off;
	uint32_t flags = 0;
	uint64_t d_offs = 0, d_lens;
	struct rte_mbuf *m_src, *m_dst;
	uint16_t auth_op = sess->cpt_op & CPT_OP_AUTH_MASK;
	uint16_t mac_len = sess->mac_len;
	fc_params_t params;
	char src[SRC_IOV_SIZE];
	uint8_t iv_buf[16];
	int ret;

	memset(&params, 0, sizeof(fc_params_t));

	m_src = sym_op->m_src;

	/* For just digest, let's force mempool alloc */
	mdata = alloc_op_meta(NULL, &params.meta_buf, m_info->sg_mlen,
			      m_info->pool);
	if (mdata == NULL) {
		ret = -ENOMEM;
		goto err_exit;
	}

	mphys = params.meta_buf.dma_addr;

	op = mdata;
	op[0] = (uintptr_t)mdata;
	op[1] = (uintptr_t)cop;
	op[2] = op[3] = 0; /* Used to indicate auth verify */
	space += 4 * sizeof(uint64_t);

	auth_range_off = sym_op->auth.data.offset;

	flags = VALID_MAC_BUF;
	params.src_iov = (void *)src;
	if (unlikely(sess->zsk_flag)) {
		/*
		 * Since for Zuc, Kasumi, Snow3g offsets are in bits
		 * we will send pass through even for auth only case,
		 * let MC handle it
		 */
		d_offs = auth_range_off;
		auth_range_off = 0;
		params.auth_iv_buf = rte_crypto_op_ctod_offset(cop,
					uint8_t *, sess->auth_iv_offset);
		if (sess->zsk_flag == K_F9) {
			uint32_t length_in_bits, num_bytes;
			uint8_t *src, direction = 0;

			memcpy(iv_buf, rte_pktmbuf_mtod(cop->sym->m_src,
							uint8_t *), 8);
			/*
			 * This is kasumi f9, take direction from
			 * source buffer
			 */
			length_in_bits = cop->sym->auth.data.length;
			num_bytes = (length_in_bits >> 3);
			src = rte_pktmbuf_mtod(cop->sym->m_src, uint8_t *);
			find_kasumif9_direction_and_length(src,
						num_bytes,
						&length_in_bits,
						&direction);
			length_in_bits -= 64;
			cop->sym->auth.data.offset += 64;
			d_offs = cop->sym->auth.data.offset;
			auth_range_off = d_offs / 8;
			cop->sym->auth.data.length = length_in_bits;

			/* Store it at end of auth iv */
			iv_buf[8] = direction;
			params.auth_iv_buf = iv_buf;
		}
	}

	d_lens = sym_op->auth.data.length;

	params.ctx_buf.vaddr = SESS_PRIV(sess);
	params.ctx_buf.dma_addr = sess->ctx_dma_addr;

	if (auth_op == CPT_OP_AUTH_GENERATE) {
		if (sym_op->auth.digest.data) {
			/*
			 * Digest to be generated
			 * in separate buffer
			 */
			params.mac_buf.size = sess->mac_len;
			params.mac_buf.vaddr = sym_op->auth.digest.data;
			params.mac_buf.dma_addr =
				sym_op->auth.digest.phys_addr;
		} else {
			uint32_t off = sym_op->auth.data.offset +
				       sym_op->auth.data.length;
			int32_t dlen, space;

			m_dst = sym_op->m_dst ?
				sym_op->m_dst : sym_op->m_src;
			dlen = rte_pktmbuf_pkt_len(m_dst);

			space = off + mac_len - dlen;
			if (space > 0)
				if (!rte_pktmbuf_append(m_dst, space)) {
					CPT_LOG_DP_ERR("Failed to extend "
						       "mbuf by %uB", space);
					ret = -EINVAL;
					goto free_mdata_and_exit;
				}

			params.mac_buf.vaddr =
				rte_pktmbuf_mtod_offset(m_dst, void *, off);
			params.mac_buf.dma_addr =
				rte_pktmbuf_mtophys_offset(m_dst, off);
			params.mac_buf.size = mac_len;
		}
	} else {
		/* Need space for storing generated mac */
		params.mac_buf.vaddr = (uint8_t *)mdata + space;
		params.mac_buf.dma_addr = mphys + space;
		params.mac_buf.size = mac_len;
		space += RTE_ALIGN_CEIL(mac_len, 8);
		op[2] = (uintptr_t)params.mac_buf.vaddr;
		op[3] = mac_len;
	}

	params.meta_buf.vaddr = (uint8_t *)mdata + space;
	params.meta_buf.dma_addr = mphys + space;
	params.meta_buf.size -= space;

	/* Out of place processing */
	params.src_iov = (void *)src;

	/* Store SG I/O in the api for reuse */
	if (prepare_iov_from_pkt(m_src, params.src_iov, auth_range_off)) {
		CPT_LOG_DP_ERR("Prepare src iov failed");
		ret = -EINVAL;
		goto free_mdata_and_exit;
	}

	*prep_req = cpt_fc_enc_hmac_prep(flags, d_offs, d_lens, &params, op);
	if (unlikely(*prep_req == NULL)) {
		ret = -EINVAL;
		goto free_mdata_and_exit;
	}

	*mdata_ptr = mdata;
	return 0;

free_mdata_and_exit:
	free_op_meta(mdata, m_info->pool);
err_exit:
	return ret;
}

#endif /* _CPT_UCODE_H_ */