/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Cavium, Inc
#include "cpt_common.h"
#include "cpt_hw_types.h"
#include "cpt_mcode_defines.h"
 * This file defines functions that are interfaces to the microcode spec.
static uint8_t zuc_d[32] = {
	0x44, 0xD7, 0x26, 0xBC, 0x62, 0x6B, 0x13, 0x5E,
	0x57, 0x89, 0x35, 0xE2, 0x71, 0x35, 0x09, 0xAF,
	0x4D, 0x78, 0x2F, 0x13, 0x6B, 0xC4, 0x1A, 0xF1,
	0x5E, 0x26, 0x3C, 0x4D, 0x78, 0x9A, 0x47, 0xAC
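/*
 * These appear to be the sixteen 15-bit "D" constants from the ZUC
 * specification (D0 = 0x44D7, D1 = 0x26BC, ..., D15 = 0x47AC), stored
 * big-endian as byte pairs; they are copied verbatim into
 * zs_ctx.zuc_const when a ZUC key is installed below.
 */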
static __rte_always_inline int
cpt_is_algo_supported(struct rte_crypto_sym_xform *xform)
	 * Microcode only supports the following combinations:
	 * encryption followed by authentication,
	 * authentication followed by decryption.
	if ((xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) &&
	    (xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) &&
	    (xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)) {
		/* Unsupported as of now by microcode */
		CPT_LOG_DP_ERR("Unsupported combination");
	if ((xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) &&
	    (xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) &&
	    (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT)) {
		/* For GMAC auth there is no cipher operation */
		if (xform->aead.algo != RTE_CRYPTO_AEAD_AES_GCM ||
		    xform->next->auth.algo !=
		    RTE_CRYPTO_AUTH_AES_GMAC) {
			/* Unsupported as of now by microcode */
			CPT_LOG_DP_ERR("Unsupported combination");
static __rte_always_inline void
gen_key_snow3g(const uint8_t *ck, uint32_t *keyx)
	for (i = 0; i < 4; i++) {
		keyx[3 - i] = (ck[base] << 24) | (ck[base + 1] << 16) |
			(ck[base + 2] << 8) | (ck[base + 3]);
		keyx[3 - i] = rte_cpu_to_be_32(keyx[3 - i]);
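	/*
	 * Worked example (a sketch, assuming base walks the key in 4-byte
	 * steps): for CK = 00 01 02 03 ... 0c 0d 0e 0f, each 4-byte group
	 * is packed into one word and the word order is reversed, so
	 * keyx[3] holds be32(0x00010203) and keyx[0] holds be32(0x0c0d0e0f),
	 * the LFSR key layout SNOW 3G expects.
	 */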
static __rte_always_inline void
cpt_fc_salt_update(void *ctx,
	struct cpt_ctx *cpt_ctx = ctx;
	memcpy(&cpt_ctx->fctx.enc.encr_iv, salt, 4);
static __rte_always_inline int
cpt_fc_ciph_validate_key_aes(uint16_t key_len)
static __rte_always_inline int
cpt_fc_ciph_set_type(cipher_type_t type, struct cpt_ctx *ctx, uint16_t key_len)
		if (unlikely(cpt_fc_ciph_validate_key_aes(key_len) != 0))
		key_len = key_len / 2;
		if (unlikely(key_len == CPT_BYTE_24)) {
			CPT_LOG_DP_ERR("Invalid AES key len for XTS");
		if (unlikely(cpt_fc_ciph_validate_key_aes(key_len) != 0))
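		/*
		 * Note: an XTS key blob carries key1 || key2, so the combined
		 * length is halved before validation; e.g. a 32B blob yields
		 * two 16B (AES-128) keys, while a 48B blob would halve to the
		 * unsupported 24B length rejected above.
		 */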
		if (unlikely(key_len != 16))
		/* No support for AEAD yet */
		if (unlikely(ctx->hash_type))
		fc_type = ZUC_SNOW3G;
		if (unlikely(key_len != 16))
		/* No support for AEAD yet */
		if (unlikely(ctx->hash_type))
	ctx->fc_type = fc_type;
static __rte_always_inline void
cpt_fc_ciph_set_key_passthrough(struct cpt_ctx *cpt_ctx, mc_fc_context_t *fctx)
	cpt_ctx->enc_cipher = 0;
	CPT_P_ENC_CTRL(fctx).enc_cipher = 0;
static __rte_always_inline void
cpt_fc_ciph_set_key_set_aes_key_type(mc_fc_context_t *fctx, uint16_t key_len)
	mc_aes_type_t aes_key_type = 0;
		aes_key_type = AES_128_BIT;
		aes_key_type = AES_192_BIT;
		aes_key_type = AES_256_BIT;
		/* This should not happen */
		CPT_LOG_DP_ERR("Invalid AES key len");
	CPT_P_ENC_CTRL(fctx).aes_key = aes_key_type;
static __rte_always_inline void
cpt_fc_ciph_set_key_snow3g_uea2(struct cpt_ctx *cpt_ctx, const uint8_t *key,
	gen_key_snow3g(key, keyx);
	memcpy(cpt_ctx->zs_ctx.ci_key, keyx, key_len);
	cpt_ctx->zsk_flags = 0;
static __rte_always_inline void
cpt_fc_ciph_set_key_zuc_eea3(struct cpt_ctx *cpt_ctx, const uint8_t *key,
	memcpy(cpt_ctx->zs_ctx.ci_key, key, key_len);
	memcpy(cpt_ctx->zs_ctx.zuc_const, zuc_d, 32);
	cpt_ctx->zsk_flags = 0;
static __rte_always_inline void
cpt_fc_ciph_set_key_kasumi_f8_ecb(struct cpt_ctx *cpt_ctx, const uint8_t *key,
	memcpy(cpt_ctx->k_ctx.ci_key, key, key_len);
	cpt_ctx->zsk_flags = 0;
static __rte_always_inline void
cpt_fc_ciph_set_key_kasumi_f8_cbc(struct cpt_ctx *cpt_ctx, const uint8_t *key,
	memcpy(cpt_ctx->k_ctx.ci_key, key, key_len);
	cpt_ctx->zsk_flags = 0;
static __rte_always_inline int
cpt_fc_ciph_set_key(void *ctx, cipher_type_t type, const uint8_t *key,
		    uint16_t key_len, uint8_t *salt)
	struct cpt_ctx *cpt_ctx = ctx;
	mc_fc_context_t *fctx = &cpt_ctx->fctx;
	uint64_t *ctrl_flags = NULL;
	ret = cpt_fc_ciph_set_type(type, cpt_ctx, key_len);
	if (cpt_ctx->fc_type == FC_GEN) {
		ctrl_flags = (uint64_t *)&(fctx->enc.enc_ctrl.flags);
		*ctrl_flags = rte_be_to_cpu_64(*ctrl_flags);
		/*
		 * We always need to say the IV is from DPTR, as the user can
		 * sometimes override the IV per operation.
		 */
		CPT_P_ENC_CTRL(fctx).iv_source = CPT_FROM_DPTR;
		cpt_fc_ciph_set_key_passthrough(cpt_ctx, fctx);
		/* CPT performs DES using 3DES with the 8B DES-key
		 * replicated 2 more times to match the 24B 3DES-key.
		 * E.g. if the original key is "0x0a 0x0b", the new key is
		 * "0x0a 0x0b 0x0a 0x0b 0x0a 0x0b"
		 */
			/* Skipping the first 8B as it will be copied
			 * in the regular code flow
			 */
			memcpy(fctx->enc.encr_key + key_len, key, key_len);
			memcpy(fctx->enc.encr_key + 2 * key_len, key, key_len);
		/* For DES3_ECB, the IV needs to come from CTX. */
		CPT_P_ENC_CTRL(fctx).iv_source = CPT_FROM_CTX;
		cpt_fc_ciph_set_key_set_aes_key_type(fctx, key_len);
		/* Even though the IV source is DPTR,
		 * the AES-GCM salt is taken from CTX.
		 */
			memcpy(fctx->enc.encr_iv, salt, 4);
			/* Assuming it was just a salt update
			 * and nothing else
			 */
		cpt_fc_ciph_set_key_set_aes_key_type(fctx, key_len);
		key_len = key_len / 2;
		cpt_fc_ciph_set_key_set_aes_key_type(fctx, key_len);
		/* Copy key2 for XTS into ipad */
		memset(fctx->hmac.ipad, 0, sizeof(fctx->hmac.ipad));
		memcpy(fctx->hmac.ipad, &key[key_len], key_len);
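		/*
		 * Layout note (an inference from the copies above): the XTS
		 * blob is split in half; key1 reaches encr_key via the common
		 * path below, while key2 is stashed in the otherwise unused
		 * hmac.ipad field of the flexi-crypto context.
		 */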
		cpt_fc_ciph_set_key_snow3g_uea2(cpt_ctx, key, key_len);
		cpt_fc_ciph_set_key_zuc_eea3(cpt_ctx, key, key_len);
		cpt_fc_ciph_set_key_kasumi_f8_ecb(cpt_ctx, key, key_len);
		cpt_fc_ciph_set_key_kasumi_f8_cbc(cpt_ctx, key, key_len);
	/* Only for FC_GEN case */
		/* For GMAC auth, cipher must be NULL */
		if (cpt_ctx->hash_type != GMAC_TYPE)
			CPT_P_ENC_CTRL(fctx).enc_cipher = type;
		memcpy(fctx->enc.encr_key, key, key_len);
		*ctrl_flags = rte_cpu_to_be_64(*ctrl_flags);
	cpt_ctx->enc_cipher = type;
static __rte_always_inline uint32_t
fill_sg_comp(sg_comp_t *list,
	     phys_addr_t dma_addr,
	sg_comp_t *to = &list[i >> 2];
	to->u.s.len[i % 4] = rte_cpu_to_be_16(size);
	to->ptr[i % 4] = rte_cpu_to_be_64(dma_addr);
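	/*
	 * SG component layout, for reference: each sg_comp_t packs up to
	 * four entries -- four big-endian 16-bit lengths followed by four
	 * big-endian 64-bit pointers -- so entry i lives in component
	 * i >> 2 at slot i % 4; e.g. entries 0-3 fill list[0] and entry 4
	 * starts list[1].
	 */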
static __rte_always_inline uint32_t
fill_sg_comp_from_buf(sg_comp_t *list,
	sg_comp_t *to = &list[i >> 2];
	to->u.s.len[i % 4] = rte_cpu_to_be_16(from->size);
	to->ptr[i % 4] = rte_cpu_to_be_64(from->dma_addr);
static __rte_always_inline uint32_t
fill_sg_comp_from_buf_min(sg_comp_t *list,
	sg_comp_t *to = &list[i >> 2];
	uint32_t size = *psize;
	e_len = (size > from->size) ? from->size : size;
	to->u.s.len[i % 4] = rte_cpu_to_be_16(e_len);
	to->ptr[i % 4] = rte_cpu_to_be_64(from->dma_addr);
/*
 * This fills the MC-expected SGIO list
 * from the IOV given by the user.
 */
static __rte_always_inline uint32_t
fill_sg_comp_from_iov(sg_comp_t *list,
		      iov_ptr_t *from, uint32_t from_offset,
		      uint32_t *psize, buf_ptr_t *extra_buf,
		      uint32_t extra_offset)
	uint32_t extra_len = extra_buf ? extra_buf->size : 0;
	uint32_t size = *psize - extra_len;
	for (j = 0; (j < from->buf_cnt) && size; j++) {
		phys_addr_t e_dma_addr;
		sg_comp_t *to = &list[i >> 2];
		if (unlikely(from_offset)) {
			if (from_offset >= bufs[j].size) {
				from_offset -= bufs[j].size;
			e_dma_addr = bufs[j].dma_addr + from_offset;
			e_len = (size > (bufs[j].size - from_offset)) ?
				(bufs[j].size - from_offset) : size;
			e_dma_addr = bufs[j].dma_addr;
			e_len = (size > bufs[j].size) ?
		to->u.s.len[i % 4] = rte_cpu_to_be_16(e_len);
		to->ptr[i % 4] = rte_cpu_to_be_64(e_dma_addr);
		if (extra_len && (e_len >= extra_offset)) {
			/* Break the data at given offset */
			uint32_t next_len = e_len - extra_offset;
			phys_addr_t next_dma = e_dma_addr + extra_offset;
				e_len = extra_offset;
				to->u.s.len[i % 4] = rte_cpu_to_be_16(e_len);
			/* Insert extra data ptr */
					rte_cpu_to_be_16(extra_buf->size);
					rte_cpu_to_be_64(extra_buf->dma_addr);
			/* size already decremented by extra len */
			/* insert the rest of the data */
				to->u.s.len[i % 4] = rte_cpu_to_be_16(next_len);
				to->ptr[i % 4] = rte_cpu_to_be_64(next_dma);
			extra_offset -= size;
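	/*
	 * Worked example (a sketch): with one 100B source buffer, an
	 * extra_buf of 16B and extra_offset 40, the gather list becomes
	 * three entries -- bytes 0-39 of the source, the 16B extra buffer,
	 * then bytes 40-99 -- i.e. the AAD-style splice the callers below
	 * rely on.
	 */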
static __rte_always_inline void
cpt_digest_gen_prep(uint32_t flags,
		    digest_params_t *params,
	struct cpt_request_info *req;
	uint16_t data_len, mac_len, key_len;
	auth_type_t hash_type;
	sg_comp_t *gather_comp;
	sg_comp_t *scatter_comp;
	uint32_t g_size_bytes, s_size_bytes;
	uint64_t dptr_dma, rptr_dma;
	vq_cmd_word0_t vq_cmd_w0;
	vq_cmd_word3_t vq_cmd_w3;
	void *c_vaddr, *m_vaddr;
	uint64_t c_dma, m_dma;
	opcode_info_t opcode;
	ctx = params->ctx_buf.vaddr;
	meta_p = &params->meta_buf;
	m_vaddr = meta_p->vaddr;
	m_dma = meta_p->dma_addr;
	/*
	 * Save initial space following the app data for the completion code &
	 * alternate completion code, so they fall in the same cache line as
	 * the app data.
	 */
	m_vaddr = (uint8_t *)m_vaddr + COMPLETION_CODE_SIZE;
	m_dma += COMPLETION_CODE_SIZE;
	size = (uint8_t *)RTE_PTR_ALIGN((uint8_t *)m_vaddr, 16) -
	c_vaddr = (uint8_t *)m_vaddr + size;
	c_dma = m_dma + size;
	size += sizeof(cpt_res_s_t);
	m_vaddr = (uint8_t *)m_vaddr + size;
	size = sizeof(struct cpt_request_info);
	m_vaddr = (uint8_t *)m_vaddr + size;
	hash_type = ctx->hash_type;
	mac_len = ctx->mac_len;
	key_len = ctx->auth_key_len;
	data_len = AUTH_DLEN(d_lens);
	vq_cmd_w0.s.param2 = ((uint16_t)hash_type << 8);
		opcode.s.major = CPT_MAJOR_OP_HMAC | CPT_DMA_MODE;
		vq_cmd_w0.s.param1 = key_len;
		vq_cmd_w0.s.dlen = data_len + ROUNDUP8(key_len);
		opcode.s.major = CPT_MAJOR_OP_HASH | CPT_DMA_MODE;
		vq_cmd_w0.s.param1 = 0;
		vq_cmd_w0.s.dlen = data_len;
	/* Only the NULL-auth (with NULL cipher) case enters this branch */
	if (unlikely(!hash_type && !ctx->enc_cipher)) {
		opcode.s.major = CPT_MAJOR_OP_MISC;
		/* Minor op is passthrough */
		opcode.s.minor = 0x03;
		/* Send out completion code only */
		vq_cmd_w0.s.param2 = 0x1;
	vq_cmd_w0.s.opcode = opcode.flags;
	/* DPTR has SG list */
	((uint16_t *)in_buffer)[0] = 0;
	((uint16_t *)in_buffer)[1] = 0;
	/* TODO Add error check if space will be sufficient */
	gather_comp = (sg_comp_t *)((uint8_t *)m_vaddr + 8);
		uint64_t k_dma = params->ctx_buf.dma_addr +
			offsetof(struct cpt_ctx, auth_key);
		i = fill_sg_comp(gather_comp, i, k_dma, ROUNDUP8(key_len));
	i = fill_sg_comp_from_iov(gather_comp, i, params->src_iov,
	if (unlikely(size)) {
		CPT_LOG_DP_DEBUG("Insufficient src IOV size, short by %u",
				 size);
	/*
	 * Looks like we need to support zero data
	 * gather ptr in case of hash & hmac
	 */
	((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
	g_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
	scatter_comp = (sg_comp_t *)((uint8_t *)gather_comp + g_size_bytes);
	if (flags & VALID_MAC_BUF) {
		if (unlikely(params->mac_buf.size < mac_len)) {
			CPT_LOG_DP_ERR("Insufficient MAC size");
		i = fill_sg_comp_from_buf_min(scatter_comp, i,
					      &params->mac_buf, &size);
		i = fill_sg_comp_from_iov(scatter_comp, i,
					  params->src_iov, data_len,
		if (unlikely(size)) {
			CPT_LOG_DP_ERR("Insufficient dst IOV size, short by"
				       " %u", size);
	((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
	s_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
	size = g_size_bytes + s_size_bytes + SG_LIST_HDR_SIZE;
	/* This is the DPTR len in case of SG mode */
	vq_cmd_w0.s.dlen = size;
	m_vaddr = (uint8_t *)m_vaddr + size;
	/* cpt alternate completion address saved earlier */
	req->alternate_caddr = (uint64_t *)((uint8_t *)c_vaddr - 8);
	*req->alternate_caddr = ~((uint64_t)COMPLETION_CODE_INIT);
	rptr_dma = c_dma - 8;
	req->ist.ei1 = dptr_dma;
	req->ist.ei2 = rptr_dma;
	/* 16 byte aligned cpt res address */
	req->completion_addr = (uint64_t *)((uint8_t *)c_vaddr);
	*req->completion_addr = COMPLETION_CODE_INIT;
	req->comp_baddr = c_dma;
	/* Fill microcode part of instruction */
	req->ist.ei0 = vq_cmd_w0.u64;
	req->ist.ei3 = vq_cmd_w3.u64;
static __rte_always_inline void
cpt_enc_hmac_prep(uint32_t flags,
		  fc_params_t *fc_params,
	uint32_t iv_offset = 0;
	int32_t inputlen, outputlen, enc_dlen, auth_dlen;
	struct cpt_ctx *cpt_ctx;
	uint32_t cipher_type, hash_type;
	uint32_t mac_len, size;
	struct cpt_request_info *req;
	buf_ptr_t *meta_p, *aad_buf = NULL;
	uint32_t encr_offset, auth_offset;
	uint32_t encr_data_len, auth_data_len, aad_len = 0;
	uint32_t passthrough_len = 0;
	void *m_vaddr, *offset_vaddr;
	uint64_t m_dma, offset_dma, ctx_dma;
	vq_cmd_word0_t vq_cmd_w0;
	vq_cmd_word3_t vq_cmd_w3;
	opcode_info_t opcode;
	meta_p = &fc_params->meta_buf;
	m_vaddr = meta_p->vaddr;
	m_dma = meta_p->dma_addr;
	encr_offset = ENCR_OFFSET(d_offs);
	auth_offset = AUTH_OFFSET(d_offs);
	encr_data_len = ENCR_DLEN(d_lens);
	auth_data_len = AUTH_DLEN(d_lens);
	if (unlikely(flags & VALID_AAD_BUF)) {
		/*
		 * We don't support both AAD
		 * and auth data separately
		 */
		aad_len = fc_params->aad_buf.size;
		aad_buf = &fc_params->aad_buf;
	cpt_ctx = fc_params->ctx_buf.vaddr;
	cipher_type = cpt_ctx->enc_cipher;
	hash_type = cpt_ctx->hash_type;
	mac_len = cpt_ctx->mac_len;
	/*
	 * Save initial space following the app data for the completion code &
	 * alternate completion code, so they fall in the same cache line as
	 * the app data.
	 */
	m_vaddr = (uint8_t *)m_vaddr + COMPLETION_CODE_SIZE;
	m_dma += COMPLETION_CODE_SIZE;
	size = (uint8_t *)RTE_PTR_ALIGN((uint8_t *)m_vaddr, 16) -
	c_vaddr = (uint8_t *)m_vaddr + size;
	c_dma = m_dma + size;
	size += sizeof(cpt_res_s_t);
	m_vaddr = (uint8_t *)m_vaddr + size;
	/* start cpt request info struct at 8 byte boundary */
	size = (uint8_t *)RTE_PTR_ALIGN(m_vaddr, 8) -
	req = (struct cpt_request_info *)((uint8_t *)m_vaddr + size);
	size += sizeof(struct cpt_request_info);
	m_vaddr = (uint8_t *)m_vaddr + size;
	if (hash_type == GMAC_TYPE)
	if (unlikely(!(flags & VALID_IV_BUF))) {
		iv_offset = ENCR_IV_OFFSET(d_offs);
	if (unlikely(flags & VALID_AAD_BUF)) {
		/*
		 * When AAD is given, data above encr_offset is passed through.
		 * Since AAD is given as a separate pointer and not as an
		 * offset, this is a special case: we need to fragment the
		 * input data into passthrough + encr_data and then insert the
		 * AAD in between.
		 */
		if (hash_type != GMAC_TYPE) {
			passthrough_len = encr_offset;
			auth_offset = passthrough_len + iv_len;
			encr_offset = passthrough_len + aad_len + iv_len;
			auth_data_len = aad_len + encr_data_len;
			passthrough_len = 16 + aad_len;
			auth_offset = passthrough_len + iv_len;
			auth_data_len = aad_len;
		encr_offset += iv_len;
		auth_offset += iv_len;
	opcode.s.major = CPT_MAJOR_OP_FC;
	auth_dlen = auth_offset + auth_data_len;
	enc_dlen = encr_data_len + encr_offset;
	if (unlikely(encr_data_len & 0xf)) {
		if ((cipher_type == DES3_CBC) || (cipher_type == DES3_ECB))
			enc_dlen = ROUNDUP8(encr_data_len) + encr_offset;
		else if (likely((cipher_type == AES_CBC) ||
				(cipher_type == AES_ECB)))
			enc_dlen = ROUNDUP16(encr_data_len) + encr_offset;
	if (unlikely(hash_type == GMAC_TYPE)) {
		encr_offset = auth_dlen;
	if (unlikely(auth_dlen > enc_dlen)) {
		inputlen = auth_dlen;
		outputlen = auth_dlen + mac_len;
		outputlen = enc_dlen + mac_len;
	vq_cmd_w0.s.param1 = encr_data_len;
	vq_cmd_w0.s.param2 = auth_data_len;
	/*
	 * On 83XX, the IV & offset control word cannot be part of the
	 * instruction and must live in the data buffer, so we only do the
	 * direct-mode processing when there is head room for them.
	 */
	if (likely((flags & SINGLE_BUF_INPLACE) &&
		   (flags & SINGLE_BUF_HEADTAILROOM))) {
		void *dm_vaddr = fc_params->bufs[0].vaddr;
		uint64_t dm_dma_addr = fc_params->bufs[0].dma_addr;
		/*
		 * This flag indicates that there are 24 bytes of head room and
		 * 8 bytes of tail room available, so we can do DIRECT MODE
		 * with this limitation.
		 */
		offset_vaddr = (uint8_t *)dm_vaddr - OFF_CTRL_LEN - iv_len;
		offset_dma = dm_dma_addr - OFF_CTRL_LEN - iv_len;
		req->ist.ei1 = offset_dma;
		/* RPTR should just exclude offset control word */
		req->ist.ei2 = dm_dma_addr - iv_len;
		req->alternate_caddr = (uint64_t *)((uint8_t *)dm_vaddr
						    + outputlen - iv_len);
		vq_cmd_w0.s.dlen = inputlen + OFF_CTRL_LEN;
		vq_cmd_w0.s.opcode = opcode.flags;
		if (likely(iv_len)) {
			uint64_t *dest = (uint64_t *)((uint8_t *)offset_vaddr
			uint64_t *src = fc_params->iv_buf;
		*(uint64_t *)offset_vaddr =
			rte_cpu_to_be_64(((uint64_t)encr_offset << 16) |
					 ((uint64_t)iv_offset << 8) |
					 ((uint64_t)auth_offset));
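		/*
		 * For reference, the 64-bit offset control word packs
		 * encr_offset into bits 16-31, iv_offset into bits 8-15 and
		 * auth_offset into bits 0-7 before the byte swap; e.g.
		 * encr_offset 24, iv_offset 8, auth_offset 8 encodes as
		 * be64(0x180808).
		 */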
		uint32_t i, g_size_bytes, s_size_bytes;
		uint64_t dptr_dma, rptr_dma;
		sg_comp_t *gather_comp;
		sg_comp_t *scatter_comp;
		/* This falls under strict SG mode */
		offset_vaddr = m_vaddr;
		size = OFF_CTRL_LEN + iv_len;
		m_vaddr = (uint8_t *)m_vaddr + size;
		opcode.s.major |= CPT_DMA_MODE;
		vq_cmd_w0.s.opcode = opcode.flags;
		if (likely(iv_len)) {
			uint64_t *dest = (uint64_t *)((uint8_t *)offset_vaddr
			uint64_t *src = fc_params->iv_buf;
		*(uint64_t *)offset_vaddr =
			rte_cpu_to_be_64(((uint64_t)encr_offset << 16) |
					 ((uint64_t)iv_offset << 8) |
					 ((uint64_t)auth_offset));
		/* DPTR has SG list */
		((uint16_t *)in_buffer)[0] = 0;
		((uint16_t *)in_buffer)[1] = 0;
		/* TODO Add error check if space will be sufficient */
		gather_comp = (sg_comp_t *)((uint8_t *)m_vaddr + 8);
		/* Offset control word that includes iv */
		i = fill_sg_comp(gather_comp, i, offset_dma,
				 OFF_CTRL_LEN + iv_len);
		size = inputlen - iv_len;
			uint32_t aad_offset = aad_len ? passthrough_len : 0;
			if (unlikely(flags & SINGLE_BUF_INPLACE)) {
				i = fill_sg_comp_from_buf_min(gather_comp, i,
				i = fill_sg_comp_from_iov(gather_comp, i,
							  aad_buf, aad_offset);
			if (unlikely(size)) {
				CPT_LOG_DP_ERR("Insufficient buffer space,"
					       " size %d needed", size);
		((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
		g_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
		/* Output Scatter list */
			(sg_comp_t *)((uint8_t *)gather_comp + g_size_bytes);
		if (likely(iv_len)) {
			i = fill_sg_comp(scatter_comp, i,
					 offset_dma + OFF_CTRL_LEN,
		/* output data or output data + digest */
		if (unlikely(flags & VALID_MAC_BUF)) {
			size = outputlen - iv_len - mac_len;
				uint32_t aad_offset =
					aad_len ? passthrough_len : 0;
				if (unlikely(flags & SINGLE_BUF_INPLACE)) {
					i = fill_sg_comp_from_buf_min(
					i = fill_sg_comp_from_iov(scatter_comp,
				if (unlikely(size)) {
					CPT_LOG_DP_ERR("Insufficient buffer"
						       " space, size %d needed",
			i = fill_sg_comp_from_buf(scatter_comp, i,
						  &fc_params->mac_buf);
			/* Output including mac */
			size = outputlen - iv_len;
				uint32_t aad_offset =
					aad_len ? passthrough_len : 0;
				if (unlikely(flags & SINGLE_BUF_INPLACE)) {
					i = fill_sg_comp_from_buf_min(
					i = fill_sg_comp_from_iov(scatter_comp,
				if (unlikely(size)) {
					CPT_LOG_DP_ERR("Insufficient buffer"
						       " space, size %d needed",
		((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
		s_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
		size = g_size_bytes + s_size_bytes + SG_LIST_HDR_SIZE;
		/* This is the DPTR len in case of SG mode */
		vq_cmd_w0.s.dlen = size;
		m_vaddr = (uint8_t *)m_vaddr + size;
		/* cpt alternate completion address saved earlier */
		req->alternate_caddr = (uint64_t *)((uint8_t *)c_vaddr - 8);
		*req->alternate_caddr = ~((uint64_t)COMPLETION_CODE_INIT);
		rptr_dma = c_dma - 8;
		req->ist.ei1 = dptr_dma;
		req->ist.ei2 = rptr_dma;
	ctx_dma = fc_params->ctx_buf.dma_addr +
		offsetof(struct cpt_ctx, fctx);
	vq_cmd_w3.s.grp = 0;
	vq_cmd_w3.s.cptr = ctx_dma;
	/* 16 byte aligned cpt res address */
	req->completion_addr = (uint64_t *)((uint8_t *)c_vaddr);
	*req->completion_addr = COMPLETION_CODE_INIT;
	req->comp_baddr = c_dma;
	/* Fill microcode part of instruction */
	req->ist.ei0 = vq_cmd_w0.u64;
	req->ist.ei3 = vq_cmd_w3.u64;
static __rte_always_inline void
cpt_dec_hmac_prep(uint32_t flags,
		  fc_params_t *fc_params,
	uint32_t iv_offset = 0, size;
	int32_t inputlen, outputlen, enc_dlen, auth_dlen;
	struct cpt_ctx *cpt_ctx;
	int32_t hash_type, mac_len;
	uint8_t iv_len = 16;
	struct cpt_request_info *req;
	buf_ptr_t *meta_p, *aad_buf = NULL;
	uint32_t encr_offset, auth_offset;
	uint32_t encr_data_len, auth_data_len, aad_len = 0;
	uint32_t passthrough_len = 0;
	void *m_vaddr, *offset_vaddr;
	uint64_t m_dma, offset_dma, ctx_dma;
	opcode_info_t opcode;
	vq_cmd_word0_t vq_cmd_w0;
	vq_cmd_word3_t vq_cmd_w3;
	meta_p = &fc_params->meta_buf;
	m_vaddr = meta_p->vaddr;
	m_dma = meta_p->dma_addr;
	encr_offset = ENCR_OFFSET(d_offs);
	auth_offset = AUTH_OFFSET(d_offs);
	encr_data_len = ENCR_DLEN(d_lens);
	auth_data_len = AUTH_DLEN(d_lens);
	if (unlikely(flags & VALID_AAD_BUF)) {
		/*
		 * We don't support both AAD
		 * and auth data separately
		 */
		aad_len = fc_params->aad_buf.size;
		aad_buf = &fc_params->aad_buf;
	cpt_ctx = fc_params->ctx_buf.vaddr;
	hash_type = cpt_ctx->hash_type;
	mac_len = cpt_ctx->mac_len;
	if (hash_type == GMAC_TYPE)
	if (unlikely(!(flags & VALID_IV_BUF))) {
		iv_offset = ENCR_IV_OFFSET(d_offs);
	if (unlikely(flags & VALID_AAD_BUF)) {
		/*
		 * When AAD is given, data above encr_offset is passed through.
		 * Since AAD is given as a separate pointer and not as an
		 * offset, this is a special case: we need to fragment the
		 * input data into passthrough + encr_data and then insert the
		 * AAD in between.
		 */
		if (hash_type != GMAC_TYPE) {
			passthrough_len = encr_offset;
			auth_offset = passthrough_len + iv_len;
			encr_offset = passthrough_len + aad_len + iv_len;
			auth_data_len = aad_len + encr_data_len;
			passthrough_len = 16 + aad_len;
			auth_offset = passthrough_len + iv_len;
			auth_data_len = aad_len;
		encr_offset += iv_len;
		auth_offset += iv_len;
	/*
	 * Save initial space following the app data for the completion code &
	 * alternate completion code, so they fall in the same cache line as
	 * the app data.
	 */
	m_vaddr = (uint8_t *)m_vaddr + COMPLETION_CODE_SIZE;
	m_dma += COMPLETION_CODE_SIZE;
	size = (uint8_t *)RTE_PTR_ALIGN((uint8_t *)m_vaddr, 16) -
	c_vaddr = (uint8_t *)m_vaddr + size;
	c_dma = m_dma + size;
	size += sizeof(cpt_res_s_t);
	m_vaddr = (uint8_t *)m_vaddr + size;
	/* start cpt request info structure at 8 byte alignment */
	size = (uint8_t *)RTE_PTR_ALIGN(m_vaddr, 8) -
	req = (struct cpt_request_info *)((uint8_t *)m_vaddr + size);
	size += sizeof(struct cpt_request_info);
	m_vaddr = (uint8_t *)m_vaddr + size;
	opcode.s.major = CPT_MAJOR_OP_FC;
	enc_dlen = encr_offset + encr_data_len;
	auth_dlen = auth_offset + auth_data_len;
	if (auth_dlen > enc_dlen) {
		inputlen = auth_dlen + mac_len;
		outputlen = auth_dlen;
		inputlen = enc_dlen + mac_len;
		outputlen = enc_dlen;
	if (hash_type == GMAC_TYPE)
		encr_offset = inputlen;
	vq_cmd_w0.s.param1 = encr_data_len;
	vq_cmd_w0.s.param2 = auth_data_len;
	/*
	 * On 83XX, the IV & offset control word cannot be part of the
	 * instruction and must live in the data buffer, so we only do the
	 * direct-mode processing when there is head room for them.
	 */
	if (likely((flags & SINGLE_BUF_INPLACE) &&
		   (flags & SINGLE_BUF_HEADTAILROOM))) {
		void *dm_vaddr = fc_params->bufs[0].vaddr;
		uint64_t dm_dma_addr = fc_params->bufs[0].dma_addr;
		/*
		 * This flag indicates that there are 24 bytes of head room and
		 * 8 bytes of tail room available, so we can do DIRECT MODE
		 * with this limitation.
		 */
		offset_vaddr = (uint8_t *)dm_vaddr - OFF_CTRL_LEN - iv_len;
		offset_dma = dm_dma_addr - OFF_CTRL_LEN - iv_len;
		req->ist.ei1 = offset_dma;
		/* RPTR should just exclude offset control word */
		req->ist.ei2 = dm_dma_addr - iv_len;
		req->alternate_caddr = (uint64_t *)((uint8_t *)dm_vaddr +
						    outputlen - iv_len);
		/* Since this is decryption, don't touch the content of
		 * the alternate completion-code space: it holds the HMAC.
		 */
		vq_cmd_w0.s.dlen = inputlen + OFF_CTRL_LEN;
		vq_cmd_w0.s.opcode = opcode.flags;
		if (likely(iv_len)) {
			uint64_t *dest = (uint64_t *)((uint8_t *)offset_vaddr +
			uint64_t *src = fc_params->iv_buf;
		*(uint64_t *)offset_vaddr =
			rte_cpu_to_be_64(((uint64_t)encr_offset << 16) |
					 ((uint64_t)iv_offset << 8) |
					 ((uint64_t)auth_offset));
		uint64_t dptr_dma, rptr_dma;
		uint32_t g_size_bytes, s_size_bytes;
		sg_comp_t *gather_comp;
		sg_comp_t *scatter_comp;
		/* This falls under strict SG mode */
		offset_vaddr = m_vaddr;
		size = OFF_CTRL_LEN + iv_len;
		m_vaddr = (uint8_t *)m_vaddr + size;
		opcode.s.major |= CPT_DMA_MODE;
		vq_cmd_w0.s.opcode = opcode.flags;
		if (likely(iv_len)) {
			uint64_t *dest = (uint64_t *)((uint8_t *)offset_vaddr +
			uint64_t *src = fc_params->iv_buf;
		*(uint64_t *)offset_vaddr =
			rte_cpu_to_be_64(((uint64_t)encr_offset << 16) |
					 ((uint64_t)iv_offset << 8) |
					 ((uint64_t)auth_offset));
		/* DPTR has SG list */
		in_buffer = m_vaddr;
		((uint16_t *)in_buffer)[0] = 0;
		((uint16_t *)in_buffer)[1] = 0;
		/* TODO Add error check if space will be sufficient */
		gather_comp = (sg_comp_t *)((uint8_t *)m_vaddr + 8);
		/* Offset control word that includes iv */
		i = fill_sg_comp(gather_comp, i, offset_dma,
				 OFF_CTRL_LEN + iv_len);
		/* Add input data */
		if (flags & VALID_MAC_BUF) {
			size = inputlen - iv_len - mac_len;
			/* input data only */
			if (unlikely(flags & SINGLE_BUF_INPLACE)) {
				i = fill_sg_comp_from_buf_min(
				uint32_t aad_offset = aad_len ?
					passthrough_len : 0;
				i = fill_sg_comp_from_iov(gather_comp,
			if (unlikely(size)) {
				CPT_LOG_DP_ERR("Insufficient buffer"
					       " space, size %d needed",
			i = fill_sg_comp_from_buf(gather_comp, i,
						  &fc_params->mac_buf);
			/* input data + mac */
			size = inputlen - iv_len;
			if (unlikely(flags & SINGLE_BUF_INPLACE)) {
				i = fill_sg_comp_from_buf_min(
				uint32_t aad_offset = aad_len ?
					passthrough_len : 0;
				if (unlikely(!fc_params->src_iov)) {
					CPT_LOG_DP_ERR("Bad input args");
				i = fill_sg_comp_from_iov(
			if (unlikely(size)) {
				CPT_LOG_DP_ERR("Insufficient buffer"
					       " space, size %d needed",
		((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
		g_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
		/* Output Scatter List */
			(sg_comp_t *)((uint8_t *)gather_comp + g_size_bytes);
			i = fill_sg_comp(scatter_comp, i,
					 offset_dma + OFF_CTRL_LEN,
		/* Add output data */
		size = outputlen - iv_len;
		if (unlikely(flags & SINGLE_BUF_INPLACE)) {
			/* handle single buffer here */
			i = fill_sg_comp_from_buf_min(scatter_comp, i,
			uint32_t aad_offset = aad_len ?
				passthrough_len : 0;
			if (unlikely(!fc_params->dst_iov)) {
				CPT_LOG_DP_ERR("Bad input args");
			i = fill_sg_comp_from_iov(scatter_comp, i,
						  fc_params->dst_iov, 0,
			if (unlikely(size)) {
				CPT_LOG_DP_ERR("Insufficient buffer space,"
					       " size %d needed", size);
		((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
		s_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
		size = g_size_bytes + s_size_bytes + SG_LIST_HDR_SIZE;
		/* This is the DPTR len in case of SG mode */
		vq_cmd_w0.s.dlen = size;
		m_vaddr = (uint8_t *)m_vaddr + size;
		/* cpt alternate completion address saved earlier */
		req->alternate_caddr = (uint64_t *)((uint8_t *)c_vaddr - 8);
		*req->alternate_caddr = ~((uint64_t)COMPLETION_CODE_INIT);
		rptr_dma = c_dma - 8;
		size += COMPLETION_CODE_SIZE;
		req->ist.ei1 = dptr_dma;
		req->ist.ei2 = rptr_dma;
	ctx_dma = fc_params->ctx_buf.dma_addr +
		offsetof(struct cpt_ctx, fctx);
	vq_cmd_w3.s.grp = 0;
	vq_cmd_w3.s.cptr = ctx_dma;
	/* 16 byte aligned cpt res address */
	req->completion_addr = (uint64_t *)((uint8_t *)c_vaddr);
	*req->completion_addr = COMPLETION_CODE_INIT;
	req->comp_baddr = c_dma;
	/* Fill microcode part of instruction */
	req->ist.ei0 = vq_cmd_w0.u64;
	req->ist.ei3 = vq_cmd_w3.u64;
static __rte_always_inline void
cpt_zuc_snow3g_enc_prep(uint32_t req_flags,
			fc_params_t *params,
	int32_t inputlen, outputlen;
	struct cpt_ctx *cpt_ctx;
	uint32_t mac_len = 0;
	struct cpt_request_info *req;
	uint32_t encr_offset = 0, auth_offset = 0;
	uint32_t encr_data_len = 0, auth_data_len = 0;
	int flags, iv_len = 16;
	void *m_vaddr, *c_vaddr;
	uint64_t m_dma, c_dma, offset_ctrl;
	uint64_t *offset_vaddr, offset_dma;
	uint32_t *iv_s, iv[4];
	vq_cmd_word0_t vq_cmd_w0;
	vq_cmd_word3_t vq_cmd_w3;
	opcode_info_t opcode;
	buf_p = &params->meta_buf;
	m_vaddr = buf_p->vaddr;
	m_dma = buf_p->dma_addr;
	cpt_ctx = params->ctx_buf.vaddr;
	flags = cpt_ctx->zsk_flags;
	mac_len = cpt_ctx->mac_len;
	snow3g = cpt_ctx->snow3g;
	/*
	 * Save initial space following the app data for the completion code &
	 * alternate completion code, so they fall in the same cache line as
	 * the app data.
	 */
	m_vaddr = (uint8_t *)m_vaddr + COMPLETION_CODE_SIZE;
	m_dma += COMPLETION_CODE_SIZE;
	size = (uint8_t *)RTE_PTR_ALIGN((uint8_t *)m_vaddr, 16) -
	c_vaddr = (uint8_t *)m_vaddr + size;
	c_dma = m_dma + size;
	size += sizeof(cpt_res_s_t);
	m_vaddr = (uint8_t *)m_vaddr + size;
	/* Reserve memory for cpt request info */
	size = sizeof(struct cpt_request_info);
	m_vaddr = (uint8_t *)m_vaddr + size;
	opcode.s.major = CPT_MAJOR_OP_ZUC_SNOW3G;
	/* indicates CPTR ctx, operation type, KEY & IV mode from DPTR */
	opcode.s.minor = ((1 << 7) | (snow3g << 5) | (0 << 4) |
			  (0 << 3) | (flags & 0x7));
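	/*
	 * Minor-opcode bit map, as far as can be inferred from the masks
	 * above: bit 7 selects the CPTR context, bit 5 SNOW 3G vs ZUC,
	 * bits 4/3 the key & IV coming from DPTR, and bits 0-2 carry
	 * zsk_flags (cipher vs auth variant). E.g. SNOW 3G cipher
	 * (snow3g = 1, flags = 0) gives minor = 0xa0.
	 */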
	/*
	 * Microcode expects offsets in bytes
	 * TODO: Rounding off
	 */
	auth_data_len = AUTH_DLEN(d_lens);
	auth_offset = AUTH_OFFSET(d_offs);
	auth_offset = auth_offset / 8;
	/* consider iv len */
	auth_offset += iv_len;
	inputlen = auth_offset + (RTE_ALIGN(auth_data_len, 8) / 8);
	outputlen = mac_len;
	offset_ctrl = rte_cpu_to_be_64((uint64_t)auth_offset);
	/*
	 * Microcode expects offsets in bytes
	 * TODO: Rounding off
	 */
	encr_data_len = ENCR_DLEN(d_lens);
	encr_offset = ENCR_OFFSET(d_offs);
	encr_offset = encr_offset / 8;
	/* consider iv len */
	encr_offset += iv_len;
	inputlen = encr_offset + (RTE_ALIGN(encr_data_len, 8) / 8);
	outputlen = inputlen;
	/* iv offset is 0 */
	offset_ctrl = rte_cpu_to_be_64((uint64_t)encr_offset << 16);
	iv_s = (flags == 0x1) ? params->auth_iv_buf :
		params->iv_buf;
	/*
	 * DPDK seems to provide the IV in the form IV3 IV2 IV1 IV0 and
	 * big-endian; the MC needs it as IV0 IV1 IV2 IV3.
	 */
	for (j = 0; j < 4; j++)
		iv[j] = iv_s[3 - j];
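	/*
	 * Example of the swap above: an input of {IV3, IV2, IV1, IV0}
	 * words becomes iv[] = {IV0, IV1, IV2, IV3}; the word order is
	 * reversed while each word's internal byte order is untouched.
	 */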
	/* ZUC doesn't need a swap */
	for (j = 0; j < 4; j++)
	/*
	 * GP op header, lengths are expected in bits.
	 */
	vq_cmd_w0.s.param1 = encr_data_len;
	vq_cmd_w0.s.param2 = auth_data_len;
	/*
	 * On 83XX, the IV & offset control word cannot be part of the
	 * instruction and must live in the data buffer, so we only do the
	 * direct-mode processing when there is head room for them.
	 */
	if (likely((req_flags & SINGLE_BUF_INPLACE) &&
		   (req_flags & SINGLE_BUF_HEADTAILROOM))) {
		void *dm_vaddr = params->bufs[0].vaddr;
		uint64_t dm_dma_addr = params->bufs[0].dma_addr;
		/*
		 * This flag indicates that there are 24 bytes of head room and
		 * 8 bytes of tail room available, so we can do DIRECT MODE
		 * with this limitation.
		 */
		offset_vaddr = (uint64_t *)((uint8_t *)dm_vaddr -
					    OFF_CTRL_LEN - iv_len);
		offset_dma = dm_dma_addr - OFF_CTRL_LEN - iv_len;
		req->ist.ei1 = offset_dma;
		/* RPTR should just exclude offset control word */
		req->ist.ei2 = dm_dma_addr - iv_len;
		req->alternate_caddr = (uint64_t *)((uint8_t *)dm_vaddr
						    + outputlen - iv_len);
		vq_cmd_w0.s.dlen = inputlen + OFF_CTRL_LEN;
		vq_cmd_w0.s.opcode = opcode.flags;
		if (likely(iv_len)) {
			uint32_t *iv_d = (uint32_t *)((uint8_t *)offset_vaddr
			memcpy(iv_d, iv, 16);
		*offset_vaddr = offset_ctrl;
		uint32_t i, g_size_bytes, s_size_bytes;
		uint64_t dptr_dma, rptr_dma;
		sg_comp_t *gather_comp;
		sg_comp_t *scatter_comp;
		/* save space for iv */
		offset_vaddr = m_vaddr;
		m_vaddr = (uint8_t *)m_vaddr + OFF_CTRL_LEN + iv_len;
		m_dma += OFF_CTRL_LEN + iv_len;
		opcode.s.major |= CPT_DMA_MODE;
		vq_cmd_w0.s.opcode = opcode.flags;
		/* DPTR has SG list */
		in_buffer = m_vaddr;
		((uint16_t *)in_buffer)[0] = 0;
		((uint16_t *)in_buffer)[1] = 0;
		/* TODO Add error check if space will be sufficient */
		gather_comp = (sg_comp_t *)((uint8_t *)m_vaddr + 8);
		/* Offset control word followed by iv */
		i = fill_sg_comp(gather_comp, i, offset_dma,
				 OFF_CTRL_LEN + iv_len);
		/* iv offset is 0 */
		*offset_vaddr = offset_ctrl;
		iv_d = (uint32_t *)((uint8_t *)offset_vaddr + OFF_CTRL_LEN);
		memcpy(iv_d, iv, 16);
		size = inputlen - iv_len;
		i = fill_sg_comp_from_iov(gather_comp, i,
		if (unlikely(size)) {
			CPT_LOG_DP_ERR("Insufficient buffer space,"
				       " size %d needed", size);
		((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
		g_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
		/* Output Scatter List */
			(sg_comp_t *)((uint8_t *)gather_comp + g_size_bytes);
		/* IV in SLIST only for EEA3 & UEA2 */
			i = fill_sg_comp(scatter_comp, i,
					 offset_dma + OFF_CTRL_LEN, iv_len);
		/* Add output data */
		if (req_flags & VALID_MAC_BUF) {
			size = outputlen - iv_len - mac_len;
			i = fill_sg_comp_from_iov(scatter_comp, i,
			if (unlikely(size)) {
				CPT_LOG_DP_ERR("Insufficient buffer space,"
					       " size %d needed", size);
			i = fill_sg_comp_from_buf(scatter_comp, i,
			/* Output including mac */
			size = outputlen - iv_len;
			i = fill_sg_comp_from_iov(scatter_comp, i,
			if (unlikely(size)) {
				CPT_LOG_DP_ERR("Insufficient buffer space,"
					       " size %d needed", size);
		((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
		s_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
		size = g_size_bytes + s_size_bytes + SG_LIST_HDR_SIZE;
		/* This is the DPTR len in case of SG mode */
		vq_cmd_w0.s.dlen = size;
		m_vaddr = (uint8_t *)m_vaddr + size;
		/* cpt alternate completion address saved earlier */
		req->alternate_caddr = (uint64_t *)((uint8_t *)c_vaddr - 8);
		*req->alternate_caddr = ~((uint64_t)COMPLETION_CODE_INIT);
		rptr_dma = c_dma - 8;
		req->ist.ei1 = dptr_dma;
		req->ist.ei2 = rptr_dma;
	vq_cmd_w3.s.grp = 0;
	vq_cmd_w3.s.cptr = params->ctx_buf.dma_addr +
		offsetof(struct cpt_ctx, zs_ctx);
	/* 16 byte aligned cpt res address */
	req->completion_addr = (uint64_t *)((uint8_t *)c_vaddr);
	*req->completion_addr = COMPLETION_CODE_INIT;
	req->comp_baddr = c_dma;
	/* Fill microcode part of instruction */
	req->ist.ei0 = vq_cmd_w0.u64;
	req->ist.ei3 = vq_cmd_w3.u64;
static __rte_always_inline void
cpt_zuc_snow3g_dec_prep(uint32_t req_flags,
			fc_params_t *params,
	int32_t inputlen = 0, outputlen;
	struct cpt_ctx *cpt_ctx;
	uint8_t snow3g, iv_len = 16;
	struct cpt_request_info *req;
	uint32_t encr_offset;
	uint32_t encr_data_len;
	void *m_vaddr, *c_vaddr;
	uint64_t m_dma, c_dma;
	uint64_t *offset_vaddr, offset_dma;
	uint32_t *iv_s, iv[4], j;
	vq_cmd_word0_t vq_cmd_w0;
	vq_cmd_word3_t vq_cmd_w3;
	opcode_info_t opcode;
	buf_p = &params->meta_buf;
	m_vaddr = buf_p->vaddr;
	m_dma = buf_p->dma_addr;
	/*
	 * Microcode expects offsets in bytes
	 * TODO: Rounding off
	 */
	encr_offset = ENCR_OFFSET(d_offs) / 8;
	encr_data_len = ENCR_DLEN(d_lens);
	cpt_ctx = params->ctx_buf.vaddr;
	flags = cpt_ctx->zsk_flags;
	snow3g = cpt_ctx->snow3g;
	/*
	 * Save initial space following the app data for the completion code &
	 * alternate completion code, so they fall in the same cache line as
	 * the app data.
	 */
	m_vaddr = (uint8_t *)m_vaddr + COMPLETION_CODE_SIZE;
	m_dma += COMPLETION_CODE_SIZE;
	size = (uint8_t *)RTE_PTR_ALIGN((uint8_t *)m_vaddr, 16) -
	c_vaddr = (uint8_t *)m_vaddr + size;
	c_dma = m_dma + size;
	size += sizeof(cpt_res_s_t);
	m_vaddr = (uint8_t *)m_vaddr + size;
	/* Reserve memory for cpt request info */
	size = sizeof(struct cpt_request_info);
	m_vaddr = (uint8_t *)m_vaddr + size;
	opcode.s.major = CPT_MAJOR_OP_ZUC_SNOW3G;
	/* indicates CPTR ctx, operation type, KEY & IV mode from DPTR */
	opcode.s.minor = ((1 << 7) | (snow3g << 5) | (0 << 4) |
			  (0 << 3) | (flags & 0x7));
	/* consider iv len */
	encr_offset += iv_len;
	inputlen = encr_offset +
		(RTE_ALIGN(encr_data_len, 8) / 8);
	outputlen = inputlen;
	iv_s = params->iv_buf;
	/*
	 * DPDK seems to provide the IV in the form IV3 IV2 IV1 IV0 and
	 * big-endian; the MC needs it as IV0 IV1 IV2 IV3.
	 */
	for (j = 0; j < 4; j++)
		iv[j] = iv_s[3 - j];
	/* ZUC doesn't need a swap */
	for (j = 0; j < 4; j++)
	/*
	 * GP op header, lengths are expected in bits.
	 */
	vq_cmd_w0.s.param1 = encr_data_len;
	/*
	 * On 83XX, the IV & offset control word cannot be part of the
	 * instruction and must live in the data buffer, so we only do the
	 * direct-mode processing when there is head room for them.
	 */
	if (likely((req_flags & SINGLE_BUF_INPLACE) &&
		   (req_flags & SINGLE_BUF_HEADTAILROOM))) {
		void *dm_vaddr = params->bufs[0].vaddr;
		uint64_t dm_dma_addr = params->bufs[0].dma_addr;
		/*
		 * This flag indicates that there are 24 bytes of head room and
		 * 8 bytes of tail room available, so we can do DIRECT MODE
		 * with this limitation.
		 */
		offset_vaddr = (uint64_t *)((uint8_t *)dm_vaddr -
					    OFF_CTRL_LEN - iv_len);
		offset_dma = dm_dma_addr - OFF_CTRL_LEN - iv_len;
		req->ist.ei1 = offset_dma;
		/* RPTR should just exclude offset control word */
		req->ist.ei2 = dm_dma_addr - iv_len;
		req->alternate_caddr = (uint64_t *)((uint8_t *)dm_vaddr
						    + outputlen - iv_len);
		vq_cmd_w0.s.dlen = inputlen + OFF_CTRL_LEN;
		vq_cmd_w0.s.opcode = opcode.flags;
		if (likely(iv_len)) {
			uint32_t *iv_d = (uint32_t *)((uint8_t *)offset_vaddr
			memcpy(iv_d, iv, 16);
		/* iv offset is 0 */
		*offset_vaddr = rte_cpu_to_be_64((uint64_t)encr_offset << 16);
		uint32_t i, g_size_bytes, s_size_bytes;
		uint64_t dptr_dma, rptr_dma;
		sg_comp_t *gather_comp;
		sg_comp_t *scatter_comp;
		/* save space for offset and iv... */
		offset_vaddr = m_vaddr;
		m_vaddr = (uint8_t *)m_vaddr + OFF_CTRL_LEN + iv_len;
		m_dma += OFF_CTRL_LEN + iv_len;
		opcode.s.major |= CPT_DMA_MODE;
		vq_cmd_w0.s.opcode = opcode.flags;
		/* DPTR has SG list */
		in_buffer = m_vaddr;
		((uint16_t *)in_buffer)[0] = 0;
		((uint16_t *)in_buffer)[1] = 0;
		/* TODO Add error check if space will be sufficient */
		gather_comp = (sg_comp_t *)((uint8_t *)m_vaddr + 8);
		/* Offset control word */
		/* iv offset is 0 */
		*offset_vaddr = rte_cpu_to_be_64((uint64_t)encr_offset << 16);
		i = fill_sg_comp(gather_comp, i, offset_dma,
				 OFF_CTRL_LEN + iv_len);
		iv_d = (uint32_t *)((uint8_t *)offset_vaddr + OFF_CTRL_LEN);
		memcpy(iv_d, iv, 16);
		/* Add input data */
		size = inputlen - iv_len;
		i = fill_sg_comp_from_iov(gather_comp, i,
		if (unlikely(size)) {
			CPT_LOG_DP_ERR("Insufficient buffer space,"
				       " size %d needed", size);
		((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
		g_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
		/* Output Scatter List */
			(sg_comp_t *)((uint8_t *)gather_comp + g_size_bytes);
		i = fill_sg_comp(scatter_comp, i,
				 offset_dma + OFF_CTRL_LEN,
		/* Add output data */
		size = outputlen - iv_len;
		i = fill_sg_comp_from_iov(scatter_comp, i,
		if (unlikely(size)) {
			CPT_LOG_DP_ERR("Insufficient buffer space,"
				       " size %d needed", size);
		((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
		s_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
		size = g_size_bytes + s_size_bytes + SG_LIST_HDR_SIZE;
		/* This is the DPTR len in case of SG mode */
		vq_cmd_w0.s.dlen = size;
		m_vaddr = (uint8_t *)m_vaddr + size;
		/* cpt alternate completion address saved earlier */
		req->alternate_caddr = (uint64_t *)((uint8_t *)c_vaddr - 8);
		*req->alternate_caddr = ~((uint64_t)COMPLETION_CODE_INIT);
		rptr_dma = c_dma - 8;
		req->ist.ei1 = dptr_dma;
		req->ist.ei2 = rptr_dma;
	vq_cmd_w3.s.grp = 0;
	vq_cmd_w3.s.cptr = params->ctx_buf.dma_addr +
		offsetof(struct cpt_ctx, zs_ctx);
	/* 16 byte aligned cpt res address */
	req->completion_addr = (uint64_t *)((uint8_t *)c_vaddr);
	*req->completion_addr = COMPLETION_CODE_INIT;
	req->comp_baddr = c_dma;
	/* Fill microcode part of instruction */
	req->ist.ei0 = vq_cmd_w0.u64;
	req->ist.ei3 = vq_cmd_w3.u64;
static __rte_always_inline void
cpt_kasumi_enc_prep(uint32_t req_flags,
		    fc_params_t *params,
	int32_t inputlen = 0, outputlen = 0;
	struct cpt_ctx *cpt_ctx;
	uint32_t mac_len = 0;
	struct cpt_request_info *req;
	uint32_t encr_offset, auth_offset;
	uint32_t encr_data_len, auth_data_len;
	uint8_t *iv_s, *iv_d, iv_len = 8;
	void *m_vaddr, *c_vaddr;
	uint64_t m_dma, c_dma;
	uint64_t *offset_vaddr, offset_dma;
	vq_cmd_word0_t vq_cmd_w0;
	vq_cmd_word3_t vq_cmd_w3;
	opcode_info_t opcode;
	uint32_t g_size_bytes, s_size_bytes;
	uint64_t dptr_dma, rptr_dma;
	sg_comp_t *gather_comp;
	sg_comp_t *scatter_comp;
	buf_p = &params->meta_buf;
	m_vaddr = buf_p->vaddr;
	m_dma = buf_p->dma_addr;
	encr_offset = ENCR_OFFSET(d_offs) / 8;
	auth_offset = AUTH_OFFSET(d_offs) / 8;
	encr_data_len = ENCR_DLEN(d_lens);
	auth_data_len = AUTH_DLEN(d_lens);
	cpt_ctx = params->ctx_buf.vaddr;
	flags = cpt_ctx->zsk_flags;
	mac_len = cpt_ctx->mac_len;
		iv_s = params->iv_buf;
		iv_s = params->auth_iv_buf;
	dir = iv_s[8] & 0x1;
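	/*
	 * Note (an inference from the line above): the 9th IV byte appears
	 * to carry the 3GPP DIRECTION bit in its LSB, so it is peeled off
	 * here and fed into the minor opcode below rather than being sent
	 * as part of the IV proper.
	 */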
	/*
	 * Save initial space following the app data for the completion code &
	 * alternate completion code, so they fall in the same cache line as
	 * the app data.
	 */
	m_vaddr = (uint8_t *)m_vaddr + COMPLETION_CODE_SIZE;
	m_dma += COMPLETION_CODE_SIZE;
	size = (uint8_t *)RTE_PTR_ALIGN((uint8_t *)m_vaddr, 16) -
	c_vaddr = (uint8_t *)m_vaddr + size;
	c_dma = m_dma + size;
	size += sizeof(cpt_res_s_t);
	m_vaddr = (uint8_t *)m_vaddr + size;
	/* Reserve memory for cpt request info */
	size = sizeof(struct cpt_request_info);
	m_vaddr = (uint8_t *)m_vaddr + size;
	opcode.s.major = CPT_MAJOR_OP_KASUMI | CPT_DMA_MODE;
	/* indicates ECB/CBC, direction, ctx from cptr, iv from dptr */
	opcode.s.minor = ((1 << 6) | (cpt_ctx->k_ecb << 5) |
			  (dir << 4) | (0 << 3) | (flags & 0x7));
	/*
	 * GP op header, lengths are expected in bits.
	 */
	vq_cmd_w0.s.param1 = encr_data_len;
	vq_cmd_w0.s.param2 = auth_data_len;
	vq_cmd_w0.s.opcode = opcode.flags;
	/* consider iv len */
		encr_offset += iv_len;
		auth_offset += iv_len;
	/* save space for offset ctrl and iv */
	offset_vaddr = m_vaddr;
	m_vaddr = (uint8_t *)m_vaddr + OFF_CTRL_LEN + iv_len;
	m_dma += OFF_CTRL_LEN + iv_len;
	/* DPTR has SG list */
	in_buffer = m_vaddr;
	((uint16_t *)in_buffer)[0] = 0;
	((uint16_t *)in_buffer)[1] = 0;
	/* TODO Add error check if space will be sufficient */
	gather_comp = (sg_comp_t *)((uint8_t *)m_vaddr + 8);
	/* Offset control word followed by iv */
		inputlen = encr_offset + (RTE_ALIGN(encr_data_len, 8) / 8);
		outputlen = inputlen;
		/* iv offset is 0 */
		*offset_vaddr = rte_cpu_to_be_64((uint64_t)encr_offset << 16);
		inputlen = auth_offset + (RTE_ALIGN(auth_data_len, 8) / 8);
		outputlen = mac_len;
		/* iv offset is 0 */
		*offset_vaddr = rte_cpu_to_be_64((uint64_t)auth_offset);
	i = fill_sg_comp(gather_comp, i, offset_dma, OFF_CTRL_LEN + iv_len);
	iv_d = (uint8_t *)offset_vaddr + OFF_CTRL_LEN;
	memcpy(iv_d, iv_s, iv_len);
	size = inputlen - iv_len;
	i = fill_sg_comp_from_iov(gather_comp, i,
	if (unlikely(size)) {
		CPT_LOG_DP_ERR("Insufficient buffer space,"
			       " size %d needed", size);
	((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
	g_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
	/* Output Scatter List */
	scatter_comp = (sg_comp_t *)((uint8_t *)gather_comp + g_size_bytes);
	/* IV in SLIST only for F8 */
		i = fill_sg_comp(scatter_comp, i,
				 offset_dma + OFF_CTRL_LEN,
	/* Add output data */
	if (req_flags & VALID_MAC_BUF) {
		size = outputlen - iv_len - mac_len;
		i = fill_sg_comp_from_iov(scatter_comp, i,
		if (unlikely(size)) {
			CPT_LOG_DP_ERR("Insufficient buffer space,"
				       " size %d needed", size);
		i = fill_sg_comp_from_buf(scatter_comp, i,
		/* Output including mac */
		size = outputlen - iv_len;
		i = fill_sg_comp_from_iov(scatter_comp, i,
		if (unlikely(size)) {
			CPT_LOG_DP_ERR("Insufficient buffer space,"
				       " size %d needed", size);
	((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
	s_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
	size = g_size_bytes + s_size_bytes + SG_LIST_HDR_SIZE;
	/* This is the DPTR len in case of SG mode */
	vq_cmd_w0.s.dlen = size;
	m_vaddr = (uint8_t *)m_vaddr + size;
	/* cpt alternate completion address saved earlier */
	req->alternate_caddr = (uint64_t *)((uint8_t *)c_vaddr - 8);
	*req->alternate_caddr = ~((uint64_t)COMPLETION_CODE_INIT);
	rptr_dma = c_dma - 8;
	req->ist.ei1 = dptr_dma;
	req->ist.ei2 = rptr_dma;
	vq_cmd_w3.s.grp = 0;
	vq_cmd_w3.s.cptr = params->ctx_buf.dma_addr +
		offsetof(struct cpt_ctx, k_ctx);
	/* 16 byte aligned cpt res address */
	req->completion_addr = (uint64_t *)((uint8_t *)c_vaddr);
	*req->completion_addr = COMPLETION_CODE_INIT;
	req->comp_baddr = c_dma;
	/* Fill microcode part of instruction */
	req->ist.ei0 = vq_cmd_w0.u64;
	req->ist.ei3 = vq_cmd_w3.u64;
static __rte_always_inline void
cpt_kasumi_dec_prep(uint64_t d_offs,
		    fc_params_t *params,
	int32_t inputlen = 0, outputlen;
	struct cpt_ctx *cpt_ctx;
	uint8_t i = 0, iv_len = 8;
	struct cpt_request_info *req;
	uint32_t encr_offset;
	uint32_t encr_data_len;
	void *m_vaddr, *c_vaddr;
	uint64_t m_dma, c_dma;
	uint64_t *offset_vaddr, offset_dma;
	vq_cmd_word0_t vq_cmd_w0;
	vq_cmd_word3_t vq_cmd_w3;
	opcode_info_t opcode;
	uint32_t g_size_bytes, s_size_bytes;
	uint64_t dptr_dma, rptr_dma;
	sg_comp_t *gather_comp;
	sg_comp_t *scatter_comp;
	buf_p = &params->meta_buf;
	m_vaddr = buf_p->vaddr;
	m_dma = buf_p->dma_addr;
	encr_offset = ENCR_OFFSET(d_offs) / 8;
	encr_data_len = ENCR_DLEN(d_lens);
	cpt_ctx = params->ctx_buf.vaddr;
	flags = cpt_ctx->zsk_flags;
	/*
	 * Save initial space following the app data for the completion code &
	 * alternate completion code, so they fall in the same cache line as
	 * the app data.
	 */
	m_vaddr = (uint8_t *)m_vaddr + COMPLETION_CODE_SIZE;
	m_dma += COMPLETION_CODE_SIZE;
	size = (uint8_t *)RTE_PTR_ALIGN((uint8_t *)m_vaddr, 16) -
	c_vaddr = (uint8_t *)m_vaddr + size;
	c_dma = m_dma + size;
	size += sizeof(cpt_res_s_t);
	m_vaddr = (uint8_t *)m_vaddr + size;
	/* Reserve memory for cpt request info */
	size = sizeof(struct cpt_request_info);
	m_vaddr = (uint8_t *)m_vaddr + size;
	opcode.s.major = CPT_MAJOR_OP_KASUMI | CPT_DMA_MODE;
	/* indicates ECB/CBC, direction, ctx from cptr, iv from dptr */
	opcode.s.minor = ((1 << 6) | (cpt_ctx->k_ecb << 5) |
			  (dir << 4) | (0 << 3) | (flags & 0x7));
	/*
	 * GP op header, lengths are expected in bits.
	 */
	vq_cmd_w0.s.param1 = encr_data_len;
	vq_cmd_w0.s.opcode = opcode.flags;
	/* consider iv len */
	encr_offset += iv_len;
	inputlen = iv_len + (RTE_ALIGN(encr_data_len, 8) / 8);
	outputlen = inputlen;
	/* save space for offset ctrl & iv */
	offset_vaddr = m_vaddr;
	m_vaddr = (uint8_t *)m_vaddr + OFF_CTRL_LEN + iv_len;
	m_dma += OFF_CTRL_LEN + iv_len;
	/* DPTR has SG list */
	in_buffer = m_vaddr;
	((uint16_t *)in_buffer)[0] = 0;
	((uint16_t *)in_buffer)[1] = 0;
	/* TODO Add error check if space will be sufficient */
	gather_comp = (sg_comp_t *)((uint8_t *)m_vaddr + 8);
	/* Offset control word followed by iv */
	*offset_vaddr = rte_cpu_to_be_64((uint64_t)encr_offset << 16);
	i = fill_sg_comp(gather_comp, i, offset_dma, OFF_CTRL_LEN + iv_len);
	memcpy((uint8_t *)offset_vaddr + OFF_CTRL_LEN,
	       params->iv_buf, iv_len);
	/* Add input data */
	size = inputlen - iv_len;
	i = fill_sg_comp_from_iov(gather_comp, i,
	if (unlikely(size)) {
		CPT_LOG_DP_ERR("Insufficient buffer space,"
			       " size %d needed", size);
	((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
	g_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
	/* Output Scatter List */
	scatter_comp = (sg_comp_t *)((uint8_t *)gather_comp + g_size_bytes);
	i = fill_sg_comp(scatter_comp, i,
			 offset_dma + OFF_CTRL_LEN,
	/* Add output data */
	size = outputlen - iv_len;
	i = fill_sg_comp_from_iov(scatter_comp, i,
	if (unlikely(size)) {
		CPT_LOG_DP_ERR("Insufficient buffer space,"
			       " size %d needed", size);
	((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
	s_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
	size = g_size_bytes + s_size_bytes + SG_LIST_HDR_SIZE;
	/* This is the DPTR len in case of SG mode */
	vq_cmd_w0.s.dlen = size;
	m_vaddr = (uint8_t *)m_vaddr + size;
	/* cpt alternate completion address saved earlier */
	req->alternate_caddr = (uint64_t *)((uint8_t *)c_vaddr - 8);
	*req->alternate_caddr = ~((uint64_t)COMPLETION_CODE_INIT);
	rptr_dma = c_dma - 8;
	req->ist.ei1 = dptr_dma;
	req->ist.ei2 = rptr_dma;
	vq_cmd_w3.s.grp = 0;
	vq_cmd_w3.s.cptr = params->ctx_buf.dma_addr +
		offsetof(struct cpt_ctx, k_ctx);
	/* 16 byte aligned cpt res address */
	req->completion_addr = (uint64_t *)((uint8_t *)c_vaddr);
	*req->completion_addr = COMPLETION_CODE_INIT;
	req->comp_baddr = c_dma;
	/* Fill microcode part of instruction */
	req->ist.ei0 = vq_cmd_w0.u64;
	req->ist.ei3 = vq_cmd_w3.u64;
static __rte_always_inline void *
cpt_fc_dec_hmac_prep(uint32_t flags,
		     fc_params_t *fc_params,
	struct cpt_ctx *ctx = fc_params->ctx_buf.vaddr;
	void *prep_req = NULL;
	fc_type = ctx->fc_type;
	if (likely(fc_type == FC_GEN)) {
		cpt_dec_hmac_prep(flags, d_offs, d_lens, fc_params, op,
	} else if (fc_type == ZUC_SNOW3G) {
		cpt_zuc_snow3g_dec_prep(flags, d_offs, d_lens, fc_params, op,
	} else if (fc_type == KASUMI) {
		cpt_kasumi_dec_prep(d_offs, d_lens, fc_params, op, &prep_req);
	/*
	 * For the AUTH_ONLY case, the MC only supports digest generation;
	 * verification must be done in software via memcmp().
	 */
static __rte_always_inline void *__hot
cpt_fc_enc_hmac_prep(uint32_t flags, uint64_t d_offs, uint64_t d_lens,
		     fc_params_t *fc_params, void *op)
	struct cpt_ctx *ctx = fc_params->ctx_buf.vaddr;
	void *prep_req = NULL;
	fc_type = ctx->fc_type;
	/* Common API for the rest of the ops */
	if (likely(fc_type == FC_GEN)) {
		cpt_enc_hmac_prep(flags, d_offs, d_lens, fc_params, op,
	} else if (fc_type == ZUC_SNOW3G) {
		cpt_zuc_snow3g_enc_prep(flags, d_offs, d_lens, fc_params, op,
	} else if (fc_type == KASUMI) {
		cpt_kasumi_enc_prep(flags, d_offs, d_lens, fc_params, op,
	} else if (fc_type == HASH_HMAC) {
		cpt_digest_gen_prep(flags, d_lens, fc_params, op, &prep_req);
static __rte_always_inline int
cpt_fc_auth_set_key(void *ctx, auth_type_t type, const uint8_t *key,
		    uint16_t key_len, uint16_t mac_len)
	struct cpt_ctx *cpt_ctx = ctx;
	mc_fc_context_t *fctx = &cpt_ctx->fctx;
	uint64_t *ctrl_flags = NULL;
	if ((type >= ZUC_EIA3) && (type <= KASUMI_F9_ECB)) {
		/* No support for AEAD yet */
		if (cpt_ctx->enc_cipher)
		/* For ZUC/SNOW3G/Kasumi */
			cpt_ctx->snow3g = 1;
			gen_key_snow3g(key, keyx);
			memcpy(cpt_ctx->zs_ctx.ci_key, keyx, key_len);
			cpt_ctx->fc_type = ZUC_SNOW3G;
			cpt_ctx->zsk_flags = 0x1;
			cpt_ctx->snow3g = 0;
			memcpy(cpt_ctx->zs_ctx.ci_key, key, key_len);
			memcpy(cpt_ctx->zs_ctx.zuc_const, zuc_d, 32);
			cpt_ctx->fc_type = ZUC_SNOW3G;
			cpt_ctx->zsk_flags = 0x1;
			/* Kasumi ECB mode */
			memcpy(cpt_ctx->k_ctx.ci_key, key, key_len);
			cpt_ctx->fc_type = KASUMI;
			cpt_ctx->zsk_flags = 0x1;
			memcpy(cpt_ctx->k_ctx.ci_key, key, key_len);
			cpt_ctx->fc_type = KASUMI;
			cpt_ctx->zsk_flags = 0x1;
		cpt_ctx->mac_len = 4;
		cpt_ctx->hash_type = type;
	if (!(cpt_ctx->fc_type == FC_GEN && !type)) {
		if (!cpt_ctx->fc_type || !cpt_ctx->enc_cipher)
			cpt_ctx->fc_type = HASH_HMAC;
	ctrl_flags = (uint64_t *)&fctx->enc.enc_ctrl.flags;
	*ctrl_flags = rte_be_to_cpu_64(*ctrl_flags);
	/* For GMAC auth, cipher must be NULL */
	if (type == GMAC_TYPE)
		CPT_P_ENC_CTRL(fctx).enc_cipher = 0;
	CPT_P_ENC_CTRL(fctx).hash_type = cpt_ctx->hash_type = type;
	CPT_P_ENC_CTRL(fctx).mac_len = cpt_ctx->mac_len = mac_len;
		memset(cpt_ctx->auth_key, 0, sizeof(cpt_ctx->auth_key));
		memcpy(cpt_ctx->auth_key, key, key_len);
		cpt_ctx->auth_key_len = key_len;
		memset(fctx->hmac.ipad, 0, sizeof(fctx->hmac.ipad));
		memset(fctx->hmac.opad, 0, sizeof(fctx->hmac.opad));
		memcpy(fctx->hmac.opad, key, key_len);
		CPT_P_ENC_CTRL(fctx).auth_input_type = 1;
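		/*
		 * Note the asymmetry here: the raw key is staged in opad
		 * with auth_input_type = 1, which suggests the microcode
		 * derives the actual HMAC ipad/opad values itself rather
		 * than expecting precomputed pads.
		 */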
	*ctrl_flags = rte_cpu_to_be_64(*ctrl_flags);
static __rte_always_inline int
fill_sess_aead(struct rte_crypto_sym_xform *xform,
	       struct cpt_sess_misc *sess)
	struct rte_crypto_aead_xform *aead_form;
	cipher_type_t enc_type = 0; /* NULL cipher type */
	auth_type_t auth_type = 0; /* NULL auth type */
	uint32_t cipher_key_len = 0;
	uint8_t aes_gcm = 0;
	aead_form = &xform->aead;
	void *ctx = SESS_PRIV(sess);
	if (aead_form->op == RTE_CRYPTO_AEAD_OP_ENCRYPT &&
	    aead_form->algo == RTE_CRYPTO_AEAD_AES_GCM) {
		sess->cpt_op |= CPT_OP_CIPHER_ENCRYPT;
		sess->cpt_op |= CPT_OP_AUTH_GENERATE;
	} else if (aead_form->op == RTE_CRYPTO_AEAD_OP_DECRYPT &&
		   aead_form->algo == RTE_CRYPTO_AEAD_AES_GCM) {
		sess->cpt_op |= CPT_OP_CIPHER_DECRYPT;
		sess->cpt_op |= CPT_OP_AUTH_VERIFY;
		CPT_LOG_DP_ERR("Unknown cipher operation");
	switch (aead_form->algo) {
	case RTE_CRYPTO_AEAD_AES_GCM:
		cipher_key_len = 16;
	case RTE_CRYPTO_AEAD_AES_CCM:
		CPT_LOG_DP_ERR("Crypto: Unsupported cipher algo %u",
		CPT_LOG_DP_ERR("Crypto: Undefined cipher algo %u specified",
	if (aead_form->key.length < cipher_key_len) {
		CPT_LOG_DP_ERR("Invalid cipher params keylen %lu",
			       (unsigned long)aead_form->key.length);
	sess->aes_gcm = aes_gcm;
	sess->mac_len = aead_form->digest_length;
	sess->iv_offset = aead_form->iv.offset;
	sess->iv_length = aead_form->iv.length;
	sess->aad_length = aead_form->aad_length;
	cpt_fc_ciph_set_key(ctx, enc_type, aead_form->key.data,
			    aead_form->key.length, NULL);
	cpt_fc_auth_set_key(ctx, auth_type, NULL, 0, aead_form->digest_length);
static __rte_always_inline int
fill_sess_cipher(struct rte_crypto_sym_xform *xform,
		 struct cpt_sess_misc *sess)
	struct rte_crypto_cipher_xform *c_form;
	cipher_type_t enc_type = 0; /* NULL cipher type */
	uint32_t cipher_key_len = 0;
	uint8_t zsk_flag = 0, aes_ctr = 0, is_null = 0;
	c_form = &xform->cipher;
	if (c_form->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
		sess->cpt_op |= CPT_OP_CIPHER_ENCRYPT;
	else if (c_form->op == RTE_CRYPTO_CIPHER_OP_DECRYPT)
		sess->cpt_op |= CPT_OP_CIPHER_DECRYPT;
		CPT_LOG_DP_ERR("Unknown cipher operation");
2650 switch (c_form->algo) {
2651 case RTE_CRYPTO_CIPHER_AES_CBC:
2653 cipher_key_len = 16;
2655 case RTE_CRYPTO_CIPHER_3DES_CBC:
2656 enc_type = DES3_CBC;
2657 cipher_key_len = 24;
2659 case RTE_CRYPTO_CIPHER_DES_CBC:
2660 /* DES is implemented using 3DES in hardware */
2661 enc_type = DES3_CBC;
2664 case RTE_CRYPTO_CIPHER_AES_CTR:
2666 cipher_key_len = 16;
2669 case RTE_CRYPTO_CIPHER_NULL:
2673 case RTE_CRYPTO_CIPHER_KASUMI_F8:
2674 enc_type = KASUMI_F8_ECB;
2675 cipher_key_len = 16;
2678 case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
2679 enc_type = SNOW3G_UEA2;
2680 cipher_key_len = 16;
2683 case RTE_CRYPTO_CIPHER_ZUC_EEA3:
2684 enc_type = ZUC_EEA3;
2685 cipher_key_len = 16;
2688 case RTE_CRYPTO_CIPHER_AES_XTS:
2690 cipher_key_len = 16;
2692 case RTE_CRYPTO_CIPHER_3DES_ECB:
2693 enc_type = DES3_ECB;
2694 cipher_key_len = 24;
2696 case RTE_CRYPTO_CIPHER_AES_ECB:
2698 cipher_key_len = 16;
2700 case RTE_CRYPTO_CIPHER_3DES_CTR:
2701 case RTE_CRYPTO_CIPHER_AES_F8:
2702 case RTE_CRYPTO_CIPHER_ARC4:
2703 CPT_LOG_DP_ERR("Crypto: Unsupported cipher algo %u",
2707 CPT_LOG_DP_ERR("Crypto: Undefined cipher algo %u specified",
2712 if (c_form->key.length < cipher_key_len) {
2713 CPT_LOG_DP_ERR("Invalid cipher params keylen %lu",
2714 (unsigned long) c_form->key.length);
2718 sess->zsk_flag = zsk_flag;
2720 sess->aes_ctr = aes_ctr;
2721 sess->iv_offset = c_form->iv.offset;
2722 sess->iv_length = c_form->iv.length;
2723 sess->is_null = is_null;
2725 cpt_fc_ciph_set_key(SESS_PRIV(sess), enc_type, c_form->key.data,
2726 c_form->key.length, NULL);
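
/*
 * Editor's illustrative sketch, not part of the original driver: a plain
 * 3DES-CBC cipher-only xform for fill_sess_cipher(). Note the key length
 * check above only enforces a minimum (24 bytes for 3DES). All values are
 * placeholders.
 */
static __rte_always_inline int
example_fill_3des_cbc_session(struct cpt_sess_misc *sess)
{
	static uint8_t key[24]; /* placeholder all-zero key */
	struct rte_crypto_sym_xform xform;

	memset(&xform, 0, sizeof(xform));
	xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
	xform.cipher.algo = RTE_CRYPTO_CIPHER_3DES_CBC;
	xform.cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
	xform.cipher.key.data = key;
	xform.cipher.key.length = sizeof(key);
	xform.cipher.iv.offset = sizeof(struct rte_crypto_op) +
				 sizeof(struct rte_crypto_sym_op); /* example */
	xform.cipher.iv.length = 8; /* DES block size */

	return fill_sess_cipher(&xform, sess);
}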
static __rte_always_inline int
fill_sess_auth(struct rte_crypto_sym_xform *xform,
	       struct cpt_sess_misc *sess)
{
	struct rte_crypto_auth_xform *a_form;
	auth_type_t auth_type = 0; /* NULL Auth type */
	uint8_t zsk_flag = 0, aes_gcm = 0, is_null = 0;

	a_form = &xform->auth;

	if (a_form->op == RTE_CRYPTO_AUTH_OP_VERIFY)
		sess->cpt_op |= CPT_OP_AUTH_VERIFY;
	else if (a_form->op == RTE_CRYPTO_AUTH_OP_GENERATE)
		sess->cpt_op |= CPT_OP_AUTH_GENERATE;
	else {
		CPT_LOG_DP_ERR("Unknown auth operation");
		return -1;
	}

	if (a_form->key.length > 64) {
		CPT_LOG_DP_ERR("Auth key length is big");
		return -1;
	}

	switch (a_form->algo) {
	case RTE_CRYPTO_AUTH_SHA1_HMAC:
		/* Fall through */
	case RTE_CRYPTO_AUTH_SHA1:
		auth_type = SHA1_TYPE;
		break;
	case RTE_CRYPTO_AUTH_SHA256_HMAC:
	case RTE_CRYPTO_AUTH_SHA256:
		auth_type = SHA2_SHA256;
		break;
	case RTE_CRYPTO_AUTH_SHA512_HMAC:
	case RTE_CRYPTO_AUTH_SHA512:
		auth_type = SHA2_SHA512;
		break;
	case RTE_CRYPTO_AUTH_AES_GMAC:
		auth_type = GMAC_TYPE;
		aes_gcm = 1;
		break;
	case RTE_CRYPTO_AUTH_SHA224_HMAC:
	case RTE_CRYPTO_AUTH_SHA224:
		auth_type = SHA2_SHA224;
		break;
	case RTE_CRYPTO_AUTH_SHA384_HMAC:
	case RTE_CRYPTO_AUTH_SHA384:
		auth_type = SHA2_SHA384;
		break;
	case RTE_CRYPTO_AUTH_MD5_HMAC:
	case RTE_CRYPTO_AUTH_MD5:
		auth_type = MD5_TYPE;
		break;
	case RTE_CRYPTO_AUTH_KASUMI_F9:
		auth_type = KASUMI_F9_ECB;
		/*
		 * Indicate that direction needs to be taken out
		 * from end of src
		 */
		zsk_flag = K_F9;
		break;
	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
		auth_type = SNOW3G_UIA2;
		zsk_flag = ZS_IA;
		break;
	case RTE_CRYPTO_AUTH_ZUC_EIA3:
		auth_type = ZUC_EIA3;
		zsk_flag = ZS_IA;
		break;
	case RTE_CRYPTO_AUTH_NULL:
		auth_type = 0;
		is_null = 1;
		break;
	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
	case RTE_CRYPTO_AUTH_AES_CMAC:
	case RTE_CRYPTO_AUTH_AES_CBC_MAC:
		CPT_LOG_DP_ERR("Crypto: Unsupported hash algo %u",
			       a_form->algo);
		return -1;
	default:
		CPT_LOG_DP_ERR("Crypto: Undefined Hash algo %u specified",
			       a_form->algo);
		return -1;
	}

	sess->zsk_flag = zsk_flag;
	sess->aes_gcm = aes_gcm;
	sess->mac_len = a_form->digest_length;
	sess->is_null = is_null;

	sess->auth_iv_offset = a_form->iv.offset;
	sess->auth_iv_length = a_form->iv.length;

	cpt_fc_auth_set_key(SESS_PRIV(sess), auth_type, a_form->key.data,
			    a_form->key.length, a_form->digest_length);

	return 0;
}
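
/*
 * Editor's illustrative sketch, not part of the original driver: a SNOW3G
 * UIA2 auth-only xform. The wireless algorithms carry a separate 16-byte
 * auth IV and produce the 4-byte MAC that the ZUC/SNOW3G/Kasumi key-set
 * path above hard-codes. All values are placeholders.
 */
static __rte_always_inline int
example_fill_snow3g_uia2_session(struct cpt_sess_misc *sess)
{
	static uint8_t key[16]; /* placeholder all-zero key */
	struct rte_crypto_sym_xform xform;

	memset(&xform, 0, sizeof(xform));
	xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
	xform.auth.algo = RTE_CRYPTO_AUTH_SNOW3G_UIA2;
	xform.auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
	xform.auth.key.data = key;
	xform.auth.key.length = sizeof(key);
	xform.auth.digest_length = 4;
	xform.auth.iv.offset = sizeof(struct rte_crypto_op) +
			       sizeof(struct rte_crypto_sym_op); /* example */
	xform.auth.iv.length = 16;

	return fill_sess_auth(&xform, sess);
}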
static __rte_always_inline int
fill_sess_gmac(struct rte_crypto_sym_xform *xform,
	       struct cpt_sess_misc *sess)
{
	struct rte_crypto_auth_xform *a_form;
	cipher_type_t enc_type = 0; /* NULL Cipher type */
	auth_type_t auth_type = 0; /* NULL Auth type */
	void *ctx = SESS_PRIV(sess);

	a_form = &xform->auth;

	if (a_form->op == RTE_CRYPTO_AUTH_OP_GENERATE)
		sess->cpt_op |= CPT_OP_ENCODE;
	else if (a_form->op == RTE_CRYPTO_AUTH_OP_VERIFY)
		sess->cpt_op |= CPT_OP_DECODE;
	else {
		CPT_LOG_DP_ERR("Unknown auth operation");
		return -1;
	}

	switch (a_form->algo) {
	case RTE_CRYPTO_AUTH_AES_GMAC:
		enc_type = AES_GCM;
		auth_type = GMAC_TYPE;
		break;
	default:
		CPT_LOG_DP_ERR("Crypto: Undefined cipher algo %u specified",
			       a_form->algo);
		return -1;
	}

	sess->zsk_flag = 0;
	sess->aes_gcm = 0;
	sess->is_gmac = 1;
	sess->iv_offset = a_form->iv.offset;
	sess->iv_length = a_form->iv.length;
	sess->mac_len = a_form->digest_length;

	cpt_fc_ciph_set_key(ctx, enc_type, a_form->key.data,
			    a_form->key.length, NULL);
	cpt_fc_auth_set_key(ctx, auth_type, NULL, 0, a_form->digest_length);

	return 0;
}
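
/*
 * Editor's illustrative sketch, not part of the original driver: GMAC is
 * requested through an auth xform, but as seen above it also programs an
 * AES_GCM cipher key, which is why it gets its own fill routine. Values
 * are placeholders; the 16-byte IV length assumes the same salt + nonce
 * layout as GCM.
 */
static __rte_always_inline int
example_fill_gmac_session(struct cpt_sess_misc *sess)
{
	static uint8_t key[16]; /* placeholder all-zero key */
	struct rte_crypto_sym_xform xform;

	memset(&xform, 0, sizeof(xform));
	xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
	xform.auth.algo = RTE_CRYPTO_AUTH_AES_GMAC;
	xform.auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
	xform.auth.key.data = key;
	xform.auth.key.length = sizeof(key);
	xform.auth.digest_length = 16;
	xform.auth.iv.offset = sizeof(struct rte_crypto_op) +
			       sizeof(struct rte_crypto_sym_op); /* example */
	xform.auth.iv.length = 16; /* assumed: 4B salt + 12B nonce */

	return fill_sess_gmac(&xform, sess);
}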
static __rte_always_inline void *
alloc_op_meta(struct rte_mbuf *m_src,
	      buf_ptr_t *buf,
	      int32_t len,
	      struct rte_mempool *cpt_meta_pool)
{
	uint8_t *mdata = NULL;

#ifndef CPT_ALWAYS_USE_SEPARATE_BUF
	if (likely(m_src && (m_src->nb_segs == 1))) {
		int32_t tailroom;
		phys_addr_t mphys;

		/* Check if tailroom is sufficient to hold meta data */
		tailroom = rte_pktmbuf_tailroom(m_src);
		if (likely(tailroom > len + 8)) {
			mdata = (uint8_t *)m_src->buf_addr + m_src->buf_len;
			mphys = m_src->buf_physaddr + m_src->buf_len;
			mdata -= len;
			mphys -= len;
			buf->vaddr = mdata;
			buf->dma_addr = mphys;
			buf->size = len;
			/* Indicate that this is a mbuf allocated mdata */
			mdata = (uint8_t *)((uint64_t)mdata | 1ull);
			return mdata;
		}
	}
#else
	RTE_SET_USED(m_src);
#endif

	if (unlikely(rte_mempool_get(cpt_meta_pool, (void **)&mdata) < 0))
		return NULL;

	buf->vaddr = mdata;
	buf->dma_addr = rte_mempool_virt2iova(mdata);
	buf->size = len;

	return mdata;
}
/**
 * free_op_meta - free the meta buffer back to its mempool.
 * @param mdata: pointer to the meta buffer (possibly tagged in bit 0).
 * @param cpt_meta_pool: mempool the buffer was taken from.
 */
static __rte_always_inline void
free_op_meta(void *mdata, struct rte_mempool *cpt_meta_pool)
{
	bool nofree = ((uintptr_t)mdata & 1ull);

	if (likely(nofree))
		return;
	rte_mempool_put(cpt_meta_pool, mdata);
}
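
/*
 * Editor's illustrative sketch, not part of the original driver: bit 0 of
 * the pointer returned by alloc_op_meta() is a tag -- set when the meta
 * data lives in mbuf tailroom (so free_op_meta() skips the mempool put),
 * clear when it came from the mempool. Consumers mask it off before use,
 * as fill_fc_params() does for its 'op' pointer.
 */
static __rte_always_inline void *
example_untag_op_meta(void *mdata)
{
	return (void *)((uintptr_t)mdata & ~(uintptr_t)1ull);
}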
static __rte_always_inline uint32_t
prepare_iov_from_pkt(struct rte_mbuf *pkt,
		     iov_ptr_t *iovec, uint32_t start_offset)
{
	uint16_t index = 0;
	void *seg_data = NULL;
	phys_addr_t seg_phys;
	int32_t seg_size = 0;

	if (!pkt) {
		iovec->buf_cnt = 0;
		return 0;
	}

	if (!start_offset) {
		seg_data = rte_pktmbuf_mtod(pkt, void *);
		seg_phys = rte_pktmbuf_mtophys(pkt);
		seg_size = pkt->data_len;
	} else {
		while (start_offset >= pkt->data_len) {
			start_offset -= pkt->data_len;
			pkt = pkt->next;
		}

		seg_data = rte_pktmbuf_mtod_offset(pkt, void *, start_offset);
		seg_phys = rte_pktmbuf_mtophys_offset(pkt, start_offset);
		seg_size = pkt->data_len - start_offset;
		if (!seg_size)
			return 1;
	}

	/* first seg */
	iovec->bufs[index].vaddr = seg_data;
	iovec->bufs[index].dma_addr = seg_phys;
	iovec->bufs[index].size = seg_size;
	index++;
	pkt = pkt->next;

	while (unlikely(pkt != NULL)) {
		seg_data = rte_pktmbuf_mtod(pkt, void *);
		seg_phys = rte_pktmbuf_mtophys(pkt);
		seg_size = pkt->data_len;
		if (!seg_size)
			break;

		iovec->bufs[index].vaddr = seg_data;
		iovec->bufs[index].dma_addr = seg_phys;
		iovec->bufs[index].size = seg_size;

		index++;
		pkt = pkt->next;
	}

	iovec->buf_cnt = index;
	return 0;
}
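
/*
 * Editor's illustrative sketch, not part of the original driver: summing
 * the segment sizes of an iov_ptr_t that prepare_iov_from_pkt() filled in,
 * e.g. to sanity-check it against rte_pktmbuf_pkt_len() of the source mbuf
 * (minus any start offset).
 */
static __rte_always_inline uint32_t
example_iovec_total_len(const iov_ptr_t *iovec)
{
	uint32_t total = 0;
	int i;

	for (i = 0; i < iovec->buf_cnt; i++)
		total += iovec->bufs[i].size;
	return total;
}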
static __rte_always_inline uint32_t
prepare_iov_from_pkt_inplace(struct rte_mbuf *pkt,
			     fc_params_t *param,
			     uint32_t *flags)
{
	uint16_t index = 0;
	void *seg_data = NULL;
	phys_addr_t seg_phys;
	uint32_t seg_size = 0;
	iov_ptr_t *iovec;

	seg_data = rte_pktmbuf_mtod(pkt, void *);
	seg_phys = rte_pktmbuf_mtophys(pkt);
	seg_size = pkt->data_len;

	/* first seg */
	if (likely(!pkt->next)) {
		uint32_t headroom, tailroom;

		*flags |= SINGLE_BUF_INPLACE;
		headroom = rte_pktmbuf_headroom(pkt);
		tailroom = rte_pktmbuf_tailroom(pkt);
		if (likely((headroom >= 24) &&
			   (tailroom >= 8))) {
			/* In 83XX this is a prerequisite for Direct mode */
			*flags |= SINGLE_BUF_HEADTAILROOM;
		}
		param->bufs[0].vaddr = seg_data;
		param->bufs[0].dma_addr = seg_phys;
		param->bufs[0].size = seg_size;
		return 0;
	}
	iovec = param->src_iov;
	iovec->bufs[index].vaddr = seg_data;
	iovec->bufs[index].dma_addr = seg_phys;
	iovec->bufs[index].size = seg_size;
	index++;
	pkt = pkt->next;

	while (unlikely(pkt != NULL)) {
		seg_data = rte_pktmbuf_mtod(pkt, void *);
		seg_phys = rte_pktmbuf_mtophys(pkt);
		seg_size = pkt->data_len;

		if (!seg_size)
			break;

		iovec->bufs[index].vaddr = seg_data;
		iovec->bufs[index].dma_addr = seg_phys;
		iovec->bufs[index].size = seg_size;

		index++;
		pkt = pkt->next;
	}

	iovec->buf_cnt = index;
	return 0;
}
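
/*
 * Editor's illustrative sketch, not part of the original driver: the
 * head/tailroom test above as a standalone predicate -- a single-segment
 * mbuf qualifies for the 83XX Direct mode only with at least 24 bytes of
 * headroom and 8 bytes of tailroom (the tailroom bound is assumed from
 * the elided half of the condition).
 */
static __rte_always_inline int
example_can_use_direct_mode(const struct rte_mbuf *pkt)
{
	return pkt->next == NULL &&
	       rte_pktmbuf_headroom(pkt) >= 24 &&
	       rte_pktmbuf_tailroom(pkt) >= 8;
}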
static __rte_always_inline int
fill_fc_params(struct rte_crypto_op *cop,
	       struct cpt_sess_misc *sess_misc,
	       struct cpt_qp_meta_info *m_info,
	       void **mdata_ptr,
	       void **prep_req)
{
	uint32_t space = 0;
	struct rte_crypto_sym_op *sym_op = cop->sym;
	void *mdata = NULL;
	uintptr_t *op;
	uint32_t mc_hash_off;
	uint32_t flags = 0;
	uint64_t d_offs, d_lens;
	struct rte_mbuf *m_src, *m_dst;
	uint8_t cpt_op = sess_misc->cpt_op;
#ifdef CPT_ALWAYS_USE_SG_MODE
	uint8_t inplace = 0;
#else
	uint8_t inplace = 1;
#endif
	fc_params_t fc_params;
	char src[SRC_IOV_SIZE];
	char dst[SRC_IOV_SIZE];
	uint32_t iv_buf[4];
	int ret;

	if (likely(sess_misc->iv_length)) {
		flags |= VALID_IV_BUF;
		fc_params.iv_buf = rte_crypto_op_ctod_offset(cop,
				   uint8_t *, sess_misc->iv_offset);
		if (sess_misc->aes_ctr &&
		    unlikely(sess_misc->iv_length != 16)) {
			memcpy((uint8_t *)iv_buf,
			       rte_crypto_op_ctod_offset(cop,
			       uint8_t *, sess_misc->iv_offset), 12);
			iv_buf[3] = rte_cpu_to_be_32(0x1);
			fc_params.iv_buf = iv_buf;
		}
	}

	if (sess_misc->zsk_flag) {
		fc_params.auth_iv_buf = rte_crypto_op_ctod_offset(cop,
					uint8_t *,
					sess_misc->auth_iv_offset);
		if (sess_misc->zsk_flag != ZS_EA)
			inplace = 0;
	}
	m_src = sym_op->m_src;
	m_dst = sym_op->m_dst;

	if (sess_misc->aes_gcm) {
		uint8_t *salt;
		uint8_t *aad_data;
		uint16_t aad_len;

		d_offs = sym_op->aead.data.offset;
		d_lens = sym_op->aead.data.length;
		mc_hash_off = sym_op->aead.data.offset +
			      sym_op->aead.data.length;

		aad_data = sym_op->aead.aad.data;
		aad_len = sess_misc->aad_length;
		if (likely((aad_data + aad_len) ==
			   rte_pktmbuf_mtod_offset(m_src,
				uint8_t *,
				sym_op->aead.data.offset))) {
			d_offs = (d_offs - aad_len) | (d_offs << 16);
			d_lens = (d_lens + aad_len) | (d_lens << 32);
		} else {
			fc_params.aad_buf.vaddr = sym_op->aead.aad.data;
			fc_params.aad_buf.dma_addr = sym_op->aead.aad.phys_addr;
			fc_params.aad_buf.size = aad_len;
			flags |= VALID_AAD_BUF;
			inplace = 0;
			d_offs = d_offs << 16;
			d_lens = d_lens << 32;
		}

		salt = fc_params.iv_buf;
		if (unlikely(*(uint32_t *)salt != sess_misc->salt)) {
			cpt_fc_salt_update(SESS_PRIV(sess_misc), salt);
			sess_misc->salt = *(uint32_t *)salt;
		}
		fc_params.iv_buf = salt + 4;
		if (likely(sess_misc->mac_len)) {
			struct rte_mbuf *m = (cpt_op & CPT_OP_ENCODE) ? m_dst :
					     m_src;

			if (!m)
				m = m_src;

			/* hmac immediately following data is best case */
			if (unlikely(rte_pktmbuf_mtod(m, uint8_t *) +
				     mc_hash_off !=
				     (uint8_t *)sym_op->aead.digest.data)) {
				flags |= VALID_MAC_BUF;
				fc_params.mac_buf.size = sess_misc->mac_len;
				fc_params.mac_buf.vaddr =
					sym_op->aead.digest.data;
				fc_params.mac_buf.dma_addr =
					sym_op->aead.digest.phys_addr;
				inplace = 0;
			}
		}
	} else {
		d_offs = sym_op->cipher.data.offset;
		d_lens = sym_op->cipher.data.length;
		mc_hash_off = sym_op->cipher.data.offset +
			      sym_op->cipher.data.length;
		d_offs = (d_offs << 16) | sym_op->auth.data.offset;
		d_lens = (d_lens << 32) | sym_op->auth.data.length;

		if (mc_hash_off < (sym_op->auth.data.offset +
				   sym_op->auth.data.length)) {
			mc_hash_off = (sym_op->auth.data.offset +
				       sym_op->auth.data.length);
		}
		/* for gmac, salt should be updated like in gcm */
		if (unlikely(sess_misc->is_gmac)) {
			uint8_t *salt;

			salt = fc_params.iv_buf;
			if (unlikely(*(uint32_t *)salt != sess_misc->salt)) {
				cpt_fc_salt_update(SESS_PRIV(sess_misc), salt);
				sess_misc->salt = *(uint32_t *)salt;
			}
			fc_params.iv_buf = salt + 4;
		}
		if (likely(sess_misc->mac_len)) {
			struct rte_mbuf *m;

			m = (cpt_op & CPT_OP_ENCODE) ? m_dst : m_src;
			if (!m)
				m = m_src;

			/* hmac immediately following data is best case */
			if (unlikely(rte_pktmbuf_mtod(m, uint8_t *) +
				     mc_hash_off !=
				     (uint8_t *)sym_op->auth.digest.data)) {
				flags |= VALID_MAC_BUF;
				fc_params.mac_buf.size =
					sess_misc->mac_len;
				fc_params.mac_buf.vaddr =
					sym_op->auth.digest.data;
				fc_params.mac_buf.dma_addr =
					sym_op->auth.digest.phys_addr;
				inplace = 0;
			}
		}
	}

	fc_params.ctx_buf.vaddr = SESS_PRIV(sess_misc);
	fc_params.ctx_buf.dma_addr = sess_misc->ctx_dma_addr;

	if (unlikely(sess_misc->is_null || sess_misc->cpt_op == CPT_OP_DECODE))
		inplace = 0;

	if (likely(!m_dst && inplace)) {
		/* Case of single buffer without AAD buf or
		 * separate mac buf in place and
		 * not air crypto
		 */
		fc_params.dst_iov = fc_params.src_iov = (void *)src;

		if (unlikely(prepare_iov_from_pkt_inplace(m_src,
							  &fc_params,
							  &flags))) {
			CPT_LOG_DP_ERR("Prepare inplace src iov failed");
			ret = -EINVAL;
			goto err_exit;
		}
	} else {
		/* Out of place processing */
		fc_params.src_iov = (void *)src;
		fc_params.dst_iov = (void *)dst;

		/* Store SG I/O in the api for reuse */
		if (prepare_iov_from_pkt(m_src, fc_params.src_iov, 0)) {
			CPT_LOG_DP_ERR("Prepare src iov failed");
			ret = -EINVAL;
			goto err_exit;
		}

		if (unlikely(m_dst != NULL)) {
			uint32_t pkt_len;

			/* Try to make room as much as src has */
			pkt_len = rte_pktmbuf_pkt_len(m_dst);

			if (unlikely(pkt_len < rte_pktmbuf_pkt_len(m_src))) {
				pkt_len = rte_pktmbuf_pkt_len(m_src) - pkt_len;
				if (!rte_pktmbuf_append(m_dst, pkt_len)) {
					CPT_LOG_DP_ERR("Not enough space in "
						       "m_dst %p, need %u"
						       " more",
						       m_dst, pkt_len);
					ret = -EINVAL;
					goto err_exit;
				}
			}

			if (prepare_iov_from_pkt(m_dst, fc_params.dst_iov, 0)) {
				CPT_LOG_DP_ERR("Prepare dst iov failed for "
					       "m_dst %p", m_dst);
				ret = -EINVAL;
				goto err_exit;
			}
		} else {
			fc_params.dst_iov = (void *)src;
		}
	}

	if (likely(flags & SINGLE_BUF_HEADTAILROOM))
		mdata = alloc_op_meta(m_src, &fc_params.meta_buf,
				      m_info->lb_mlen, m_info->pool);
	else
		mdata = alloc_op_meta(NULL, &fc_params.meta_buf,
				      m_info->sg_mlen, m_info->pool);

	if (unlikely(mdata == NULL)) {
		CPT_LOG_DP_ERR("Error allocating meta buffer for request");
		ret = -ENOMEM;
		goto err_exit;
	}

	op = (uintptr_t *)((uintptr_t)mdata & (uintptr_t)~1ull);
	op[0] = (uintptr_t)mdata;
	op[1] = (uintptr_t)cop;
	op[2] = op[3] = 0; /* Used to indicate auth verify */
	space += 4 * sizeof(uint64_t);

	fc_params.meta_buf.vaddr = (uint8_t *)op + space;
	fc_params.meta_buf.dma_addr += space;
	fc_params.meta_buf.size -= space;

	/* Finally prepare the instruction */
	if (cpt_op & CPT_OP_ENCODE)
		*prep_req = cpt_fc_enc_hmac_prep(flags, d_offs, d_lens,
						 &fc_params, op);
	else
		*prep_req = cpt_fc_dec_hmac_prep(flags, d_offs, d_lens,
						 &fc_params, op);

	if (unlikely(*prep_req == NULL)) {
		CPT_LOG_DP_ERR("Preparing request failed due to bad input arg");
		ret = -EINVAL;
		goto free_mdata_and_exit;
	}

	*mdata_ptr = mdata;

	return 0;

free_mdata_and_exit:
	free_op_meta(mdata, m_info->pool);
err_exit:
	return ret;
}
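
/*
 * Editor's worked example, not part of the original driver: the offset and
 * length packing fill_fc_params() uses for the cipher+auth case. Cipher
 * offset/length ride in the upper halves, auth offset/length in the lower.
 */
static __rte_always_inline void
example_pack_offsets_and_lengths(void)
{
	uint64_t d_offs, d_lens;

	/* cipher range at offset 16, length 64; auth at offset 0, length 96 */
	d_offs = ((uint64_t)16 << 16) | 0;  /* 0x100000 */
	d_lens = ((uint64_t)64 << 32) | 96; /* 0x4000000060 */

	RTE_SET_USED(d_offs);
	RTE_SET_USED(d_lens);
}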
static __rte_always_inline void
compl_auth_verify(struct rte_crypto_op *op,
		  uint8_t *gen_mac,
		  uint64_t mac_len)
{
	uint8_t *mac;
	struct rte_crypto_sym_op *sym_op = op->sym;

	if (sym_op->auth.digest.data)
		mac = sym_op->auth.digest.data;
	else
		mac = rte_pktmbuf_mtod_offset(sym_op->m_src,
					      uint8_t *,
					      sym_op->auth.data.length +
					      sym_op->auth.data.offset);
	if (!mac) {
		op->status = RTE_CRYPTO_OP_STATUS_ERROR;
		return;
	}

	if (memcmp(mac, gen_mac, mac_len))
		op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
	else
		op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
}
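
/*
 * Editor's illustrative sketch, not part of the original driver:
 * completion-side use of compl_auth_verify(), assuming the op[] convention
 * set up by fill_digest_params() below, where op[2]/op[3] hold the
 * generated MAC address and its length for the verify case.
 */
static __rte_always_inline void
example_finish_auth_verify(struct rte_crypto_op *cop, uintptr_t *op)
{
	if (op[2])
		compl_auth_verify(cop, (uint8_t *)op[2], (uint64_t)op[3]);
}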
static __rte_always_inline int
instance_session_cfg(struct rte_crypto_sym_xform *xform, void *sess)
{
	struct rte_crypto_sym_xform *chain;

	CPT_PMD_INIT_FUNC_TRACE();

	if (cpt_is_algo_supported(xform))
		goto err;

	chain = xform;
	while (chain) {
		switch (chain->type) {
		case RTE_CRYPTO_SYM_XFORM_AEAD:
			if (fill_sess_aead(chain, sess))
				goto err;
			break;
		case RTE_CRYPTO_SYM_XFORM_CIPHER:
			if (fill_sess_cipher(chain, sess))
				goto err;
			break;
		case RTE_CRYPTO_SYM_XFORM_AUTH:
			if (chain->auth.algo == RTE_CRYPTO_AUTH_AES_GMAC) {
				if (fill_sess_gmac(chain, sess))
					goto err;
			} else {
				if (fill_sess_auth(chain, sess))
					goto err;
			}
			break;
		default:
			CPT_LOG_DP_ERR("Invalid crypto xform type");
			goto err;
		}
		chain = chain->next;
	}

	return 0;

err:
	return -1;
}
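
/*
 * Editor's illustrative sketch, not part of the original driver: a
 * two-element encrypt-then-authenticate chain of the shape
 * instance_session_cfg() walks. All key and IV values are placeholders.
 */
static __rte_always_inline int
example_cfg_cipher_auth_chain(void *sess)
{
	static uint8_t ckey[16], akey[20]; /* placeholder all-zero keys */
	struct rte_crypto_sym_xform cipher_xf, auth_xf;

	memset(&cipher_xf, 0, sizeof(cipher_xf));
	memset(&auth_xf, 0, sizeof(auth_xf));

	cipher_xf.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
	cipher_xf.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
	cipher_xf.cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
	cipher_xf.cipher.key.data = ckey;
	cipher_xf.cipher.key.length = sizeof(ckey);
	cipher_xf.cipher.iv.offset = sizeof(struct rte_crypto_op) +
				     sizeof(struct rte_crypto_sym_op);
	cipher_xf.cipher.iv.length = 16;
	cipher_xf.next = &auth_xf;

	auth_xf.type = RTE_CRYPTO_SYM_XFORM_AUTH;
	auth_xf.auth.algo = RTE_CRYPTO_AUTH_SHA1_HMAC;
	auth_xf.auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
	auth_xf.auth.key.data = akey;
	auth_xf.auth.key.length = sizeof(akey);
	auth_xf.auth.digest_length = 20;

	return instance_session_cfg(&cipher_xf, sess);
}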
static __rte_always_inline void
find_kasumif9_direction_and_length(uint8_t *src,
				   uint32_t counter_num_bytes,
				   uint32_t *addr_length_in_bits,
				   uint8_t *addr_direction)
{
	uint32_t pos;
	uint8_t last_byte, found = 0;

	while (!found && counter_num_bytes > 0) {
		counter_num_bytes--;
		if (src[counter_num_bytes] == 0x00)
			continue;
		pos = rte_bsf32(src[counter_num_bytes]);
		if (pos == 7) {
			if (likely(counter_num_bytes > 0)) {
				last_byte = src[counter_num_bytes - 1];
				*addr_direction = last_byte & 0x1;
				*addr_length_in_bits = counter_num_bytes * 8
						       - 1;
			}
		} else {
			last_byte = src[counter_num_bytes];
			*addr_direction = (last_byte >> (pos + 1)) & 0x1;
			*addr_length_in_bits = counter_num_bytes * 8
					       + (8 - (pos + 2));
		}
		found = 1;
	}
}
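
/*
 * Editor's worked example, not part of the original driver: a 16-bit
 * message with direction bit 0 is padded per KASUMI F9 as 0b01000000
 * (0x40) in the byte after the message. Scanning backwards, src[2] = 0x40
 * is the last non-zero byte: rte_bsf32(0x40) = 6, so the direction bit is
 * bit 7 (= 0) and the length works out to 2 * 8 + (8 - (6 + 2)) = 16 bits.
 */
static __rte_always_inline void
example_kasumif9_scan(void)
{
	uint8_t buf[3] = { 0xAA, 0x55, 0x40 }; /* 16 msg bits + dir 0 + pad */
	uint32_t length_in_bits = 0;
	uint8_t direction = 0xff;

	find_kasumif9_direction_and_length(buf, sizeof(buf),
					   &length_in_bits, &direction);
	/* Here length_in_bits == 16 and direction == 0 */
	RTE_SET_USED(length_in_bits);
	RTE_SET_USED(direction);
}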
/*
 * This handles all auth only except AES_GMAC
 */
static __rte_always_inline int
fill_digest_params(struct rte_crypto_op *cop,
		   struct cpt_sess_misc *sess,
		   struct cpt_qp_meta_info *m_info,
		   void **mdata_ptr,
		   void **prep_req)
{
	uint32_t space = 0;
	struct rte_crypto_sym_op *sym_op = cop->sym;
	void *mdata = NULL;
	phys_addr_t mphys;
	uint64_t *op;
	uint32_t auth_range_off;
	uint32_t flags = 0;
	uint64_t d_offs = 0, d_lens;
	struct rte_mbuf *m_src, *m_dst;
	uint16_t auth_op = sess->cpt_op & CPT_OP_AUTH_MASK;
	uint16_t mac_len = sess->mac_len;
	fc_params_t params;
	char src[SRC_IOV_SIZE];
	uint8_t iv_buf[16];
	int ret;

	memset(&params, 0, sizeof(fc_params_t));

	m_src = sym_op->m_src;

	/* For just digest let's force mempool alloc */
	mdata = alloc_op_meta(NULL, &params.meta_buf, m_info->sg_mlen,
			      m_info->pool);
	if (mdata == NULL) {
		ret = -ENOMEM;
		goto err_exit;
	}

	mphys = params.meta_buf.dma_addr;

	op = mdata;
	op[0] = (uintptr_t)mdata;
	op[1] = (uintptr_t)cop;
	op[2] = op[3] = 0; /* Used to indicate auth verify */
	space += 4 * sizeof(uint64_t);

	auth_range_off = sym_op->auth.data.offset;

	flags = VALID_MAC_BUF;
	params.src_iov = (void *)src;
	if (unlikely(sess->zsk_flag)) {
		/*
		 * Since for Zuc, Kasumi, Snow3g offsets are in bits
		 * we will send pass through even for auth only case,
		 * let MC handle it
		 */
		d_offs = auth_range_off;
		auth_range_off = 0;
		params.auth_iv_buf = rte_crypto_op_ctod_offset(cop,
					uint8_t *, sess->auth_iv_offset);
		if (sess->zsk_flag == K_F9) {
			uint32_t length_in_bits, num_bytes;
			uint8_t *src, direction = 0;

			memcpy(iv_buf, rte_pktmbuf_mtod(cop->sym->m_src,
							uint8_t *), 8);
			/*
			 * This is kasumi f9, take direction from
			 * source buffer
			 */
			length_in_bits = cop->sym->auth.data.length;
			num_bytes = (length_in_bits >> 3);
			src = rte_pktmbuf_mtod(cop->sym->m_src, uint8_t *);
			find_kasumif9_direction_and_length(src,
						num_bytes,
						&length_in_bits,
						&direction);
			length_in_bits -= 64;
			cop->sym->auth.data.offset += 64;
			d_offs = cop->sym->auth.data.offset;
			auth_range_off = d_offs / 8;
			cop->sym->auth.data.length = length_in_bits;

			/* Store it at end of auth iv */
			iv_buf[8] = direction;
			params.auth_iv_buf = iv_buf;
		}
	}

	d_lens = sym_op->auth.data.length;

	params.ctx_buf.vaddr = SESS_PRIV(sess);
	params.ctx_buf.dma_addr = sess->ctx_dma_addr;

	if (auth_op == CPT_OP_AUTH_GENERATE) {
		if (sym_op->auth.digest.data) {
			/*
			 * Digest to be generated
			 * in separate buffer
			 */
			params.mac_buf.size =
				sess->mac_len;
			params.mac_buf.vaddr =
				sym_op->auth.digest.data;
			params.mac_buf.dma_addr =
				sym_op->auth.digest.phys_addr;
		} else {
			uint32_t off = sym_op->auth.data.offset +
				sym_op->auth.data.length;
			int32_t dlen, space;

			m_dst = sym_op->m_dst ?
				sym_op->m_dst : sym_op->m_src;
			dlen = rte_pktmbuf_pkt_len(m_dst);

			space = off + mac_len - dlen;
			if (space > 0)
				if (!rte_pktmbuf_append(m_dst, space)) {
					CPT_LOG_DP_ERR("Failed to extend "
						       "mbuf by %uB", space);
					ret = -EINVAL;
					goto free_mdata_and_exit;
				}

			params.mac_buf.vaddr =
				rte_pktmbuf_mtod_offset(m_dst, void *, off);
			params.mac_buf.dma_addr =
				rte_pktmbuf_mtophys_offset(m_dst, off);
			params.mac_buf.size = mac_len;
		}
	} else {
		/* Need space for storing generated mac */
		params.mac_buf.vaddr = (uint8_t *)mdata + space;
		params.mac_buf.dma_addr = mphys + space;
		params.mac_buf.size = mac_len;
		space += RTE_ALIGN_CEIL(mac_len, 8);
		op[2] = (uintptr_t)params.mac_buf.vaddr;
		op[3] = mac_len;
	}

	params.meta_buf.vaddr = (uint8_t *)mdata + space;
	params.meta_buf.dma_addr = mphys + space;
	params.meta_buf.size -= space;

	/* Out of place processing */
	params.src_iov = (void *)src;

	/* Store SG I/O in the api for reuse */
	if (prepare_iov_from_pkt(m_src, params.src_iov, auth_range_off)) {
		CPT_LOG_DP_ERR("Prepare src iov failed");
		ret = -EINVAL;
		goto free_mdata_and_exit;
	}

	*prep_req = cpt_fc_enc_hmac_prep(flags, d_offs, d_lens, &params, op);
	if (unlikely(*prep_req == NULL)) {
		ret = -EINVAL;
		goto free_mdata_and_exit;
	}

	*mdata_ptr = mdata;

	return 0;

free_mdata_and_exit:
	free_op_meta(mdata, m_info->pool);
err_exit:
	return ret;
}

#endif /* _CPT_UCODE_H_ */