/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Cavium, Inc
 */
9 #include "cpt_common.h"
10 #include "cpt_hw_types.h"
11 #include "cpt_mcode_defines.h"
 * This file defines functions that are interfaces to the microcode spec.
18 static uint8_t zuc_d[32] = {
19 0x44, 0xD7, 0x26, 0xBC, 0x62, 0x6B, 0x13, 0x5E,
20 0x57, 0x89, 0x35, 0xE2, 0x71, 0x35, 0x09, 0xAF,
21 0x4D, 0x78, 0x2F, 0x13, 0x6B, 0xC4, 0x1A, 0xF1,
22 0x5E, 0x26, 0x3C, 0x4D, 0x78, 0x9A, 0x47, 0xAC
25 static __rte_always_inline int
26 cpt_is_algo_supported(struct rte_crypto_sym_xform *xform)
 * Microcode only supports the following combinations:
 * encryption followed by authentication,
 * authentication followed by decryption.
34 if ((xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) &&
35 (xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) &&
36 (xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)) {
37 /* Unsupported as of now by microcode */
38 CPT_LOG_DP_ERR("Unsupported combination");
41 if ((xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) &&
42 (xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) &&
43 (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT)) {
44 /* For GMAC auth there is no cipher operation */
45 if (xform->aead.algo != RTE_CRYPTO_AEAD_AES_GCM ||
46 xform->next->auth.algo !=
47 RTE_CRYPTO_AUTH_AES_GMAC) {
48 /* Unsupported as of now by microcode */
49 CPT_LOG_DP_ERR("Unsupported combination");
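/*
 * Illustrative sketch (not part of the driver): a chained xform that
 * cpt_is_algo_supported() accepts - cipher encrypt followed by auth
 * generate. Key/IV fields are left to the caller; the function name
 * here is hypothetical.
 */
static __rte_always_inline void
example_enc_then_auth_chain(struct rte_crypto_sym_xform *cipher_xf,
			    struct rte_crypto_sym_xform *auth_xf)
{
	cipher_xf->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
	cipher_xf->cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
	/* Encryption followed by authentication is supported */
	cipher_xf->next = auth_xf;

	auth_xf->type = RTE_CRYPTO_SYM_XFORM_AUTH;
	auth_xf->auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
	auth_xf->next = NULL;
}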
57 static __rte_always_inline void
58 gen_key_snow3g(const uint8_t *ck, uint32_t *keyx)
62 for (i = 0; i < 4; i++) {
64 keyx[3 - i] = (ck[base] << 24) | (ck[base + 1] << 16) |
65 (ck[base + 2] << 8) | (ck[base + 3]);
66 keyx[3 - i] = rte_cpu_to_be_32(keyx[3 - i]);
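/*
 * A worked example of the transform above (assuming base = i * 4 in
 * the elided loop header): for ck = 00 01 02 ... 0f, the resulting
 * keyx, viewed as bytes, is
 *   keyx[0] = 0c 0d 0e 0f
 *   keyx[1] = 08 09 0a 0b
 *   keyx[2] = 04 05 06 07
 *   keyx[3] = 00 01 02 03
 * i.e. the key bytes keep their order within each 32-bit word, but
 * the word order is reversed for the microcode.
 */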
70 static __rte_always_inline void
71 cpt_fc_salt_update(void *ctx,
74 struct cpt_ctx *cpt_ctx = ctx;
75 memcpy(&cpt_ctx->fctx.enc.encr_iv, salt, 4);
78 static __rte_always_inline int
79 cpt_fc_ciph_validate_key_aes(uint16_t key_len)
91 static __rte_always_inline int
92 cpt_fc_ciph_set_type(cipher_type_t type, struct cpt_ctx *ctx, uint16_t key_len)
108 if (unlikely(cpt_fc_ciph_validate_key_aes(key_len) != 0))
113 key_len = key_len / 2;
114 if (unlikely(key_len == CPT_BYTE_24)) {
115 CPT_LOG_DP_ERR("Invalid AES key len for XTS");
118 if (unlikely(cpt_fc_ciph_validate_key_aes(key_len) != 0))
124 if (unlikely(key_len != 16))
126 /* No support for AEAD yet */
127 if (unlikely(ctx->hash_type))
129 fc_type = ZUC_SNOW3G;
133 if (unlikely(key_len != 16))
135 /* No support for AEAD yet */
136 if (unlikely(ctx->hash_type))
144 ctx->fc_type = fc_type;
148 static __rte_always_inline void
149 cpt_fc_ciph_set_key_passthrough(struct cpt_ctx *cpt_ctx, mc_fc_context_t *fctx)
151 cpt_ctx->enc_cipher = 0;
152 CPT_P_ENC_CTRL(fctx).enc_cipher = 0;
155 static __rte_always_inline void
156 cpt_fc_ciph_set_key_set_aes_key_type(mc_fc_context_t *fctx, uint16_t key_len)
158 mc_aes_type_t aes_key_type = 0;
161 aes_key_type = AES_128_BIT;
164 aes_key_type = AES_192_BIT;
167 aes_key_type = AES_256_BIT;
170 /* This should not happen */
171 CPT_LOG_DP_ERR("Invalid AES key len");
174 CPT_P_ENC_CTRL(fctx).aes_key = aes_key_type;
177 static __rte_always_inline void
178 cpt_fc_ciph_set_key_snow3g_uea2(struct cpt_ctx *cpt_ctx, const uint8_t *key,
183 gen_key_snow3g(key, keyx);
184 memcpy(cpt_ctx->zs_ctx.ci_key, keyx, key_len);
185 cpt_ctx->zsk_flags = 0;
188 static __rte_always_inline void
189 cpt_fc_ciph_set_key_zuc_eea3(struct cpt_ctx *cpt_ctx, const uint8_t *key,
193 memcpy(cpt_ctx->zs_ctx.ci_key, key, key_len);
194 memcpy(cpt_ctx->zs_ctx.zuc_const, zuc_d, 32);
195 cpt_ctx->zsk_flags = 0;
198 static __rte_always_inline void
199 cpt_fc_ciph_set_key_kasumi_f8_ecb(struct cpt_ctx *cpt_ctx, const uint8_t *key,
203 memcpy(cpt_ctx->k_ctx.ci_key, key, key_len);
204 cpt_ctx->zsk_flags = 0;
207 static __rte_always_inline void
208 cpt_fc_ciph_set_key_kasumi_f8_cbc(struct cpt_ctx *cpt_ctx, const uint8_t *key,
211 memcpy(cpt_ctx->k_ctx.ci_key, key, key_len);
212 cpt_ctx->zsk_flags = 0;
215 static __rte_always_inline int
216 cpt_fc_ciph_set_key(void *ctx, cipher_type_t type, const uint8_t *key,
217 uint16_t key_len, uint8_t *salt)
219 struct cpt_ctx *cpt_ctx = ctx;
220 mc_fc_context_t *fctx = &cpt_ctx->fctx;
221 uint64_t *ctrl_flags = NULL;
224 ret = cpt_fc_ciph_set_type(type, cpt_ctx, key_len);
228 if (cpt_ctx->fc_type == FC_GEN) {
229 ctrl_flags = (uint64_t *)&(fctx->enc.enc_ctrl.flags);
230 *ctrl_flags = rte_be_to_cpu_64(*ctrl_flags);
* We always need to say the IV comes from DPTR, as the user
* can override the IV on a per-operation basis.
235 CPT_P_ENC_CTRL(fctx).iv_source = CPT_FROM_DPTR;
240 cpt_fc_ciph_set_key_passthrough(cpt_ctx, fctx);
/* CPT performs DES using 3DES: the 8B DES key is
 * replicated twice more to match the 24B 3DES key.
 * E.g. if the original key is "0x0a 0x0b", the new key is
 * "0x0a 0x0b 0x0a 0x0b 0x0a 0x0b"
249 /* Skipping the first 8B as it will be copied
250 * in the regular code flow
252 memcpy(fctx->enc.encr_key+key_len, key, key_len);
253 memcpy(fctx->enc.encr_key+2*key_len, key, key_len);
257 /* For DES3_ECB IV need to be from CTX. */
258 CPT_P_ENC_CTRL(fctx).iv_source = CPT_FROM_CTX;
264 cpt_fc_ciph_set_key_set_aes_key_type(fctx, key_len);
/* Even though the IV source is DPTR,
 * the AES-GCM salt is taken from the context
271 memcpy(fctx->enc.encr_iv, salt, 4);
272 /* Assuming it was just salt update
278 cpt_fc_ciph_set_key_set_aes_key_type(fctx, key_len);
281 key_len = key_len / 2;
282 cpt_fc_ciph_set_key_set_aes_key_type(fctx, key_len);
284 /* Copy key2 for XTS into ipad */
285 memset(fctx->hmac.ipad, 0, sizeof(fctx->hmac.ipad));
286 memcpy(fctx->hmac.ipad, &key[key_len], key_len);
289 cpt_fc_ciph_set_key_snow3g_uea2(cpt_ctx, key, key_len);
292 cpt_fc_ciph_set_key_zuc_eea3(cpt_ctx, key, key_len);
295 cpt_fc_ciph_set_key_kasumi_f8_ecb(cpt_ctx, key, key_len);
298 cpt_fc_ciph_set_key_kasumi_f8_cbc(cpt_ctx, key, key_len);
304 /* Only for FC_GEN case */
306 /* For GMAC auth, cipher must be NULL */
307 if (cpt_ctx->hash_type != GMAC_TYPE)
308 CPT_P_ENC_CTRL(fctx).enc_cipher = type;
310 memcpy(fctx->enc.encr_key, key, key_len);
313 if (ctrl_flags != NULL)
314 *ctrl_flags = rte_cpu_to_be_64(*ctrl_flags);
317 cpt_ctx->enc_cipher = type;
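/*
 * Illustrative sketch (not part of the driver): programming an
 * AES-128-CBC key with the helper above. The salt argument is only
 * consumed by salt-bearing ciphers (e.g. AES-GCM), so NULL is fine
 * here; the context is assumed to be zero-initialized. The function
 * name is hypothetical.
 */
static __rte_always_inline int
example_set_aes_cbc_key(struct cpt_ctx *cpt_ctx, const uint8_t key[16])
{
	return cpt_fc_ciph_set_key(cpt_ctx, AES_CBC, key, 16, NULL);
}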
322 static __rte_always_inline uint32_t
323 fill_sg_comp(sg_comp_t *list,
325 phys_addr_t dma_addr,
328 sg_comp_t *to = &list[i>>2];
330 to->u.s.len[i%4] = rte_cpu_to_be_16(size);
331 to->ptr[i%4] = rte_cpu_to_be_64(dma_addr);
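/*
 * Illustrative sketch (not part of the driver): each sg_comp_t packs
 * four (len, ptr) slots, both stored big-endian for the microcode, so
 * component i lands in list[i >> 2], slot i % 4. Building a two-entry
 * gather list with hypothetical DMA addresses:
 */
static __rte_always_inline uint32_t
example_two_entry_glist(sg_comp_t *glist, phys_addr_t hdr_dma,
			phys_addr_t data_dma, uint16_t data_len)
{
	uint32_t i = 0;

	i = fill_sg_comp(glist, i, hdr_dma, 8);		/* slot 0 */
	i = fill_sg_comp(glist, i, data_dma, data_len);	/* slot 1 */
	return i; /* component count for the SG list header */
}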
336 static __rte_always_inline uint32_t
337 fill_sg_comp_from_buf(sg_comp_t *list,
341 sg_comp_t *to = &list[i>>2];
343 to->u.s.len[i%4] = rte_cpu_to_be_16(from->size);
344 to->ptr[i%4] = rte_cpu_to_be_64(from->dma_addr);
349 static __rte_always_inline uint32_t
350 fill_sg_comp_from_buf_min(sg_comp_t *list,
355 sg_comp_t *to = &list[i >> 2];
356 uint32_t size = *psize;
359 e_len = (size > from->size) ? from->size : size;
360 to->u.s.len[i % 4] = rte_cpu_to_be_16(e_len);
361 to->ptr[i % 4] = rte_cpu_to_be_64(from->dma_addr);
 * This fills the SGIO list expected by the MC
 * from the IOV given by the user.
371 static __rte_always_inline uint32_t
372 fill_sg_comp_from_iov(sg_comp_t *list,
374 iov_ptr_t *from, uint32_t from_offset,
375 uint32_t *psize, buf_ptr_t *extra_buf,
376 uint32_t extra_offset)
379 uint32_t extra_len = extra_buf ? extra_buf->size : 0;
380 uint32_t size = *psize - extra_len;
384 for (j = 0; (j < from->buf_cnt) && size; j++) {
385 phys_addr_t e_dma_addr;
387 sg_comp_t *to = &list[i >> 2];
392 if (unlikely(from_offset)) {
393 if (from_offset >= bufs[j].size) {
394 from_offset -= bufs[j].size;
397 e_dma_addr = bufs[j].dma_addr + from_offset;
398 e_len = (size > (bufs[j].size - from_offset)) ?
399 (bufs[j].size - from_offset) : size;
402 e_dma_addr = bufs[j].dma_addr;
403 e_len = (size > bufs[j].size) ?
407 to->u.s.len[i % 4] = rte_cpu_to_be_16(e_len);
408 to->ptr[i % 4] = rte_cpu_to_be_64(e_dma_addr);
410 if (extra_len && (e_len >= extra_offset)) {
411 /* Break the data at given offset */
412 uint32_t next_len = e_len - extra_offset;
413 phys_addr_t next_dma = e_dma_addr + extra_offset;
418 e_len = extra_offset;
420 to->u.s.len[i % 4] = rte_cpu_to_be_16(e_len);
423 /* Insert extra data ptr */
428 rte_cpu_to_be_16(extra_buf->size);
430 rte_cpu_to_be_64(extra_buf->dma_addr);
432 /* size already decremented by extra len */
435 /* insert the rest of the data */
439 to->u.s.len[i % 4] = rte_cpu_to_be_16(next_len);
440 to->ptr[i % 4] = rte_cpu_to_be_64(next_dma);
449 extra_offset -= size;
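/*
 * Illustrative sketch (not part of the driver): gathering `len` bytes
 * from an iov with fill_sg_comp_from_iov(), splicing an extra buffer
 * (e.g. a digest) in at `split` bytes. Note that *psize counts the
 * extra buffer's size as well. The function name is hypothetical.
 */
static __rte_always_inline int
example_gather_iov(sg_comp_t *glist, iov_ptr_t *iov, buf_ptr_t *extra,
		   uint32_t len, uint32_t split)
{
	uint32_t i = 0, size = len;

	i = fill_sg_comp_from_iov(glist, i, iov, 0, &size, extra, split);
	/* A non-zero remainder means the iov ran short */
	return size ? -1 : (int)i;
}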
457 static __rte_always_inline void
458 cpt_digest_gen_prep(uint32_t flags,
460 digest_params_t *params,
464 struct cpt_request_info *req;
466 uint16_t data_len, mac_len, key_len;
467 auth_type_t hash_type;
470 sg_comp_t *gather_comp;
471 sg_comp_t *scatter_comp;
473 uint32_t g_size_bytes, s_size_bytes;
474 uint64_t dptr_dma, rptr_dma;
475 vq_cmd_word0_t vq_cmd_w0;
476 vq_cmd_word3_t vq_cmd_w3;
477 void *c_vaddr, *m_vaddr;
478 uint64_t c_dma, m_dma;
479 opcode_info_t opcode;
481 ctx = params->ctx_buf.vaddr;
482 meta_p = ¶ms->meta_buf;
484 m_vaddr = meta_p->vaddr;
485 m_dma = meta_p->dma_addr;
* Reserve the initial space that follows the app data, so that the
* completion code and alternate completion code fall in the same
* cache line as the app data
491 m_vaddr = (uint8_t *)m_vaddr + COMPLETION_CODE_SIZE;
492 m_dma += COMPLETION_CODE_SIZE;
493 size = (uint8_t *)RTE_PTR_ALIGN((uint8_t *)m_vaddr, 16) -
495 c_vaddr = (uint8_t *)m_vaddr + size;
496 c_dma = m_dma + size;
497 size += sizeof(cpt_res_s_t);
499 m_vaddr = (uint8_t *)m_vaddr + size;
504 size = sizeof(struct cpt_request_info);
505 m_vaddr = (uint8_t *)m_vaddr + size;
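/*
 * Meta buffer layout after the carve-outs above (a sketch):
 *
 *   m_vaddr (entry) -> | COMPLETION_CODE_SIZE | pad to 16B alignment |
 *                      | cpt_res_s_t (c_vaddr) | cpt_request_info | ...
 *
 * The alternate completion code is later placed at c_vaddr - 8, and
 * the remaining space holds the SG lists.
 */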
508 hash_type = ctx->hash_type;
509 mac_len = ctx->mac_len;
510 key_len = ctx->auth_key_len;
511 data_len = AUTH_DLEN(d_lens);
515 vq_cmd_w0.s.param2 = ((uint16_t)hash_type << 8);
517 opcode.s.major = CPT_MAJOR_OP_HMAC | CPT_DMA_MODE;
518 vq_cmd_w0.s.param1 = key_len;
519 vq_cmd_w0.s.dlen = data_len + ROUNDUP8(key_len);
521 opcode.s.major = CPT_MAJOR_OP_HASH | CPT_DMA_MODE;
522 vq_cmd_w0.s.param1 = 0;
523 vq_cmd_w0.s.dlen = data_len;
/* Only the NULL-auth case enters this branch */
529 if (unlikely(!hash_type && !ctx->enc_cipher)) {
530 opcode.s.major = CPT_MAJOR_OP_MISC;
531 /* Minor op is passthrough */
532 opcode.s.minor = 0x03;
533 /* Send out completion code only */
534 vq_cmd_w0.s.param2 = 0x1;
537 vq_cmd_w0.s.opcode = opcode.flags;
539 /* DPTR has SG list */
543 ((uint16_t *)in_buffer)[0] = 0;
544 ((uint16_t *)in_buffer)[1] = 0;
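/*
 * The 8B SG list header: 16-bit words 0 and 1 are reserved (zeroed
 * here); words 2 and 3 receive the gather and scatter component
 * counts (big-endian) once the lists are filled below.
 */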
546 /* TODO Add error check if space will be sufficient */
547 gather_comp = (sg_comp_t *)((uint8_t *)m_vaddr + 8);
556 uint64_t k_dma = params->ctx_buf.dma_addr +
557 offsetof(struct cpt_ctx, auth_key);
559 i = fill_sg_comp(gather_comp, i, k_dma, ROUNDUP8(key_len));
565 i = fill_sg_comp_from_iov(gather_comp, i, params->src_iov,
567 if (unlikely(size)) {
CPT_LOG_DP_DEBUG("Insufficient src IOV size, short"
* Looks like we need to support a zero-data
* gather pointer in case of hash & HMAC
579 ((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
580 g_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
587 scatter_comp = (sg_comp_t *)((uint8_t *)gather_comp + g_size_bytes);
589 if (flags & VALID_MAC_BUF) {
590 if (unlikely(params->mac_buf.size < mac_len)) {
591 CPT_LOG_DP_ERR("Insufficient MAC size");
596 i = fill_sg_comp_from_buf_min(scatter_comp, i,
597 ¶ms->mac_buf, &size);
600 i = fill_sg_comp_from_iov(scatter_comp, i,
601 params->src_iov, data_len,
603 if (unlikely(size)) {
604 CPT_LOG_DP_ERR("Insufficient dst IOV size, short by"
610 ((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
611 s_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
613 size = g_size_bytes + s_size_bytes + SG_LIST_HDR_SIZE;
/* This is the DPTR len in case of SG mode */
616 vq_cmd_w0.s.dlen = size;
618 m_vaddr = (uint8_t *)m_vaddr + size;
621 /* cpt alternate completion address saved earlier */
622 req->alternate_caddr = (uint64_t *)((uint8_t *)c_vaddr - 8);
623 *req->alternate_caddr = ~((uint64_t)COMPLETION_CODE_INIT);
624 rptr_dma = c_dma - 8;
626 req->ist.ei1 = dptr_dma;
627 req->ist.ei2 = rptr_dma;
632 /* 16 byte aligned cpt res address */
633 req->completion_addr = (uint64_t *)((uint8_t *)c_vaddr);
634 *req->completion_addr = COMPLETION_CODE_INIT;
635 req->comp_baddr = c_dma;
637 /* Fill microcode part of instruction */
638 req->ist.ei0 = vq_cmd_w0.u64;
639 req->ist.ei3 = vq_cmd_w3.u64;
647 static __rte_always_inline void
648 cpt_enc_hmac_prep(uint32_t flags,
651 fc_params_t *fc_params,
655 uint32_t iv_offset = 0;
656 int32_t inputlen, outputlen, enc_dlen, auth_dlen;
657 struct cpt_ctx *cpt_ctx;
658 uint32_t cipher_type, hash_type;
659 uint32_t mac_len, size;
661 struct cpt_request_info *req;
662 buf_ptr_t *meta_p, *aad_buf = NULL;
663 uint32_t encr_offset, auth_offset;
664 uint32_t encr_data_len, auth_data_len, aad_len = 0;
665 uint32_t passthrough_len = 0;
666 void *m_vaddr, *offset_vaddr;
667 uint64_t m_dma, offset_dma, ctx_dma;
668 vq_cmd_word0_t vq_cmd_w0;
669 vq_cmd_word3_t vq_cmd_w3;
672 opcode_info_t opcode;
674 meta_p = &fc_params->meta_buf;
675 m_vaddr = meta_p->vaddr;
676 m_dma = meta_p->dma_addr;
678 encr_offset = ENCR_OFFSET(d_offs);
679 auth_offset = AUTH_OFFSET(d_offs);
680 encr_data_len = ENCR_DLEN(d_lens);
681 auth_data_len = AUTH_DLEN(d_lens);
682 if (unlikely(flags & VALID_AAD_BUF)) {
* We don't support both AAD
* and auth data separately
689 aad_len = fc_params->aad_buf.size;
690 aad_buf = &fc_params->aad_buf;
692 cpt_ctx = fc_params->ctx_buf.vaddr;
693 cipher_type = cpt_ctx->enc_cipher;
694 hash_type = cpt_ctx->hash_type;
695 mac_len = cpt_ctx->mac_len;
* Reserve the initial space that follows the app data, so that the
* completion code and alternate completion code fall in the same
* cache line as the app data
701 m_vaddr = (uint8_t *)m_vaddr + COMPLETION_CODE_SIZE;
702 m_dma += COMPLETION_CODE_SIZE;
703 size = (uint8_t *)RTE_PTR_ALIGN((uint8_t *)m_vaddr, 16) -
706 c_vaddr = (uint8_t *)m_vaddr + size;
707 c_dma = m_dma + size;
708 size += sizeof(cpt_res_s_t);
710 m_vaddr = (uint8_t *)m_vaddr + size;
713 /* start cpt request info struct at 8 byte boundary */
714 size = (uint8_t *)RTE_PTR_ALIGN(m_vaddr, 8) -
717 req = (struct cpt_request_info *)((uint8_t *)m_vaddr + size);
719 size += sizeof(struct cpt_request_info);
720 m_vaddr = (uint8_t *)m_vaddr + size;
723 if (hash_type == GMAC_TYPE)
726 if (unlikely(!(flags & VALID_IV_BUF))) {
728 iv_offset = ENCR_IV_OFFSET(d_offs);
731 if (unlikely(flags & VALID_AAD_BUF)) {
* When AAD is given, the data above encr_offset is passed through.
* Since AAD comes as a separate pointer and not as an offset, this
* is a special case: we need to fragment the input data into
* passthrough + encr_data and then insert the AAD in between.
738 if (hash_type != GMAC_TYPE) {
739 passthrough_len = encr_offset;
740 auth_offset = passthrough_len + iv_len;
741 encr_offset = passthrough_len + aad_len + iv_len;
742 auth_data_len = aad_len + encr_data_len;
744 passthrough_len = 16 + aad_len;
745 auth_offset = passthrough_len + iv_len;
746 auth_data_len = aad_len;
749 encr_offset += iv_len;
750 auth_offset += iv_len;
754 opcode.s.major = CPT_MAJOR_OP_FC;
757 auth_dlen = auth_offset + auth_data_len;
758 enc_dlen = encr_data_len + encr_offset;
759 if (unlikely(encr_data_len & 0xf)) {
760 if ((cipher_type == DES3_CBC) || (cipher_type == DES3_ECB))
761 enc_dlen = ROUNDUP8(encr_data_len) + encr_offset;
762 else if (likely((cipher_type == AES_CBC) ||
763 (cipher_type == AES_ECB)))
764 enc_dlen = ROUNDUP16(encr_data_len) + encr_offset;
767 if (unlikely(hash_type == GMAC_TYPE)) {
768 encr_offset = auth_dlen;
772 if (unlikely(auth_dlen > enc_dlen)) {
773 inputlen = auth_dlen;
774 outputlen = auth_dlen + mac_len;
777 outputlen = enc_dlen + mac_len;
782 vq_cmd_w0.s.param1 = encr_data_len;
783 vq_cmd_w0.s.param2 = auth_data_len;
* On 83XX, the IV and offset control word cannot be part of the
* instruction and must be placed in the data buffer, so we check
* for head room and only then do the direct mode processing
790 if (likely((flags & SINGLE_BUF_INPLACE) &&
791 (flags & SINGLE_BUF_HEADTAILROOM))) {
792 void *dm_vaddr = fc_params->bufs[0].vaddr;
793 uint64_t dm_dma_addr = fc_params->bufs[0].dma_addr;
* This flag indicates that 24 bytes of head room and 8 bytes of
* tail room are available, so we get to do the (limited)
* DIRECT MODE processing
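/*
 * Presumably the 24B of head room correspond to OFF_CTRL_LEN (8B)
 * plus the 16B IV prepended just below, and the 8B of tail room hold
 * the alternate completion code at the end of the output.
 */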
800 offset_vaddr = (uint8_t *)dm_vaddr - OFF_CTRL_LEN - iv_len;
801 offset_dma = dm_dma_addr - OFF_CTRL_LEN - iv_len;
804 req->ist.ei1 = offset_dma;
805 /* RPTR should just exclude offset control word */
806 req->ist.ei2 = dm_dma_addr - iv_len;
807 req->alternate_caddr = (uint64_t *)((uint8_t *)dm_vaddr
808 + outputlen - iv_len);
810 vq_cmd_w0.s.dlen = inputlen + OFF_CTRL_LEN;
812 vq_cmd_w0.s.opcode = opcode.flags;
814 if (likely(iv_len)) {
815 uint64_t *dest = (uint64_t *)((uint8_t *)offset_vaddr
817 uint64_t *src = fc_params->iv_buf;
822 *(uint64_t *)offset_vaddr =
823 rte_cpu_to_be_64(((uint64_t)encr_offset << 16) |
824 ((uint64_t)iv_offset << 8) |
825 ((uint64_t)auth_offset));
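/*
 * Offset control word layout, e.g. encr_offset = 16, iv_offset = 0,
 * auth_offset = 0 yields 0x100000: bits [31:16] carry the encryption
 * offset, [15:8] the IV offset and [7:0] the auth offset; the word
 * is stored big-endian for the microcode.
 */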
828 uint32_t i, g_size_bytes, s_size_bytes;
829 uint64_t dptr_dma, rptr_dma;
830 sg_comp_t *gather_comp;
831 sg_comp_t *scatter_comp;
834 /* This falls under strict SG mode */
835 offset_vaddr = m_vaddr;
837 size = OFF_CTRL_LEN + iv_len;
839 m_vaddr = (uint8_t *)m_vaddr + size;
842 opcode.s.major |= CPT_DMA_MODE;
844 vq_cmd_w0.s.opcode = opcode.flags;
846 if (likely(iv_len)) {
847 uint64_t *dest = (uint64_t *)((uint8_t *)offset_vaddr
849 uint64_t *src = fc_params->iv_buf;
854 *(uint64_t *)offset_vaddr =
855 rte_cpu_to_be_64(((uint64_t)encr_offset << 16) |
856 ((uint64_t)iv_offset << 8) |
857 ((uint64_t)auth_offset));
859 /* DPTR has SG list */
863 ((uint16_t *)in_buffer)[0] = 0;
864 ((uint16_t *)in_buffer)[1] = 0;
866 /* TODO Add error check if space will be sufficient */
867 gather_comp = (sg_comp_t *)((uint8_t *)m_vaddr + 8);
875 /* Offset control word that includes iv */
876 i = fill_sg_comp(gather_comp, i, offset_dma,
877 OFF_CTRL_LEN + iv_len);
880 size = inputlen - iv_len;
882 uint32_t aad_offset = aad_len ? passthrough_len : 0;
884 if (unlikely(flags & SINGLE_BUF_INPLACE)) {
885 i = fill_sg_comp_from_buf_min(gather_comp, i,
889 i = fill_sg_comp_from_iov(gather_comp, i,
892 aad_buf, aad_offset);
895 if (unlikely(size)) {
896 CPT_LOG_DP_ERR("Insufficient buffer space,"
897 " size %d needed", size);
901 ((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
902 g_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
905 * Output Scatter list
909 (sg_comp_t *)((uint8_t *)gather_comp + g_size_bytes);
912 if (likely(iv_len)) {
913 i = fill_sg_comp(scatter_comp, i,
914 offset_dma + OFF_CTRL_LEN,
/* output data or output data + digest */
919 if (unlikely(flags & VALID_MAC_BUF)) {
920 size = outputlen - iv_len - mac_len;
922 uint32_t aad_offset =
923 aad_len ? passthrough_len : 0;
925 if (unlikely(flags & SINGLE_BUF_INPLACE)) {
926 i = fill_sg_comp_from_buf_min(
932 i = fill_sg_comp_from_iov(scatter_comp,
940 if (unlikely(size)) {
941 CPT_LOG_DP_ERR("Insufficient buffer"
942 " space, size %d needed",
949 i = fill_sg_comp_from_buf(scatter_comp, i,
950 &fc_params->mac_buf);
953 /* Output including mac */
954 size = outputlen - iv_len;
956 uint32_t aad_offset =
957 aad_len ? passthrough_len : 0;
959 if (unlikely(flags & SINGLE_BUF_INPLACE)) {
960 i = fill_sg_comp_from_buf_min(
966 i = fill_sg_comp_from_iov(scatter_comp,
974 if (unlikely(size)) {
975 CPT_LOG_DP_ERR("Insufficient buffer"
976 " space, size %d needed",
982 ((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
983 s_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
985 size = g_size_bytes + s_size_bytes + SG_LIST_HDR_SIZE;
/* This is the DPTR len in case of SG mode */
988 vq_cmd_w0.s.dlen = size;
990 m_vaddr = (uint8_t *)m_vaddr + size;
993 /* cpt alternate completion address saved earlier */
994 req->alternate_caddr = (uint64_t *)((uint8_t *)c_vaddr - 8);
995 *req->alternate_caddr = ~((uint64_t)COMPLETION_CODE_INIT);
996 rptr_dma = c_dma - 8;
998 req->ist.ei1 = dptr_dma;
999 req->ist.ei2 = rptr_dma;
1002 ctx_dma = fc_params->ctx_buf.dma_addr +
1003 offsetof(struct cpt_ctx, fctx);
1006 vq_cmd_w3.s.grp = 0;
1007 vq_cmd_w3.s.cptr = ctx_dma;
1009 /* 16 byte aligned cpt res address */
1010 req->completion_addr = (uint64_t *)((uint8_t *)c_vaddr);
1011 *req->completion_addr = COMPLETION_CODE_INIT;
1012 req->comp_baddr = c_dma;
1014 /* Fill microcode part of instruction */
1015 req->ist.ei0 = vq_cmd_w0.u64;
1016 req->ist.ei3 = vq_cmd_w3.u64;
1024 static __rte_always_inline void
1025 cpt_dec_hmac_prep(uint32_t flags,
1028 fc_params_t *fc_params,
1032 uint32_t iv_offset = 0, size;
1033 int32_t inputlen, outputlen, enc_dlen, auth_dlen;
1034 struct cpt_ctx *cpt_ctx;
1035 int32_t hash_type, mac_len;
1036 uint8_t iv_len = 16;
1037 struct cpt_request_info *req;
1038 buf_ptr_t *meta_p, *aad_buf = NULL;
1039 uint32_t encr_offset, auth_offset;
1040 uint32_t encr_data_len, auth_data_len, aad_len = 0;
1041 uint32_t passthrough_len = 0;
1042 void *m_vaddr, *offset_vaddr;
1043 uint64_t m_dma, offset_dma, ctx_dma;
1044 opcode_info_t opcode;
1045 vq_cmd_word0_t vq_cmd_w0;
1046 vq_cmd_word3_t vq_cmd_w3;
1050 meta_p = &fc_params->meta_buf;
1051 m_vaddr = meta_p->vaddr;
1052 m_dma = meta_p->dma_addr;
1054 encr_offset = ENCR_OFFSET(d_offs);
1055 auth_offset = AUTH_OFFSET(d_offs);
1056 encr_data_len = ENCR_DLEN(d_lens);
1057 auth_data_len = AUTH_DLEN(d_lens);
1059 if (unlikely(flags & VALID_AAD_BUF)) {
* We don't support both AAD
* and auth data separately
1066 aad_len = fc_params->aad_buf.size;
1067 aad_buf = &fc_params->aad_buf;
1070 cpt_ctx = fc_params->ctx_buf.vaddr;
1071 hash_type = cpt_ctx->hash_type;
1072 mac_len = cpt_ctx->mac_len;
1074 if (hash_type == GMAC_TYPE)
1077 if (unlikely(!(flags & VALID_IV_BUF))) {
1079 iv_offset = ENCR_IV_OFFSET(d_offs);
1082 if (unlikely(flags & VALID_AAD_BUF)) {
* When AAD is given, the data above encr_offset is passed through.
* Since AAD comes as a separate pointer and not as an offset, this
* is a special case: we need to fragment the input data into
* passthrough + encr_data and then insert the AAD in between.
1089 if (hash_type != GMAC_TYPE) {
1090 passthrough_len = encr_offset;
1091 auth_offset = passthrough_len + iv_len;
1092 encr_offset = passthrough_len + aad_len + iv_len;
1093 auth_data_len = aad_len + encr_data_len;
1095 passthrough_len = 16 + aad_len;
1096 auth_offset = passthrough_len + iv_len;
1097 auth_data_len = aad_len;
1100 encr_offset += iv_len;
1101 auth_offset += iv_len;
* Reserve the initial space that follows the app data, so that the
* completion code and alternate completion code fall in the same
* cache line as the app data
1108 m_vaddr = (uint8_t *)m_vaddr + COMPLETION_CODE_SIZE;
1109 m_dma += COMPLETION_CODE_SIZE;
1110 size = (uint8_t *)RTE_PTR_ALIGN((uint8_t *)m_vaddr, 16) -
1112 c_vaddr = (uint8_t *)m_vaddr + size;
1113 c_dma = m_dma + size;
1114 size += sizeof(cpt_res_s_t);
1116 m_vaddr = (uint8_t *)m_vaddr + size;
1119 /* start cpt request info structure at 8 byte alignment */
1120 size = (uint8_t *)RTE_PTR_ALIGN(m_vaddr, 8) -
1123 req = (struct cpt_request_info *)((uint8_t *)m_vaddr + size);
1125 size += sizeof(struct cpt_request_info);
1126 m_vaddr = (uint8_t *)m_vaddr + size;
1130 opcode.s.major = CPT_MAJOR_OP_FC;
1133 enc_dlen = encr_offset + encr_data_len;
1134 auth_dlen = auth_offset + auth_data_len;
1136 if (auth_dlen > enc_dlen) {
1137 inputlen = auth_dlen + mac_len;
1138 outputlen = auth_dlen;
1140 inputlen = enc_dlen + mac_len;
1141 outputlen = enc_dlen;
1144 if (hash_type == GMAC_TYPE)
1145 encr_offset = inputlen;
1148 vq_cmd_w0.s.param1 = encr_data_len;
1149 vq_cmd_w0.s.param2 = auth_data_len;
* On 83XX, the IV and offset control word cannot be part of the
* instruction and must be placed in the data buffer, so we check
* for head room and only then do the direct mode processing
1157 if (likely((flags & SINGLE_BUF_INPLACE) &&
1158 (flags & SINGLE_BUF_HEADTAILROOM))) {
1159 void *dm_vaddr = fc_params->bufs[0].vaddr;
1160 uint64_t dm_dma_addr = fc_params->bufs[0].dma_addr;
* This flag indicates that 24 bytes of head room and 8 bytes of
* tail room are available, so we get to do the (limited)
* DIRECT MODE processing
1167 offset_vaddr = (uint8_t *)dm_vaddr - OFF_CTRL_LEN - iv_len;
1168 offset_dma = dm_dma_addr - OFF_CTRL_LEN - iv_len;
1169 req->ist.ei1 = offset_dma;
1171 /* RPTR should just exclude offset control word */
1172 req->ist.ei2 = dm_dma_addr - iv_len;
1174 req->alternate_caddr = (uint64_t *)((uint8_t *)dm_vaddr +
1175 outputlen - iv_len);
1176 /* since this is decryption,
1177 * don't touch the content of
1178 * alternate ccode space as it contains
1182 vq_cmd_w0.s.dlen = inputlen + OFF_CTRL_LEN;
1184 vq_cmd_w0.s.opcode = opcode.flags;
1186 if (likely(iv_len)) {
1187 uint64_t *dest = (uint64_t *)((uint8_t *)offset_vaddr +
1189 uint64_t *src = fc_params->iv_buf;
1194 *(uint64_t *)offset_vaddr =
1195 rte_cpu_to_be_64(((uint64_t)encr_offset << 16) |
1196 ((uint64_t)iv_offset << 8) |
1197 ((uint64_t)auth_offset));
1200 uint64_t dptr_dma, rptr_dma;
1201 uint32_t g_size_bytes, s_size_bytes;
1202 sg_comp_t *gather_comp;
1203 sg_comp_t *scatter_comp;
1207 /* This falls under strict SG mode */
1208 offset_vaddr = m_vaddr;
1210 size = OFF_CTRL_LEN + iv_len;
1212 m_vaddr = (uint8_t *)m_vaddr + size;
1215 opcode.s.major |= CPT_DMA_MODE;
1217 vq_cmd_w0.s.opcode = opcode.flags;
1219 if (likely(iv_len)) {
1220 uint64_t *dest = (uint64_t *)((uint8_t *)offset_vaddr +
1222 uint64_t *src = fc_params->iv_buf;
1227 *(uint64_t *)offset_vaddr =
1228 rte_cpu_to_be_64(((uint64_t)encr_offset << 16) |
1229 ((uint64_t)iv_offset << 8) |
1230 ((uint64_t)auth_offset));
1232 /* DPTR has SG list */
1233 in_buffer = m_vaddr;
1236 ((uint16_t *)in_buffer)[0] = 0;
1237 ((uint16_t *)in_buffer)[1] = 0;
1239 /* TODO Add error check if space will be sufficient */
1240 gather_comp = (sg_comp_t *)((uint8_t *)m_vaddr + 8);
1247 /* Offset control word that includes iv */
1248 i = fill_sg_comp(gather_comp, i, offset_dma,
1249 OFF_CTRL_LEN + iv_len);
1251 /* Add input data */
1252 if (flags & VALID_MAC_BUF) {
1253 size = inputlen - iv_len - mac_len;
1255 /* input data only */
1256 if (unlikely(flags & SINGLE_BUF_INPLACE)) {
1257 i = fill_sg_comp_from_buf_min(
1262 uint32_t aad_offset = aad_len ?
1263 passthrough_len : 0;
1265 i = fill_sg_comp_from_iov(gather_comp,
1272 if (unlikely(size)) {
1273 CPT_LOG_DP_ERR("Insufficient buffer"
1274 " space, size %d needed",
1282 i = fill_sg_comp_from_buf(gather_comp, i,
1283 &fc_params->mac_buf);
1286 /* input data + mac */
1287 size = inputlen - iv_len;
1289 if (unlikely(flags & SINGLE_BUF_INPLACE)) {
1290 i = fill_sg_comp_from_buf_min(
1295 uint32_t aad_offset = aad_len ?
1296 passthrough_len : 0;
1298 if (unlikely(!fc_params->src_iov)) {
1299 CPT_LOG_DP_ERR("Bad input args");
1303 i = fill_sg_comp_from_iov(
1311 if (unlikely(size)) {
1312 CPT_LOG_DP_ERR("Insufficient buffer"
1313 " space, size %d needed",
1319 ((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
1320 g_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
1323 * Output Scatter List
1328 (sg_comp_t *)((uint8_t *)gather_comp + g_size_bytes);
1332 i = fill_sg_comp(scatter_comp, i,
1333 offset_dma + OFF_CTRL_LEN,
1337 /* Add output data */
1338 size = outputlen - iv_len;
1340 if (unlikely(flags & SINGLE_BUF_INPLACE)) {
1341 /* handle single buffer here */
1342 i = fill_sg_comp_from_buf_min(scatter_comp, i,
1346 uint32_t aad_offset = aad_len ?
1347 passthrough_len : 0;
1349 if (unlikely(!fc_params->dst_iov)) {
1350 CPT_LOG_DP_ERR("Bad input args");
1354 i = fill_sg_comp_from_iov(scatter_comp, i,
1355 fc_params->dst_iov, 0,
1360 if (unlikely(size)) {
1361 CPT_LOG_DP_ERR("Insufficient buffer space,"
1362 " size %d needed", size);
1367 ((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
1368 s_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
1370 size = g_size_bytes + s_size_bytes + SG_LIST_HDR_SIZE;
/* This is the DPTR len in case of SG mode */
1373 vq_cmd_w0.s.dlen = size;
1375 m_vaddr = (uint8_t *)m_vaddr + size;
1378 /* cpt alternate completion address saved earlier */
1379 req->alternate_caddr = (uint64_t *)((uint8_t *)c_vaddr - 8);
1380 *req->alternate_caddr = ~((uint64_t)COMPLETION_CODE_INIT);
1381 rptr_dma = c_dma - 8;
1382 size += COMPLETION_CODE_SIZE;
1384 req->ist.ei1 = dptr_dma;
1385 req->ist.ei2 = rptr_dma;
1388 ctx_dma = fc_params->ctx_buf.dma_addr +
1389 offsetof(struct cpt_ctx, fctx);
1392 vq_cmd_w3.s.grp = 0;
1393 vq_cmd_w3.s.cptr = ctx_dma;
1395 /* 16 byte aligned cpt res address */
1396 req->completion_addr = (uint64_t *)((uint8_t *)c_vaddr);
1397 *req->completion_addr = COMPLETION_CODE_INIT;
1398 req->comp_baddr = c_dma;
1400 /* Fill microcode part of instruction */
1401 req->ist.ei0 = vq_cmd_w0.u64;
1402 req->ist.ei3 = vq_cmd_w3.u64;
1410 static __rte_always_inline void
1411 cpt_zuc_snow3g_enc_prep(uint32_t req_flags,
1414 fc_params_t *params,
1419 int32_t inputlen, outputlen;
1420 struct cpt_ctx *cpt_ctx;
1421 uint32_t mac_len = 0;
1423 struct cpt_request_info *req;
1425 uint32_t encr_offset = 0, auth_offset = 0;
1426 uint32_t encr_data_len = 0, auth_data_len = 0;
1427 int flags, iv_len = 16;
1428 void *m_vaddr, *c_vaddr;
1429 uint64_t m_dma, c_dma, offset_ctrl;
1430 uint64_t *offset_vaddr, offset_dma;
1431 uint32_t *iv_s, iv[4];
1432 vq_cmd_word0_t vq_cmd_w0;
1433 vq_cmd_word3_t vq_cmd_w3;
1434 opcode_info_t opcode;
1436 buf_p = ¶ms->meta_buf;
1437 m_vaddr = buf_p->vaddr;
1438 m_dma = buf_p->dma_addr;
1440 cpt_ctx = params->ctx_buf.vaddr;
1441 flags = cpt_ctx->zsk_flags;
1442 mac_len = cpt_ctx->mac_len;
1443 snow3g = cpt_ctx->snow3g;
* Reserve the initial space that follows the app data, so that the
* completion code and alternate completion code fall in the same
* cache line as the app data
1449 m_vaddr = (uint8_t *)m_vaddr + COMPLETION_CODE_SIZE;
1450 m_dma += COMPLETION_CODE_SIZE;
1451 size = (uint8_t *)RTE_PTR_ALIGN((uint8_t *)m_vaddr, 16) -
1454 c_vaddr = (uint8_t *)m_vaddr + size;
1455 c_dma = m_dma + size;
1456 size += sizeof(cpt_res_s_t);
1458 m_vaddr = (uint8_t *)m_vaddr + size;
1461 /* Reserve memory for cpt request info */
1464 size = sizeof(struct cpt_request_info);
1465 m_vaddr = (uint8_t *)m_vaddr + size;
1468 opcode.s.major = CPT_MAJOR_OP_ZUC_SNOW3G;
1470 /* indicates CPTR ctx, operation type, KEY & IV mode from DPTR */
1472 opcode.s.minor = ((1 << 7) | (snow3g << 5) | (0 << 4) |
1473 (0 << 3) | (flags & 0x7));
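/*
 * A reading of the minor opcode bits, following the comment above
 * (an assumption, not from the spec): bit 7 = context from CPTR,
 * bit 5 = SNOW3G (1) vs ZUC (0), bits 4:3 = key/IV mode, and
 * bits 2:0 carry zsk_flags (the cipher/auth operation type).
 */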
1477 * Microcode expects offsets in bytes
1478 * TODO: Rounding off
1480 auth_data_len = AUTH_DLEN(d_lens);
1483 auth_offset = AUTH_OFFSET(d_offs);
1484 auth_offset = auth_offset / 8;
1486 /* consider iv len */
1487 auth_offset += iv_len;
1489 inputlen = auth_offset + (RTE_ALIGN(auth_data_len, 8) / 8);
1490 outputlen = mac_len;
1492 offset_ctrl = rte_cpu_to_be_64((uint64_t)auth_offset);
1497 * Microcode expects offsets in bytes
1498 * TODO: Rounding off
1500 encr_data_len = ENCR_DLEN(d_lens);
1502 encr_offset = ENCR_OFFSET(d_offs);
1503 encr_offset = encr_offset / 8;
1504 /* consider iv len */
1505 encr_offset += iv_len;
1507 inputlen = encr_offset + (RTE_ALIGN(encr_data_len, 8) / 8);
1508 outputlen = inputlen;
1510 /* iv offset is 0 */
1511 offset_ctrl = rte_cpu_to_be_64((uint64_t)encr_offset << 16);
1515 iv_s = (flags == 0x1) ? params->auth_iv_buf :
* DPDK seems to provide the IV as IV3 IV2 IV1 IV0 in big-endian
* form; the MC needs it as IV0 IV1 IV2 IV3, so the word order is
* reversed (e.g. {IV3, IV2, IV1, IV0} becomes {IV0, IV1, IV2, IV3})
1524 for (j = 0; j < 4; j++)
1525 iv[j] = iv_s[3 - j];
1527 /* ZUC doesn't need a swap */
1528 for (j = 0; j < 4; j++)
1533 * GP op header, lengths are expected in bits.
1536 vq_cmd_w0.s.param1 = encr_data_len;
1537 vq_cmd_w0.s.param2 = auth_data_len;
* On 83XX, the IV and offset control word cannot be part of the
* instruction and must be placed in the data buffer, so we check
* for head room and only then do the direct mode processing
1545 if (likely((req_flags & SINGLE_BUF_INPLACE) &&
1546 (req_flags & SINGLE_BUF_HEADTAILROOM))) {
1547 void *dm_vaddr = params->bufs[0].vaddr;
1548 uint64_t dm_dma_addr = params->bufs[0].dma_addr;
* This flag indicates that 24 bytes of head room and 8 bytes of
* tail room are available, so we get to do the (limited)
* DIRECT MODE processing
1555 offset_vaddr = (uint64_t *)((uint8_t *)dm_vaddr -
1556 OFF_CTRL_LEN - iv_len);
1557 offset_dma = dm_dma_addr - OFF_CTRL_LEN - iv_len;
1560 req->ist.ei1 = offset_dma;
1561 /* RPTR should just exclude offset control word */
1562 req->ist.ei2 = dm_dma_addr - iv_len;
1563 req->alternate_caddr = (uint64_t *)((uint8_t *)dm_vaddr
1564 + outputlen - iv_len);
1566 vq_cmd_w0.s.dlen = inputlen + OFF_CTRL_LEN;
1568 vq_cmd_w0.s.opcode = opcode.flags;
1570 if (likely(iv_len)) {
1571 uint32_t *iv_d = (uint32_t *)((uint8_t *)offset_vaddr
1573 memcpy(iv_d, iv, 16);
1576 *offset_vaddr = offset_ctrl;
1578 uint32_t i, g_size_bytes, s_size_bytes;
1579 uint64_t dptr_dma, rptr_dma;
1580 sg_comp_t *gather_comp;
1581 sg_comp_t *scatter_comp;
/* save space for offset ctrl and iv */
1586 offset_vaddr = m_vaddr;
1589 m_vaddr = (uint8_t *)m_vaddr + OFF_CTRL_LEN + iv_len;
1590 m_dma += OFF_CTRL_LEN + iv_len;
1592 opcode.s.major |= CPT_DMA_MODE;
1594 vq_cmd_w0.s.opcode = opcode.flags;
1596 /* DPTR has SG list */
1597 in_buffer = m_vaddr;
1600 ((uint16_t *)in_buffer)[0] = 0;
1601 ((uint16_t *)in_buffer)[1] = 0;
1603 /* TODO Add error check if space will be sufficient */
1604 gather_comp = (sg_comp_t *)((uint8_t *)m_vaddr + 8);
1611 /* Offset control word followed by iv */
1613 i = fill_sg_comp(gather_comp, i, offset_dma,
1614 OFF_CTRL_LEN + iv_len);
1616 /* iv offset is 0 */
1617 *offset_vaddr = offset_ctrl;
1619 iv_d = (uint32_t *)((uint8_t *)offset_vaddr + OFF_CTRL_LEN);
1620 memcpy(iv_d, iv, 16);
1623 size = inputlen - iv_len;
1625 i = fill_sg_comp_from_iov(gather_comp, i,
1628 if (unlikely(size)) {
1629 CPT_LOG_DP_ERR("Insufficient buffer space,"
1630 " size %d needed", size);
1634 ((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
1635 g_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
1638 * Output Scatter List
1643 (sg_comp_t *)((uint8_t *)gather_comp + g_size_bytes);
1646 /* IV in SLIST only for EEA3 & UEA2 */
1651 i = fill_sg_comp(scatter_comp, i,
1652 offset_dma + OFF_CTRL_LEN, iv_len);
1655 /* Add output data */
1656 if (req_flags & VALID_MAC_BUF) {
1657 size = outputlen - iv_len - mac_len;
1659 i = fill_sg_comp_from_iov(scatter_comp, i,
1663 if (unlikely(size)) {
1664 CPT_LOG_DP_ERR("Insufficient buffer space,"
1665 " size %d needed", size);
1672 i = fill_sg_comp_from_buf(scatter_comp, i,
1676 /* Output including mac */
1677 size = outputlen - iv_len;
1679 i = fill_sg_comp_from_iov(scatter_comp, i,
1683 if (unlikely(size)) {
1684 CPT_LOG_DP_ERR("Insufficient buffer space,"
1685 " size %d needed", size);
1690 ((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
1691 s_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
1693 size = g_size_bytes + s_size_bytes + SG_LIST_HDR_SIZE;
/* This is the DPTR len in case of SG mode */
1696 vq_cmd_w0.s.dlen = size;
1698 m_vaddr = (uint8_t *)m_vaddr + size;
1701 /* cpt alternate completion address saved earlier */
1702 req->alternate_caddr = (uint64_t *)((uint8_t *)c_vaddr - 8);
1703 *req->alternate_caddr = ~((uint64_t)COMPLETION_CODE_INIT);
1704 rptr_dma = c_dma - 8;
1706 req->ist.ei1 = dptr_dma;
1707 req->ist.ei2 = rptr_dma;
1712 vq_cmd_w3.s.grp = 0;
1713 vq_cmd_w3.s.cptr = params->ctx_buf.dma_addr +
1714 offsetof(struct cpt_ctx, zs_ctx);
1716 /* 16 byte aligned cpt res address */
1717 req->completion_addr = (uint64_t *)((uint8_t *)c_vaddr);
1718 *req->completion_addr = COMPLETION_CODE_INIT;
1719 req->comp_baddr = c_dma;
1721 /* Fill microcode part of instruction */
1722 req->ist.ei0 = vq_cmd_w0.u64;
1723 req->ist.ei3 = vq_cmd_w3.u64;
1731 static __rte_always_inline void
1732 cpt_zuc_snow3g_dec_prep(uint32_t req_flags,
1735 fc_params_t *params,
1740 int32_t inputlen = 0, outputlen;
1741 struct cpt_ctx *cpt_ctx;
1742 uint8_t snow3g, iv_len = 16;
1743 struct cpt_request_info *req;
1745 uint32_t encr_offset;
1746 uint32_t encr_data_len;
1748 void *m_vaddr, *c_vaddr;
1749 uint64_t m_dma, c_dma;
1750 uint64_t *offset_vaddr, offset_dma;
1751 uint32_t *iv_s, iv[4], j;
1752 vq_cmd_word0_t vq_cmd_w0;
1753 vq_cmd_word3_t vq_cmd_w3;
1754 opcode_info_t opcode;
1756 buf_p = ¶ms->meta_buf;
1757 m_vaddr = buf_p->vaddr;
1758 m_dma = buf_p->dma_addr;
1761 * Microcode expects offsets in bytes
1762 * TODO: Rounding off
1764 encr_offset = ENCR_OFFSET(d_offs) / 8;
1765 encr_data_len = ENCR_DLEN(d_lens);
1767 cpt_ctx = params->ctx_buf.vaddr;
1768 flags = cpt_ctx->zsk_flags;
1769 snow3g = cpt_ctx->snow3g;
* Reserve the initial space that follows the app data, so that the
* completion code and alternate completion code fall in the same
* cache line as the app data
1774 m_vaddr = (uint8_t *)m_vaddr + COMPLETION_CODE_SIZE;
1775 m_dma += COMPLETION_CODE_SIZE;
1776 size = (uint8_t *)RTE_PTR_ALIGN((uint8_t *)m_vaddr, 16) -
1779 c_vaddr = (uint8_t *)m_vaddr + size;
1780 c_dma = m_dma + size;
1781 size += sizeof(cpt_res_s_t);
1783 m_vaddr = (uint8_t *)m_vaddr + size;
1786 /* Reserve memory for cpt request info */
1789 size = sizeof(struct cpt_request_info);
1790 m_vaddr = (uint8_t *)m_vaddr + size;
1793 opcode.s.major = CPT_MAJOR_OP_ZUC_SNOW3G;
1795 /* indicates CPTR ctx, operation type, KEY & IV mode from DPTR */
1797 opcode.s.minor = ((1 << 7) | (snow3g << 5) | (0 << 4) |
1798 (0 << 3) | (flags & 0x7));
1800 /* consider iv len */
1801 encr_offset += iv_len;
1803 inputlen = encr_offset +
1804 (RTE_ALIGN(encr_data_len, 8) / 8);
1805 outputlen = inputlen;
1808 iv_s = params->iv_buf;
* DPDK seems to provide the IV as IV3 IV2 IV1 IV0 in big-endian
* form; the MC needs it as IV0 IV1 IV2 IV3, so the word order is
* reversed (e.g. {IV3, IV2, IV1, IV0} becomes {IV0, IV1, IV2, IV3})
1815 for (j = 0; j < 4; j++)
1816 iv[j] = iv_s[3 - j];
1818 /* ZUC doesn't need a swap */
1819 for (j = 0; j < 4; j++)
1824 * GP op header, lengths are expected in bits.
1827 vq_cmd_w0.s.param1 = encr_data_len;
* On 83XX, the IV and offset control word cannot be part of the
* instruction and must be placed in the data buffer, so we check
* for head room and only then do the direct mode processing
1835 if (likely((req_flags & SINGLE_BUF_INPLACE) &&
1836 (req_flags & SINGLE_BUF_HEADTAILROOM))) {
1837 void *dm_vaddr = params->bufs[0].vaddr;
1838 uint64_t dm_dma_addr = params->bufs[0].dma_addr;
* This flag indicates that 24 bytes of head room and 8 bytes of
* tail room are available, so we get to do the (limited)
* DIRECT MODE processing
1845 offset_vaddr = (uint64_t *)((uint8_t *)dm_vaddr -
1846 OFF_CTRL_LEN - iv_len);
1847 offset_dma = dm_dma_addr - OFF_CTRL_LEN - iv_len;
1850 req->ist.ei1 = offset_dma;
1851 /* RPTR should just exclude offset control word */
1852 req->ist.ei2 = dm_dma_addr - iv_len;
1853 req->alternate_caddr = (uint64_t *)((uint8_t *)dm_vaddr
1854 + outputlen - iv_len);
1856 vq_cmd_w0.s.dlen = inputlen + OFF_CTRL_LEN;
1858 vq_cmd_w0.s.opcode = opcode.flags;
1860 if (likely(iv_len)) {
1861 uint32_t *iv_d = (uint32_t *)((uint8_t *)offset_vaddr
1863 memcpy(iv_d, iv, 16);
1866 /* iv offset is 0 */
1867 *offset_vaddr = rte_cpu_to_be_64((uint64_t)encr_offset << 16);
1869 uint32_t i, g_size_bytes, s_size_bytes;
1870 uint64_t dptr_dma, rptr_dma;
1871 sg_comp_t *gather_comp;
1872 sg_comp_t *scatter_comp;
1876 /* save space for offset and iv... */
1877 offset_vaddr = m_vaddr;
1880 m_vaddr = (uint8_t *)m_vaddr + OFF_CTRL_LEN + iv_len;
1881 m_dma += OFF_CTRL_LEN + iv_len;
1883 opcode.s.major |= CPT_DMA_MODE;
1885 vq_cmd_w0.s.opcode = opcode.flags;
1887 /* DPTR has SG list */
1888 in_buffer = m_vaddr;
1891 ((uint16_t *)in_buffer)[0] = 0;
1892 ((uint16_t *)in_buffer)[1] = 0;
1894 /* TODO Add error check if space will be sufficient */
1895 gather_comp = (sg_comp_t *)((uint8_t *)m_vaddr + 8);
1902 /* Offset control word */
1904 /* iv offset is 0 */
1905 *offset_vaddr = rte_cpu_to_be_64((uint64_t)encr_offset << 16);
1907 i = fill_sg_comp(gather_comp, i, offset_dma,
1908 OFF_CTRL_LEN + iv_len);
1910 iv_d = (uint32_t *)((uint8_t *)offset_vaddr + OFF_CTRL_LEN);
1911 memcpy(iv_d, iv, 16);
1913 /* Add input data */
1914 size = inputlen - iv_len;
1916 i = fill_sg_comp_from_iov(gather_comp, i,
1919 if (unlikely(size)) {
1920 CPT_LOG_DP_ERR("Insufficient buffer space,"
1921 " size %d needed", size);
1925 ((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
1926 g_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
1929 * Output Scatter List
1934 (sg_comp_t *)((uint8_t *)gather_comp + g_size_bytes);
1937 i = fill_sg_comp(scatter_comp, i,
1938 offset_dma + OFF_CTRL_LEN,
1941 /* Add output data */
1942 size = outputlen - iv_len;
1944 i = fill_sg_comp_from_iov(scatter_comp, i,
1948 if (unlikely(size)) {
1949 CPT_LOG_DP_ERR("Insufficient buffer space,"
1950 " size %d needed", size);
1954 ((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
1955 s_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
1957 size = g_size_bytes + s_size_bytes + SG_LIST_HDR_SIZE;
/* This is the DPTR len in case of SG mode */
1960 vq_cmd_w0.s.dlen = size;
1962 m_vaddr = (uint8_t *)m_vaddr + size;
1965 /* cpt alternate completion address saved earlier */
1966 req->alternate_caddr = (uint64_t *)((uint8_t *)c_vaddr - 8);
1967 *req->alternate_caddr = ~((uint64_t)COMPLETION_CODE_INIT);
1968 rptr_dma = c_dma - 8;
1970 req->ist.ei1 = dptr_dma;
1971 req->ist.ei2 = rptr_dma;
1976 vq_cmd_w3.s.grp = 0;
1977 vq_cmd_w3.s.cptr = params->ctx_buf.dma_addr +
1978 offsetof(struct cpt_ctx, zs_ctx);
1980 /* 16 byte aligned cpt res address */
1981 req->completion_addr = (uint64_t *)((uint8_t *)c_vaddr);
1982 *req->completion_addr = COMPLETION_CODE_INIT;
1983 req->comp_baddr = c_dma;
1985 /* Fill microcode part of instruction */
1986 req->ist.ei0 = vq_cmd_w0.u64;
1987 req->ist.ei3 = vq_cmd_w3.u64;
1995 static __rte_always_inline void
1996 cpt_kasumi_enc_prep(uint32_t req_flags,
1999 fc_params_t *params,
2004 int32_t inputlen = 0, outputlen = 0;
2005 struct cpt_ctx *cpt_ctx;
2006 uint32_t mac_len = 0;
2008 struct cpt_request_info *req;
2010 uint32_t encr_offset, auth_offset;
2011 uint32_t encr_data_len, auth_data_len;
2013 uint8_t *iv_s, *iv_d, iv_len = 8;
2015 void *m_vaddr, *c_vaddr;
2016 uint64_t m_dma, c_dma;
2017 uint64_t *offset_vaddr, offset_dma;
2018 vq_cmd_word0_t vq_cmd_w0;
2019 vq_cmd_word3_t vq_cmd_w3;
2020 opcode_info_t opcode;
2022 uint32_t g_size_bytes, s_size_bytes;
2023 uint64_t dptr_dma, rptr_dma;
2024 sg_comp_t *gather_comp;
2025 sg_comp_t *scatter_comp;
2027 buf_p = ¶ms->meta_buf;
2028 m_vaddr = buf_p->vaddr;
2029 m_dma = buf_p->dma_addr;
2031 encr_offset = ENCR_OFFSET(d_offs) / 8;
2032 auth_offset = AUTH_OFFSET(d_offs) / 8;
2033 encr_data_len = ENCR_DLEN(d_lens);
2034 auth_data_len = AUTH_DLEN(d_lens);
2036 cpt_ctx = params->ctx_buf.vaddr;
2037 flags = cpt_ctx->zsk_flags;
2038 mac_len = cpt_ctx->mac_len;
2041 iv_s = params->iv_buf;
2043 iv_s = params->auth_iv_buf;
2045 dir = iv_s[8] & 0x1;
* Reserve the initial space that follows the app data, so that the
* completion code and alternate completion code fall in the same
* cache line as the app data
2051 m_vaddr = (uint8_t *)m_vaddr + COMPLETION_CODE_SIZE;
2052 m_dma += COMPLETION_CODE_SIZE;
2053 size = (uint8_t *)RTE_PTR_ALIGN((uint8_t *)m_vaddr, 16) -
2056 c_vaddr = (uint8_t *)m_vaddr + size;
2057 c_dma = m_dma + size;
2058 size += sizeof(cpt_res_s_t);
2060 m_vaddr = (uint8_t *)m_vaddr + size;
2063 /* Reserve memory for cpt request info */
2066 size = sizeof(struct cpt_request_info);
2067 m_vaddr = (uint8_t *)m_vaddr + size;
2070 opcode.s.major = CPT_MAJOR_OP_KASUMI | CPT_DMA_MODE;
2072 /* indicates ECB/CBC, direction, ctx from cptr, iv from dptr */
2073 opcode.s.minor = ((1 << 6) | (cpt_ctx->k_ecb << 5) |
2074 (dir << 4) | (0 << 3) | (flags & 0x7));
2077 * GP op header, lengths are expected in bits.
2080 vq_cmd_w0.s.param1 = encr_data_len;
2081 vq_cmd_w0.s.param2 = auth_data_len;
2082 vq_cmd_w0.s.opcode = opcode.flags;
2084 /* consider iv len */
2086 encr_offset += iv_len;
2087 auth_offset += iv_len;
2090 /* save space for offset ctrl and iv */
2091 offset_vaddr = m_vaddr;
2094 m_vaddr = (uint8_t *)m_vaddr + OFF_CTRL_LEN + iv_len;
2095 m_dma += OFF_CTRL_LEN + iv_len;
2097 /* DPTR has SG list */
2098 in_buffer = m_vaddr;
2101 ((uint16_t *)in_buffer)[0] = 0;
2102 ((uint16_t *)in_buffer)[1] = 0;
2104 /* TODO Add error check if space will be sufficient */
2105 gather_comp = (sg_comp_t *)((uint8_t *)m_vaddr + 8);
2112 /* Offset control word followed by iv */
2115 inputlen = encr_offset + (RTE_ALIGN(encr_data_len, 8) / 8);
2116 outputlen = inputlen;
2117 /* iv offset is 0 */
2118 *offset_vaddr = rte_cpu_to_be_64((uint64_t)encr_offset << 16);
2120 inputlen = auth_offset + (RTE_ALIGN(auth_data_len, 8) / 8);
2121 outputlen = mac_len;
2122 /* iv offset is 0 */
2123 *offset_vaddr = rte_cpu_to_be_64((uint64_t)auth_offset);
2126 i = fill_sg_comp(gather_comp, i, offset_dma, OFF_CTRL_LEN + iv_len);
2129 iv_d = (uint8_t *)offset_vaddr + OFF_CTRL_LEN;
2130 memcpy(iv_d, iv_s, iv_len);
2133 size = inputlen - iv_len;
2135 i = fill_sg_comp_from_iov(gather_comp, i,
2139 if (unlikely(size)) {
2140 CPT_LOG_DP_ERR("Insufficient buffer space,"
2141 " size %d needed", size);
2145 ((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
2146 g_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
2149 * Output Scatter List
2153 scatter_comp = (sg_comp_t *)((uint8_t *)gather_comp + g_size_bytes);
2156 /* IV in SLIST only for F8 */
2162 i = fill_sg_comp(scatter_comp, i,
2163 offset_dma + OFF_CTRL_LEN,
2167 /* Add output data */
2168 if (req_flags & VALID_MAC_BUF) {
2169 size = outputlen - iv_len - mac_len;
2171 i = fill_sg_comp_from_iov(scatter_comp, i,
2175 if (unlikely(size)) {
2176 CPT_LOG_DP_ERR("Insufficient buffer space,"
2177 " size %d needed", size);
2184 i = fill_sg_comp_from_buf(scatter_comp, i,
2188 /* Output including mac */
2189 size = outputlen - iv_len;
2191 i = fill_sg_comp_from_iov(scatter_comp, i,
2195 if (unlikely(size)) {
2196 CPT_LOG_DP_ERR("Insufficient buffer space,"
2197 " size %d needed", size);
2202 ((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
2203 s_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
2205 size = g_size_bytes + s_size_bytes + SG_LIST_HDR_SIZE;
/* This is the DPTR len in case of SG mode */
2208 vq_cmd_w0.s.dlen = size;
2210 m_vaddr = (uint8_t *)m_vaddr + size;
2213 /* cpt alternate completion address saved earlier */
2214 req->alternate_caddr = (uint64_t *)((uint8_t *)c_vaddr - 8);
2215 *req->alternate_caddr = ~((uint64_t)COMPLETION_CODE_INIT);
2216 rptr_dma = c_dma - 8;
2218 req->ist.ei1 = dptr_dma;
2219 req->ist.ei2 = rptr_dma;
2223 vq_cmd_w3.s.grp = 0;
2224 vq_cmd_w3.s.cptr = params->ctx_buf.dma_addr +
2225 offsetof(struct cpt_ctx, k_ctx);
2227 /* 16 byte aligned cpt res address */
2228 req->completion_addr = (uint64_t *)((uint8_t *)c_vaddr);
2229 *req->completion_addr = COMPLETION_CODE_INIT;
2230 req->comp_baddr = c_dma;
2232 /* Fill microcode part of instruction */
2233 req->ist.ei0 = vq_cmd_w0.u64;
2234 req->ist.ei3 = vq_cmd_w3.u64;
2242 static __rte_always_inline void
2243 cpt_kasumi_dec_prep(uint64_t d_offs,
2245 fc_params_t *params,
2250 int32_t inputlen = 0, outputlen;
2251 struct cpt_ctx *cpt_ctx;
2252 uint8_t i = 0, iv_len = 8;
2253 struct cpt_request_info *req;
2255 uint32_t encr_offset;
2256 uint32_t encr_data_len;
2259 void *m_vaddr, *c_vaddr;
2260 uint64_t m_dma, c_dma;
2261 uint64_t *offset_vaddr, offset_dma;
2262 vq_cmd_word0_t vq_cmd_w0;
2263 vq_cmd_word3_t vq_cmd_w3;
2264 opcode_info_t opcode;
2266 uint32_t g_size_bytes, s_size_bytes;
2267 uint64_t dptr_dma, rptr_dma;
2268 sg_comp_t *gather_comp;
2269 sg_comp_t *scatter_comp;
2271 buf_p = ¶ms->meta_buf;
2272 m_vaddr = buf_p->vaddr;
2273 m_dma = buf_p->dma_addr;
2275 encr_offset = ENCR_OFFSET(d_offs) / 8;
2276 encr_data_len = ENCR_DLEN(d_lens);
2278 cpt_ctx = params->ctx_buf.vaddr;
2279 flags = cpt_ctx->zsk_flags;
* Reserve the initial space that follows the app data, so that the
* completion code and alternate completion code fall in the same
* cache line as the app data
2284 m_vaddr = (uint8_t *)m_vaddr + COMPLETION_CODE_SIZE;
2285 m_dma += COMPLETION_CODE_SIZE;
2286 size = (uint8_t *)RTE_PTR_ALIGN((uint8_t *)m_vaddr, 16) -
2289 c_vaddr = (uint8_t *)m_vaddr + size;
2290 c_dma = m_dma + size;
2291 size += sizeof(cpt_res_s_t);
2293 m_vaddr = (uint8_t *)m_vaddr + size;
2296 /* Reserve memory for cpt request info */
2299 size = sizeof(struct cpt_request_info);
2300 m_vaddr = (uint8_t *)m_vaddr + size;
2303 opcode.s.major = CPT_MAJOR_OP_KASUMI | CPT_DMA_MODE;
2305 /* indicates ECB/CBC, direction, ctx from cptr, iv from dptr */
2306 opcode.s.minor = ((1 << 6) | (cpt_ctx->k_ecb << 5) |
2307 (dir << 4) | (0 << 3) | (flags & 0x7));
2310 * GP op header, lengths are expected in bits.
2313 vq_cmd_w0.s.param1 = encr_data_len;
2314 vq_cmd_w0.s.opcode = opcode.flags;
2316 /* consider iv len */
2317 encr_offset += iv_len;
2319 inputlen = iv_len + (RTE_ALIGN(encr_data_len, 8) / 8);
2320 outputlen = inputlen;
2322 /* save space for offset ctrl & iv */
2323 offset_vaddr = m_vaddr;
2326 m_vaddr = (uint8_t *)m_vaddr + OFF_CTRL_LEN + iv_len;
2327 m_dma += OFF_CTRL_LEN + iv_len;
2329 /* DPTR has SG list */
2330 in_buffer = m_vaddr;
2333 ((uint16_t *)in_buffer)[0] = 0;
2334 ((uint16_t *)in_buffer)[1] = 0;
2336 /* TODO Add error check if space will be sufficient */
2337 gather_comp = (sg_comp_t *)((uint8_t *)m_vaddr + 8);
2344 /* Offset control word followed by iv */
2345 *offset_vaddr = rte_cpu_to_be_64((uint64_t)encr_offset << 16);
2347 i = fill_sg_comp(gather_comp, i, offset_dma, OFF_CTRL_LEN + iv_len);
2350 memcpy((uint8_t *)offset_vaddr + OFF_CTRL_LEN,
2351 params->iv_buf, iv_len);
2353 /* Add input data */
2354 size = inputlen - iv_len;
2356 i = fill_sg_comp_from_iov(gather_comp, i,
2359 if (unlikely(size)) {
2360 CPT_LOG_DP_ERR("Insufficient buffer space,"
2361 " size %d needed", size);
2365 ((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
2366 g_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
2369 * Output Scatter List
2373 scatter_comp = (sg_comp_t *)((uint8_t *)gather_comp + g_size_bytes);
2376 i = fill_sg_comp(scatter_comp, i,
2377 offset_dma + OFF_CTRL_LEN,
2380 /* Add output data */
2381 size = outputlen - iv_len;
2383 i = fill_sg_comp_from_iov(scatter_comp, i,
2386 if (unlikely(size)) {
2387 CPT_LOG_DP_ERR("Insufficient buffer space,"
2388 " size %d needed", size);
2392 ((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
2393 s_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
2395 size = g_size_bytes + s_size_bytes + SG_LIST_HDR_SIZE;
/* This is the DPTR len in case of SG mode */
2398 vq_cmd_w0.s.dlen = size;
2400 m_vaddr = (uint8_t *)m_vaddr + size;
2403 /* cpt alternate completion address saved earlier */
2404 req->alternate_caddr = (uint64_t *)((uint8_t *)c_vaddr - 8);
2405 *req->alternate_caddr = ~((uint64_t)COMPLETION_CODE_INIT);
2406 rptr_dma = c_dma - 8;
2408 req->ist.ei1 = dptr_dma;
2409 req->ist.ei2 = rptr_dma;
2413 vq_cmd_w3.s.grp = 0;
2414 vq_cmd_w3.s.cptr = params->ctx_buf.dma_addr +
2415 offsetof(struct cpt_ctx, k_ctx);
2417 /* 16 byte aligned cpt res address */
2418 req->completion_addr = (uint64_t *)((uint8_t *)c_vaddr);
2419 *req->completion_addr = COMPLETION_CODE_INIT;
2420 req->comp_baddr = c_dma;
2422 /* Fill microcode part of instruction */
2423 req->ist.ei0 = vq_cmd_w0.u64;
2424 req->ist.ei3 = vq_cmd_w3.u64;
2432 static __rte_always_inline void *
2433 cpt_fc_dec_hmac_prep(uint32_t flags,
2436 fc_params_t *fc_params,
2439 struct cpt_ctx *ctx = fc_params->ctx_buf.vaddr;
2441 void *prep_req = NULL;
2443 fc_type = ctx->fc_type;
2445 if (likely(fc_type == FC_GEN)) {
2446 cpt_dec_hmac_prep(flags, d_offs, d_lens, fc_params, op,
2448 } else if (fc_type == ZUC_SNOW3G) {
2449 cpt_zuc_snow3g_dec_prep(flags, d_offs, d_lens, fc_params, op,
2451 } else if (fc_type == KASUMI) {
2452 cpt_kasumi_dec_prep(d_offs, d_lens, fc_params, op, &prep_req);
* For the AUTH_ONLY case, the MC only supports digest
* generation; verification should be done in software
* via memcmp()
2464 static __rte_always_inline void *__hot
2465 cpt_fc_enc_hmac_prep(uint32_t flags, uint64_t d_offs, uint64_t d_lens,
2466 fc_params_t *fc_params, void *op)
2468 struct cpt_ctx *ctx = fc_params->ctx_buf.vaddr;
2470 void *prep_req = NULL;
2472 fc_type = ctx->fc_type;
/* Common API for the rest of the ops */
2475 if (likely(fc_type == FC_GEN)) {
2476 cpt_enc_hmac_prep(flags, d_offs, d_lens, fc_params, op,
2478 } else if (fc_type == ZUC_SNOW3G) {
2479 cpt_zuc_snow3g_enc_prep(flags, d_offs, d_lens, fc_params, op,
2481 } else if (fc_type == KASUMI) {
2482 cpt_kasumi_enc_prep(flags, d_offs, d_lens, fc_params, op,
2484 } else if (fc_type == HASH_HMAC) {
2485 cpt_digest_gen_prep(flags, d_lens, fc_params, op, &prep_req);
2491 static __rte_always_inline int
2492 cpt_fc_auth_set_key(void *ctx, auth_type_t type, const uint8_t *key,
2493 uint16_t key_len, uint16_t mac_len)
2495 struct cpt_ctx *cpt_ctx = ctx;
2496 mc_fc_context_t *fctx = &cpt_ctx->fctx;
2497 uint64_t *ctrl_flags = NULL;
2499 if ((type >= ZUC_EIA3) && (type <= KASUMI_F9_ECB)) {
2504 /* No support for AEAD yet */
2505 if (cpt_ctx->enc_cipher)
2507 /* For ZUC/SNOW3G/Kasumi */
2510 cpt_ctx->snow3g = 1;
2511 gen_key_snow3g(key, keyx);
2512 memcpy(cpt_ctx->zs_ctx.ci_key, keyx, key_len);
2513 cpt_ctx->fc_type = ZUC_SNOW3G;
2514 cpt_ctx->zsk_flags = 0x1;
2517 cpt_ctx->snow3g = 0;
2518 memcpy(cpt_ctx->zs_ctx.ci_key, key, key_len);
2519 memcpy(cpt_ctx->zs_ctx.zuc_const, zuc_d, 32);
2520 cpt_ctx->fc_type = ZUC_SNOW3G;
2521 cpt_ctx->zsk_flags = 0x1;
2524 /* Kasumi ECB mode */
2526 memcpy(cpt_ctx->k_ctx.ci_key, key, key_len);
2527 cpt_ctx->fc_type = KASUMI;
2528 cpt_ctx->zsk_flags = 0x1;
2531 memcpy(cpt_ctx->k_ctx.ci_key, key, key_len);
2532 cpt_ctx->fc_type = KASUMI;
2533 cpt_ctx->zsk_flags = 0x1;
2538 cpt_ctx->mac_len = 4;
2539 cpt_ctx->hash_type = type;
2543 if (!(cpt_ctx->fc_type == FC_GEN && !type)) {
2544 if (!cpt_ctx->fc_type || !cpt_ctx->enc_cipher)
2545 cpt_ctx->fc_type = HASH_HMAC;
2548 ctrl_flags = (uint64_t *)&fctx->enc.enc_ctrl.flags;
2549 *ctrl_flags = rte_be_to_cpu_64(*ctrl_flags);
2551 /* For GMAC auth, cipher must be NULL */
2552 if (type == GMAC_TYPE)
2553 CPT_P_ENC_CTRL(fctx).enc_cipher = 0;
2555 CPT_P_ENC_CTRL(fctx).hash_type = cpt_ctx->hash_type = type;
2556 CPT_P_ENC_CTRL(fctx).mac_len = cpt_ctx->mac_len = mac_len;
2560 memset(cpt_ctx->auth_key, 0, sizeof(cpt_ctx->auth_key));
2561 memcpy(cpt_ctx->auth_key, key, key_len);
2562 cpt_ctx->auth_key_len = key_len;
2563 memset(fctx->hmac.ipad, 0, sizeof(fctx->hmac.ipad));
2564 memset(fctx->hmac.opad, 0, sizeof(fctx->hmac.opad));
2565 memcpy(fctx->hmac.opad, key, key_len);
2566 CPT_P_ENC_CTRL(fctx).auth_input_type = 1;
2568 *ctrl_flags = rte_cpu_to_be_64(*ctrl_flags);
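/*
 * Illustrative sketch (not part of the driver): programming a
 * SHA1-HMAC key with the helper above. A mac_len of 20 requests the
 * full SHA1 digest; shorter values would truncate it. The function
 * name is hypothetical.
 */
static __rte_always_inline int
example_set_sha1_hmac_key(struct cpt_ctx *cpt_ctx, const uint8_t *key,
			  uint16_t key_len)
{
	return cpt_fc_auth_set_key(cpt_ctx, SHA1_TYPE, key, key_len, 20);
}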
2572 static __rte_always_inline int
2573 fill_sess_aead(struct rte_crypto_sym_xform *xform,
2574 struct cpt_sess_misc *sess)
2576 struct rte_crypto_aead_xform *aead_form;
2577 cipher_type_t enc_type = 0; /* NULL Cipher type */
2578 auth_type_t auth_type = 0; /* NULL Auth type */
2579 uint32_t cipher_key_len = 0;
uint8_t aes_gcm = 0;
void *ctx = SESS_PRIV(sess);

aead_form = &xform->aead;
2584 if (aead_form->op == RTE_CRYPTO_AEAD_OP_ENCRYPT &&
2585 aead_form->algo == RTE_CRYPTO_AEAD_AES_GCM) {
2586 sess->cpt_op |= CPT_OP_CIPHER_ENCRYPT;
2587 sess->cpt_op |= CPT_OP_AUTH_GENERATE;
2588 } else if (aead_form->op == RTE_CRYPTO_AEAD_OP_DECRYPT &&
2589 aead_form->algo == RTE_CRYPTO_AEAD_AES_GCM) {
2590 sess->cpt_op |= CPT_OP_CIPHER_DECRYPT;
2591 sess->cpt_op |= CPT_OP_AUTH_VERIFY;
CPT_LOG_DP_ERR("Unknown cipher operation");
2596 switch (aead_form->algo) {
2597 case RTE_CRYPTO_AEAD_AES_GCM:
2599 cipher_key_len = 16;
2602 case RTE_CRYPTO_AEAD_AES_CCM:
2603 CPT_LOG_DP_ERR("Crypto: Unsupported cipher algo %u",
2607 CPT_LOG_DP_ERR("Crypto: Undefined cipher algo %u specified",
2611 if (aead_form->key.length < cipher_key_len) {
CPT_LOG_DP_ERR("Invalid cipher params keylen %lu",
(unsigned long)aead_form->key.length);
2617 sess->aes_gcm = aes_gcm;
2618 sess->mac_len = aead_form->digest_length;
2619 sess->iv_offset = aead_form->iv.offset;
2620 sess->iv_length = aead_form->iv.length;
2621 sess->aad_length = aead_form->aad_length;
2623 cpt_fc_ciph_set_key(ctx, enc_type, aead_form->key.data,
2624 aead_form->key.length, NULL);
2626 cpt_fc_auth_set_key(ctx, auth_type, NULL, 0, aead_form->digest_length);
2631 static __rte_always_inline int
2632 fill_sess_cipher(struct rte_crypto_sym_xform *xform,
2633 struct cpt_sess_misc *sess)
2635 struct rte_crypto_cipher_xform *c_form;
2636 cipher_type_t enc_type = 0; /* NULL Cipher type */
2637 uint32_t cipher_key_len = 0;
2638 uint8_t zsk_flag = 0, aes_ctr = 0, is_null = 0;
2640 c_form = &xform->cipher;
2642 if (c_form->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
2643 sess->cpt_op |= CPT_OP_CIPHER_ENCRYPT;
2644 else if (c_form->op == RTE_CRYPTO_CIPHER_OP_DECRYPT)
2645 sess->cpt_op |= CPT_OP_CIPHER_DECRYPT;
CPT_LOG_DP_ERR("Unknown cipher operation");
2651 switch (c_form->algo) {
2652 case RTE_CRYPTO_CIPHER_AES_CBC:
2654 cipher_key_len = 16;
2656 case RTE_CRYPTO_CIPHER_3DES_CBC:
2657 enc_type = DES3_CBC;
2658 cipher_key_len = 24;
2660 case RTE_CRYPTO_CIPHER_DES_CBC:
2661 /* DES is implemented using 3DES in hardware */
2662 enc_type = DES3_CBC;
2665 case RTE_CRYPTO_CIPHER_AES_CTR:
2667 cipher_key_len = 16;
2670 case RTE_CRYPTO_CIPHER_NULL:
2674 case RTE_CRYPTO_CIPHER_KASUMI_F8:
2675 enc_type = KASUMI_F8_ECB;
2676 cipher_key_len = 16;
2679 case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
2680 enc_type = SNOW3G_UEA2;
2681 cipher_key_len = 16;
2684 case RTE_CRYPTO_CIPHER_ZUC_EEA3:
2685 enc_type = ZUC_EEA3;
2686 cipher_key_len = 16;
2689 case RTE_CRYPTO_CIPHER_AES_XTS:
2691 cipher_key_len = 16;
2693 case RTE_CRYPTO_CIPHER_3DES_ECB:
2694 enc_type = DES3_ECB;
2695 cipher_key_len = 24;
2697 case RTE_CRYPTO_CIPHER_AES_ECB:
2699 cipher_key_len = 16;
2701 case RTE_CRYPTO_CIPHER_3DES_CTR:
2702 case RTE_CRYPTO_CIPHER_AES_F8:
2703 case RTE_CRYPTO_CIPHER_ARC4:
2704 CPT_LOG_DP_ERR("Crypto: Unsupported cipher algo %u",
2708 CPT_LOG_DP_ERR("Crypto: Undefined cipher algo %u specified",
2713 if (c_form->key.length < cipher_key_len) {
2714 CPT_LOG_DP_ERR("Invalid cipher params keylen %lu",
2715 (unsigned long) c_form->key.length);
2719 sess->zsk_flag = zsk_flag;
2721 sess->aes_ctr = aes_ctr;
2722 sess->iv_offset = c_form->iv.offset;
2723 sess->iv_length = c_form->iv.length;
2724 sess->is_null = is_null;
2726 cpt_fc_ciph_set_key(SESS_PRIV(sess), enc_type, c_form->key.data,
2727 c_form->key.length, NULL);
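/*
 * Note that the key.length check above is only a lower bound; the
 * exact per-algorithm key sizes are validated inside
 * cpt_fc_ciph_set_key(). DES_CBC rides on the 3DES engine (see the
 * comment in its case), which is why it shares the DES3_CBC type here.
 */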
static __rte_always_inline int
fill_sess_auth(struct rte_crypto_sym_xform *xform,
	       struct cpt_sess_misc *sess)
{
	struct rte_crypto_auth_xform *a_form;
	auth_type_t auth_type = 0; /* NULL Auth type */
	uint8_t zsk_flag = 0, aes_gcm = 0, is_null = 0;

	a_form = &xform->auth;

	if (a_form->op == RTE_CRYPTO_AUTH_OP_VERIFY)
		sess->cpt_op |= CPT_OP_AUTH_VERIFY;
	else if (a_form->op == RTE_CRYPTO_AUTH_OP_GENERATE)
		sess->cpt_op |= CPT_OP_AUTH_GENERATE;
	else {
		CPT_LOG_DP_ERR("Unknown auth operation");
		return -1;
	}

	if (a_form->key.length > 64) {
		CPT_LOG_DP_ERR("Auth key length is big");
		return -1;
	}

	switch (a_form->algo) {
	case RTE_CRYPTO_AUTH_SHA1_HMAC:
		/* Fall through */
	case RTE_CRYPTO_AUTH_SHA1:
		auth_type = SHA1_TYPE;
		break;
	case RTE_CRYPTO_AUTH_SHA256_HMAC:
	case RTE_CRYPTO_AUTH_SHA256:
		auth_type = SHA2_SHA256;
		break;
	case RTE_CRYPTO_AUTH_SHA512_HMAC:
	case RTE_CRYPTO_AUTH_SHA512:
		auth_type = SHA2_SHA512;
		break;
	case RTE_CRYPTO_AUTH_AES_GMAC:
		auth_type = GMAC_TYPE;
		aes_gcm = 1;
		break;
	case RTE_CRYPTO_AUTH_SHA224_HMAC:
	case RTE_CRYPTO_AUTH_SHA224:
		auth_type = SHA2_SHA224;
		break;
	case RTE_CRYPTO_AUTH_SHA384_HMAC:
	case RTE_CRYPTO_AUTH_SHA384:
		auth_type = SHA2_SHA384;
		break;
	case RTE_CRYPTO_AUTH_MD5_HMAC:
	case RTE_CRYPTO_AUTH_MD5:
		auth_type = MD5_TYPE;
		break;
	case RTE_CRYPTO_AUTH_KASUMI_F9:
		auth_type = KASUMI_F9_ECB;
		/*
		 * Indicate that direction needs to be taken out
		 * from end of src
		 */
		zsk_flag = K_F9;
		break;
	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
		auth_type = SNOW3G_UIA2;
		zsk_flag = ZS_IA;
		break;
	case RTE_CRYPTO_AUTH_ZUC_EIA3:
		auth_type = ZUC_EIA3;
		zsk_flag = ZS_IA;
		break;
	case RTE_CRYPTO_AUTH_NULL:
		auth_type = 0;
		is_null = 1;
		break;
	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
	case RTE_CRYPTO_AUTH_AES_CMAC:
	case RTE_CRYPTO_AUTH_AES_CBC_MAC:
		CPT_LOG_DP_ERR("Crypto: Unsupported hash algo %u",
			       a_form->algo);
		return -1;
	default:
		CPT_LOG_DP_ERR("Crypto: Undefined Hash algo %u specified",
			       a_form->algo);
		return -1;
	}

	sess->zsk_flag = zsk_flag;
	sess->aes_gcm = aes_gcm;
	sess->mac_len = a_form->digest_length;
	sess->is_null = is_null;
	if (zsk_flag) {
		sess->auth_iv_offset = a_form->iv.offset;
		sess->auth_iv_length = a_form->iv.length;
	}
	cpt_fc_auth_set_key(SESS_PRIV(sess), auth_type, a_form->key.data,
			    a_form->key.length, a_form->digest_length);

	return 0;
}
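/*
 * zsk_flag doubles as a per-algorithm hint for the datapath: K_F9
 * makes fill_digest_params() dig the KASUMI direction bit out of the
 * tail of the source buffer, while ZS_IA only routes the auth IV. The
 * auth IV offset/length are captured here but consumed at enqueue
 * time.
 */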
static __rte_always_inline int
fill_sess_gmac(struct rte_crypto_sym_xform *xform,
	       struct cpt_sess_misc *sess)
{
	struct rte_crypto_auth_xform *a_form;
	cipher_type_t enc_type = 0; /* NULL Cipher type */
	auth_type_t auth_type = 0; /* NULL Auth type */
	void *ctx = SESS_PRIV(sess);

	a_form = &xform->auth;

	if (a_form->op == RTE_CRYPTO_AUTH_OP_GENERATE)
		sess->cpt_op |= CPT_OP_ENCODE;
	else if (a_form->op == RTE_CRYPTO_AUTH_OP_VERIFY)
		sess->cpt_op |= CPT_OP_DECODE;
	else {
		CPT_LOG_DP_ERR("Unknown auth operation");
		return -1;
	}

	switch (a_form->algo) {
	case RTE_CRYPTO_AUTH_AES_GMAC:
		enc_type = AES_GCM;
		auth_type = GMAC_TYPE;
		break;
	default:
		CPT_LOG_DP_ERR("Crypto: Undefined cipher algo %u specified",
			       a_form->algo);
		return -1;
	}

	sess->zsk_flag = 0;
	sess->aes_gcm = 0;
	sess->is_gmac = 1;
	sess->iv_offset = a_form->iv.offset;
	sess->iv_length = a_form->iv.length;
	sess->mac_len = a_form->digest_length;

	cpt_fc_ciph_set_key(ctx, enc_type, a_form->key.data,
			    a_form->key.length, NULL);
	cpt_fc_auth_set_key(ctx, auth_type, NULL, 0, a_form->digest_length);

	return 0;
}
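/*
 * GMAC is programmed as a degenerate GCM: the auth xform's key feeds
 * the cipher context (enc_type AES_GCM) while the hash side only
 * carries GMAC_TYPE and the digest length. sess->is_gmac set here is
 * what later triggers the GCM-style salt handling in fill_fc_params().
 */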
static __rte_always_inline void *
alloc_op_meta(struct rte_mbuf *m_src, buf_ptr_t *buf, int32_t len,
	      struct rte_mempool *cpt_meta_pool)
{
	uint8_t *mdata = NULL;

#ifndef CPT_ALWAYS_USE_SEPARATE_BUF
	if (likely(m_src && (m_src->nb_segs == 1))) {
		int32_t tailroom;
		phys_addr_t mphys;

		/* Check if tailroom is sufficient to hold meta data */
		tailroom = rte_pktmbuf_tailroom(m_src);
		if (likely(tailroom > len + 8)) {
			mdata = (uint8_t *)m_src->buf_addr + m_src->buf_len;
			mphys = m_src->buf_physaddr + m_src->buf_len;
			mdata -= len;
			mphys -= len;
			buf->vaddr = mdata;
			buf->dma_addr = mphys;
			buf->size = len;
			/* Indicate that this is a mbuf allocated mdata */
			mdata = (uint8_t *)((uint64_t)mdata | 1ull);
			return mdata;
		}
	}
#else
	RTE_SET_USED(m_src);
#endif
	if (unlikely(rte_mempool_get(cpt_meta_pool, (void **)&mdata) < 0))
		return NULL;
	buf->vaddr = mdata;
	buf->dma_addr = rte_mempool_virt2iova(mdata);
	buf->size = len;
	return mdata;
}
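/*
 * Fast-path placement: the meta area is carved out of the last `len`
 * bytes of the mbuf's own buffer (tailroom > len + 8 keeps some slack
 * between packet data and meta). The returned pointer is tagged in
 * bit 0 so free_op_meta() can tell an mbuf-resident area, which needs
 * no freeing, from a mempool object, which does.
 */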
/**
 * free_op_meta - free meta buffer to mempool.
 * @param mdata: pointer to the meta buffer.
 * @param cpt_meta_pool: meta buffer mempool.
 */
static __rte_always_inline void
free_op_meta(void *mdata, struct rte_mempool *cpt_meta_pool)
{
	bool nofree = ((uintptr_t)mdata & 1ull);

	if (likely(nofree))
		return;
	rte_mempool_put(cpt_meta_pool, mdata);
}
static __rte_always_inline uint32_t
prepare_iov_from_pkt(struct rte_mbuf *pkt,
		     iov_ptr_t *iovec, uint32_t start_offset)
{
	uint16_t index = 0;
	void *seg_data = NULL;
	phys_addr_t seg_phys;
	int32_t seg_size = 0;

	if (!pkt) {
		iovec->buf_cnt = 0;
		return 0;
	}

	if (!start_offset) {
		seg_data = rte_pktmbuf_mtod(pkt, void *);
		seg_phys = rte_pktmbuf_mtophys(pkt);
		seg_size = pkt->data_len;
	} else {
		while (start_offset >= pkt->data_len) {
			start_offset -= pkt->data_len;
			pkt = pkt->next;
		}
		seg_data = rte_pktmbuf_mtod_offset(pkt, void *, start_offset);
		seg_phys = rte_pktmbuf_mtophys_offset(pkt, start_offset);
		seg_size = pkt->data_len - start_offset;
		if (!seg_size)
			return 1;
	}

	/* First segment */
	iovec->bufs[index].vaddr = seg_data;
	iovec->bufs[index].dma_addr = seg_phys;
	iovec->bufs[index].size = seg_size;
	index++;
	pkt = pkt->next;

	while (unlikely(pkt != NULL)) {
		seg_data = rte_pktmbuf_mtod(pkt, void *);
		seg_phys = rte_pktmbuf_mtophys(pkt);
		seg_size = pkt->data_len;
		if (!seg_size)
			break;

		iovec->bufs[index].vaddr = seg_data;
		iovec->bufs[index].dma_addr = seg_phys;
		iovec->bufs[index].size = seg_size;
		index++;
		pkt = pkt->next;
	}

	iovec->buf_cnt = index;
	return 0;
}
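/*
 * A sketch of the offset walk above, assuming a 3-segment chain of
 * 100B each and start_offset = 150: the loop consumes the first
 * segment (offset 150 -> 50), the first iovec entry then covers bytes
 * 50..99 of segment 2, and the remaining segments are appended 1:1.
 * A zero-length segment terminates the walk early.
 */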
static __rte_always_inline uint32_t
prepare_iov_from_pkt_inplace(struct rte_mbuf *pkt,
			     fc_params_t *param, uint32_t *flags)
{
	uint16_t index = 0;
	void *seg_data = NULL;
	phys_addr_t seg_phys;
	uint32_t seg_size = 0;
	iov_ptr_t *iovec;

	seg_data = rte_pktmbuf_mtod(pkt, void *);
	seg_phys = rte_pktmbuf_mtophys(pkt);
	seg_size = pkt->data_len;

	/* First segment */
	if (likely(!pkt->next)) {
		uint32_t headroom, tailroom;

		*flags |= SINGLE_BUF_INPLACE;
		headroom = rte_pktmbuf_headroom(pkt);
		tailroom = rte_pktmbuf_tailroom(pkt);
		if (likely((headroom >= 24) &&
			   (tailroom >= 8))) {
			/* In 83XX this is a prerequisite for Direct mode */
			*flags |= SINGLE_BUF_HEADTAILROOM;
		}
		param->bufs[0].vaddr = seg_data;
		param->bufs[0].dma_addr = seg_phys;
		param->bufs[0].size = seg_size;
		return 0;
	}

	iovec = param->src_iov;
	iovec->bufs[index].vaddr = seg_data;
	iovec->bufs[index].dma_addr = seg_phys;
	iovec->bufs[index].size = seg_size;
	index++;
	pkt = pkt->next;

	while (unlikely(pkt != NULL)) {
		seg_data = rte_pktmbuf_mtod(pkt, void *);
		seg_phys = rte_pktmbuf_mtophys(pkt);
		seg_size = pkt->data_len;
		if (!seg_size)
			break;

		iovec->bufs[index].vaddr = seg_data;
		iovec->bufs[index].dma_addr = seg_phys;
		iovec->bufs[index].size = seg_size;
		index++;
		pkt = pkt->next;
	}

	iovec->buf_cnt = index;
	return 0;
}
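/*
 * The 24B/8B head/tailroom demand gates the 83XX "Direct" submission
 * mode: with SINGLE_BUF_HEADTAILROOM set, the request can presumably
 * use the mbuf's own head/tail space, and fill_fc_params() will also
 * pick the smaller lb_mlen meta size for it.
 */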
static __rte_always_inline int
fill_fc_params(struct rte_crypto_op *cop,
	       struct cpt_sess_misc *sess_misc,
	       struct cpt_qp_meta_info *m_info,
	       void **mdata_ptr,
	       void **prep_req)
{
	uint32_t space = 0;
	struct rte_crypto_sym_op *sym_op = cop->sym;
	void *mdata = NULL;
	uintptr_t *op;
	uint32_t mc_hash_off;
	uint32_t flags = 0;
	uint64_t d_offs, d_lens;
	struct rte_mbuf *m_src, *m_dst;
	uint8_t cpt_op = sess_misc->cpt_op;
#ifdef CPT_ALWAYS_USE_SG_MODE
	uint8_t inplace = 0;
#else
	uint8_t inplace = 1;
#endif
	fc_params_t fc_params;
	char src[SRC_IOV_SIZE];
	char dst[SRC_IOV_SIZE];
	uint32_t iv_buf[4];
	int ret;

	if (likely(sess_misc->iv_length)) {
		flags |= VALID_IV_BUF;
		fc_params.iv_buf = rte_crypto_op_ctod_offset(cop,
				   uint8_t *, sess_misc->iv_offset);
		if (sess_misc->aes_ctr &&
		    unlikely(sess_misc->iv_length != 16)) {
			memcpy((uint8_t *)iv_buf,
			       rte_crypto_op_ctod_offset(cop,
			       uint8_t *, sess_misc->iv_offset), 12);
			iv_buf[3] = rte_cpu_to_be_32(0x1);
			fc_params.iv_buf = iv_buf;
		}
	}

	if (sess_misc->zsk_flag) {
		fc_params.auth_iv_buf = rte_crypto_op_ctod_offset(cop,
					uint8_t *,
					sess_misc->auth_iv_offset);
		if (sess_misc->zsk_flag != ZS_EA)
			inplace = 0;
	}
	m_src = sym_op->m_src;
	m_dst = sym_op->m_dst;

	if (sess_misc->aes_gcm) {
		uint8_t *salt;
		uint8_t *aad_data;
		uint16_t aad_len;

		d_offs = sym_op->aead.data.offset;
		d_lens = sym_op->aead.data.length;
		mc_hash_off = sym_op->aead.data.offset +
			      sym_op->aead.data.length;

		aad_data = sym_op->aead.aad.data;
		aad_len = sess_misc->aad_length;
		if (likely((aad_data + aad_len) ==
			   rte_pktmbuf_mtod_offset(m_src,
						   uint8_t *,
						   sym_op->aead.data.offset))) {
			d_offs = (d_offs - aad_len) | (d_offs << 16);
			d_lens = (d_lens + aad_len) | (d_lens << 32);
		} else {
			fc_params.aad_buf.vaddr = sym_op->aead.aad.data;
			fc_params.aad_buf.dma_addr = sym_op->aead.aad.phys_addr;
			fc_params.aad_buf.size = aad_len;
			flags |= VALID_AAD_BUF;
			inplace = 0;
			d_offs = d_offs << 16;
			d_lens = d_lens << 32;
		}

		salt = fc_params.iv_buf;
		if (unlikely(*(uint32_t *)salt != sess_misc->salt)) {
			cpt_fc_salt_update(SESS_PRIV(sess_misc), salt);
			sess_misc->salt = *(uint32_t *)salt;
		}
		fc_params.iv_buf = salt + 4;
		if (likely(sess_misc->mac_len)) {
			struct rte_mbuf *m = (cpt_op & CPT_OP_ENCODE) ? m_dst :
					     m_src;

			if (!m)
				m = m_src;

			/* hmac immediately following data is best case */
			if (unlikely(rte_pktmbuf_mtod(m, uint8_t *) +
				     mc_hash_off !=
				     (uint8_t *)sym_op->aead.digest.data)) {
				flags |= VALID_MAC_BUF;
				fc_params.mac_buf.size = sess_misc->mac_len;
				fc_params.mac_buf.vaddr =
					sym_op->aead.digest.data;
				fc_params.mac_buf.dma_addr =
					sym_op->aead.digest.phys_addr;
				inplace = 0;
			}
		}
	} else {
		d_offs = sym_op->cipher.data.offset;
		d_lens = sym_op->cipher.data.length;
		mc_hash_off = sym_op->cipher.data.offset +
			      sym_op->cipher.data.length;
		d_offs = (d_offs << 16) | sym_op->auth.data.offset;
		d_lens = (d_lens << 32) | sym_op->auth.data.length;

		if (mc_hash_off < (sym_op->auth.data.offset +
				   sym_op->auth.data.length)) {
			mc_hash_off = (sym_op->auth.data.offset +
				       sym_op->auth.data.length);
		}
		/* for gmac, salt should be updated like in gcm */
		if (unlikely(sess_misc->is_gmac)) {
			uint8_t *salt;

			salt = fc_params.iv_buf;
			if (unlikely(*(uint32_t *)salt != sess_misc->salt)) {
				cpt_fc_salt_update(SESS_PRIV(sess_misc), salt);
				sess_misc->salt = *(uint32_t *)salt;
			}
			fc_params.iv_buf = salt + 4;
		}
		if (likely(sess_misc->mac_len)) {
			struct rte_mbuf *m;

			m = (cpt_op & CPT_OP_ENCODE) ? m_dst : m_src;
			if (!m)
				m = m_src;

			/* hmac immediately following data is best case */
			if (unlikely(rte_pktmbuf_mtod(m, uint8_t *) +
				     mc_hash_off !=
				     (uint8_t *)sym_op->auth.digest.data)) {
				flags |= VALID_MAC_BUF;
				fc_params.mac_buf.size =
					sess_misc->mac_len;
				fc_params.mac_buf.vaddr =
					sym_op->auth.digest.data;
				fc_params.mac_buf.dma_addr =
					sym_op->auth.digest.phys_addr;
				inplace = 0;
			}
		}
	}
	fc_params.ctx_buf.vaddr = SESS_PRIV(sess_misc);
	fc_params.ctx_buf.dma_addr = sess_misc->ctx_dma_addr;

	if (unlikely(sess_misc->is_null || sess_misc->cpt_op == CPT_OP_DECODE))
		inplace = 0;

	if (likely(!m_dst && inplace)) {
		/* Case of single buffer without AAD buf or
		 * separate mac buf in place and
		 * not air crypto
		 */
		fc_params.dst_iov = fc_params.src_iov = (void *)src;

		if (unlikely(prepare_iov_from_pkt_inplace(m_src,
							  &fc_params,
							  &flags))) {
			CPT_LOG_DP_ERR("Prepare inplace src iov failed");
			ret = -EINVAL;
			goto err_exit;
		}
	} else {
		/* Out of place processing */
		fc_params.src_iov = (void *)src;
		fc_params.dst_iov = (void *)dst;

		/* Store SG I/O in the api for reuse */
		if (prepare_iov_from_pkt(m_src, fc_params.src_iov, 0)) {
			CPT_LOG_DP_ERR("Prepare src iov failed");
			ret = -EINVAL;
			goto err_exit;
		}

		if (unlikely(m_dst != NULL)) {
			uint32_t pkt_len;

			/* Try to make room as much as src has */
			pkt_len = rte_pktmbuf_pkt_len(m_dst);

			if (unlikely(pkt_len < rte_pktmbuf_pkt_len(m_src))) {
				pkt_len = rte_pktmbuf_pkt_len(m_src) - pkt_len;
				if (!rte_pktmbuf_append(m_dst, pkt_len)) {
					CPT_LOG_DP_ERR("Not enough space in "
						       "m_dst %p, need %u"
						       " more",
						       m_dst, pkt_len);
					ret = -EINVAL;
					goto err_exit;
				}
			}

			if (prepare_iov_from_pkt(m_dst, fc_params.dst_iov, 0)) {
				CPT_LOG_DP_ERR("Prepare dst iov failed for "
					       "m_dst %p", m_dst);
				ret = -EINVAL;
				goto err_exit;
			}
		} else {
			fc_params.dst_iov = (void *)src;
		}
	}

	if (likely(flags & SINGLE_BUF_HEADTAILROOM))
		mdata = alloc_op_meta(m_src, &fc_params.meta_buf,
				      m_info->lb_mlen, m_info->pool);
	else
		mdata = alloc_op_meta(NULL, &fc_params.meta_buf,
				      m_info->sg_mlen, m_info->pool);

	if (unlikely(mdata == NULL)) {
		CPT_LOG_DP_ERR("Error allocating meta buffer for request");
		ret = -ENOMEM;
		goto err_exit;
	}

	op = (uintptr_t *)((uintptr_t)mdata & (uintptr_t)~1ull);
	op[0] = (uintptr_t)mdata;
	op[1] = (uintptr_t)cop;
	op[2] = op[3] = 0; /* Used to indicate auth verify */
	space += 4 * sizeof(uint64_t);

	fc_params.meta_buf.vaddr = (uint8_t *)op + space;
	fc_params.meta_buf.dma_addr += space;
	fc_params.meta_buf.size -= space;

	/* Finally prepare the instruction */
	if (cpt_op & CPT_OP_ENCODE)
		*prep_req = cpt_fc_enc_hmac_prep(flags, d_offs, d_lens,
						 &fc_params, op);
	else
		*prep_req = cpt_fc_dec_hmac_prep(flags, d_offs, d_lens,
						 &fc_params, op);

	if (unlikely(*prep_req == NULL)) {
		CPT_LOG_DP_ERR("Preparing request failed due to bad input arg");
		ret = -EINVAL;
		goto free_mdata_and_exit;
	}

	*mdata_ptr = mdata;
	return 0;

free_mdata_and_exit:
	free_op_meta(mdata, m_info->pool);
err_exit:
	return ret;
}
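/*
 * Worked example of the offset/length packing above, with assumed
 * values: for GCM with aead.data.offset = 16, aead.data.length = 64
 * and a 16B AAD sitting immediately before the data,
 *
 *	d_offs = (16 - 16) | (16 << 16)  -> encrypt at 16, auth from 0
 *	d_lens = (64 + 16) | (64 << 32)  -> auth 80B, encrypt 64B
 *
 * i.e. the auth region is widened to swallow the contiguous AAD
 * instead of passing a separate AAD pointer. The non-AEAD path packs
 * cipher offset/length into the high halves and auth offset/length
 * into the low halves the same way.
 */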
static __rte_always_inline void
compl_auth_verify(struct rte_crypto_op *op, uint8_t *gen_mac, uint64_t mac_len)
{
	uint8_t *mac;
	struct rte_crypto_sym_op *sym_op = op->sym;

	if (sym_op->auth.digest.data)
		mac = sym_op->auth.digest.data;
	else
		mac = rte_pktmbuf_mtod_offset(sym_op->m_src, uint8_t *,
					      sym_op->auth.data.length +
					      sym_op->auth.data.offset);
	if (!mac) {
		op->status = RTE_CRYPTO_OP_STATUS_ERROR;
		return;
	}

	if (memcmp(mac, gen_mac, mac_len))
		op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
	else
		op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
}
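/*
 * gen_mac here is the microcode-produced digest that
 * fill_digest_params() parked at op[2] in the meta area for the
 * verify case; the packet-tail fallback covers ops that supplied no
 * separate digest buffer.
 */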
static __rte_always_inline int
instance_session_cfg(struct rte_crypto_sym_xform *xform, void *sess)
{
	struct rte_crypto_sym_xform *chain;

	CPT_PMD_INIT_FUNC_TRACE();

	if (cpt_is_algo_supported(xform))
		goto err;

	chain = xform;
	while (chain) {
		switch (chain->type) {
		case RTE_CRYPTO_SYM_XFORM_AEAD:
			if (fill_sess_aead(chain, sess))
				goto err;
			break;
		case RTE_CRYPTO_SYM_XFORM_CIPHER:
			if (fill_sess_cipher(chain, sess))
				goto err;
			break;
		case RTE_CRYPTO_SYM_XFORM_AUTH:
			if (chain->auth.algo == RTE_CRYPTO_AUTH_AES_GMAC) {
				if (fill_sess_gmac(chain, sess))
					goto err;
			} else {
				if (fill_sess_auth(chain, sess))
					goto err;
			}
			break;
		default:
			CPT_LOG_DP_ERR("Invalid crypto xform type");
			goto err;
		}
		chain = chain->next;
	}

	return 0;

err:
	return -1;
}
static __rte_always_inline void
find_kasumif9_direction_and_length(uint8_t *src,
				   uint32_t counter_num_bytes,
				   uint32_t *addr_length_in_bits,
				   uint8_t *addr_direction)
{
	uint8_t found = 0;
	uint32_t pos;

	while (!found && counter_num_bytes > 0) {
		counter_num_bytes--;
		if (src[counter_num_bytes] == 0x00)
			continue;
		pos = rte_bsf32(src[counter_num_bytes]);
		if (pos == 7) {
			if (likely(counter_num_bytes > 0)) {
				int32_t last_byte = src[counter_num_bytes - 1];

				*addr_direction = last_byte & 0x1;
				*addr_length_in_bits = counter_num_bytes * 8
						       - 1;
			}
		} else {
			int32_t last_byte = src[counter_num_bytes];

			*addr_direction = (last_byte >> (pos + 1)) & 0x1;
			*addr_length_in_bits = counter_num_bytes * 8
					       + (8 - (pos + 2));
		}
		found = 1;
	}
}
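/*
 * Example of the scan above (values assumed): the input ends with the
 * DIRECTION bit, a single '1' pad bit, then zeros. If the last
 * non-zero byte is 0xC0 (binary 1100_0000) at index n, rte_bsf32()
 * gives pos = 6, so the direction bit is (0xC0 >> 7) & 1 = 1 and the
 * length works out to n * 8 bits, i.e. the padded tail occupied the
 * whole byte. The pos == 7 branch handles the pad bit landing on a
 * byte's MSB, where the direction bit belongs to the previous byte.
 */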
/*
 * This handles all auth only except AES_GMAC
 */
static __rte_always_inline int
fill_digest_params(struct rte_crypto_op *cop,
		   struct cpt_sess_misc *sess,
		   struct cpt_qp_meta_info *m_info,
		   void **mdata_ptr,
		   void **prep_req)
{
	uint32_t space = 0;
	struct rte_crypto_sym_op *sym_op = cop->sym;
	void *mdata;
	phys_addr_t mphys;
	uint64_t *op;
	uint32_t auth_range_off;
	uint32_t flags = 0;
	uint64_t d_offs = 0, d_lens;
	struct rte_mbuf *m_src, *m_dst;
	uint16_t auth_op = sess->cpt_op & CPT_OP_AUTH_MASK;
	uint16_t mac_len = sess->mac_len;
	fc_params_t params;
	char src[SRC_IOV_SIZE];
	uint8_t iv_buf[16];
	int ret;

	memset(&params, 0, sizeof(fc_params_t));

	m_src = sym_op->m_src;

	/* For just digest lets force mempool alloc */
	mdata = alloc_op_meta(NULL, &params.meta_buf, m_info->sg_mlen,
			      m_info->pool);
	if (mdata == NULL) {
		ret = -ENOMEM;
		goto err_exit;
	}

	mphys = params.meta_buf.dma_addr;

	op = mdata;
	op[0] = (uintptr_t)mdata;
	op[1] = (uintptr_t)cop;
	op[2] = op[3] = 0; /* Used to indicate auth verify */
	space += 4 * sizeof(uint64_t);

	auth_range_off = sym_op->auth.data.offset;

	flags = VALID_MAC_BUF;
	params.src_iov = (void *)src;
	if (unlikely(sess->zsk_flag)) {
		/*
		 * Since for Zuc, Kasumi, Snow3g offsets are in bits
		 * we will send pass through even for auth only case,
		 * let MC handle it
		 */
		d_offs = auth_range_off;
		auth_range_off = 0;
		params.auth_iv_buf = rte_crypto_op_ctod_offset(cop,
					uint8_t *, sess->auth_iv_offset);
		if (sess->zsk_flag == K_F9) {
			uint32_t length_in_bits, num_bytes;
			uint8_t *src, direction = 0;

			memcpy(iv_buf, rte_pktmbuf_mtod(cop->sym->m_src,
							uint8_t *), 8);
			/*
			 * This is kasumi f9, take direction from
			 * source buffer
			 */
			length_in_bits = cop->sym->auth.data.length;
			num_bytes = (length_in_bits >> 3);
			src = rte_pktmbuf_mtod(cop->sym->m_src, uint8_t *);
			find_kasumif9_direction_and_length(src,
						num_bytes,
						&length_in_bits,
						&direction);
			length_in_bits -= 64;
			cop->sym->auth.data.offset += 64;
			d_offs = cop->sym->auth.data.offset;
			auth_range_off = d_offs / 8;
			cop->sym->auth.data.length = length_in_bits;

			/* Store it at end of auth iv */
			iv_buf[8] = direction;
			params.auth_iv_buf = iv_buf;
		}
	}

	d_lens = sym_op->auth.data.length;

	params.ctx_buf.vaddr = SESS_PRIV(sess);
	params.ctx_buf.dma_addr = sess->ctx_dma_addr;

	if (auth_op == CPT_OP_AUTH_GENERATE) {
		if (sym_op->auth.digest.data) {
			/*
			 * Digest to be generated
			 * in separate buffer
			 */
			params.mac_buf.size = sess->mac_len;
			params.mac_buf.vaddr = sym_op->auth.digest.data;
			params.mac_buf.dma_addr =
				sym_op->auth.digest.phys_addr;
		} else {
			uint32_t off = sym_op->auth.data.offset +
				       sym_op->auth.data.length;
			int32_t dlen, space;

			m_dst = sym_op->m_dst ?
				sym_op->m_dst : sym_op->m_src;
			dlen = rte_pktmbuf_pkt_len(m_dst);

			space = off + mac_len - dlen;
			if (space > 0)
				if (!rte_pktmbuf_append(m_dst, space)) {
					CPT_LOG_DP_ERR("Failed to extend "
						       "mbuf by %uB", space);
					ret = -EINVAL;
					goto free_mdata_and_exit;
				}

			params.mac_buf.vaddr =
				rte_pktmbuf_mtod_offset(m_dst, void *, off);
			params.mac_buf.dma_addr =
				rte_pktmbuf_mtophys_offset(m_dst, off);
			params.mac_buf.size = mac_len;
		}
	} else {
		/* Need space for storing generated mac */
		params.mac_buf.vaddr = (uint8_t *)mdata + space;
		params.mac_buf.dma_addr = mphys + space;
		params.mac_buf.size = mac_len;
		space += RTE_ALIGN_CEIL(mac_len, 8);
		op[2] = (uintptr_t)params.mac_buf.vaddr;
		op[3] = mac_len;
	}

	params.meta_buf.vaddr = (uint8_t *)mdata + space;
	params.meta_buf.dma_addr = mphys + space;
	params.meta_buf.size -= space;

	/* Out of place processing */
	params.src_iov = (void *)src;

	/* Store SG I/O in the api for reuse */
	if (prepare_iov_from_pkt(m_src, params.src_iov, auth_range_off)) {
		CPT_LOG_DP_ERR("Prepare src iov failed");
		ret = -EINVAL;
		goto free_mdata_and_exit;
	}

	*prep_req = cpt_fc_enc_hmac_prep(flags, d_offs, d_lens, &params, op);
	if (unlikely(*prep_req == NULL)) {
		ret = -EINVAL;
		goto free_mdata_and_exit;
	}

	*mdata_ptr = mdata;
	return 0;

free_mdata_and_exit:
	free_op_meta(mdata, m_info->pool);
err_exit:
	return ret;
}
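/*
 * Resulting meta layout for the verify case, as built above:
 *
 *	op[0..3]   request header (mdata, cop, mac vaddr, mac_len)
 *	mac        RTE_ALIGN_CEIL(mac_len, 8) bytes of generated MAC
 *	meta       remainder, handed to cpt_fc_enc_hmac_prep()
 *
 * so completion handling can pull the computed digest straight out of
 * op[2] and compare it via compl_auth_verify().
 */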
#endif /*_CPT_UCODE_H_ */