/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Cavium, Inc
 */

#include "cpt_common.h"
#include "cpt_hw_types.h"
#include "cpt_mcode_defines.h"

/*
 * This file defines functions that are interfaces to the microcode spec.
 */
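/*
 * The table below holds the ZUC LFSR key-loading constants d0..d15 from
 * the ZUC specification (each 15 bits), stored as big-endian byte pairs;
 * it is copied verbatim into the microcode context as zuc_const below.
 */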
static uint8_t zuc_d[32] = {
	0x44, 0xD7, 0x26, 0xBC, 0x62, 0x6B, 0x13, 0x5E,
	0x57, 0x89, 0x35, 0xE2, 0x71, 0x35, 0x09, 0xAF,
	0x4D, 0x78, 0x2F, 0x13, 0x6B, 0xC4, 0x1A, 0xF1,
	0x5E, 0x26, 0x3C, 0x4D, 0x78, 0x9A, 0x47, 0xAC
};
static __rte_always_inline int
cpt_is_algo_supported(struct rte_crypto_sym_xform *xform)
	/*
	 * Microcode only supports the following combinations:
	 * - Encryption followed by authentication
	 * - Authentication followed by decryption
	 */
	if ((xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) &&
	    (xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) &&
	    (xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)) {
		/* Unsupported as of now by microcode */
		CPT_LOG_DP_ERR("Unsupported combination");
	if ((xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) &&
	    (xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) &&
	    (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT)) {
		/* For GMAC auth there is no cipher operation */
		if (xform->aead.algo != RTE_CRYPTO_AEAD_AES_GCM ||
		    xform->next->auth.algo !=
		    RTE_CRYPTO_AUTH_AES_GMAC) {
			/* Unsupported as of now by microcode */
			CPT_LOG_DP_ERR("Unsupported combination");
static __rte_always_inline void
gen_key_snow3g(uint8_t *ck, uint32_t *keyx)
	for (i = 0; i < 4; i++) {
		keyx[3 - i] = (ck[base] << 24) | (ck[base + 1] << 16) |
			(ck[base + 2] << 8) | (ck[base + 3]);
		keyx[3 - i] = rte_cpu_to_be_32(keyx[3 - i]);
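/*
 * Note: gen_key_snow3g() packs the 16-byte confidentiality key CK into
 * four 32-bit words in reverse word order (keyx[3] holds CK[0..3]) and
 * stores each word big-endian, which is the layout used for
 * zs_ctx.ci_key below.
 */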
static __rte_always_inline void
cpt_fc_salt_update(void *ctx,
	struct cpt_ctx *cpt_ctx = ctx;
	memcpy(&cpt_ctx->fctx.enc.encr_iv, salt, 4);

static __rte_always_inline int
cpt_fc_ciph_validate_key_aes(uint16_t key_len)

static __rte_always_inline int
cpt_fc_ciph_validate_key(cipher_type_t type, struct cpt_ctx *cpt_ctx,
		if (unlikely(cpt_fc_ciph_validate_key_aes(key_len) != 0))
		key_len = key_len / 2;
		if (unlikely(key_len == CPT_BYTE_24)) {
			CPT_LOG_DP_ERR("Invalid AES key len for XTS");
		if (unlikely(cpt_fc_ciph_validate_key_aes(key_len) != 0))
		if (unlikely(key_len != 16))
		/* No support for AEAD yet */
		if (unlikely(cpt_ctx->hash_type))
		fc_type = ZUC_SNOW3G;
		if (unlikely(key_len != 16))
		/* No support for AEAD yet */
		if (unlikely(cpt_ctx->hash_type))

static __rte_always_inline void
cpt_fc_ciph_set_key_passthrough(struct cpt_ctx *cpt_ctx, mc_fc_context_t *fctx)
	cpt_ctx->enc_cipher = 0;
	CPT_P_ENC_CTRL(fctx).enc_cipher = 0;

static __rte_always_inline void
cpt_fc_ciph_set_key_set_aes_key_type(mc_fc_context_t *fctx, uint16_t key_len)
	mc_aes_type_t aes_key_type = 0;
		aes_key_type = AES_128_BIT;
		aes_key_type = AES_192_BIT;
		aes_key_type = AES_256_BIT;
		/* This should not happen */
		CPT_LOG_DP_ERR("Invalid AES key len");
	CPT_P_ENC_CTRL(fctx).aes_key = aes_key_type;

static __rte_always_inline void
cpt_fc_ciph_set_key_snow3g_uea2(struct cpt_ctx *cpt_ctx, uint8_t *key,
	gen_key_snow3g(key, keyx);
	memcpy(cpt_ctx->zs_ctx.ci_key, keyx, key_len);
	cpt_ctx->fc_type = ZUC_SNOW3G;
	cpt_ctx->zsk_flags = 0;

static __rte_always_inline void
cpt_fc_ciph_set_key_zuc_eea3(struct cpt_ctx *cpt_ctx, uint8_t *key,
	memcpy(cpt_ctx->zs_ctx.ci_key, key, key_len);
	memcpy(cpt_ctx->zs_ctx.zuc_const, zuc_d, 32);
	cpt_ctx->fc_type = ZUC_SNOW3G;
	cpt_ctx->zsk_flags = 0;

static __rte_always_inline void
cpt_fc_ciph_set_key_kasumi_f8_ecb(struct cpt_ctx *cpt_ctx, uint8_t *key,
	memcpy(cpt_ctx->k_ctx.ci_key, key, key_len);
	cpt_ctx->zsk_flags = 0;
	cpt_ctx->fc_type = KASUMI;

static __rte_always_inline void
cpt_fc_ciph_set_key_kasumi_f8_cbc(struct cpt_ctx *cpt_ctx, uint8_t *key,
	memcpy(cpt_ctx->k_ctx.ci_key, key, key_len);
	cpt_ctx->zsk_flags = 0;
	cpt_ctx->fc_type = KASUMI;
static __rte_always_inline int
cpt_fc_ciph_set_key(void *ctx, cipher_type_t type, uint8_t *key,
		    uint16_t key_len, uint8_t *salt)
	struct cpt_ctx *cpt_ctx = ctx;
	mc_fc_context_t *fctx = &cpt_ctx->fctx;
	uint64_t *ctrl_flags = NULL;

	/* Validate key before proceeding */
	fc_type = cpt_fc_ciph_validate_key(type, cpt_ctx, key_len);
	if (unlikely(fc_type == -1))

	if (fc_type == FC_GEN) {
		cpt_ctx->fc_type = FC_GEN;
		ctrl_flags = (uint64_t *)&(fctx->enc.enc_ctrl.flags);
		*ctrl_flags = rte_be_to_cpu_64(*ctrl_flags);
		/*
		 * We need to always say the IV is from DPTR, as the user
		 * can sometimes override the IV per operation.
		 */
		CPT_P_ENC_CTRL(fctx).iv_source = CPT_FROM_DPTR;
		cpt_fc_ciph_set_key_passthrough(cpt_ctx, fctx);
		/* CPT performs DES using 3DES with the 8B DES-key
		 * replicated 2 more times to match the 24B 3DES-key.
		 * E.g. if the original key is "0x0a 0x0b", the new key is
		 * "0x0a 0x0b 0x0a 0x0b 0x0a 0x0b".
		 */
		/* Skipping the first 8B as it will be copied
		 * in the regular code flow.
		 */
		memcpy(fctx->enc.encr_key + key_len, key, key_len);
		memcpy(fctx->enc.encr_key + 2 * key_len, key, key_len);

		/* For DES3_ECB the IV needs to be from CTX. */
		CPT_P_ENC_CTRL(fctx).iv_source = CPT_FROM_CTX;
		cpt_fc_ciph_set_key_set_aes_key_type(fctx, key_len);
		/* Even though the IV source is DPTR, the
		 * AES-GCM salt is taken from CTX.
		 */
		memcpy(fctx->enc.encr_iv, salt, 4);
		/* Assuming it was just a salt update */
		cpt_fc_ciph_set_key_set_aes_key_type(fctx, key_len);
		key_len = key_len / 2;
		cpt_fc_ciph_set_key_set_aes_key_type(fctx, key_len);

		/* Copy key2 for XTS into ipad */
		memset(fctx->hmac.ipad, 0, sizeof(fctx->hmac.ipad));
		memcpy(fctx->hmac.ipad, &key[key_len], key_len);
		cpt_fc_ciph_set_key_snow3g_uea2(cpt_ctx, key, key_len);
		cpt_fc_ciph_set_key_zuc_eea3(cpt_ctx, key, key_len);
		cpt_fc_ciph_set_key_kasumi_f8_ecb(cpt_ctx, key, key_len);
		cpt_fc_ciph_set_key_kasumi_f8_cbc(cpt_ctx, key, key_len);
	/* Only for FC_GEN case */

		/* For GMAC auth, cipher must be NULL */
		if (cpt_ctx->hash_type != GMAC_TYPE)
			CPT_P_ENC_CTRL(fctx).enc_cipher = type;

		memcpy(fctx->enc.encr_key, key, key_len);
		*ctrl_flags = rte_cpu_to_be_64(*ctrl_flags);
	cpt_ctx->enc_cipher = type;

static __rte_always_inline uint32_t
fill_sg_comp(sg_comp_t *list,
	     phys_addr_t dma_addr,
	sg_comp_t *to = &list[i >> 2];

	to->u.s.len[i % 4] = rte_cpu_to_be_16(size);
	to->ptr[i % 4] = rte_cpu_to_be_64(dma_addr);
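/*
 * Each sg_comp_t packs four {16-bit length, 64-bit pointer} pairs, both
 * stored big-endian for the microcode: list entry i lives in component
 * i >> 2, slot i % 4. The fill_sg_comp*() helpers return the next free
 * entry index.
 */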
static __rte_always_inline uint32_t
fill_sg_comp_from_buf(sg_comp_t *list,
	sg_comp_t *to = &list[i >> 2];

	to->u.s.len[i % 4] = rte_cpu_to_be_16(from->size);
	to->ptr[i % 4] = rte_cpu_to_be_64(from->dma_addr);

static __rte_always_inline uint32_t
fill_sg_comp_from_buf_min(sg_comp_t *list,
	sg_comp_t *to = &list[i >> 2];
	uint32_t size = *psize;

	e_len = (size > from->size) ? from->size : size;
	to->u.s.len[i % 4] = rte_cpu_to_be_16(e_len);
	to->ptr[i % 4] = rte_cpu_to_be_64(from->dma_addr);
/*
 * This fills the MC-expected SGIO list
 * from the IOV given by the user.
 */
static __rte_always_inline uint32_t
fill_sg_comp_from_iov(sg_comp_t *list,
		      iov_ptr_t *from, uint32_t from_offset,
		      uint32_t *psize, buf_ptr_t *extra_buf,
		      uint32_t extra_offset)
	uint32_t extra_len = extra_buf ? extra_buf->size : 0;
	uint32_t size = *psize - extra_len;

	for (j = 0; (j < from->buf_cnt) && size; j++) {
		phys_addr_t e_dma_addr;
		sg_comp_t *to = &list[i >> 2];

		if (unlikely(from_offset)) {
			if (from_offset >= bufs[j].size) {
				from_offset -= bufs[j].size;
			e_dma_addr = bufs[j].dma_addr + from_offset;
			e_len = (size > (bufs[j].size - from_offset)) ?
				(bufs[j].size - from_offset) : size;
			e_dma_addr = bufs[j].dma_addr;
			e_len = (size > bufs[j].size) ?
		to->u.s.len[i % 4] = rte_cpu_to_be_16(e_len);
		to->ptr[i % 4] = rte_cpu_to_be_64(e_dma_addr);

		if (extra_len && (e_len >= extra_offset)) {
			/* Break the data at the given offset */
			uint32_t next_len = e_len - extra_offset;
			phys_addr_t next_dma = e_dma_addr + extra_offset;

				e_len = extra_offset;
			to->u.s.len[i % 4] = rte_cpu_to_be_16(e_len);

			/* Insert the extra data pointer */
				rte_cpu_to_be_16(extra_buf->size);
				rte_cpu_to_be_64(extra_buf->dma_addr);

			/* size was already decremented by extra_len */

			/* Insert the rest of the data */
				to->u.s.len[i % 4] = rte_cpu_to_be_16(next_len);
				to->ptr[i % 4] = rte_cpu_to_be_64(next_dma);

			extra_offset -= size;
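/*
 * Illustrative sketch (not part of the original driver): the prep
 * routines below repeatedly open-code the 8-byte offset control word
 * that precedes the IV in DPTR. A hypothetical helper packing it would
 * look like this:
 */
static __rte_always_inline uint64_t
cpt_pack_offset_ctrl_sketch(uint16_t encr_offset, uint8_t iv_offset,
			    uint8_t auth_offset)
{
	/* [31:16] = encr_offset, [15:8] = iv_offset, [7:0] = auth_offset,
	 * converted to big-endian as the microcode expects.
	 */
	return rte_cpu_to_be_64(((uint64_t)encr_offset << 16) |
				((uint64_t)iv_offset << 8) |
				((uint64_t)auth_offset));
}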
static __rte_always_inline int
cpt_enc_hmac_prep(uint32_t flags,
		  fc_params_t *fc_params,
	uint32_t iv_offset = 0;
	int32_t inputlen, outputlen, enc_dlen, auth_dlen;
	struct cpt_ctx *cpt_ctx;
	uint32_t cipher_type, hash_type;
	uint32_t mac_len, size;
	struct cpt_request_info *req;
	buf_ptr_t *meta_p, *aad_buf = NULL;
	uint32_t encr_offset, auth_offset;
	uint32_t encr_data_len, auth_data_len, aad_len = 0;
	uint32_t passthrough_len = 0;
	void *m_vaddr, *offset_vaddr;
	uint64_t m_dma, offset_dma, ctx_dma;
	vq_cmd_word0_t vq_cmd_w0;
	vq_cmd_word3_t vq_cmd_w3;
	opcode_info_t opcode;

	meta_p = &fc_params->meta_buf;
	m_vaddr = meta_p->vaddr;
	m_dma = meta_p->dma_addr;
	m_size = meta_p->size;

	encr_offset = ENCR_OFFSET(d_offs);
	auth_offset = AUTH_OFFSET(d_offs);
	encr_data_len = ENCR_DLEN(d_lens);
	auth_data_len = AUTH_DLEN(d_lens);
	if (unlikely(flags & VALID_AAD_BUF)) {
		/*
		 * We don't support both AAD
		 * and auth data separately.
		 */
		aad_len = fc_params->aad_buf.size;
		aad_buf = &fc_params->aad_buf;
	cpt_ctx = fc_params->ctx_buf.vaddr;
	cipher_type = cpt_ctx->enc_cipher;
	hash_type = cpt_ctx->hash_type;
	mac_len = cpt_ctx->mac_len;
	/*
	 * Save initial space following the app data for the completion code
	 * & alternate completion code, so that they fall in the same cache
	 * line as the app data.
	 */
	m_vaddr = (uint8_t *)m_vaddr + COMPLETION_CODE_SIZE;
	m_dma += COMPLETION_CODE_SIZE;
	size = (uint8_t *)RTE_PTR_ALIGN((uint8_t *)m_vaddr, 16) -
	c_vaddr = (uint8_t *)m_vaddr + size;
	c_dma = m_dma + size;
	size += sizeof(cpt_res_s_t);

	m_vaddr = (uint8_t *)m_vaddr + size;

	/* Start the cpt request info struct at an 8-byte boundary */
	size = (uint8_t *)RTE_PTR_ALIGN(m_vaddr, 8) -
	req = (struct cpt_request_info *)((uint8_t *)m_vaddr + size);

	size += sizeof(struct cpt_request_info);
	m_vaddr = (uint8_t *)m_vaddr + size;
	if (hash_type == GMAC_TYPE)

	if (unlikely(!(flags & VALID_IV_BUF))) {
		iv_offset = ENCR_IV_OFFSET(d_offs);

	if (unlikely(flags & VALID_AAD_BUF)) {
		/*
		 * When AAD is given, data above encr_offset is pass-through.
		 * Since AAD is given as a separate pointer and not as an
		 * offset, this is a special case: we need to fragment the
		 * input data into passthrough + encr_data and then insert
		 * the AAD in between.
		 */
		if (hash_type != GMAC_TYPE) {
			passthrough_len = encr_offset;
			auth_offset = passthrough_len + iv_len;
			encr_offset = passthrough_len + aad_len + iv_len;
			auth_data_len = aad_len + encr_data_len;
			passthrough_len = 16 + aad_len;
			auth_offset = passthrough_len + iv_len;
			auth_data_len = aad_len;
		encr_offset += iv_len;
		auth_offset += iv_len;
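	/*
	 * Worked example for the non-GMAC AAD case above (hypothetical
	 * numbers): with an original encr_offset of 16, aad_len of 20 and
	 * iv_len of 12, we get passthrough_len = 16, auth_offset = 28,
	 * encr_offset = 48 and auth_data_len = 20 + encr_data_len.
	 */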
	opcode.s.major = CPT_MAJOR_OP_FC;

	auth_dlen = auth_offset + auth_data_len;
	enc_dlen = encr_data_len + encr_offset;
	if (unlikely(encr_data_len & 0xf)) {
		if ((cipher_type == DES3_CBC) || (cipher_type == DES3_ECB))
			enc_dlen = ROUNDUP8(encr_data_len) + encr_offset;
		else if (likely((cipher_type == AES_CBC) ||
				(cipher_type == AES_ECB)))
			enc_dlen = ROUNDUP16(encr_data_len) + encr_offset;

	if (unlikely(hash_type == GMAC_TYPE)) {
		encr_offset = auth_dlen;

	if (unlikely(auth_dlen > enc_dlen)) {
		inputlen = auth_dlen;
		outputlen = auth_dlen + mac_len;
		outputlen = enc_dlen + mac_len;

	vq_cmd_w0.s.param1 = rte_cpu_to_be_16(encr_data_len);
	vq_cmd_w0.s.param2 = rte_cpu_to_be_16(auth_data_len);

	/*
	 * On 83XX we have the limitation that the IV & offset control word
	 * cannot be part of the instruction and must be placed in the data
	 * buffer, so we do Direct mode processing only when headroom is
	 * available.
	 */
	if (likely((flags & SINGLE_BUF_INPLACE) &&
		   (flags & SINGLE_BUF_HEADTAILROOM))) {
		void *dm_vaddr = fc_params->bufs[0].vaddr;
		uint64_t dm_dma_addr = fc_params->bufs[0].dma_addr;
		/*
		 * These flags indicate that 24 bytes of headroom and 8
		 * bytes of tailroom are available, so we can use DIRECT
		 * MODE within that limitation.
		 */
		offset_vaddr = (uint8_t *)dm_vaddr - OFF_CTRL_LEN - iv_len;
		offset_dma = dm_dma_addr - OFF_CTRL_LEN - iv_len;

		req->ist.ei1 = offset_dma;
		/* RPTR should just exclude the offset control word */
		req->ist.ei2 = dm_dma_addr - iv_len;
		req->alternate_caddr = (uint64_t *)((uint8_t *)dm_vaddr
						    + outputlen - iv_len);

		vq_cmd_w0.s.dlen = rte_cpu_to_be_16(inputlen + OFF_CTRL_LEN);

		vq_cmd_w0.s.opcode = rte_cpu_to_be_16(opcode.flags);

		if (likely(iv_len)) {
			uint64_t *dest = (uint64_t *)((uint8_t *)offset_vaddr
			uint64_t *src = fc_params->iv_buf;
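		/*
		 * Offset control word layout: [31:16] = encr_offset,
		 * [15:8] = iv_offset, [7:0] = auth_offset, stored
		 * big-endian (see cpt_pack_offset_ctrl_sketch() above).
		 */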
		*(uint64_t *)offset_vaddr =
			rte_cpu_to_be_64(((uint64_t)encr_offset << 16) |
					 ((uint64_t)iv_offset << 8) |
					 ((uint64_t)auth_offset));
		uint32_t i, g_size_bytes, s_size_bytes;
		uint64_t dptr_dma, rptr_dma;
		sg_comp_t *gather_comp;
		sg_comp_t *scatter_comp;

		/* This falls under strict SG mode */
		offset_vaddr = m_vaddr;
		size = OFF_CTRL_LEN + iv_len;

		m_vaddr = (uint8_t *)m_vaddr + size;

		opcode.s.major |= CPT_DMA_MODE;

		vq_cmd_w0.s.opcode = rte_cpu_to_be_16(opcode.flags);

		if (likely(iv_len)) {
			uint64_t *dest = (uint64_t *)((uint8_t *)offset_vaddr
			uint64_t *src = fc_params->iv_buf;

		*(uint64_t *)offset_vaddr =
			rte_cpu_to_be_64(((uint64_t)encr_offset << 16) |
					 ((uint64_t)iv_offset << 8) |
					 ((uint64_t)auth_offset));

		/* DPTR has SG list */

		((uint16_t *)in_buffer)[0] = 0;
		((uint16_t *)in_buffer)[1] = 0;
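		/*
		 * 8-byte SG list header: 16-bit words [0] and [1] are
		 * reserved and zeroed; words [2] and [3] receive the
		 * big-endian gather and scatter component counts below.
		 */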
		/* TODO: Add error check if space will be sufficient */
		gather_comp = (sg_comp_t *)((uint8_t *)m_vaddr + 8);

		/* Offset control word that includes iv */
		i = fill_sg_comp(gather_comp, i, offset_dma,
				 OFF_CTRL_LEN + iv_len);

		size = inputlen - iv_len;
			uint32_t aad_offset = aad_len ? passthrough_len : 0;

			if (unlikely(flags & SINGLE_BUF_INPLACE)) {
				i = fill_sg_comp_from_buf_min(gather_comp, i,
				i = fill_sg_comp_from_iov(gather_comp, i,
							  aad_buf, aad_offset);

			if (unlikely(size)) {
				CPT_LOG_DP_ERR("Insufficient buffer space,"
					       " size %d needed", size);
				return ERR_BAD_INPUT_ARG;

		((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
		g_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);

		/*
		 * Output Scatter list
		 */
			(sg_comp_t *)((uint8_t *)gather_comp + g_size_bytes);

		if (likely(iv_len)) {
			i = fill_sg_comp(scatter_comp, i,
					 offset_dma + OFF_CTRL_LEN,

		/* Output data or output data + digest */
		if (unlikely(flags & VALID_MAC_BUF)) {
			size = outputlen - iv_len - mac_len;
				uint32_t aad_offset =
					aad_len ? passthrough_len : 0;

				if (unlikely(flags & SINGLE_BUF_INPLACE)) {
					i = fill_sg_comp_from_buf_min(
					i = fill_sg_comp_from_iov(scatter_comp,
					return ERR_BAD_INPUT_ARG;
			i = fill_sg_comp_from_buf(scatter_comp, i,
						  &fc_params->mac_buf);
			/* Output including mac */
			size = outputlen - iv_len;
				uint32_t aad_offset =
					aad_len ? passthrough_len : 0;

				if (unlikely(flags & SINGLE_BUF_INPLACE)) {
					i = fill_sg_comp_from_buf_min(
					i = fill_sg_comp_from_iov(scatter_comp,
			if (unlikely(size)) {
				CPT_LOG_DP_ERR("Insufficient buffer"
					       " space, size %d needed",
				return ERR_BAD_INPUT_ARG;

		((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
		s_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
		size = g_size_bytes + s_size_bytes + SG_LIST_HDR_SIZE;

		/* This is the DPTR len in case of SG mode */
		vq_cmd_w0.s.dlen = rte_cpu_to_be_16(size);

		m_vaddr = (uint8_t *)m_vaddr + size;

		/* cpt alternate completion address saved earlier */
		req->alternate_caddr = (uint64_t *)((uint8_t *)c_vaddr - 8);
		*req->alternate_caddr = ~((uint64_t)COMPLETION_CODE_INIT);
		rptr_dma = c_dma - 8;

		req->ist.ei1 = dptr_dma;
		req->ist.ei2 = rptr_dma;

	/* First 16-bit swap then 64-bit swap */
	/* TODO: HACK: Reverse the vq_cmd and cpt_req bit field definitions
	 * to eliminate all the swapping.
	 */
	vq_cmd_w0.u64 = rte_cpu_to_be_64(vq_cmd_w0.u64);

	ctx_dma = fc_params->ctx_buf.dma_addr +
		offsetof(struct cpt_ctx, fctx);

	vq_cmd_w3.s.cptr = ctx_dma;

	/* 16-byte aligned cpt res address */
	req->completion_addr = (uint64_t *)((uint8_t *)c_vaddr);
	*req->completion_addr = COMPLETION_CODE_INIT;
	req->comp_baddr = c_dma;

	/* Fill the microcode part of the instruction */
	req->ist.ei0 = vq_cmd_w0.u64;
	req->ist.ei3 = vq_cmd_w3.u64;
static __rte_always_inline int
cpt_dec_hmac_prep(uint32_t flags,
		  fc_params_t *fc_params,
	uint32_t iv_offset = 0, size;
	int32_t inputlen, outputlen, enc_dlen, auth_dlen;
	struct cpt_ctx *cpt_ctx;
	int32_t hash_type, mac_len, m_size;
	struct cpt_request_info *req;
	buf_ptr_t *meta_p, *aad_buf = NULL;
	uint32_t encr_offset, auth_offset;
	uint32_t encr_data_len, auth_data_len, aad_len = 0;
	uint32_t passthrough_len = 0;
	void *m_vaddr, *offset_vaddr;
	uint64_t m_dma, offset_dma, ctx_dma;
	opcode_info_t opcode;
	vq_cmd_word0_t vq_cmd_w0;
	vq_cmd_word3_t vq_cmd_w3;

	meta_p = &fc_params->meta_buf;
	m_vaddr = meta_p->vaddr;
	m_dma = meta_p->dma_addr;
	m_size = meta_p->size;

	encr_offset = ENCR_OFFSET(d_offs);
	auth_offset = AUTH_OFFSET(d_offs);
	encr_data_len = ENCR_DLEN(d_lens);
	auth_data_len = AUTH_DLEN(d_lens);

	if (unlikely(flags & VALID_AAD_BUF)) {
		/*
		 * We don't support both AAD
		 * and auth data separately.
		 */
		aad_len = fc_params->aad_buf.size;
		aad_buf = &fc_params->aad_buf;

	cpt_ctx = fc_params->ctx_buf.vaddr;
	hash_type = cpt_ctx->hash_type;
	mac_len = cpt_ctx->mac_len;

	if (hash_type == GMAC_TYPE)

	if (unlikely(!(flags & VALID_IV_BUF))) {
		iv_offset = ENCR_IV_OFFSET(d_offs);

	if (unlikely(flags & VALID_AAD_BUF)) {
		/*
		 * When AAD is given, data above encr_offset is pass-through.
		 * Since AAD is given as a separate pointer and not as an
		 * offset, this is a special case: we need to fragment the
		 * input data into passthrough + encr_data and then insert
		 * the AAD in between.
		 */
		if (hash_type != GMAC_TYPE) {
			passthrough_len = encr_offset;
			auth_offset = passthrough_len + iv_len;
			encr_offset = passthrough_len + aad_len + iv_len;
			auth_data_len = aad_len + encr_data_len;
			passthrough_len = 16 + aad_len;
			auth_offset = passthrough_len + iv_len;
			auth_data_len = aad_len;
		encr_offset += iv_len;
		auth_offset += iv_len;

	/*
	 * Save initial space following the app data for the completion code
	 * & alternate completion code, so that they fall in the same cache
	 * line as the app data.
	 */
	m_vaddr = (uint8_t *)m_vaddr + COMPLETION_CODE_SIZE;
	m_dma += COMPLETION_CODE_SIZE;
	size = (uint8_t *)RTE_PTR_ALIGN((uint8_t *)m_vaddr, 16) -
	c_vaddr = (uint8_t *)m_vaddr + size;
	c_dma = m_dma + size;
	size += sizeof(cpt_res_s_t);

	m_vaddr = (uint8_t *)m_vaddr + size;

	/* Start the cpt request info structure at an 8-byte alignment */
	size = (uint8_t *)RTE_PTR_ALIGN(m_vaddr, 8) -
	req = (struct cpt_request_info *)((uint8_t *)m_vaddr + size);

	size += sizeof(struct cpt_request_info);
	m_vaddr = (uint8_t *)m_vaddr + size;
	opcode.s.major = CPT_MAJOR_OP_FC;

	enc_dlen = encr_offset + encr_data_len;
	auth_dlen = auth_offset + auth_data_len;

	if (auth_dlen > enc_dlen) {
		inputlen = auth_dlen + mac_len;
		outputlen = auth_dlen;
		inputlen = enc_dlen + mac_len;
		outputlen = enc_dlen;

	if (hash_type == GMAC_TYPE)
		encr_offset = inputlen;

	vq_cmd_w0.s.param1 = rte_cpu_to_be_16(encr_data_len);
	vq_cmd_w0.s.param2 = rte_cpu_to_be_16(auth_data_len);

	/*
	 * On 83XX we have the limitation that the IV & offset control word
	 * cannot be part of the instruction and must be placed in the data
	 * buffer, so we do Direct mode processing only when headroom is
	 * available.
	 */
	if (likely((flags & SINGLE_BUF_INPLACE) &&
		   (flags & SINGLE_BUF_HEADTAILROOM))) {
		void *dm_vaddr = fc_params->bufs[0].vaddr;
		uint64_t dm_dma_addr = fc_params->bufs[0].dma_addr;
		/*
		 * These flags indicate that 24 bytes of headroom and 8
		 * bytes of tailroom are available, so we can use DIRECT
		 * MODE within that limitation.
		 */
		offset_vaddr = (uint8_t *)dm_vaddr - OFF_CTRL_LEN - iv_len;
		offset_dma = dm_dma_addr - OFF_CTRL_LEN - iv_len;
		req->ist.ei1 = offset_dma;

		/* RPTR should just exclude the offset control word */
		req->ist.ei2 = dm_dma_addr - iv_len;

		req->alternate_caddr = (uint64_t *)((uint8_t *)dm_vaddr +
						    outputlen - iv_len);
		/* Since this is decryption,
		 * don't touch the content of the
		 * alternate ccode space as it contains the mac.
		 */

		vq_cmd_w0.s.dlen = rte_cpu_to_be_16(inputlen + OFF_CTRL_LEN);

		vq_cmd_w0.s.opcode = rte_cpu_to_be_16(opcode.flags);

		if (likely(iv_len)) {
			uint64_t *dest = (uint64_t *)((uint8_t *)offset_vaddr +
			uint64_t *src = fc_params->iv_buf;

		*(uint64_t *)offset_vaddr =
			rte_cpu_to_be_64(((uint64_t)encr_offset << 16) |
					 ((uint64_t)iv_offset << 8) |
					 ((uint64_t)auth_offset));
		uint64_t dptr_dma, rptr_dma;
		uint32_t g_size_bytes, s_size_bytes;
		sg_comp_t *gather_comp;
		sg_comp_t *scatter_comp;

		/* This falls under strict SG mode */
		offset_vaddr = m_vaddr;
		size = OFF_CTRL_LEN + iv_len;

		m_vaddr = (uint8_t *)m_vaddr + size;

		opcode.s.major |= CPT_DMA_MODE;

		vq_cmd_w0.s.opcode = rte_cpu_to_be_16(opcode.flags);

		if (likely(iv_len)) {
			uint64_t *dest = (uint64_t *)((uint8_t *)offset_vaddr +
			uint64_t *src = fc_params->iv_buf;

		*(uint64_t *)offset_vaddr =
			rte_cpu_to_be_64(((uint64_t)encr_offset << 16) |
					 ((uint64_t)iv_offset << 8) |
					 ((uint64_t)auth_offset));

		/* DPTR has SG list */
		in_buffer = m_vaddr;

		((uint16_t *)in_buffer)[0] = 0;
		((uint16_t *)in_buffer)[1] = 0;

		/* TODO: Add error check if space will be sufficient */
		gather_comp = (sg_comp_t *)((uint8_t *)m_vaddr + 8);

		/* Offset control word that includes iv */
		i = fill_sg_comp(gather_comp, i, offset_dma,
				 OFF_CTRL_LEN + iv_len);

		/* Add input data */
		if (flags & VALID_MAC_BUF) {
			size = inputlen - iv_len - mac_len;

			/* input data only */
			if (unlikely(flags & SINGLE_BUF_INPLACE)) {
				i = fill_sg_comp_from_buf_min(
				uint32_t aad_offset = aad_len ?
					passthrough_len : 0;

				i = fill_sg_comp_from_iov(gather_comp,
				return ERR_BAD_INPUT_ARG;

			i = fill_sg_comp_from_buf(gather_comp, i,
						  &fc_params->mac_buf);

			/* input data + mac */
			size = inputlen - iv_len;

			if (unlikely(flags & SINGLE_BUF_INPLACE)) {
				i = fill_sg_comp_from_buf_min(
				uint32_t aad_offset = aad_len ?
					passthrough_len : 0;

				if (!fc_params->src_iov)
					return ERR_BAD_INPUT_ARG;

				i = fill_sg_comp_from_iov(
				return ERR_BAD_INPUT_ARG;

		((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
		g_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
		/*
		 * Output Scatter List
		 */
			(sg_comp_t *)((uint8_t *)gather_comp + g_size_bytes);

			i = fill_sg_comp(scatter_comp, i,
					 offset_dma + OFF_CTRL_LEN,

		/* Add output data */
		size = outputlen - iv_len;

			if (unlikely(flags & SINGLE_BUF_INPLACE)) {
				/* handle single buffer here */
				i = fill_sg_comp_from_buf_min(scatter_comp, i,
				uint32_t aad_offset = aad_len ?
					passthrough_len : 0;

				if (!fc_params->dst_iov)
					return ERR_BAD_INPUT_ARG;

				i = fill_sg_comp_from_iov(scatter_comp, i,
							  fc_params->dst_iov, 0,
				return ERR_BAD_INPUT_ARG;

		((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
		s_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);

		size = g_size_bytes + s_size_bytes + SG_LIST_HDR_SIZE;

		/* This is the DPTR len in case of SG mode */
		vq_cmd_w0.s.dlen = rte_cpu_to_be_16(size);

		m_vaddr = (uint8_t *)m_vaddr + size;

		/* cpt alternate completion address saved earlier */
		req->alternate_caddr = (uint64_t *)((uint8_t *)c_vaddr - 8);
		*req->alternate_caddr = ~((uint64_t)COMPLETION_CODE_INIT);
		rptr_dma = c_dma - 8;
		size += COMPLETION_CODE_SIZE;

		req->ist.ei1 = dptr_dma;
		req->ist.ei2 = rptr_dma;

	/* First 16-bit swap then 64-bit swap */
	/* TODO: HACK: Reverse the vq_cmd and cpt_req bit field definitions
	 * to eliminate all the swapping.
	 */
	vq_cmd_w0.u64 = rte_cpu_to_be_64(vq_cmd_w0.u64);

	ctx_dma = fc_params->ctx_buf.dma_addr +
		offsetof(struct cpt_ctx, fctx);

	vq_cmd_w3.s.grp = 0;
	vq_cmd_w3.s.cptr = ctx_dma;

	/* 16-byte aligned cpt res address */
	req->completion_addr = (uint64_t *)((uint8_t *)c_vaddr);
	*req->completion_addr = COMPLETION_CODE_INIT;
	req->comp_baddr = c_dma;

	/* Fill the microcode part of the instruction */
	req->ist.ei0 = vq_cmd_w0.u64;
	req->ist.ei3 = vq_cmd_w3.u64;
static __rte_always_inline int
cpt_zuc_snow3g_enc_prep(uint32_t req_flags,
			fc_params_t *params,
	int32_t inputlen, outputlen;
	struct cpt_ctx *cpt_ctx;
	uint32_t mac_len = 0;
	struct cpt_request_info *req;
	uint32_t encr_offset = 0, auth_offset = 0;
	uint32_t encr_data_len = 0, auth_data_len = 0;
	int flags, iv_len = 16, m_size;
	void *m_vaddr, *c_vaddr;
	uint64_t m_dma, c_dma, offset_ctrl;
	uint64_t *offset_vaddr, offset_dma;
	uint32_t *iv_s, iv[4];
	vq_cmd_word0_t vq_cmd_w0;
	vq_cmd_word3_t vq_cmd_w3;
	opcode_info_t opcode;

	buf_p = &params->meta_buf;
	m_vaddr = buf_p->vaddr;
	m_dma = buf_p->dma_addr;
	m_size = buf_p->size;

	cpt_ctx = params->ctx_buf.vaddr;
	flags = cpt_ctx->zsk_flags;
	mac_len = cpt_ctx->mac_len;
	snow3g = cpt_ctx->snow3g;

	/*
	 * Save initial space following the app data for the completion code
	 * & alternate completion code, so that they fall in the same cache
	 * line as the app data.
	 */
	m_vaddr = (uint8_t *)m_vaddr + COMPLETION_CODE_SIZE;
	m_dma += COMPLETION_CODE_SIZE;
	size = (uint8_t *)RTE_PTR_ALIGN((uint8_t *)m_vaddr, 16) -
	c_vaddr = (uint8_t *)m_vaddr + size;
	c_dma = m_dma + size;
	size += sizeof(cpt_res_s_t);

	m_vaddr = (uint8_t *)m_vaddr + size;

	/* Reserve memory for cpt request info */
	size = sizeof(struct cpt_request_info);
	m_vaddr = (uint8_t *)m_vaddr + size;

	opcode.s.major = CPT_MAJOR_OP_ZUC_SNOW3G;

	/* indicates CPTR ctx, operation type, KEY & IV mode from DPTR */
	opcode.s.minor = ((1 << 6) | (snow3g << 5) | (0 << 4) |
			  (0 << 3) | (flags & 0x7));
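	/*
	 * One possible reading of the minor opcode bits above, mirroring
	 * the comment preceding the expression: bit 6 = ctx from CPTR,
	 * bit 5 = SNOW3G (1) vs ZUC (0), bits 4:3 = key/IV mode, and
	 * bits 2:0 = the zsk operation flags.
	 */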
	/*
	 * Microcode expects offsets in bytes
	 * TODO: Rounding off
	 */
	auth_data_len = AUTH_DLEN(d_lens);

	auth_offset = AUTH_OFFSET(d_offs);
	auth_offset = auth_offset / 8;

	/* consider iv len */
	auth_offset += iv_len;

	inputlen = auth_offset + (RTE_ALIGN(auth_data_len, 8) / 8);
	outputlen = mac_len;

	offset_ctrl = rte_cpu_to_be_64((uint64_t)auth_offset);

	/*
	 * Microcode expects offsets in bytes
	 * TODO: Rounding off
	 */
	encr_data_len = ENCR_DLEN(d_lens);

	encr_offset = ENCR_OFFSET(d_offs);
	encr_offset = encr_offset / 8;
	/* consider iv len */
	encr_offset += iv_len;

	inputlen = encr_offset + (RTE_ALIGN(encr_data_len, 8) / 8);
	outputlen = inputlen;

	/* iv offset is 0 */
	offset_ctrl = rte_cpu_to_be_64((uint64_t)encr_offset << 16);

	iv_s = (flags == 0x1) ? params->auth_iv_buf :

	/*
	 * DPDK provides the IV as IV3 IV2 IV1 IV0 in big-endian
	 * form; the MC needs it as IV0 IV1 IV2 IV3.
	 */
	for (j = 0; j < 4; j++)
		iv[j] = iv_s[3 - j];

	/* ZUC doesn't need a swap */
	for (j = 0; j < 4; j++)

	/*
	 * GP op header, lengths are expected in bits.
	 */
	vq_cmd_w0.s.param1 = rte_cpu_to_be_16(encr_data_len);
	vq_cmd_w0.s.param2 = rte_cpu_to_be_16(auth_data_len);

	/*
	 * On 83XX we have the limitation that the IV & offset control word
	 * cannot be part of the instruction and must be placed in the data
	 * buffer, so we do Direct mode processing only when headroom is
	 * available.
	 */
	if (likely((req_flags & SINGLE_BUF_INPLACE) &&
		   (req_flags & SINGLE_BUF_HEADTAILROOM))) {
		void *dm_vaddr = params->bufs[0].vaddr;
		uint64_t dm_dma_addr = params->bufs[0].dma_addr;
		/*
		 * These flags indicate that 24 bytes of headroom and 8
		 * bytes of tailroom are available, so we can use DIRECT
		 * MODE within that limitation.
		 */
		offset_vaddr = (uint64_t *)((uint8_t *)dm_vaddr -
					    OFF_CTRL_LEN - iv_len);
		offset_dma = dm_dma_addr - OFF_CTRL_LEN - iv_len;

		req->ist.ei1 = offset_dma;
		/* RPTR should just exclude the offset control word */
		req->ist.ei2 = dm_dma_addr - iv_len;
		req->alternate_caddr = (uint64_t *)((uint8_t *)dm_vaddr
						    + outputlen - iv_len);

		vq_cmd_w0.s.dlen = rte_cpu_to_be_16(inputlen + OFF_CTRL_LEN);

		vq_cmd_w0.s.opcode = rte_cpu_to_be_16(opcode.flags);

		if (likely(iv_len)) {
			uint32_t *iv_d = (uint32_t *)((uint8_t *)offset_vaddr
			memcpy(iv_d, iv, 16);

		*offset_vaddr = offset_ctrl;
		uint32_t i, g_size_bytes, s_size_bytes;
		uint64_t dptr_dma, rptr_dma;
		sg_comp_t *gather_comp;
		sg_comp_t *scatter_comp;

		/* save space for iv */
		offset_vaddr = m_vaddr;

		m_vaddr = (uint8_t *)m_vaddr + OFF_CTRL_LEN + iv_len;
		m_dma += OFF_CTRL_LEN + iv_len;
		m_size -= OFF_CTRL_LEN + iv_len;

		opcode.s.major |= CPT_DMA_MODE;

		vq_cmd_w0.s.opcode = rte_cpu_to_be_16(opcode.flags);

		/* DPTR has SG list */
		in_buffer = m_vaddr;

		((uint16_t *)in_buffer)[0] = 0;
		((uint16_t *)in_buffer)[1] = 0;

		/* TODO: Add error check if space will be sufficient */
		gather_comp = (sg_comp_t *)((uint8_t *)m_vaddr + 8);

		/* Offset control word followed by iv */
		i = fill_sg_comp(gather_comp, i, offset_dma,
				 OFF_CTRL_LEN + iv_len);

		/* iv offset is 0 */
		*offset_vaddr = offset_ctrl;

		iv_d = (uint32_t *)((uint8_t *)offset_vaddr + OFF_CTRL_LEN);
		memcpy(iv_d, iv, 16);

		/* Add input data */
		size = inputlen - iv_len;
			i = fill_sg_comp_from_iov(gather_comp, i,
				return ERR_BAD_INPUT_ARG;

		((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
		g_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);

		/*
		 * Output Scatter List
		 */
			(sg_comp_t *)((uint8_t *)gather_comp + g_size_bytes);

		/* IV in SLIST only for EEA3 & UEA2 */
			i = fill_sg_comp(scatter_comp, i,
					 offset_dma + OFF_CTRL_LEN, iv_len);

		/* Add output data */
		if (req_flags & VALID_MAC_BUF) {
			size = outputlen - iv_len - mac_len;
				i = fill_sg_comp_from_iov(scatter_comp, i,
					return ERR_BAD_INPUT_ARG;

			i = fill_sg_comp_from_buf(scatter_comp, i,

			/* Output including mac */
			size = outputlen - iv_len;
				i = fill_sg_comp_from_iov(scatter_comp, i,
					return ERR_BAD_INPUT_ARG;

		((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
		s_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);

		size = g_size_bytes + s_size_bytes + SG_LIST_HDR_SIZE;

		/* This is the DPTR len in case of SG mode */
		vq_cmd_w0.s.dlen = rte_cpu_to_be_16(size);

		m_vaddr = (uint8_t *)m_vaddr + size;

		/* cpt alternate completion address saved earlier */
		req->alternate_caddr = (uint64_t *)((uint8_t *)c_vaddr - 8);
		*req->alternate_caddr = ~((uint64_t)COMPLETION_CODE_INIT);
		rptr_dma = c_dma - 8;

		req->ist.ei1 = dptr_dma;
		req->ist.ei2 = rptr_dma;

	/* First 16-bit swap then 64-bit swap */
	/* TODO: HACK: Reverse the vq_cmd and cpt_req bit field definitions
	 * to eliminate all the swapping.
	 */
	vq_cmd_w0.u64 = rte_cpu_to_be_64(vq_cmd_w0.u64);

	vq_cmd_w3.s.grp = 0;
	vq_cmd_w3.s.cptr = params->ctx_buf.dma_addr +
		offsetof(struct cpt_ctx, zs_ctx);

	/* 16-byte aligned cpt res address */
	req->completion_addr = (uint64_t *)((uint8_t *)c_vaddr);
	*req->completion_addr = COMPLETION_CODE_INIT;
	req->comp_baddr = c_dma;

	/* Fill the microcode part of the instruction */
	req->ist.ei0 = vq_cmd_w0.u64;
	req->ist.ei3 = vq_cmd_w3.u64;
static __rte_always_inline int
cpt_zuc_snow3g_dec_prep(uint32_t req_flags,
			fc_params_t *params,
	int32_t inputlen = 0, outputlen;
	struct cpt_ctx *cpt_ctx;
	uint8_t snow3g, iv_len = 16;
	struct cpt_request_info *req;
	uint32_t encr_offset;
	uint32_t encr_data_len;
	void *m_vaddr, *c_vaddr;
	uint64_t m_dma, c_dma;
	uint64_t *offset_vaddr, offset_dma;
	uint32_t *iv_s, iv[4], j;
	vq_cmd_word0_t vq_cmd_w0;
	vq_cmd_word3_t vq_cmd_w3;
	opcode_info_t opcode;

	buf_p = &params->meta_buf;
	m_vaddr = buf_p->vaddr;
	m_dma = buf_p->dma_addr;
	m_size = buf_p->size;

	/*
	 * Microcode expects offsets in bytes
	 * TODO: Rounding off
	 */
	encr_offset = ENCR_OFFSET(d_offs) / 8;
	encr_data_len = ENCR_DLEN(d_lens);

	cpt_ctx = params->ctx_buf.vaddr;
	flags = cpt_ctx->zsk_flags;
	snow3g = cpt_ctx->snow3g;

	/*
	 * Save initial space following the app data for the completion code
	 * & alternate completion code, so that they fall in the same cache
	 * line as the app data.
	 */
	m_vaddr = (uint8_t *)m_vaddr + COMPLETION_CODE_SIZE;
	m_dma += COMPLETION_CODE_SIZE;
	size = (uint8_t *)RTE_PTR_ALIGN((uint8_t *)m_vaddr, 16) -
	c_vaddr = (uint8_t *)m_vaddr + size;
	c_dma = m_dma + size;
	size += sizeof(cpt_res_s_t);

	m_vaddr = (uint8_t *)m_vaddr + size;

	/* Reserve memory for cpt request info */
	size = sizeof(struct cpt_request_info);
	m_vaddr = (uint8_t *)m_vaddr + size;
1630 iv_s = params->iv_buf;
1633 * DPDK seems to provide it in form of IV3 IV2 IV1 IV0
1634 * and BigEndian, MC needs it as IV0 IV1 IV2 IV3
1637 for (j = 0; j < 4; j++)
1638 iv[j] = iv_s[3 - j];
1640 /* ZUC doesn't need a swap */
1641 for (j = 0; j < 4; j++)
1646 * GP op header, lengths are expected in bits.
1649 vq_cmd_w0.s.param1 = rte_cpu_to_be_16(encr_data_len);
1652 * In 83XX since we have a limitation of
1653 * IV & Offset control word not part of instruction
1654 * and need to be part of Data Buffer, we check if
1655 * head room is there and then only do the Direct mode processing
1657 if (likely((req_flags & SINGLE_BUF_INPLACE) &&
1658 (req_flags & SINGLE_BUF_HEADTAILROOM))) {
1659 void *dm_vaddr = params->bufs[0].vaddr;
1660 uint64_t dm_dma_addr = params->bufs[0].dma_addr;
1662 * This flag indicates that there is 24 bytes head room and
1663 * 8 bytes tail room available, so that we get to do
1664 * DIRECT MODE with limitation
1667 offset_vaddr = (uint64_t *)((uint8_t *)dm_vaddr -
1668 OFF_CTRL_LEN - iv_len);
1669 offset_dma = dm_dma_addr - OFF_CTRL_LEN - iv_len;
1672 req->ist.ei1 = offset_dma;
1673 /* RPTR should just exclude offset control word */
1674 req->ist.ei2 = dm_dma_addr - iv_len;
1675 req->alternate_caddr = (uint64_t *)((uint8_t *)dm_vaddr
1676 + outputlen - iv_len);
1678 vq_cmd_w0.s.dlen = rte_cpu_to_be_16(inputlen + OFF_CTRL_LEN);
1680 vq_cmd_w0.s.opcode = rte_cpu_to_be_16(opcode.flags);
1682 if (likely(iv_len)) {
1683 uint32_t *iv_d = (uint32_t *)((uint8_t *)offset_vaddr
1685 memcpy(iv_d, iv, 16);
1688 /* iv offset is 0 */
1689 *offset_vaddr = rte_cpu_to_be_64((uint64_t)encr_offset << 16);
1691 uint32_t i, g_size_bytes, s_size_bytes;
1692 uint64_t dptr_dma, rptr_dma;
1693 sg_comp_t *gather_comp;
1694 sg_comp_t *scatter_comp;
1698 /* save space for offset and iv... */
1699 offset_vaddr = m_vaddr;
1702 m_vaddr = (uint8_t *)m_vaddr + OFF_CTRL_LEN + iv_len;
1703 m_dma += OFF_CTRL_LEN + iv_len;
1704 m_size -= OFF_CTRL_LEN + iv_len;
1706 opcode.s.major |= CPT_DMA_MODE;
1708 vq_cmd_w0.s.opcode = rte_cpu_to_be_16(opcode.flags);
1710 /* DPTR has SG list */
1711 in_buffer = m_vaddr;
1714 ((uint16_t *)in_buffer)[0] = 0;
1715 ((uint16_t *)in_buffer)[1] = 0;
1717 /* TODO Add error check if space will be sufficient */
1718 gather_comp = (sg_comp_t *)((uint8_t *)m_vaddr + 8);
1725 /* Offset control word */
1727 /* iv offset is 0 */
1728 *offset_vaddr = rte_cpu_to_be_64((uint64_t)encr_offset << 16);
1730 i = fill_sg_comp(gather_comp, i, offset_dma,
1731 OFF_CTRL_LEN + iv_len);
1733 iv_d = (uint32_t *)((uint8_t *)offset_vaddr + OFF_CTRL_LEN);
1734 memcpy(iv_d, iv, 16);
1736 /* Add input data */
1737 size = inputlen - iv_len;
1739 i = fill_sg_comp_from_iov(gather_comp, i,
1743 return ERR_BAD_INPUT_ARG;
1745 ((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
1746 g_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
1749 * Output Scatter List
1754 (sg_comp_t *)((uint8_t *)gather_comp + g_size_bytes);
1757 i = fill_sg_comp(scatter_comp, i,
1758 offset_dma + OFF_CTRL_LEN,
1761 /* Add output data */
1762 size = outputlen - iv_len;
1764 i = fill_sg_comp_from_iov(scatter_comp, i,
1769 return ERR_BAD_INPUT_ARG;
1771 ((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
1772 s_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
1774 size = g_size_bytes + s_size_bytes + SG_LIST_HDR_SIZE;
1776 /* This is DPTR len incase of SG mode */
1777 vq_cmd_w0.s.dlen = rte_cpu_to_be_16(size);
1779 m_vaddr = (uint8_t *)m_vaddr + size;
1783 /* cpt alternate completion address saved earlier */
1784 req->alternate_caddr = (uint64_t *)((uint8_t *)c_vaddr - 8);
1785 *req->alternate_caddr = ~((uint64_t)COMPLETION_CODE_INIT);
1786 rptr_dma = c_dma - 8;
1788 req->ist.ei1 = dptr_dma;
1789 req->ist.ei2 = rptr_dma;
1792 /* First 16-bit swap then 64-bit swap */
1793 /* TODO: HACK: Reverse the vq_cmd and cpt_req bit field definitions
1794 * to eliminate all the swapping
1796 vq_cmd_w0.u64 = rte_cpu_to_be_64(vq_cmd_w0.u64);
1800 vq_cmd_w3.s.grp = 0;
1801 vq_cmd_w3.s.cptr = params->ctx_buf.dma_addr +
1802 offsetof(struct cpt_ctx, zs_ctx);
1804 /* 16 byte aligned cpt res address */
1805 req->completion_addr = (uint64_t *)((uint8_t *)c_vaddr);
1806 *req->completion_addr = COMPLETION_CODE_INIT;
1807 req->comp_baddr = c_dma;
1809 /* Fill microcode part of instruction */
1810 req->ist.ei0 = vq_cmd_w0.u64;
1811 req->ist.ei3 = vq_cmd_w3.u64;
static __rte_always_inline int
cpt_kasumi_enc_prep(uint32_t req_flags,
		    fc_params_t *params,
	int32_t inputlen = 0, outputlen = 0;
	struct cpt_ctx *cpt_ctx;
	uint32_t mac_len = 0;
	struct cpt_request_info *req;
	uint32_t encr_offset, auth_offset;
	uint32_t encr_data_len, auth_data_len;
	uint8_t *iv_s, *iv_d, iv_len = 8;
	void *m_vaddr, *c_vaddr;
	uint64_t m_dma, c_dma;
	uint64_t *offset_vaddr, offset_dma;
	vq_cmd_word0_t vq_cmd_w0;
	vq_cmd_word3_t vq_cmd_w3;
	opcode_info_t opcode;
	uint32_t g_size_bytes, s_size_bytes;
	uint64_t dptr_dma, rptr_dma;
	sg_comp_t *gather_comp;
	sg_comp_t *scatter_comp;

	buf_p = &params->meta_buf;
	m_vaddr = buf_p->vaddr;
	m_dma = buf_p->dma_addr;
	m_size = buf_p->size;

	encr_offset = ENCR_OFFSET(d_offs) / 8;
	auth_offset = AUTH_OFFSET(d_offs) / 8;
	encr_data_len = ENCR_DLEN(d_lens);
	auth_data_len = AUTH_DLEN(d_lens);

	cpt_ctx = params->ctx_buf.vaddr;
	flags = cpt_ctx->zsk_flags;
	mac_len = cpt_ctx->mac_len;

		iv_s = params->iv_buf;
		iv_s = params->auth_iv_buf;

	dir = iv_s[8] & 0x1;
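	/*
	 * For KASUMI the direction bit rides inside the IV material:
	 * bit 0 of the ninth IV byte, as extracted above.
	 */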
	/*
	 * Save initial space following the app data for the completion code
	 * & alternate completion code, so that they fall in the same cache
	 * line as the app data.
	 */
	m_vaddr = (uint8_t *)m_vaddr + COMPLETION_CODE_SIZE;
	m_dma += COMPLETION_CODE_SIZE;
	size = (uint8_t *)RTE_PTR_ALIGN((uint8_t *)m_vaddr, 16) -
	c_vaddr = (uint8_t *)m_vaddr + size;
	c_dma = m_dma + size;
	size += sizeof(cpt_res_s_t);

	m_vaddr = (uint8_t *)m_vaddr + size;

	/* Reserve memory for cpt request info */
	size = sizeof(struct cpt_request_info);
	m_vaddr = (uint8_t *)m_vaddr + size;

	opcode.s.major = CPT_MAJOR_OP_KASUMI | CPT_DMA_MODE;

	/* indicates ECB/CBC, direction, ctx from cptr, iv from dptr */
	opcode.s.minor = ((1 << 6) | (cpt_ctx->k_ecb << 5) |
			  (dir << 4) | (0 << 3) | (flags & 0x7));

	/*
	 * GP op header, lengths are expected in bits.
	 */
	vq_cmd_w0.s.param1 = rte_cpu_to_be_16(encr_data_len);
	vq_cmd_w0.s.param2 = rte_cpu_to_be_16(auth_data_len);
	vq_cmd_w0.s.opcode = rte_cpu_to_be_16(opcode.flags);

	/* consider iv len */
		encr_offset += iv_len;
		auth_offset += iv_len;

	/* save space for offset ctrl and iv */
	offset_vaddr = m_vaddr;

	m_vaddr = (uint8_t *)m_vaddr + OFF_CTRL_LEN + iv_len;
	m_dma += OFF_CTRL_LEN + iv_len;
	m_size -= OFF_CTRL_LEN + iv_len;

	/* DPTR has SG list */
	in_buffer = m_vaddr;

	((uint16_t *)in_buffer)[0] = 0;
	((uint16_t *)in_buffer)[1] = 0;

	/* TODO: Add error check if space will be sufficient */
	gather_comp = (sg_comp_t *)((uint8_t *)m_vaddr + 8);

	/* Offset control word followed by iv */
		inputlen = encr_offset + (RTE_ALIGN(encr_data_len, 8) / 8);
		outputlen = inputlen;
		/* iv offset is 0 */
		*offset_vaddr = rte_cpu_to_be_64((uint64_t)encr_offset << 16);
		inputlen = auth_offset + (RTE_ALIGN(auth_data_len, 8) / 8);
		outputlen = mac_len;
		/* iv offset is 0 */
		*offset_vaddr = rte_cpu_to_be_64((uint64_t)auth_offset);

	i = fill_sg_comp(gather_comp, i, offset_dma, OFF_CTRL_LEN + iv_len);

	iv_d = (uint8_t *)offset_vaddr + OFF_CTRL_LEN;
	memcpy(iv_d, iv_s, iv_len);

	size = inputlen - iv_len;
		i = fill_sg_comp_from_iov(gather_comp, i,
			return ERR_BAD_INPUT_ARG;

	((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
	g_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);

	/*
	 * Output Scatter List
	 */
	scatter_comp = (sg_comp_t *)((uint8_t *)gather_comp + g_size_bytes);

	/* IV in SLIST only for F8 */
		i = fill_sg_comp(scatter_comp, i,
				 offset_dma + OFF_CTRL_LEN,

	/* Add output data */
	if (req_flags & VALID_MAC_BUF) {
		size = outputlen - iv_len - mac_len;
			i = fill_sg_comp_from_iov(scatter_comp, i,
				return ERR_BAD_INPUT_ARG;

		i = fill_sg_comp_from_buf(scatter_comp, i,

		/* Output including mac */
		size = outputlen - iv_len;
			i = fill_sg_comp_from_iov(scatter_comp, i,
				return ERR_BAD_INPUT_ARG;

	((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
	s_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);

	size = g_size_bytes + s_size_bytes + SG_LIST_HDR_SIZE;

	/* This is the DPTR len in case of SG mode */
	vq_cmd_w0.s.dlen = rte_cpu_to_be_16(size);

	m_vaddr = (uint8_t *)m_vaddr + size;

	/* cpt alternate completion address saved earlier */
	req->alternate_caddr = (uint64_t *)((uint8_t *)c_vaddr - 8);
	*req->alternate_caddr = ~((uint64_t)COMPLETION_CODE_INIT);
	rptr_dma = c_dma - 8;

	req->ist.ei1 = dptr_dma;
	req->ist.ei2 = rptr_dma;

	/* First 16-bit swap then 64-bit swap */
	/* TODO: HACK: Reverse the vq_cmd and cpt_req bit field definitions
	 * to eliminate all the swapping.
	 */
	vq_cmd_w0.u64 = rte_cpu_to_be_64(vq_cmd_w0.u64);

	vq_cmd_w3.s.grp = 0;
	vq_cmd_w3.s.cptr = params->ctx_buf.dma_addr +
		offsetof(struct cpt_ctx, k_ctx);

	/* 16-byte aligned cpt res address */
	req->completion_addr = (uint64_t *)((uint8_t *)c_vaddr);
	*req->completion_addr = COMPLETION_CODE_INIT;
	req->comp_baddr = c_dma;

	/* Fill the microcode part of the instruction */
	req->ist.ei0 = vq_cmd_w0.u64;
	req->ist.ei3 = vq_cmd_w3.u64;
static __rte_always_inline int
cpt_kasumi_dec_prep(uint64_t d_offs,
		    fc_params_t *params,
	int32_t inputlen = 0, outputlen;
	struct cpt_ctx *cpt_ctx;
	uint8_t i = 0, iv_len = 8;
	struct cpt_request_info *req;
	uint32_t encr_offset;
	uint32_t encr_data_len;
	void *m_vaddr, *c_vaddr;
	uint64_t m_dma, c_dma;
	uint64_t *offset_vaddr, offset_dma;
	vq_cmd_word0_t vq_cmd_w0;
	vq_cmd_word3_t vq_cmd_w3;
	opcode_info_t opcode;
	uint32_t g_size_bytes, s_size_bytes;
	uint64_t dptr_dma, rptr_dma;
	sg_comp_t *gather_comp;
	sg_comp_t *scatter_comp;

	buf_p = &params->meta_buf;
	m_vaddr = buf_p->vaddr;
	m_dma = buf_p->dma_addr;
	m_size = buf_p->size;

	encr_offset = ENCR_OFFSET(d_offs) / 8;
	encr_data_len = ENCR_DLEN(d_lens);

	cpt_ctx = params->ctx_buf.vaddr;
	flags = cpt_ctx->zsk_flags;

	/*
	 * Save initial space following the app data for the completion code
	 * & alternate completion code, so that they fall in the same cache
	 * line as the app data.
	 */
	m_vaddr = (uint8_t *)m_vaddr + COMPLETION_CODE_SIZE;
	m_dma += COMPLETION_CODE_SIZE;
	size = (uint8_t *)RTE_PTR_ALIGN((uint8_t *)m_vaddr, 16) -
	c_vaddr = (uint8_t *)m_vaddr + size;
	c_dma = m_dma + size;
	size += sizeof(cpt_res_s_t);

	m_vaddr = (uint8_t *)m_vaddr + size;

	/* Reserve memory for cpt request info */
	size = sizeof(struct cpt_request_info);
	m_vaddr = (uint8_t *)m_vaddr + size;

	opcode.s.major = CPT_MAJOR_OP_KASUMI | CPT_DMA_MODE;

	/* indicates ECB/CBC, direction, ctx from cptr, iv from dptr */
	opcode.s.minor = ((1 << 6) | (cpt_ctx->k_ecb << 5) |
			  (dir << 4) | (0 << 3) | (flags & 0x7));

	/*
	 * GP op header, lengths are expected in bits.
	 */
	vq_cmd_w0.s.param1 = rte_cpu_to_be_16(encr_data_len);
	vq_cmd_w0.s.opcode = rte_cpu_to_be_16(opcode.flags);

	/* consider iv len */
	encr_offset += iv_len;

	inputlen = iv_len + (RTE_ALIGN(encr_data_len, 8) / 8);
	outputlen = inputlen;

	/* save space for offset ctrl & iv */
	offset_vaddr = m_vaddr;

	m_vaddr = (uint8_t *)m_vaddr + OFF_CTRL_LEN + iv_len;
	m_dma += OFF_CTRL_LEN + iv_len;
	m_size -= OFF_CTRL_LEN + iv_len;

	/* DPTR has SG list */
	in_buffer = m_vaddr;

	((uint16_t *)in_buffer)[0] = 0;
	((uint16_t *)in_buffer)[1] = 0;

	/* TODO: Add error check if space will be sufficient */
	gather_comp = (sg_comp_t *)((uint8_t *)m_vaddr + 8);

	/* Offset control word followed by iv */
	*offset_vaddr = rte_cpu_to_be_64((uint64_t)encr_offset << 16);

	i = fill_sg_comp(gather_comp, i, offset_dma, OFF_CTRL_LEN + iv_len);

	memcpy((uint8_t *)offset_vaddr + OFF_CTRL_LEN,
	       params->iv_buf, iv_len);

	/* Add input data */
	size = inputlen - iv_len;
		i = fill_sg_comp_from_iov(gather_comp, i,
			return ERR_BAD_INPUT_ARG;

	((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
	g_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);

	/*
	 * Output Scatter List
	 */
	scatter_comp = (sg_comp_t *)((uint8_t *)gather_comp + g_size_bytes);

	i = fill_sg_comp(scatter_comp, i,
			 offset_dma + OFF_CTRL_LEN,

	/* Add output data */
	size = outputlen - iv_len;
		i = fill_sg_comp_from_iov(scatter_comp, i,
			return ERR_BAD_INPUT_ARG;

	((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
	s_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);

	size = g_size_bytes + s_size_bytes + SG_LIST_HDR_SIZE;

	/* This is the DPTR len in case of SG mode */
	vq_cmd_w0.s.dlen = rte_cpu_to_be_16(size);

	m_vaddr = (uint8_t *)m_vaddr + size;

	/* cpt alternate completion address saved earlier */
	req->alternate_caddr = (uint64_t *)((uint8_t *)c_vaddr - 8);
	*req->alternate_caddr = ~((uint64_t)COMPLETION_CODE_INIT);
	rptr_dma = c_dma - 8;

	req->ist.ei1 = dptr_dma;
	req->ist.ei2 = rptr_dma;

	/* First 16-bit swap then 64-bit swap */
	/* TODO: HACK: Reverse the vq_cmd and cpt_req bit field definitions
	 * to eliminate all the swapping.
	 */
	vq_cmd_w0.u64 = rte_cpu_to_be_64(vq_cmd_w0.u64);

	vq_cmd_w3.s.grp = 0;
	vq_cmd_w3.s.cptr = params->ctx_buf.dma_addr +
		offsetof(struct cpt_ctx, k_ctx);

	/* 16-byte aligned cpt res address */
	req->completion_addr = (uint64_t *)((uint8_t *)c_vaddr);
	*req->completion_addr = COMPLETION_CODE_INIT;
	req->comp_baddr = c_dma;

	/* Fill the microcode part of the instruction */
	req->ist.ei0 = vq_cmd_w0.u64;
	req->ist.ei3 = vq_cmd_w3.u64;
static __rte_always_inline void *
cpt_fc_dec_hmac_prep(uint32_t flags,
		     fc_params_t *fc_params,
		     void *op, int *ret_val)
	struct cpt_ctx *ctx = fc_params->ctx_buf.vaddr;
	void *prep_req = NULL;

	fc_type = ctx->fc_type;

	if (likely(fc_type == FC_GEN)) {
		ret = cpt_dec_hmac_prep(flags, d_offs, d_lens,
					fc_params, op, &prep_req);
	} else if (fc_type == ZUC_SNOW3G) {
		ret = cpt_zuc_snow3g_dec_prep(flags, d_offs, d_lens,
					      fc_params, op, &prep_req);
	} else if (fc_type == KASUMI) {
		ret = cpt_kasumi_dec_prep(d_offs, d_lens, fc_params, op,
		/*
		 * For the AUTH_ONLY case, the MC only supports digest
		 * generation; verification should be done in software
		 * via memcmp().
		 */

	if (unlikely(!prep_req))

static __rte_always_inline void *__hot
cpt_fc_enc_hmac_prep(uint32_t flags, uint64_t d_offs, uint64_t d_lens,
		     fc_params_t *fc_params, void *op, int *ret_val)
	struct cpt_ctx *ctx = fc_params->ctx_buf.vaddr;
	void *prep_req = NULL;

	fc_type = ctx->fc_type;

	/* Common API for the rest of the ops */
	if (likely(fc_type == FC_GEN)) {
		ret = cpt_enc_hmac_prep(flags, d_offs, d_lens,
					fc_params, op, &prep_req);
	} else if (fc_type == ZUC_SNOW3G) {
		ret = cpt_zuc_snow3g_enc_prep(flags, d_offs, d_lens,
					      fc_params, op, &prep_req);
	} else if (fc_type == KASUMI) {
		ret = cpt_kasumi_enc_prep(flags, d_offs, d_lens,
					  fc_params, op, &prep_req);

	if (unlikely(!prep_req))
static __rte_always_inline int
cpt_fc_auth_set_key(void *ctx, auth_type_t type, uint8_t *key,
		    uint16_t key_len, uint16_t mac_len)
	struct cpt_ctx *cpt_ctx = ctx;
	mc_fc_context_t *fctx = &cpt_ctx->fctx;
	uint64_t *ctrl_flags = NULL;

	if ((type >= ZUC_EIA3) && (type <= KASUMI_F9_ECB)) {
		/* No support for AEAD yet */
		if (cpt_ctx->enc_cipher)
		/* For ZUC/SNOW3G/Kasumi */
			cpt_ctx->snow3g = 1;
			gen_key_snow3g(key, keyx);
			memcpy(cpt_ctx->zs_ctx.ci_key, keyx, key_len);
			cpt_ctx->fc_type = ZUC_SNOW3G;
			cpt_ctx->zsk_flags = 0x1;
			cpt_ctx->snow3g = 0;
			memcpy(cpt_ctx->zs_ctx.ci_key, key, key_len);
			memcpy(cpt_ctx->zs_ctx.zuc_const, zuc_d, 32);
			cpt_ctx->fc_type = ZUC_SNOW3G;
			cpt_ctx->zsk_flags = 0x1;
			/* Kasumi ECB mode */
			memcpy(cpt_ctx->k_ctx.ci_key, key, key_len);
			cpt_ctx->fc_type = KASUMI;
			cpt_ctx->zsk_flags = 0x1;
			memcpy(cpt_ctx->k_ctx.ci_key, key, key_len);
			cpt_ctx->fc_type = KASUMI;
			cpt_ctx->zsk_flags = 0x1;

		cpt_ctx->mac_len = 4;
		cpt_ctx->hash_type = type;

	if (!(cpt_ctx->fc_type == FC_GEN && !type)) {
		if (!cpt_ctx->fc_type || !cpt_ctx->enc_cipher)
			cpt_ctx->fc_type = HASH_HMAC;

	ctrl_flags = (uint64_t *)&fctx->enc.enc_ctrl.flags;
	*ctrl_flags = rte_be_to_cpu_64(*ctrl_flags);

	/* For GMAC auth, cipher must be NULL */
	if (type == GMAC_TYPE)
		CPT_P_ENC_CTRL(fctx).enc_cipher = 0;

	CPT_P_ENC_CTRL(fctx).hash_type = cpt_ctx->hash_type = type;
	CPT_P_ENC_CTRL(fctx).mac_len = cpt_ctx->mac_len = mac_len;

		memset(cpt_ctx->auth_key, 0, sizeof(cpt_ctx->auth_key));
		memcpy(cpt_ctx->auth_key, key, key_len);
		cpt_ctx->auth_key_len = key_len;
		memset(fctx->hmac.ipad, 0, sizeof(fctx->hmac.ipad));
		memset(fctx->hmac.opad, 0, sizeof(fctx->hmac.opad));
		memcpy(fctx->hmac.opad, key, key_len);
		CPT_P_ENC_CTRL(fctx).auth_input_type = 1;

	*ctrl_flags = rte_cpu_to_be_64(*ctrl_flags);
static __rte_always_inline int
fill_sess_aead(struct rte_crypto_sym_xform *xform,
	       struct cpt_sess_misc *sess)
	struct rte_crypto_aead_xform *aead_form;
	cipher_type_t enc_type = 0; /* NULL cipher type */
	auth_type_t auth_type = 0; /* NULL auth type */
	uint32_t cipher_key_len = 0;
	uint8_t zsk_flag = 0, aes_gcm = 0;
	aead_form = &xform->aead;

	if (aead_form->op == RTE_CRYPTO_AEAD_OP_ENCRYPT &&
	    aead_form->algo == RTE_CRYPTO_AEAD_AES_GCM) {
		sess->cpt_op |= CPT_OP_CIPHER_ENCRYPT;
		sess->cpt_op |= CPT_OP_AUTH_GENERATE;
	} else if (aead_form->op == RTE_CRYPTO_AEAD_OP_DECRYPT &&
		   aead_form->algo == RTE_CRYPTO_AEAD_AES_GCM) {
		sess->cpt_op |= CPT_OP_CIPHER_DECRYPT;
		sess->cpt_op |= CPT_OP_AUTH_VERIFY;
		CPT_LOG_DP_ERR("Unknown cipher operation");
	switch (aead_form->algo) {
	case RTE_CRYPTO_AEAD_AES_GCM:
		cipher_key_len = 16;
	case RTE_CRYPTO_AEAD_AES_CCM:
		CPT_LOG_DP_ERR("Crypto: Unsupported cipher algo %u",
		CPT_LOG_DP_ERR("Crypto: Undefined cipher algo %u specified",
	if (aead_form->key.length < cipher_key_len) {
		CPT_LOG_DP_ERR("Invalid cipher params keylen %lu",
			       (unsigned long)aead_form->key.length);
	sess->zsk_flag = zsk_flag;
	sess->aes_gcm = aes_gcm;
	sess->mac_len = aead_form->digest_length;
	sess->iv_offset = aead_form->iv.offset;
	sess->iv_length = aead_form->iv.length;
	sess->aad_length = aead_form->aad_length;
	ctx = (void *)((uint8_t *)sess + sizeof(struct cpt_sess_misc)),

	cpt_fc_ciph_set_key(ctx, enc_type, aead_form->key.data,
			    aead_form->key.length, NULL);
	cpt_fc_auth_set_key(ctx, auth_type, NULL, 0, aead_form->digest_length);

static __rte_always_inline int
fill_sess_cipher(struct rte_crypto_sym_xform *xform,
		 struct cpt_sess_misc *sess)
	struct rte_crypto_cipher_xform *c_form;
	cipher_type_t enc_type = 0; /* NULL cipher type */
	uint32_t cipher_key_len = 0;
	uint8_t zsk_flag = 0, aes_gcm = 0, aes_ctr = 0, is_null = 0;

	if (xform->type != RTE_CRYPTO_SYM_XFORM_CIPHER)

	c_form = &xform->cipher;

	if (c_form->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
		sess->cpt_op |= CPT_OP_CIPHER_ENCRYPT;
	else if (c_form->op == RTE_CRYPTO_CIPHER_OP_DECRYPT)
		sess->cpt_op |= CPT_OP_CIPHER_DECRYPT;
		CPT_LOG_DP_ERR("Unknown cipher operation");

	switch (c_form->algo) {
	case RTE_CRYPTO_CIPHER_AES_CBC:
		cipher_key_len = 16;
	case RTE_CRYPTO_CIPHER_3DES_CBC:
		enc_type = DES3_CBC;
		cipher_key_len = 24;
	case RTE_CRYPTO_CIPHER_DES_CBC:
		/* DES is implemented using 3DES in hardware */
		enc_type = DES3_CBC;
	case RTE_CRYPTO_CIPHER_AES_CTR:
		cipher_key_len = 16;
	case RTE_CRYPTO_CIPHER_NULL:
	case RTE_CRYPTO_CIPHER_KASUMI_F8:
		enc_type = KASUMI_F8_ECB;
		cipher_key_len = 16;
	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
		enc_type = SNOW3G_UEA2;
		cipher_key_len = 16;
	case RTE_CRYPTO_CIPHER_ZUC_EEA3:
		enc_type = ZUC_EEA3;
		cipher_key_len = 16;
	case RTE_CRYPTO_CIPHER_AES_XTS:
		cipher_key_len = 16;
	case RTE_CRYPTO_CIPHER_3DES_ECB:
		enc_type = DES3_ECB;
		cipher_key_len = 24;
	case RTE_CRYPTO_CIPHER_AES_ECB:
		cipher_key_len = 16;
	case RTE_CRYPTO_CIPHER_3DES_CTR:
	case RTE_CRYPTO_CIPHER_AES_F8:
	case RTE_CRYPTO_CIPHER_ARC4:
		CPT_LOG_DP_ERR("Crypto: Unsupported cipher algo %u",
		CPT_LOG_DP_ERR("Crypto: Undefined cipher algo %u specified",

	if (c_form->key.length < cipher_key_len) {
		CPT_LOG_DP_ERR("Invalid cipher params keylen %lu",
			       (unsigned long)c_form->key.length);

	sess->zsk_flag = zsk_flag;
	sess->aes_gcm = aes_gcm;
	sess->aes_ctr = aes_ctr;
	sess->iv_offset = c_form->iv.offset;
	sess->iv_length = c_form->iv.length;
	sess->is_null = is_null;

	cpt_fc_ciph_set_key(SESS_PRIV(sess), enc_type, c_form->key.data,
			    c_form->key.length, NULL);
static __rte_always_inline int
fill_sess_auth(struct rte_crypto_sym_xform *xform,
	       struct cpt_sess_misc *sess)
{
	struct rte_crypto_auth_xform *a_form;
	auth_type_t auth_type = 0; /* NULL Auth type */
	uint8_t zsk_flag = 0, aes_gcm = 0, is_null = 0;

	if (xform->type != RTE_CRYPTO_SYM_XFORM_AUTH)
		return -1;

	a_form = &xform->auth;

	if (a_form->op == RTE_CRYPTO_AUTH_OP_VERIFY)
		sess->cpt_op |= CPT_OP_AUTH_VERIFY;
	else if (a_form->op == RTE_CRYPTO_AUTH_OP_GENERATE)
		sess->cpt_op |= CPT_OP_AUTH_GENERATE;
	else {
		CPT_LOG_DP_ERR("Unknown auth operation");
		return -1;
	}

	if (a_form->key.length > 64) {
		CPT_LOG_DP_ERR("Auth key length is too big");
		return -1;
	}

	switch (a_form->algo) {
	case RTE_CRYPTO_AUTH_SHA1_HMAC:
		/* Fall through */
	case RTE_CRYPTO_AUTH_SHA1:
		auth_type = SHA1_TYPE;
		break;
	case RTE_CRYPTO_AUTH_SHA256_HMAC:
	case RTE_CRYPTO_AUTH_SHA256:
		auth_type = SHA2_SHA256;
		break;
	case RTE_CRYPTO_AUTH_SHA512_HMAC:
	case RTE_CRYPTO_AUTH_SHA512:
		auth_type = SHA2_SHA512;
		break;
	case RTE_CRYPTO_AUTH_AES_GMAC:
		auth_type = GMAC_TYPE;
		aes_gcm = 1;
		break;
	case RTE_CRYPTO_AUTH_SHA224_HMAC:
	case RTE_CRYPTO_AUTH_SHA224:
		auth_type = SHA2_SHA224;
		break;
	case RTE_CRYPTO_AUTH_SHA384_HMAC:
	case RTE_CRYPTO_AUTH_SHA384:
		auth_type = SHA2_SHA384;
		break;
	case RTE_CRYPTO_AUTH_MD5_HMAC:
	case RTE_CRYPTO_AUTH_MD5:
		auth_type = MD5_TYPE;
		break;
	case RTE_CRYPTO_AUTH_KASUMI_F9:
		auth_type = KASUMI_F9_ECB;
		/*
		 * Indicate that direction needs to be taken out
		 * from the end of src
		 */
		zsk_flag = K_F9;
		break;
	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
		auth_type = SNOW3G_UIA2;
		zsk_flag = ZS_IA;
		break;
	case RTE_CRYPTO_AUTH_ZUC_EIA3:
		auth_type = ZUC_EIA3;
		zsk_flag = ZS_IA;
		break;
	case RTE_CRYPTO_AUTH_NULL:
		auth_type = 0;
		is_null = 1;
		break;
	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
	case RTE_CRYPTO_AUTH_AES_CMAC:
	case RTE_CRYPTO_AUTH_AES_CBC_MAC:
		CPT_LOG_DP_ERR("Crypto: Unsupported hash algo %u",
			       a_form->algo);
		return -1;
	default:
		CPT_LOG_DP_ERR("Crypto: Undefined Hash algo %u specified",
			       a_form->algo);
		return -1;
	}

	sess->zsk_flag = zsk_flag;
	sess->aes_gcm = aes_gcm;
	sess->mac_len = a_form->digest_length;
	sess->is_null = is_null;
	if (zsk_flag) {
		sess->auth_iv_offset = a_form->iv.offset;
		sess->auth_iv_length = a_form->iv.length;
	}
	cpt_fc_auth_set_key(SESS_PRIV(sess), auth_type, a_form->key.data,
			    a_form->key.length, a_form->digest_length);

	return 0;
}
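
/*
 * AES-GMAC arrives as an auth xform but runs on the cipher engine, so
 * both a cipher key (AES_GCM) and an auth type (GMAC_TYPE) are
 * programmed into the same session context.
 */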
static __rte_always_inline int
fill_sess_gmac(struct rte_crypto_sym_xform *xform,
	       struct cpt_sess_misc *sess)
{
	struct rte_crypto_auth_xform *a_form;
	cipher_type_t enc_type = 0; /* NULL Cipher type */
	auth_type_t auth_type = 0; /* NULL Auth type */
	uint8_t zsk_flag = 0, aes_gcm = 0;
	void *ctx;

	if (xform->type != RTE_CRYPTO_SYM_XFORM_AUTH)
		return -1;

	a_form = &xform->auth;

	if (a_form->op == RTE_CRYPTO_AUTH_OP_GENERATE)
		sess->cpt_op |= CPT_OP_ENCODE;
	else if (a_form->op == RTE_CRYPTO_AUTH_OP_VERIFY)
		sess->cpt_op |= CPT_OP_DECODE;
	else {
		CPT_LOG_DP_ERR("Unknown auth operation");
		return -1;
	}

	switch (a_form->algo) {
	case RTE_CRYPTO_AUTH_AES_GMAC:
		enc_type = AES_GCM;
		auth_type = GMAC_TYPE;
		break;
	default:
		CPT_LOG_DP_ERR("Crypto: Undefined cipher algo %u specified",
			       a_form->algo);
		return -1;
	}

	sess->zsk_flag = zsk_flag;
	sess->aes_gcm = aes_gcm;
	sess->is_gmac = 1;
	sess->iv_offset = a_form->iv.offset;
	sess->iv_length = a_form->iv.length;
	sess->mac_len = a_form->digest_length;
	ctx = (void *)((uint8_t *)sess + sizeof(struct cpt_sess_misc));

	cpt_fc_ciph_set_key(ctx, enc_type, a_form->key.data,
			    a_form->key.length, NULL);
	cpt_fc_auth_set_key(ctx, auth_type, NULL, 0, a_form->digest_length);

	return 0;
}
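
/*
 * Meta data for a request is carved out of the source mbuf's tailroom
 * when a single-segment mbuf has enough room, avoiding a mempool get;
 * bit 0 of the returned pointer marks mbuf-backed meta data so that
 * free_op_meta() knows not to return it to the pool.
 */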
static __rte_always_inline void *
alloc_op_meta(struct rte_mbuf *m_src,
	      buf_ptr_t *buf,
	      int32_t len,
	      struct rte_mempool *cpt_meta_pool)
{
	uint8_t *mdata = NULL;

#ifndef CPT_ALWAYS_USE_SEPARATE_BUF
	if (likely(m_src && (m_src->nb_segs == 1))) {
		int32_t tailroom;
		phys_addr_t mphys;

		/* Check if tailroom is sufficient to hold meta data */
		tailroom = rte_pktmbuf_tailroom(m_src);
		if (likely(tailroom > len + 8)) {
			mdata = (uint8_t *)m_src->buf_addr + m_src->buf_len;
			mphys = m_src->buf_physaddr + m_src->buf_len;
			mdata -= len;
			mphys -= len;
			buf->vaddr = mdata;
			buf->dma_addr = mphys;
			buf->size = len;
			/* Indicate that this is a mbuf allocated mdata */
			mdata = (uint8_t *)((uint64_t)mdata | 1ull);
			return mdata;
		}
	}
#else
	RTE_SET_USED(m_src);
#endif
	if (unlikely(rte_mempool_get(cpt_meta_pool, (void **)&mdata) < 0))
		return NULL;

	buf->vaddr = mdata;
	buf->dma_addr = rte_mempool_virt2iova(mdata);
	buf->size = len;

	return mdata;
}
/*
 * free_op_meta - free meta buffer.
 * @param mdata: pointer to the meta buffer (as returned by alloc_op_meta).
 * @param cpt_meta_pool: mempool the buffer came from.
 */
static __rte_always_inline void
free_op_meta(void *mdata, struct rte_mempool *cpt_meta_pool)
{
	bool nofree = ((uintptr_t)mdata & 1ull);

	if (likely(nofree))
		return;
	rte_mempool_put(cpt_meta_pool, mdata);
}
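
/*
 * Walk an mbuf chain and flatten it into an iov_ptr_t gather list,
 * optionally skipping start_offset bytes into the chain. Returns
 * non-zero on failure (e.g. the offset lands on an empty segment).
 */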
static __rte_always_inline uint32_t
prepare_iov_from_pkt(struct rte_mbuf *pkt,
		     iov_ptr_t *iovec, uint32_t start_offset)
{
	uint16_t index = 0;
	void *seg_data = NULL;
	phys_addr_t seg_phys;
	int32_t seg_size = 0;

	if (!pkt) {
		iovec->buf_cnt = 0;
		return 0;
	}

	if (!start_offset) {
		seg_data = rte_pktmbuf_mtod(pkt, void *);
		seg_phys = rte_pktmbuf_mtophys(pkt);
		seg_size = pkt->data_len;
	} else {
		while (start_offset >= pkt->data_len) {
			start_offset -= pkt->data_len;
			pkt = pkt->next;
		}
		seg_data = rte_pktmbuf_mtod_offset(pkt, void *, start_offset);
		seg_phys = rte_pktmbuf_mtophys_offset(pkt, start_offset);
		seg_size = pkt->data_len - start_offset;
		if (!seg_size)
			return 1;
	}

	/* first seg */
	iovec->bufs[index].vaddr = seg_data;
	iovec->bufs[index].dma_addr = seg_phys;
	iovec->bufs[index].size = seg_size;
	index++;
	pkt = pkt->next;

	while (unlikely(pkt != NULL)) {
		seg_data = rte_pktmbuf_mtod(pkt, void *);
		seg_phys = rte_pktmbuf_mtophys(pkt);
		seg_size = pkt->data_len;
		if (!seg_size)
			break;
		iovec->bufs[index].vaddr = seg_data;
		iovec->bufs[index].dma_addr = seg_phys;
		iovec->bufs[index].size = seg_size;
		index++;
		pkt = pkt->next;
	}

	iovec->buf_cnt = index;
	return 0;
}
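
/*
 * In-place variant: a single-segment mbuf with sufficient headroom and
 * tailroom qualifies for direct mode on 83XX (SINGLE_BUF_HEADTAILROOM);
 * anything else falls back to building a scatter-gather list shared by
 * source and destination.
 */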
static __rte_always_inline uint32_t
prepare_iov_from_pkt_inplace(struct rte_mbuf *pkt,
			     fc_params_t *param,
			     uint32_t *flags)
{
	uint16_t index = 0;
	void *seg_data = NULL;
	phys_addr_t seg_phys;
	uint32_t seg_size = 0;
	iov_ptr_t *iovec;

	seg_data = rte_pktmbuf_mtod(pkt, void *);
	seg_phys = rte_pktmbuf_mtophys(pkt);
	seg_size = pkt->data_len;

	/* first seg */
	if (likely(!pkt->next)) {
		uint32_t headroom, tailroom;

		*flags |= SINGLE_BUF_INPLACE;
		headroom = rte_pktmbuf_headroom(pkt);
		tailroom = rte_pktmbuf_tailroom(pkt);
		if (likely((headroom >= 24) &&
		    (tailroom >= 8))) {
			/* In 83XX this is a prerequisite for Direct mode */
			*flags |= SINGLE_BUF_HEADTAILROOM;
		}
		param->bufs[0].vaddr = seg_data;
		param->bufs[0].dma_addr = seg_phys;
		param->bufs[0].size = seg_size;
		return 0;
	}
	iovec = param->src_iov;
	iovec->bufs[index].vaddr = seg_data;
	iovec->bufs[index].dma_addr = seg_phys;
	iovec->bufs[index].size = seg_size;
	index++;
	pkt = pkt->next;

	while (unlikely(pkt != NULL)) {
		seg_data = rte_pktmbuf_mtod(pkt, void *);
		seg_phys = rte_pktmbuf_mtophys(pkt);
		seg_size = pkt->data_len;
		if (!seg_size)
			break;
		iovec->bufs[index].vaddr = seg_data;
		iovec->bufs[index].dma_addr = seg_phys;
		iovec->bufs[index].size = seg_size;
		index++;
		pkt = pkt->next;
	}

	iovec->buf_cnt = index;
	return 0;
}
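
/*
 * fill_fc_params() converts an rte_crypto_op into a flexi-crypto
 * request: it normalizes the IV, packs the cipher and auth ranges into
 * d_offs/d_lens, decides between in-place and scatter-gather
 * processing, attaches meta data, and finally calls
 * cpt_fc_enc_hmac_prep() or cpt_fc_dec_hmac_prep() to build the
 * instruction.
 */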
static __rte_always_inline void *
fill_fc_params(struct rte_crypto_op *cop,
	       struct cpt_sess_misc *sess_misc,
	       void **mdata_ptr,
	       int *op_ret)
{
	uint32_t space = 0;
	struct rte_crypto_sym_op *sym_op = cop->sym;
	void *mdata;
	uintptr_t *op;
	uint32_t mc_hash_off;
	uint32_t flags = 0;
	uint64_t d_offs, d_lens;
	void *prep_req = NULL;
	struct rte_mbuf *m_src, *m_dst;
	uint8_t cpt_op = sess_misc->cpt_op;
	uint8_t zsk_flag = sess_misc->zsk_flag;
	uint8_t aes_gcm = sess_misc->aes_gcm;
	uint16_t mac_len = sess_misc->mac_len;
#ifdef CPT_ALWAYS_USE_SG_MODE
	uint8_t inplace = 0;
#else
	uint8_t inplace = 1;
#endif
	fc_params_t fc_params;
	char src[SRC_IOV_SIZE];
	char dst[SRC_IOV_SIZE];
	uint32_t iv_buf[4];
	struct cptvf_meta_info *cpt_m_info =
				(struct cptvf_meta_info *)(*mdata_ptr);
	if (likely(sess_misc->iv_length)) {
		flags |= VALID_IV_BUF;
		fc_params.iv_buf = rte_crypto_op_ctod_offset(cop,
				   uint8_t *, sess_misc->iv_offset);
		if (sess_misc->aes_ctr &&
		    unlikely(sess_misc->iv_length != 16)) {
			memcpy((uint8_t *)iv_buf,
			       rte_crypto_op_ctod_offset(cop,
			       uint8_t *, sess_misc->iv_offset), 12);
			iv_buf[3] = rte_cpu_to_be_32(0x1);
			fc_params.iv_buf = iv_buf;
		}
	}

	if (zsk_flag) {
		fc_params.auth_iv_buf = rte_crypto_op_ctod_offset(cop,
					uint8_t *,
					sess_misc->auth_iv_offset);
		if (zsk_flag == K_F9) {
			CPT_LOG_DP_ERR("Should not reach here for "
				       "kasumi F9");
		}
		if (zsk_flag != ZS_EA)
			inplace = 0;
	}
	m_src = sym_op->m_src;
	m_dst = sym_op->m_dst;
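
	/*
	 * d_offs packs the cipher offset in bits 16..31 and the auth
	 * offset in bits 0..15; d_lens packs the cipher length in the
	 * upper 32 bits and the auth length in the lower 32. For GCM
	 * the AAD is folded into the auth range when it immediately
	 * precedes the payload in the mbuf.
	 */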
	if (aes_gcm) {
		uint8_t *salt;
		uint8_t *aad_data;
		uint16_t aad_len;

		d_offs = sym_op->aead.data.offset;
		d_lens = sym_op->aead.data.length;
		mc_hash_off = sym_op->aead.data.offset +
			      sym_op->aead.data.length;

		aad_data = sym_op->aead.aad.data;
		aad_len = sess_misc->aad_length;
		if (likely((aad_data + aad_len) ==
			   rte_pktmbuf_mtod_offset(m_src,
				uint8_t *,
				sym_op->aead.data.offset))) {
			d_offs = (d_offs - aad_len) | (d_offs << 16);
			d_lens = (d_lens + aad_len) | (d_lens << 32);
		} else {
			fc_params.aad_buf.vaddr = sym_op->aead.aad.data;
			fc_params.aad_buf.dma_addr = sym_op->aead.aad.phys_addr;
			fc_params.aad_buf.size = aad_len;
			flags |= VALID_AAD_BUF;
			inplace = 0;
			d_offs = d_offs << 16;
			d_lens = d_lens << 32;
		}

		salt = fc_params.iv_buf;
		if (unlikely(*(uint32_t *)salt != sess_misc->salt)) {
			cpt_fc_salt_update(SESS_PRIV(sess_misc), salt);
			sess_misc->salt = *(uint32_t *)salt;
		}
		fc_params.iv_buf = salt + 4;
		if (likely(mac_len)) {
			struct rte_mbuf *m = (cpt_op & CPT_OP_ENCODE) ? m_dst :
					     m_src;

			if (!m)
				m = m_src;

			/* hmac immediately following data is best case */
			if (unlikely(rte_pktmbuf_mtod(m, uint8_t *) +
				     mc_hash_off !=
				     (uint8_t *)sym_op->aead.digest.data)) {
				flags |= VALID_MAC_BUF;
				fc_params.mac_buf.size = sess_misc->mac_len;
				fc_params.mac_buf.vaddr =
					sym_op->aead.digest.data;
				fc_params.mac_buf.dma_addr =
					sym_op->aead.digest.phys_addr;
				inplace = 0;
			}
		}
	} else {
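		/*
		 * Chained cipher+auth: the two ranges may differ, so
		 * mc_hash_off must track whichever region ends last to
		 * know where the digest would naturally sit.
		 */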
		d_offs = sym_op->cipher.data.offset;
		d_lens = sym_op->cipher.data.length;
		mc_hash_off = sym_op->cipher.data.offset +
			      sym_op->cipher.data.length;
		d_offs = (d_offs << 16) | sym_op->auth.data.offset;
		d_lens = (d_lens << 32) | sym_op->auth.data.length;

		if (mc_hash_off < (sym_op->auth.data.offset +
				   sym_op->auth.data.length)) {
			mc_hash_off = (sym_op->auth.data.offset +
				       sym_op->auth.data.length);
		}
		/* For GMAC, salt should be updated as in GCM */
		if (unlikely(sess_misc->is_gmac)) {
			uint8_t *salt;

			salt = fc_params.iv_buf;
			if (unlikely(*(uint32_t *)salt != sess_misc->salt)) {
				cpt_fc_salt_update(SESS_PRIV(sess_misc), salt);
				sess_misc->salt = *(uint32_t *)salt;
			}
			fc_params.iv_buf = salt + 4;
		}
		if (likely(mac_len)) {
			struct rte_mbuf *m;

			m = (cpt_op & CPT_OP_ENCODE) ? m_dst : m_src;
			if (!m)
				m = m_src;

			/* hmac immediately following data is best case */
			if (unlikely(rte_pktmbuf_mtod(m, uint8_t *) +
				     mc_hash_off !=
				     (uint8_t *)sym_op->auth.digest.data)) {
				flags |= VALID_MAC_BUF;
				fc_params.mac_buf.size = sess_misc->mac_len;
				fc_params.mac_buf.vaddr =
					sym_op->auth.digest.data;
				fc_params.mac_buf.dma_addr =
					sym_op->auth.digest.phys_addr;
				inplace = 0;
			}
		}
	}
	fc_params.ctx_buf.vaddr = SESS_PRIV(sess_misc);
	fc_params.ctx_buf.dma_addr = sess_misc->ctx_dma_addr;
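
	/*
	 * NULL ciphers and decode (verify) paths are forced through the
	 * scatter-gather path; in-place direct mode is attempted only
	 * when the op has no separate destination mbuf.
	 */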
	if (unlikely(sess_misc->is_null || sess_misc->cpt_op == CPT_OP_DECODE))
		inplace = 0;

	if (likely(!m_dst && inplace)) {
		/* Single buffer in-place, without AAD or a separate
		 * mac buf, and not air crypto
		 */
		fc_params.dst_iov = fc_params.src_iov = (void *)src;
		if (unlikely(prepare_iov_from_pkt_inplace(m_src,
							  &fc_params,
							  &flags))) {
			CPT_LOG_DP_ERR("Prepare inplace src iov failed");
			*op_ret = -1;
			return NULL;
		}
	} else {
		/* Out of place processing */
		fc_params.src_iov = (void *)src;
		fc_params.dst_iov = (void *)dst;

		/* Store SG I/O in the api for reuse */
		if (prepare_iov_from_pkt(m_src, fc_params.src_iov, 0)) {
			CPT_LOG_DP_ERR("Prepare src iov failed");
			*op_ret = -1;
			return NULL;
		}

		if (unlikely(m_dst != NULL)) {
			uint32_t pkt_len;

			/* Try to make room as much as src has */
			m_dst = sym_op->m_dst;
			pkt_len = rte_pktmbuf_pkt_len(m_dst);
			if (unlikely(pkt_len < rte_pktmbuf_pkt_len(m_src))) {
				pkt_len = rte_pktmbuf_pkt_len(m_src) - pkt_len;
				if (!rte_pktmbuf_append(m_dst, pkt_len)) {
					CPT_LOG_DP_ERR("Not enough space in "
						       "m_dst %p, need %u more",
						       m_dst, pkt_len);
					return NULL;
				}
			}

			if (prepare_iov_from_pkt(m_dst, fc_params.dst_iov, 0)) {
				CPT_LOG_DP_ERR("Prepare dst iov failed for "
					       "m_dst %p", m_dst);
				return NULL;
			}
		} else {
			fc_params.dst_iov = (void *)src;
		}
	}
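
	/*
	 * The first four 64-bit words of the meta buffer form the op
	 * cookie: the raw meta pointer, the crypto op pointer and two
	 * words used to report auth-verify status on completion; the
	 * remainder is handed to the prep routines as scratch space.
	 */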
	if (likely(flags & SINGLE_BUF_HEADTAILROOM))
		mdata = alloc_op_meta(m_src,
				      &fc_params.meta_buf,
				      cpt_m_info->cptvf_op_sb_mlen,
				      cpt_m_info->cptvf_meta_pool);
	else
		mdata = alloc_op_meta(NULL,
				      &fc_params.meta_buf,
				      cpt_m_info->cptvf_op_mlen,
				      cpt_m_info->cptvf_meta_pool);

	if (unlikely(mdata == NULL)) {
		CPT_LOG_DP_ERR("Error allocating meta buffer for request");
		return NULL;
	}

	op = (uintptr_t *)((uintptr_t)mdata & (uintptr_t)~1ull);
	op[0] = (uintptr_t)mdata;
	op[1] = (uintptr_t)cop;
	op[2] = op[3] = 0; /* Used to indicate auth verify */
	space += 4 * sizeof(uint64_t);

	fc_params.meta_buf.vaddr = (uint8_t *)op + space;
	fc_params.meta_buf.dma_addr += space;
	fc_params.meta_buf.size -= space;

	/* Finally prepare the instruction */
	if (cpt_op & CPT_OP_ENCODE)
		prep_req = cpt_fc_enc_hmac_prep(flags, d_offs, d_lens,
						&fc_params, op, op_ret);
	else
		prep_req = cpt_fc_dec_hmac_prep(flags, d_offs, d_lens,
						&fc_params, op, op_ret);

	if (unlikely(!prep_req))
		free_op_meta(mdata, cpt_m_info->cptvf_meta_pool);
	*mdata_ptr = mdata;
	return prep_req;
}
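
/*
 * instance_session_cfg() walks the xform chain and fills the session
 * for each element; AES-GMAC is special-cased since it is presented as
 * an auth xform but needs the combined cipher/auth setup of
 * fill_sess_gmac().
 */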
static __rte_always_inline int
instance_session_cfg(struct rte_crypto_sym_xform *xform, void *sess)
{
	struct rte_crypto_sym_xform *chain;

	CPT_PMD_INIT_FUNC_TRACE();

	if (cpt_is_algo_supported(xform))
		goto err;

	chain = xform;
	while (chain) {
		switch (chain->type) {
		case RTE_CRYPTO_SYM_XFORM_AEAD:
			if (fill_sess_aead(chain, sess))
				goto err;
			break;
		case RTE_CRYPTO_SYM_XFORM_CIPHER:
			if (fill_sess_cipher(chain, sess))
				goto err;
			break;
		case RTE_CRYPTO_SYM_XFORM_AUTH:
			if (chain->auth.algo == RTE_CRYPTO_AUTH_AES_GMAC) {
				if (fill_sess_gmac(chain, sess))
					goto err;
			} else {
				if (fill_sess_auth(chain, sess))
					goto err;
			}
			break;
		default:
			CPT_LOG_DP_ERR("Invalid crypto xform type");
			goto err;
		}
		chain = chain->next;
	}

	return 0;

err:
	return -1;
}

#endif /* _CPT_UCODE_H_ */