1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018 Cavium, Inc
10 #include "cpt_common.h"
11 #include "cpt_hw_types.h"
12 #include "cpt_mcode_defines.h"
15 * This file defines the functions that interface with the microcode spec.
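/*
 * ZUC LFSR initialization constants D0..D15 from the ZUC spec, each 15-bit
 * constant stored as a byte pair (e.g. D0 = 0x44D7 -> 0x44, 0xD7), in the
 * layout the microcode consumes from zs_ctx.zuc_const.
 */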
19 static uint8_t zuc_d[32] = {
20 0x44, 0xD7, 0x26, 0xBC, 0x62, 0x6B, 0x13, 0x5E,
21 0x57, 0x89, 0x35, 0xE2, 0x71, 0x35, 0x09, 0xAF,
22 0x4D, 0x78, 0x2F, 0x13, 0x6B, 0xC4, 0x1A, 0xF1,
23 0x5E, 0x26, 0x3C, 0x4D, 0x78, 0x9A, 0x47, 0xAC
26 static __rte_always_inline int
27 cpt_is_algo_supported(struct rte_crypto_sym_xform *xform)
30 * Microcode only supports the following combinations:
31 * Encryption followed by authentication
32 * Authentication followed by decryption
35 if ((xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) &&
36 (xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) &&
37 (xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)) {
38 /* Unsupported as of now by microcode */
39 CPT_LOG_DP_ERR("Unsupported combination");
42 if ((xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) &&
43 (xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) &&
44 (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT)) {
45 /* For GMAC auth there is no cipher operation */
46 if (xform->aead.algo != RTE_CRYPTO_AEAD_AES_GCM ||
47 xform->next->auth.algo !=
48 RTE_CRYPTO_AUTH_AES_GMAC) {
49 /* Unsupported as of now by microcode */
50 CPT_LOG_DP_ERR("Unsupported combination");
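/*
 * gen_key_snow3g() converts the 16-byte confidentiality key CK into four
 * 32-bit big-endian words in reversed word order. Illustrative example
 * (not from the original source), assuming the elided `base` is 4 * i:
 * for CK = 00 01 02 03 ... 0e 0f, keyx[3] ends up holding bytes
 * 00 01 02 03 and keyx[0] bytes 0c 0d 0e 0f.
 */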
58 static __rte_always_inline void
59 gen_key_snow3g(uint8_t *ck, uint32_t *keyx)
63 for (i = 0; i < 4; i++) {
65 keyx[3 - i] = (ck[base] << 24) | (ck[base + 1] << 16) |
66 (ck[base + 2] << 8) | (ck[base + 3]);
67 keyx[3 - i] = rte_cpu_to_be_32(keyx[3 - i]);
71 static __rte_always_inline void
72 cpt_fc_salt_update(void *ctx,
75 struct cpt_ctx *cpt_ctx = ctx;
76 memcpy(&cpt_ctx->fctx.enc.encr_iv, salt, 4);
79 static __rte_always_inline int
80 cpt_fc_ciph_validate_key_aes(uint16_t key_len)
92 static __rte_always_inline int
93 cpt_fc_ciph_validate_key(cipher_type_t type, struct cpt_ctx *cpt_ctx,
110 if (unlikely(cpt_fc_ciph_validate_key_aes(key_len) != 0))
115 key_len = key_len / 2;
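/* An XTS key is key1 || key2, so the per-half length is what gets
 * validated; a 24B half (i.e. AES-192-XTS) is rejected below.
 */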
116 if (unlikely(key_len == CPT_BYTE_24)) {
117 CPT_LOG_DP_ERR("Invalid AES key len for XTS");
120 if (unlikely(cpt_fc_ciph_validate_key_aes(key_len) != 0))
126 if (unlikely(key_len != 16))
128 /* No support for AEAD yet */
129 if (unlikely(cpt_ctx->hash_type))
131 fc_type = ZUC_SNOW3G;
135 if (unlikely(key_len != 16))
137 /* No support for AEAD yet */
138 if (unlikely(cpt_ctx->hash_type))
148 static __rte_always_inline void
149 cpt_fc_ciph_set_key_passthrough(struct cpt_ctx *cpt_ctx, mc_fc_context_t *fctx)
151 cpt_ctx->enc_cipher = 0;
152 CPT_P_ENC_CTRL(fctx).enc_cipher = 0;
155 static __rte_always_inline void
156 cpt_fc_ciph_set_key_set_aes_key_type(mc_fc_context_t *fctx, uint16_t key_len)
158 mc_aes_type_t aes_key_type = 0;
161 aes_key_type = AES_128_BIT;
164 aes_key_type = AES_192_BIT;
167 aes_key_type = AES_256_BIT;
170 /* This should not happen */
171 CPT_LOG_DP_ERR("Invalid AES key len");
174 CPT_P_ENC_CTRL(fctx).aes_key = aes_key_type;
177 static __rte_always_inline void
178 cpt_fc_ciph_set_key_snow3g_uea2(struct cpt_ctx *cpt_ctx, uint8_t *key,
183 gen_key_snow3g(key, keyx);
184 memcpy(cpt_ctx->zs_ctx.ci_key, keyx, key_len);
185 cpt_ctx->fc_type = ZUC_SNOW3G;
186 cpt_ctx->zsk_flags = 0;
189 static __rte_always_inline void
190 cpt_fc_ciph_set_key_zuc_eea3(struct cpt_ctx *cpt_ctx, uint8_t *key,
194 memcpy(cpt_ctx->zs_ctx.ci_key, key, key_len);
195 memcpy(cpt_ctx->zs_ctx.zuc_const, zuc_d, 32);
196 cpt_ctx->fc_type = ZUC_SNOW3G;
197 cpt_ctx->zsk_flags = 0;
200 static __rte_always_inline void
201 cpt_fc_ciph_set_key_kasumi_f8_ecb(struct cpt_ctx *cpt_ctx, uint8_t *key,
205 memcpy(cpt_ctx->k_ctx.ci_key, key, key_len);
206 cpt_ctx->zsk_flags = 0;
207 cpt_ctx->fc_type = KASUMI;
210 static __rte_always_inline void
211 cpt_fc_ciph_set_key_kasumi_f8_cbc(struct cpt_ctx *cpt_ctx, uint8_t *key,
214 memcpy(cpt_ctx->k_ctx.ci_key, key, key_len);
215 cpt_ctx->zsk_flags = 0;
216 cpt_ctx->fc_type = KASUMI;
219 static __rte_always_inline int
220 cpt_fc_ciph_set_key(void *ctx, cipher_type_t type, uint8_t *key,
221 uint16_t key_len, uint8_t *salt)
223 struct cpt_ctx *cpt_ctx = ctx;
224 mc_fc_context_t *fctx = &cpt_ctx->fctx;
225 uint64_t *ctrl_flags = NULL;
228 /* Validate key before proceeding */
229 fc_type = cpt_fc_ciph_validate_key(type, cpt_ctx, key_len);
230 if (unlikely(fc_type == -1))
233 if (fc_type == FC_GEN) {
234 cpt_ctx->fc_type = FC_GEN;
235 ctrl_flags = (uint64_t *)&(fctx->enc.enc_ctrl.flags);
236 *ctrl_flags = rte_be_to_cpu_64(*ctrl_flags);
238 * We need to always say IV is from DPTR, as the user can
239 * sometimes override the IV per operation.
241 CPT_P_ENC_CTRL(fctx).iv_source = CPT_FROM_DPTR;
246 cpt_fc_ciph_set_key_passthrough(cpt_ctx, fctx);
249 /* CPT performs DES using 3DES with the 8B DES-key
250 * replicated 2 more times to match the 24B 3DES-key.
251 * E.g. if the original key is "0x0a 0x0b", then the new key is
252 * "0x0a 0x0b 0x0a 0x0b 0x0a 0x0b".
255 /* Skipping the first 8B as it will be copied
256 * in the regular code flow
258 memcpy(fctx->enc.encr_key+key_len, key, key_len);
259 memcpy(fctx->enc.encr_key+2*key_len, key, key_len);
263 /* For DES3_ECB IV need to be from CTX. */
264 CPT_P_ENC_CTRL(fctx).iv_source = CPT_FROM_CTX;
270 cpt_fc_ciph_set_key_set_aes_key_type(fctx, key_len);
273 /* Even though the IV source is DPTR,
274 * the AES-GCM salt is taken from CTX.
277 memcpy(fctx->enc.encr_iv, salt, 4);
278 /* Assuming it was just salt update
284 cpt_fc_ciph_set_key_set_aes_key_type(fctx, key_len);
287 key_len = key_len / 2;
288 cpt_fc_ciph_set_key_set_aes_key_type(fctx, key_len);
290 /* Copy key2 for XTS into ipad */
291 memset(fctx->hmac.ipad, 0, sizeof(fctx->hmac.ipad));
292 memcpy(fctx->hmac.ipad, &key[key_len], key_len);
295 cpt_fc_ciph_set_key_snow3g_uea2(cpt_ctx, key, key_len);
298 cpt_fc_ciph_set_key_zuc_eea3(cpt_ctx, key, key_len);
301 cpt_fc_ciph_set_key_kasumi_f8_ecb(cpt_ctx, key, key_len);
304 cpt_fc_ciph_set_key_kasumi_f8_cbc(cpt_ctx, key, key_len);
310 /* Only for FC_GEN case */
312 /* For GMAC auth, cipher must be NULL */
313 if (cpt_ctx->hash_type != GMAC_TYPE)
314 CPT_P_ENC_CTRL(fctx).enc_cipher = type;
316 memcpy(fctx->enc.encr_key, key, key_len);
319 *ctrl_flags = rte_cpu_to_be_64(*ctrl_flags);
322 cpt_ctx->enc_cipher = type;
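/*
 * The fill_sg_comp*() helpers below build the gather/scatter component
 * list: each sg_comp_t carries four {len, ptr} slots, so entry i lands in
 * component i >> 2, slot i % 4, with 16-bit lengths and 64-bit addresses
 * stored big-endian for the microcode.
 */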
327 static __rte_always_inline uint32_t
328 fill_sg_comp(sg_comp_t *list,
330 phys_addr_t dma_addr,
333 sg_comp_t *to = &list[i>>2];
335 to->u.s.len[i%4] = rte_cpu_to_be_16(size);
336 to->ptr[i%4] = rte_cpu_to_be_64(dma_addr);
341 static __rte_always_inline uint32_t
342 fill_sg_comp_from_buf(sg_comp_t *list,
346 sg_comp_t *to = &list[i>>2];
348 to->u.s.len[i%4] = rte_cpu_to_be_16(from->size);
349 to->ptr[i%4] = rte_cpu_to_be_64(from->dma_addr);
354 static __rte_always_inline uint32_t
355 fill_sg_comp_from_buf_min(sg_comp_t *list,
360 sg_comp_t *to = &list[i >> 2];
361 uint32_t size = *psize;
364 e_len = (size > from->size) ? from->size : size;
365 to->u.s.len[i % 4] = rte_cpu_to_be_16(e_len);
366 to->ptr[i % 4] = rte_cpu_to_be_64(from->dma_addr);
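/*
 * Illustrative example (not from the original source) of the extra_buf
 * mechanics implemented by fill_sg_comp_from_iov() below: with a single
 * 64B source buffer, a 16B extra_buf and extra_offset = 24, the emitted
 * entries are
 *	[24B @ buf], [16B @ extra_buf], [40B @ buf + 24]
 * i.e. the source run is split at extra_offset and the extra data
 * (typically the AAD) is spliced in between.
 */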
373 * This fills the SGIO list in the layout the microcode expects,
374 * from the IOV given by the user.
376 static __rte_always_inline uint32_t
377 fill_sg_comp_from_iov(sg_comp_t *list,
379 iov_ptr_t *from, uint32_t from_offset,
380 uint32_t *psize, buf_ptr_t *extra_buf,
381 uint32_t extra_offset)
384 uint32_t extra_len = extra_buf ? extra_buf->size : 0;
385 uint32_t size = *psize - extra_len;
389 for (j = 0; (j < from->buf_cnt) && size; j++) {
390 phys_addr_t e_dma_addr;
392 sg_comp_t *to = &list[i >> 2];
397 if (unlikely(from_offset)) {
398 if (from_offset >= bufs[j].size) {
399 from_offset -= bufs[j].size;
402 e_dma_addr = bufs[j].dma_addr + from_offset;
403 e_len = (size > (bufs[j].size - from_offset)) ?
404 (bufs[j].size - from_offset) : size;
407 e_dma_addr = bufs[j].dma_addr;
408 e_len = (size > bufs[j].size) ?
412 to->u.s.len[i % 4] = rte_cpu_to_be_16(e_len);
413 to->ptr[i % 4] = rte_cpu_to_be_64(e_dma_addr);
415 if (extra_len && (e_len >= extra_offset)) {
416 /* Break the data at given offset */
417 uint32_t next_len = e_len - extra_offset;
418 phys_addr_t next_dma = e_dma_addr + extra_offset;
423 e_len = extra_offset;
425 to->u.s.len[i % 4] = rte_cpu_to_be_16(e_len);
428 /* Insert extra data ptr */
433 rte_cpu_to_be_16(extra_buf->size);
435 rte_cpu_to_be_64(extra_buf->dma_addr);
437 /* size already decremented by extra len */
440 /* insert the rest of the data */
444 to->u.s.len[i % 4] = rte_cpu_to_be_16(next_len);
445 to->ptr[i % 4] = rte_cpu_to_be_64(next_dma);
454 extra_offset -= size;
462 static __rte_always_inline int
463 cpt_enc_hmac_prep(uint32_t flags,
466 fc_params_t *fc_params,
470 uint32_t iv_offset = 0;
471 int32_t inputlen, outputlen, enc_dlen, auth_dlen;
472 struct cpt_ctx *cpt_ctx;
473 uint32_t cipher_type, hash_type;
474 uint32_t mac_len, size;
476 struct cpt_request_info *req;
477 buf_ptr_t *meta_p, *aad_buf = NULL;
478 uint32_t encr_offset, auth_offset;
479 uint32_t encr_data_len, auth_data_len, aad_len = 0;
480 uint32_t passthrough_len = 0;
481 void *m_vaddr, *offset_vaddr;
482 uint64_t m_dma, offset_dma, ctx_dma;
483 vq_cmd_word0_t vq_cmd_w0;
484 vq_cmd_word3_t vq_cmd_w3;
488 opcode_info_t opcode;
490 meta_p = &fc_params->meta_buf;
491 m_vaddr = meta_p->vaddr;
492 m_dma = meta_p->dma_addr;
493 m_size = meta_p->size;
495 encr_offset = ENCR_OFFSET(d_offs);
496 auth_offset = AUTH_OFFSET(d_offs);
497 encr_data_len = ENCR_DLEN(d_lens);
498 auth_data_len = AUTH_DLEN(d_lens);
499 if (unlikely(flags & VALID_AAD_BUF)) {
501 * We don't support both AAD
502 * and auth data given separately
506 aad_len = fc_params->aad_buf.size;
507 aad_buf = &fc_params->aad_buf;
509 cpt_ctx = fc_params->ctx_buf.vaddr;
510 cipher_type = cpt_ctx->enc_cipher;
511 hash_type = cpt_ctx->hash_type;
512 mac_len = cpt_ctx->mac_len;
515 * Reserve initial space that follows the app data, so that the completion
516 * code and alternate completion code fall in the same cache line as the app data.
518 m_vaddr = (uint8_t *)m_vaddr + COMPLETION_CODE_SIZE;
519 m_dma += COMPLETION_CODE_SIZE;
520 size = (uint8_t *)RTE_PTR_ALIGN((uint8_t *)m_vaddr, 16) -
523 c_vaddr = (uint8_t *)m_vaddr + size;
524 c_dma = m_dma + size;
525 size += sizeof(cpt_res_s_t);
527 m_vaddr = (uint8_t *)m_vaddr + size;
531 /* Start the cpt request info struct at an 8-byte boundary */
532 size = (uint8_t *)RTE_PTR_ALIGN(m_vaddr, 8) -
535 req = (struct cpt_request_info *)((uint8_t *)m_vaddr + size);
537 size += sizeof(struct cpt_request_info);
538 m_vaddr = (uint8_t *)m_vaddr + size;
542 if (hash_type == GMAC_TYPE)
545 if (unlikely(!(flags & VALID_IV_BUF))) {
547 iv_offset = ENCR_IV_OFFSET(d_offs);
550 if (unlikely(flags & VALID_AAD_BUF)) {
552 * When AAD is given, the data above encr_offset is passed through.
553 * Since AAD is given as a separate pointer and not as an offset,
554 * this is a special case, as we need to fragment the input data
555 * into passthrough + encr_data and then insert the AAD in between.
557 if (hash_type != GMAC_TYPE) {
558 passthrough_len = encr_offset;
559 auth_offset = passthrough_len + iv_len;
560 encr_offset = passthrough_len + aad_len + iv_len;
561 auth_data_len = aad_len + encr_data_len;
563 passthrough_len = 16 + aad_len;
564 auth_offset = passthrough_len + iv_len;
565 auth_data_len = aad_len;
568 encr_offset += iv_len;
569 auth_offset += iv_len;
573 opcode.s.major = CPT_MAJOR_OP_FC;
576 auth_dlen = auth_offset + auth_data_len;
577 enc_dlen = encr_data_len + encr_offset;
578 if (unlikely(encr_data_len & 0xf)) {
579 if ((cipher_type == DES3_CBC) || (cipher_type == DES3_ECB))
580 enc_dlen = ROUNDUP8(encr_data_len) + encr_offset;
581 else if (likely((cipher_type == AES_CBC) ||
582 (cipher_type == AES_ECB)))
583 enc_dlen = ROUNDUP16(encr_data_len) + encr_offset;
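/* Block ciphers need the encrypt length rounded up to the cipher
 * block size: 8B for 3DES, 16B for AES. E.g. encr_data_len = 20
 * with AES-CBC gives enc_dlen = 32 + encr_offset.
 */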
586 if (unlikely(hash_type == GMAC_TYPE)) {
587 encr_offset = auth_dlen;
591 if (unlikely(auth_dlen > enc_dlen)) {
592 inputlen = auth_dlen;
593 outputlen = auth_dlen + mac_len;
596 outputlen = enc_dlen + mac_len;
601 vq_cmd_w0.s.param1 = rte_cpu_to_be_16(encr_data_len);
602 vq_cmd_w0.s.param2 = rte_cpu_to_be_16(auth_data_len);
604 * In 83XX, since we have the limitation that the
605 * IV & offset control word are not part of the instruction
606 * and need to be part of the data buffer, we check whether
607 * headroom is available and only then do the direct mode processing.
609 if (likely((flags & SINGLE_BUF_INPLACE) &&
610 (flags & SINGLE_BUF_HEADTAILROOM))) {
611 void *dm_vaddr = fc_params->bufs[0].vaddr;
612 uint64_t dm_dma_addr = fc_params->bufs[0].dma_addr;
614 * This flag indicates that 24 bytes of headroom and
615 * 8 bytes of tailroom are available, so we get to do
616 * DIRECT MODE within that limitation.
619 offset_vaddr = (uint8_t *)dm_vaddr - OFF_CTRL_LEN - iv_len;
620 offset_dma = dm_dma_addr - OFF_CTRL_LEN - iv_len;
623 req->ist.ei1 = offset_dma;
624 /* RPTR should just exclude offset control word */
625 req->ist.ei2 = dm_dma_addr - iv_len;
626 req->alternate_caddr = (uint64_t *)((uint8_t *)dm_vaddr
627 + outputlen - iv_len);
629 vq_cmd_w0.s.dlen = rte_cpu_to_be_16(inputlen + OFF_CTRL_LEN);
631 vq_cmd_w0.s.opcode = rte_cpu_to_be_16(opcode.flags);
633 if (likely(iv_len)) {
634 uint64_t *dest = (uint64_t *)((uint8_t *)offset_vaddr
636 uint64_t *src = fc_params->iv_buf;
641 *(uint64_t *)offset_vaddr =
642 rte_cpu_to_be_64(((uint64_t)encr_offset << 16) |
643 ((uint64_t)iv_offset << 8) |
644 ((uint64_t)auth_offset));
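/* Illustrative layout (values are arbitrary, not from the original
 * source): the control word is built as
 * (encr_offset << 16) | (iv_offset << 8) | auth_offset and stored
 * big-endian, so e.g. encr_offset = 16, iv_offset = 16, auth_offset = 0
 * gives 0x101000 in the low word, with the IV bytes following this
 * 8-byte word at the start of DPTR.
 */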
647 uint32_t i, g_size_bytes, s_size_bytes;
648 uint64_t dptr_dma, rptr_dma;
649 sg_comp_t *gather_comp;
650 sg_comp_t *scatter_comp;
653 /* This falls under strict SG mode */
654 offset_vaddr = m_vaddr;
656 size = OFF_CTRL_LEN + iv_len;
658 m_vaddr = (uint8_t *)m_vaddr + size;
662 opcode.s.major |= CPT_DMA_MODE;
664 vq_cmd_w0.s.opcode = rte_cpu_to_be_16(opcode.flags);
666 if (likely(iv_len)) {
667 uint64_t *dest = (uint64_t *)((uint8_t *)offset_vaddr
669 uint64_t *src = fc_params->iv_buf;
674 *(uint64_t *)offset_vaddr =
675 rte_cpu_to_be_64(((uint64_t)encr_offset << 16) |
676 ((uint64_t)iv_offset << 8) |
677 ((uint64_t)auth_offset));
679 /* DPTR has SG list */
683 ((uint16_t *)in_buffer)[0] = 0;
684 ((uint16_t *)in_buffer)[1] = 0;
686 /* TODO: Add error check if space will be sufficient */
687 gather_comp = (sg_comp_t *)((uint8_t *)m_vaddr + 8);
695 /* Offset control word that includes iv */
696 i = fill_sg_comp(gather_comp, i, offset_dma,
697 OFF_CTRL_LEN + iv_len);
700 size = inputlen - iv_len;
702 uint32_t aad_offset = aad_len ? passthrough_len : 0;
704 if (unlikely(flags & SINGLE_BUF_INPLACE)) {
705 i = fill_sg_comp_from_buf_min(gather_comp, i,
709 i = fill_sg_comp_from_iov(gather_comp, i,
712 aad_buf, aad_offset);
715 if (unlikely(size)) {
716 CPT_LOG_DP_ERR("Insufficient buffer space,"
717 " size %d needed", size);
718 return ERR_BAD_INPUT_ARG;
721 ((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
722 g_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
725 * Output Scatter list
729 (sg_comp_t *)((uint8_t *)gather_comp + g_size_bytes);
732 if (likely(iv_len)) {
733 i = fill_sg_comp(scatter_comp, i,
734 offset_dma + OFF_CTRL_LEN,
738 /* Output data, or output data + digest */
739 if (unlikely(flags & VALID_MAC_BUF)) {
740 size = outputlen - iv_len - mac_len;
742 uint32_t aad_offset =
743 aad_len ? passthrough_len : 0;
745 if (unlikely(flags & SINGLE_BUF_INPLACE)) {
746 i = fill_sg_comp_from_buf_min(
752 i = fill_sg_comp_from_iov(scatter_comp,
761 return ERR_BAD_INPUT_ARG;
765 i = fill_sg_comp_from_buf(scatter_comp, i,
766 &fc_params->mac_buf);
769 /* Output including mac */
770 size = outputlen - iv_len;
772 uint32_t aad_offset =
773 aad_len ? passthrough_len : 0;
775 if (unlikely(flags & SINGLE_BUF_INPLACE)) {
776 i = fill_sg_comp_from_buf_min(
782 i = fill_sg_comp_from_iov(scatter_comp,
790 if (unlikely(size)) {
791 CPT_LOG_DP_ERR("Insufficient buffer"
792 " space, size %d needed",
794 return ERR_BAD_INPUT_ARG;
798 ((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
799 s_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
801 size = g_size_bytes + s_size_bytes + SG_LIST_HDR_SIZE;
803 /* This is the DPTR len in case of SG mode */
804 vq_cmd_w0.s.dlen = rte_cpu_to_be_16(size);
806 m_vaddr = (uint8_t *)m_vaddr + size;
810 /* cpt alternate completion address saved earlier */
811 req->alternate_caddr = (uint64_t *)((uint8_t *)c_vaddr - 8);
812 *req->alternate_caddr = ~((uint64_t)COMPLETION_CODE_INIT);
813 rptr_dma = c_dma - 8;
815 req->ist.ei1 = dptr_dma;
816 req->ist.ei2 = rptr_dma;
819 /* First 16-bit swap then 64-bit swap */
820 /* TODO: HACK: Reverse the vq_cmd and cpt_req bit field definitions
821 * to eliminate all the swapping
823 vq_cmd_w0.u64 = rte_cpu_to_be_64(vq_cmd_w0.u64);
825 ctx_dma = fc_params->ctx_buf.dma_addr +
826 offsetof(struct cpt_ctx, fctx);
830 vq_cmd_w3.s.cptr = ctx_dma;
832 /* 16 byte aligned cpt res address */
833 req->completion_addr = (uint64_t *)((uint8_t *)c_vaddr);
834 *req->completion_addr = COMPLETION_CODE_INIT;
835 req->comp_baddr = c_dma;
837 /* Fill microcode part of instruction */
838 req->ist.ei0 = vq_cmd_w0.u64;
839 req->ist.ei3 = vq_cmd_w3.u64;
847 static __rte_always_inline void *__hot
848 cpt_fc_enc_hmac_prep(uint32_t flags, uint64_t d_offs, uint64_t d_lens,
849 fc_params_t *fc_params, void *op, int *ret_val)
851 struct cpt_ctx *ctx = fc_params->ctx_buf.vaddr;
853 void *prep_req = NULL;
856 fc_type = ctx->fc_type;
858 /* Common API for the rest of the ops */
859 if (likely(fc_type == FC_GEN)) {
860 ret = cpt_enc_hmac_prep(flags, d_offs, d_lens,
861 fc_params, op, &prep_req);
866 if (unlikely(!prep_req))
871 static __rte_always_inline int
872 cpt_fc_auth_set_key(void *ctx, auth_type_t type, uint8_t *key,
873 uint16_t key_len, uint16_t mac_len)
875 struct cpt_ctx *cpt_ctx = ctx;
876 mc_fc_context_t *fctx = &cpt_ctx->fctx;
877 uint64_t *ctrl_flags = NULL;
879 if ((type >= ZUC_EIA3) && (type <= KASUMI_F9_ECB)) {
884 /* No support for AEAD yet */
885 if (cpt_ctx->enc_cipher)
887 /* For ZUC/SNOW3G/Kasumi */
891 gen_key_snow3g(key, keyx);
892 memcpy(cpt_ctx->zs_ctx.ci_key, keyx, key_len);
893 cpt_ctx->fc_type = ZUC_SNOW3G;
894 cpt_ctx->zsk_flags = 0x1;
898 memcpy(cpt_ctx->zs_ctx.ci_key, key, key_len);
899 memcpy(cpt_ctx->zs_ctx.zuc_const, zuc_d, 32);
900 cpt_ctx->fc_type = ZUC_SNOW3G;
901 cpt_ctx->zsk_flags = 0x1;
904 /* Kasumi ECB mode */
906 memcpy(cpt_ctx->k_ctx.ci_key, key, key_len);
907 cpt_ctx->fc_type = KASUMI;
908 cpt_ctx->zsk_flags = 0x1;
911 memcpy(cpt_ctx->k_ctx.ci_key, key, key_len);
912 cpt_ctx->fc_type = KASUMI;
913 cpt_ctx->zsk_flags = 0x1;
918 cpt_ctx->mac_len = 4;
919 cpt_ctx->hash_type = type;
923 if (!(cpt_ctx->fc_type == FC_GEN && !type)) {
924 if (!cpt_ctx->fc_type || !cpt_ctx->enc_cipher)
925 cpt_ctx->fc_type = HASH_HMAC;
928 ctrl_flags = (uint64_t *)&fctx->enc.enc_ctrl.flags;
929 *ctrl_flags = rte_be_to_cpu_64(*ctrl_flags);
931 /* For GMAC auth, cipher must be NULL */
932 if (type == GMAC_TYPE)
933 CPT_P_ENC_CTRL(fctx).enc_cipher = 0;
935 CPT_P_ENC_CTRL(fctx).hash_type = cpt_ctx->hash_type = type;
936 CPT_P_ENC_CTRL(fctx).mac_len = cpt_ctx->mac_len = mac_len;
940 memset(cpt_ctx->auth_key, 0, sizeof(cpt_ctx->auth_key));
941 memcpy(cpt_ctx->auth_key, key, key_len);
942 cpt_ctx->auth_key_len = key_len;
943 memset(fctx->hmac.ipad, 0, sizeof(fctx->hmac.ipad));
944 memset(fctx->hmac.opad, 0, sizeof(fctx->hmac.opad));
945 memcpy(fctx->hmac.opad, key, key_len);
946 CPT_P_ENC_CTRL(fctx).auth_input_type = 1;
948 *ctrl_flags = rte_cpu_to_be_64(*ctrl_flags);
952 static __rte_always_inline int
953 fill_sess_aead(struct rte_crypto_sym_xform *xform,
954 struct cpt_sess_misc *sess)
956 struct rte_crypto_aead_xform *aead_form;
957 cipher_type_t enc_type = 0; /* NULL Cipher type */
958 auth_type_t auth_type = 0; /* NULL Auth type */
959 uint32_t cipher_key_len = 0;
960 uint8_t zsk_flag = 0, aes_gcm = 0;
961 aead_form = &xform->aead;
964 if (aead_form->op == RTE_CRYPTO_AEAD_OP_ENCRYPT &&
965 aead_form->algo == RTE_CRYPTO_AEAD_AES_GCM) {
966 sess->cpt_op |= CPT_OP_CIPHER_ENCRYPT;
967 sess->cpt_op |= CPT_OP_AUTH_GENERATE;
968 } else if (aead_form->op == RTE_CRYPTO_AEAD_OP_DECRYPT &&
969 aead_form->algo == RTE_CRYPTO_AEAD_AES_GCM) {
970 sess->cpt_op |= CPT_OP_CIPHER_DECRYPT;
971 sess->cpt_op |= CPT_OP_AUTH_VERIFY;
973 CPT_LOG_DP_ERR("Unknown cipher operation");
976 switch (aead_form->algo) {
977 case RTE_CRYPTO_AEAD_AES_GCM:
982 case RTE_CRYPTO_AEAD_AES_CCM:
983 CPT_LOG_DP_ERR("Crypto: Unsupported cipher algo %u",
987 CPT_LOG_DP_ERR("Crypto: Undefined cipher algo %u specified",
991 if (aead_form->key.length < cipher_key_len) {
992 CPT_LOG_DP_ERR("Invalid cipher params keylen %lu",
993 (unsigned long)aead_form->key.length);
996 sess->zsk_flag = zsk_flag;
997 sess->aes_gcm = aes_gcm;
998 sess->mac_len = aead_form->digest_length;
999 sess->iv_offset = aead_form->iv.offset;
1000 sess->iv_length = aead_form->iv.length;
1001 sess->aad_length = aead_form->aad_length;
1002 ctx = (void *)((uint8_t *)sess + sizeof(struct cpt_sess_misc));
1004 cpt_fc_ciph_set_key(ctx, enc_type, aead_form->key.data,
1005 aead_form->key.length, NULL);
1007 cpt_fc_auth_set_key(ctx, auth_type, NULL, 0, aead_form->digest_length);
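/*
 * Illustrative sketch (not from the original source) of an AEAD xform
 * that fill_sess_aead() accepts; field names are from the rte_cryptodev
 * API, values are example choices:
 *
 *	struct rte_crypto_sym_xform xform = {
 *		.type = RTE_CRYPTO_SYM_XFORM_AEAD,
 *		.aead = {
 *			.op = RTE_CRYPTO_AEAD_OP_ENCRYPT,
 *			.algo = RTE_CRYPTO_AEAD_AES_GCM,
 *			.key = { .data = key_data, .length = 16 },
 *			.iv = { .offset = iv_offset, .length = 12 },
 *			.digest_length = 16,
 *			.aad_length = 8,
 *		},
 *	};
 */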
1012 static __rte_always_inline int
1013 fill_sess_cipher(struct rte_crypto_sym_xform *xform,
1014 struct cpt_sess_misc *sess)
1016 struct rte_crypto_cipher_xform *c_form;
1017 cipher_type_t enc_type = 0; /* NULL Cipher type */
1018 uint32_t cipher_key_len = 0;
1019 uint8_t zsk_flag = 0, aes_gcm = 0, aes_ctr = 0, is_null = 0;
1021 if (xform->type != RTE_CRYPTO_SYM_XFORM_CIPHER)
1024 c_form = &xform->cipher;
1026 if (c_form->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
1027 sess->cpt_op |= CPT_OP_CIPHER_ENCRYPT;
1028 else if (c_form->op == RTE_CRYPTO_CIPHER_OP_DECRYPT)
1029 sess->cpt_op |= CPT_OP_CIPHER_DECRYPT;
1031 CPT_LOG_DP_ERR("Unknown cipher operation");
1035 switch (c_form->algo) {
1036 case RTE_CRYPTO_CIPHER_AES_CBC:
1038 cipher_key_len = 16;
1040 case RTE_CRYPTO_CIPHER_3DES_CBC:
1041 enc_type = DES3_CBC;
1042 cipher_key_len = 24;
1044 case RTE_CRYPTO_CIPHER_DES_CBC:
1045 /* DES is implemented using 3DES in hardware */
1046 enc_type = DES3_CBC;
1049 case RTE_CRYPTO_CIPHER_AES_CTR:
1051 cipher_key_len = 16;
1054 case RTE_CRYPTO_CIPHER_NULL:
1058 case RTE_CRYPTO_CIPHER_KASUMI_F8:
1059 enc_type = KASUMI_F8_ECB;
1060 cipher_key_len = 16;
1063 case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
1064 enc_type = SNOW3G_UEA2;
1065 cipher_key_len = 16;
1068 case RTE_CRYPTO_CIPHER_ZUC_EEA3:
1069 enc_type = ZUC_EEA3;
1070 cipher_key_len = 16;
1073 case RTE_CRYPTO_CIPHER_AES_XTS:
1075 cipher_key_len = 16;
1077 case RTE_CRYPTO_CIPHER_3DES_ECB:
1078 enc_type = DES3_ECB;
1079 cipher_key_len = 24;
1081 case RTE_CRYPTO_CIPHER_AES_ECB:
1083 cipher_key_len = 16;
1085 case RTE_CRYPTO_CIPHER_3DES_CTR:
1086 case RTE_CRYPTO_CIPHER_AES_F8:
1087 case RTE_CRYPTO_CIPHER_ARC4:
1088 CPT_LOG_DP_ERR("Crypto: Unsupported cipher algo %u",
1092 CPT_LOG_DP_ERR("Crypto: Undefined cipher algo %u specified",
1097 if (c_form->key.length < cipher_key_len) {
1098 CPT_LOG_DP_ERR("Invalid cipher params keylen %lu",
1099 (unsigned long) c_form->key.length);
1103 sess->zsk_flag = zsk_flag;
1104 sess->aes_gcm = aes_gcm;
1105 sess->aes_ctr = aes_ctr;
1106 sess->iv_offset = c_form->iv.offset;
1107 sess->iv_length = c_form->iv.length;
1108 sess->is_null = is_null;
1110 cpt_fc_ciph_set_key(SESS_PRIV(sess), enc_type, c_form->key.data,
1111 c_form->key.length, NULL);
1116 static __rte_always_inline int
1117 fill_sess_auth(struct rte_crypto_sym_xform *xform,
1118 struct cpt_sess_misc *sess)
1120 struct rte_crypto_auth_xform *a_form;
1121 auth_type_t auth_type = 0; /* NULL Auth type */
1122 uint8_t zsk_flag = 0, aes_gcm = 0, is_null = 0;
1124 if (xform->type != RTE_CRYPTO_SYM_XFORM_AUTH)
1127 a_form = &xform->auth;
1129 if (a_form->op == RTE_CRYPTO_AUTH_OP_VERIFY)
1130 sess->cpt_op |= CPT_OP_AUTH_VERIFY;
1131 else if (a_form->op == RTE_CRYPTO_AUTH_OP_GENERATE)
1132 sess->cpt_op |= CPT_OP_AUTH_GENERATE;
1134 CPT_LOG_DP_ERR("Unknown auth operation");
1138 if (a_form->key.length > 64) {
1139 CPT_LOG_DP_ERR("Auth key length is too big");
1143 switch (a_form->algo) {
1144 case RTE_CRYPTO_AUTH_SHA1_HMAC:
1146 case RTE_CRYPTO_AUTH_SHA1:
1147 auth_type = SHA1_TYPE;
1149 case RTE_CRYPTO_AUTH_SHA256_HMAC:
1150 case RTE_CRYPTO_AUTH_SHA256:
1151 auth_type = SHA2_SHA256;
1153 case RTE_CRYPTO_AUTH_SHA512_HMAC:
1154 case RTE_CRYPTO_AUTH_SHA512:
1155 auth_type = SHA2_SHA512;
1157 case RTE_CRYPTO_AUTH_AES_GMAC:
1158 auth_type = GMAC_TYPE;
1161 case RTE_CRYPTO_AUTH_SHA224_HMAC:
1162 case RTE_CRYPTO_AUTH_SHA224:
1163 auth_type = SHA2_SHA224;
1165 case RTE_CRYPTO_AUTH_SHA384_HMAC:
1166 case RTE_CRYPTO_AUTH_SHA384:
1167 auth_type = SHA2_SHA384;
1169 case RTE_CRYPTO_AUTH_MD5_HMAC:
1170 case RTE_CRYPTO_AUTH_MD5:
1171 auth_type = MD5_TYPE;
1173 case RTE_CRYPTO_AUTH_KASUMI_F9:
1174 auth_type = KASUMI_F9_ECB;
1176 * Indicate that direction needs to be taken out
1181 case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
1182 auth_type = SNOW3G_UIA2;
1185 case RTE_CRYPTO_AUTH_ZUC_EIA3:
1186 auth_type = ZUC_EIA3;
1189 case RTE_CRYPTO_AUTH_NULL:
1193 case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
1194 case RTE_CRYPTO_AUTH_AES_CMAC:
1195 case RTE_CRYPTO_AUTH_AES_CBC_MAC:
1196 CPT_LOG_DP_ERR("Crypto: Unsupported hash algo %u",
1200 CPT_LOG_DP_ERR("Crypto: Undefined Hash algo %u specified",
1205 sess->zsk_flag = zsk_flag;
1206 sess->aes_gcm = aes_gcm;
1207 sess->mac_len = a_form->digest_length;
1208 sess->is_null = is_null;
1210 sess->auth_iv_offset = a_form->iv.offset;
1211 sess->auth_iv_length = a_form->iv.length;
1213 cpt_fc_auth_set_key(SESS_PRIV(sess), auth_type, a_form->key.data,
1214 a_form->key.length, a_form->digest_length);
1222 static __rte_always_inline int
1223 fill_sess_gmac(struct rte_crypto_sym_xform *xform,
1224 struct cpt_sess_misc *sess)
1226 struct rte_crypto_auth_xform *a_form;
1227 cipher_type_t enc_type = 0; /* NULL Cipher type */
1228 auth_type_t auth_type = 0; /* NULL Auth type */
1229 uint8_t zsk_flag = 0, aes_gcm = 0;
1232 if (xform->type != RTE_CRYPTO_SYM_XFORM_AUTH)
1235 a_form = &xform->auth;
1237 if (a_form->op == RTE_CRYPTO_AUTH_OP_GENERATE)
1238 sess->cpt_op |= CPT_OP_ENCODE;
1239 else if (a_form->op == RTE_CRYPTO_AUTH_OP_VERIFY)
1240 sess->cpt_op |= CPT_OP_DECODE;
1242 CPT_LOG_DP_ERR("Unknown auth operation");
1246 switch (a_form->algo) {
1247 case RTE_CRYPTO_AUTH_AES_GMAC:
1249 auth_type = GMAC_TYPE;
1252 CPT_LOG_DP_ERR("Crypto: Undefined cipher algo %u specified",
1257 sess->zsk_flag = zsk_flag;
1258 sess->aes_gcm = aes_gcm;
1260 sess->iv_offset = a_form->iv.offset;
1261 sess->iv_length = a_form->iv.length;
1262 sess->mac_len = a_form->digest_length;
1263 ctx = (void *)((uint8_t *)sess + sizeof(struct cpt_sess_misc));
1265 cpt_fc_ciph_set_key(ctx, enc_type, a_form->key.data,
1266 a_form->key.length, NULL);
1267 cpt_fc_auth_set_key(ctx, auth_type, NULL, 0, a_form->digest_length);
1272 static __rte_always_inline void *
1273 alloc_op_meta(struct rte_mbuf *m_src,
1276 struct rte_mempool *cpt_meta_pool)
1280 #ifndef CPT_ALWAYS_USE_SEPARATE_BUF
1281 if (likely(m_src && (m_src->nb_segs == 1))) {
1285 /* Check if tailroom is sufficient to hold meta data */
1286 tailroom = rte_pktmbuf_tailroom(m_src);
1287 if (likely(tailroom > len + 8)) {
1288 mdata = (uint8_t *)m_src->buf_addr + m_src->buf_len;
1289 mphys = m_src->buf_physaddr + m_src->buf_len;
1293 buf->dma_addr = mphys;
1295 /* Indicate that this is an mbuf-allocated mdata */
1296 mdata = (uint8_t *)((uint64_t)mdata | 1ull);
1301 RTE_SET_USED(m_src);
1304 if (unlikely(rte_mempool_get(cpt_meta_pool, (void **)&mdata) < 0))
1308 buf->dma_addr = rte_mempool_virt2iova(mdata);
1315 * free_op_meta - free meta buffer back to the mempool.
1316 * @param mdata: pointer to the meta buffer.
1317 * @param cpt_meta_pool: pointer to the meta buffer mempool.
1319 static __rte_always_inline void
1320 free_op_meta(void *mdata, struct rte_mempool *cpt_meta_pool)
1322 bool nofree = ((uintptr_t)mdata & 1ull);
1326 rte_mempool_put(cpt_meta_pool, mdata);
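/*
 * The two helpers above share a pointer-tagging scheme: alloc_op_meta()
 * sets bit 0 of the returned pointer when the meta data lives in the mbuf
 * tailroom, and free_op_meta() checks that bit so such buffers are not
 * returned to the mempool. Meta pointers are expected to be at least
 * 2-byte aligned, leaving bit 0 free for the tag.
 */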
1329 static __rte_always_inline uint32_t
1330 prepare_iov_from_pkt(struct rte_mbuf *pkt,
1331 iov_ptr_t *iovec, uint32_t start_offset)
1334 void *seg_data = NULL;
1335 phys_addr_t seg_phys;
1336 int32_t seg_size = 0;
1343 if (!start_offset) {
1344 seg_data = rte_pktmbuf_mtod(pkt, void *);
1345 seg_phys = rte_pktmbuf_mtophys(pkt);
1346 seg_size = pkt->data_len;
1348 while (start_offset >= pkt->data_len) {
1349 start_offset -= pkt->data_len;
1353 seg_data = rte_pktmbuf_mtod_offset(pkt, void *, start_offset);
1354 seg_phys = rte_pktmbuf_mtophys_offset(pkt, start_offset);
1355 seg_size = pkt->data_len - start_offset;
1361 iovec->bufs[index].vaddr = seg_data;
1362 iovec->bufs[index].dma_addr = seg_phys;
1363 iovec->bufs[index].size = seg_size;
1367 while (unlikely(pkt != NULL)) {
1368 seg_data = rte_pktmbuf_mtod(pkt, void *);
1369 seg_phys = rte_pktmbuf_mtophys(pkt);
1370 seg_size = pkt->data_len;
1374 iovec->bufs[index].vaddr = seg_data;
1375 iovec->bufs[index].dma_addr = seg_phys;
1376 iovec->bufs[index].size = seg_size;
1383 iovec->buf_cnt = index;
1387 static __rte_always_inline uint32_t
1388 prepare_iov_from_pkt_inplace(struct rte_mbuf *pkt,
1393 void *seg_data = NULL;
1394 phys_addr_t seg_phys;
1395 uint32_t seg_size = 0;
1398 seg_data = rte_pktmbuf_mtod(pkt, void *);
1399 seg_phys = rte_pktmbuf_mtophys(pkt);
1400 seg_size = pkt->data_len;
1403 if (likely(!pkt->next)) {
1404 uint32_t headroom, tailroom;
1406 *flags |= SINGLE_BUF_INPLACE;
1407 headroom = rte_pktmbuf_headroom(pkt);
1408 tailroom = rte_pktmbuf_tailroom(pkt);
1409 if (likely((headroom >= 24) &&
1411 /* On 83XX this is a prerequisite for direct mode */
1412 *flags |= SINGLE_BUF_HEADTAILROOM;
1414 param->bufs[0].vaddr = seg_data;
1415 param->bufs[0].dma_addr = seg_phys;
1416 param->bufs[0].size = seg_size;
1419 iovec = param->src_iov;
1420 iovec->bufs[index].vaddr = seg_data;
1421 iovec->bufs[index].dma_addr = seg_phys;
1422 iovec->bufs[index].size = seg_size;
1426 while (unlikely(pkt != NULL)) {
1427 seg_data = rte_pktmbuf_mtod(pkt, void *);
1428 seg_phys = rte_pktmbuf_mtophys(pkt);
1429 seg_size = pkt->data_len;
1434 iovec->bufs[index].vaddr = seg_data;
1435 iovec->bufs[index].dma_addr = seg_phys;
1436 iovec->bufs[index].size = seg_size;
1443 iovec->buf_cnt = index;
1447 static __rte_always_inline void *
1448 fill_fc_params(struct rte_crypto_op *cop,
1449 struct cpt_sess_misc *sess_misc,
1454 struct rte_crypto_sym_op *sym_op = cop->sym;
1457 uint32_t mc_hash_off;
1459 uint64_t d_offs, d_lens;
1460 void *prep_req = NULL;
1461 struct rte_mbuf *m_src, *m_dst;
1462 uint8_t cpt_op = sess_misc->cpt_op;
1463 uint8_t zsk_flag = sess_misc->zsk_flag;
1464 uint8_t aes_gcm = sess_misc->aes_gcm;
1465 uint16_t mac_len = sess_misc->mac_len;
1466 #ifdef CPT_ALWAYS_USE_SG_MODE
1467 uint8_t inplace = 0;
1469 uint8_t inplace = 1;
1471 fc_params_t fc_params;
1472 char src[SRC_IOV_SIZE];
1473 char dst[SRC_IOV_SIZE];
1475 struct cptvf_meta_info *cpt_m_info =
1476 (struct cptvf_meta_info *)(*mdata_ptr);
1478 if (likely(sess_misc->iv_length)) {
1479 flags |= VALID_IV_BUF;
1480 fc_params.iv_buf = rte_crypto_op_ctod_offset(cop,
1481 uint8_t *, sess_misc->iv_offset);
1482 if (sess_misc->aes_ctr &&
1483 unlikely(sess_misc->iv_length != 16)) {
1484 memcpy((uint8_t *)iv_buf,
1485 rte_crypto_op_ctod_offset(cop,
1486 uint8_t *, sess_misc->iv_offset), 12);
1487 iv_buf[3] = rte_cpu_to_be_32(0x1);
1488 fc_params.iv_buf = iv_buf;
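/* The branch above handles AES-CTR with a non-16B IV: what is
 * presumably a 12B nonce is copied into iv_buf and the final 32-bit
 * word is set to big-endian 1, forming the initial 16B counter block
 * (nonce || counter).
 */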
1493 fc_params.auth_iv_buf = rte_crypto_op_ctod_offset(cop,
1495 sess_misc->auth_iv_offset);
1496 if (zsk_flag == K_F9) {
1497 CPT_LOG_DP_ERR("Should not reach here for "
1500 if (zsk_flag != ZS_EA)
1503 m_src = sym_op->m_src;
1504 m_dst = sym_op->m_dst;
1511 d_offs = sym_op->aead.data.offset;
1512 d_lens = sym_op->aead.data.length;
1513 mc_hash_off = sym_op->aead.data.offset +
1514 sym_op->aead.data.length;
1516 aad_data = sym_op->aead.aad.data;
1517 aad_len = sess_misc->aad_length;
1518 if (likely((aad_data + aad_len) ==
1519 rte_pktmbuf_mtod_offset(m_src,
1521 sym_op->aead.data.offset))) {
1522 d_offs = (d_offs - aad_len) | (d_offs << 16);
1523 d_lens = (d_lens + aad_len) | (d_lens << 32);
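/* d_offs packs the auth offset in its low 16 bits and the cipher
 * offset in the next 16; d_lens likewise holds the auth length in
 * the low 32 bits and the cipher length in the high 32. The fold
 * above widens the auth region to start aad_len bytes earlier, so
 * the contiguous AAD is covered without a separate AAD pointer.
 */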
1525 fc_params.aad_buf.vaddr = sym_op->aead.aad.data;
1526 fc_params.aad_buf.dma_addr = sym_op->aead.aad.phys_addr;
1527 fc_params.aad_buf.size = aad_len;
1528 flags |= VALID_AAD_BUF;
1530 d_offs = d_offs << 16;
1531 d_lens = d_lens << 32;
1534 salt = fc_params.iv_buf;
1535 if (unlikely(*(uint32_t *)salt != sess_misc->salt)) {
1536 cpt_fc_salt_update(SESS_PRIV(sess_misc), salt);
1537 sess_misc->salt = *(uint32_t *)salt;
1539 fc_params.iv_buf = salt + 4;
1540 if (likely(mac_len)) {
1541 struct rte_mbuf *m = (cpt_op & CPT_OP_ENCODE) ? m_dst :
1547 /* hmac immediately following data is best case */
1548 if (unlikely(rte_pktmbuf_mtod(m, uint8_t *) +
1550 (uint8_t *)sym_op->aead.digest.data)) {
1551 flags |= VALID_MAC_BUF;
1552 fc_params.mac_buf.size = sess_misc->mac_len;
1553 fc_params.mac_buf.vaddr =
1554 sym_op->aead.digest.data;
1555 fc_params.mac_buf.dma_addr =
1556 sym_op->aead.digest.phys_addr;
1561 d_offs = sym_op->cipher.data.offset;
1562 d_lens = sym_op->cipher.data.length;
1563 mc_hash_off = sym_op->cipher.data.offset +
1564 sym_op->cipher.data.length;
1565 d_offs = (d_offs << 16) | sym_op->auth.data.offset;
1566 d_lens = (d_lens << 32) | sym_op->auth.data.length;
1568 if (mc_hash_off < (sym_op->auth.data.offset +
1569 sym_op->auth.data.length)){
1570 mc_hash_off = (sym_op->auth.data.offset +
1571 sym_op->auth.data.length);
1573 /* For GMAC, salt should be updated as in GCM */
1574 if (unlikely(sess_misc->is_gmac)) {
1576 salt = fc_params.iv_buf;
1577 if (unlikely(*(uint32_t *)salt != sess_misc->salt)) {
1578 cpt_fc_salt_update(SESS_PRIV(sess_misc), salt);
1579 sess_misc->salt = *(uint32_t *)salt;
1581 fc_params.iv_buf = salt + 4;
1583 if (likely(mac_len)) {
1586 m = (cpt_op & CPT_OP_ENCODE) ? m_dst : m_src;
1590 /* hmac immediately following data is best case */
1591 if (unlikely(rte_pktmbuf_mtod(m, uint8_t *) +
1593 (uint8_t *)sym_op->auth.digest.data)) {
1594 flags |= VALID_MAC_BUF;
1595 fc_params.mac_buf.size =
1597 fc_params.mac_buf.vaddr =
1598 sym_op->auth.digest.data;
1599 fc_params.mac_buf.dma_addr =
1600 sym_op->auth.digest.phys_addr;
1605 fc_params.ctx_buf.vaddr = SESS_PRIV(sess_misc);
1606 fc_params.ctx_buf.dma_addr = sess_misc->ctx_dma_addr;
1608 if (unlikely(sess_misc->is_null || sess_misc->cpt_op == CPT_OP_DECODE))
1611 if (likely(!m_dst && inplace)) {
1612 /* Case of single buffer without AAD buf or
1613 * separate mac buf in place and
1616 fc_params.dst_iov = fc_params.src_iov = (void *)src;
1618 if (unlikely(prepare_iov_from_pkt_inplace(m_src,
1621 CPT_LOG_DP_ERR("Prepare inplace src iov failed");
1627 /* Out of place processing */
1628 fc_params.src_iov = (void *)src;
1629 fc_params.dst_iov = (void *)dst;
1631 /* Store SG I/O in the API for reuse */
1632 if (prepare_iov_from_pkt(m_src, fc_params.src_iov, 0)) {
1633 CPT_LOG_DP_ERR("Prepare src iov failed");
1638 if (unlikely(m_dst != NULL)) {
1641 /* Try to make as much room as src has */
1642 m_dst = sym_op->m_dst;
1643 pkt_len = rte_pktmbuf_pkt_len(m_dst);
1645 if (unlikely(pkt_len < rte_pktmbuf_pkt_len(m_src))) {
1646 pkt_len = rte_pktmbuf_pkt_len(m_src) - pkt_len;
1647 if (!rte_pktmbuf_append(m_dst, pkt_len)) {
1648 CPT_LOG_DP_ERR("Not enough space in "
1656 if (prepare_iov_from_pkt(m_dst, fc_params.dst_iov, 0)) {
1657 CPT_LOG_DP_ERR("Prepare dst iov failed for "
1662 fc_params.dst_iov = (void *)src;
1666 if (likely(flags & SINGLE_BUF_HEADTAILROOM))
1667 mdata = alloc_op_meta(m_src,
1668 &fc_params.meta_buf,
1669 cpt_m_info->cptvf_op_sb_mlen,
1670 cpt_m_info->cptvf_meta_pool);
1672 mdata = alloc_op_meta(NULL,
1673 &fc_params.meta_buf,
1674 cpt_m_info->cptvf_op_mlen,
1675 cpt_m_info->cptvf_meta_pool);
1677 if (unlikely(mdata == NULL)) {
1678 CPT_LOG_DP_ERR("Error allocating meta buffer for request");
1682 op = (uintptr_t *)((uintptr_t)mdata & (uintptr_t)~1ull);
1683 op[0] = (uintptr_t)mdata;
1684 op[1] = (uintptr_t)cop;
1685 op[2] = op[3] = 0; /* Used to indicate auth verify */
1686 space += 4 * sizeof(uint64_t);
1688 fc_params.meta_buf.vaddr = (uint8_t *)op + space;
1689 fc_params.meta_buf.dma_addr += space;
1690 fc_params.meta_buf.size -= space;
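/* The first 32B of the meta buffer hold the request bookkeeping laid
 * out above: op[0] = tagged mdata pointer, op[1] = crypto op, and
 * op[2]/op[3] = auth-verify scratch. The remaining area, with
 * vaddr/dma_addr/size adjusted by `space`, is what the prep routine
 * below carves its buffers from.
 */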
1692 /* Finally prepare the instruction */
1693 if (cpt_op & CPT_OP_ENCODE)
1694 prep_req = cpt_fc_enc_hmac_prep(flags, d_offs, d_lens,
1695 &fc_params, op, op_ret);
1697 if (unlikely(!prep_req))
1698 free_op_meta(mdata, cpt_m_info->cptvf_meta_pool);
1703 #endif /* _CPT_UCODE_H_ */